| @@ -1,1467 +1,1467 @@ | | | @@ -1,1467 +1,1467 @@ |
1 | /* $NetBSD: cryptosoft.c,v 1.53 2019/07/11 23:27:24 christos Exp $ */ | | 1 | /* $NetBSD: cryptosoft.c,v 1.54 2019/10/12 00:49:30 christos Exp $ */ |
2 | /* $FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.2.2.1 2002/11/21 23:34:23 sam Exp $ */ | | 2 | /* $FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.2.2.1 2002/11/21 23:34:23 sam Exp $ */ |
3 | /* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */ | | 3 | /* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */ |
4 | | | 4 | |
5 | /* | | 5 | /* |
6 | * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) | | 6 | * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) |
7 | * | | 7 | * |
8 | * This code was written by Angelos D. Keromytis in Athens, Greece, in | | 8 | * This code was written by Angelos D. Keromytis in Athens, Greece, in |
9 | * February 2000. Network Security Technologies Inc. (NSTI) kindly | | 9 | * February 2000. Network Security Technologies Inc. (NSTI) kindly |
10 | * supported the development of this code. | | 10 | * supported the development of this code. |
11 | * | | 11 | * |
12 | * Copyright (c) 2000, 2001 Angelos D. Keromytis | | 12 | * Copyright (c) 2000, 2001 Angelos D. Keromytis |
13 | * | | 13 | * |
14 | * Permission to use, copy, and modify this software with or without fee | | 14 | * Permission to use, copy, and modify this software with or without fee |
15 | * is hereby granted, provided that this entire notice is included in | | 15 | * is hereby granted, provided that this entire notice is included in |
16 | * all source code copies of any software which is or includes a copy or | | 16 | * all source code copies of any software which is or includes a copy or |
17 | * modification of this software. | | 17 | * modification of this software. |
18 | * | | 18 | * |
19 | * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR | | 19 | * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR |
20 | * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY | | 20 | * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY |
21 | * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE | | 21 | * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE |
22 | * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR | | 22 | * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR |
23 | * PURPOSE. | | 23 | * PURPOSE. |
24 | */ | | 24 | */ |
25 | | | 25 | |
26 | #include <sys/cdefs.h> | | 26 | #include <sys/cdefs.h> |
27 | __KERNEL_RCSID(0, "$NetBSD: cryptosoft.c,v 1.53 2019/07/11 23:27:24 christos Exp $"); | | 27 | __KERNEL_RCSID(0, "$NetBSD: cryptosoft.c,v 1.54 2019/10/12 00:49:30 christos Exp $"); |
28 | | | 28 | |
29 | #include <sys/param.h> | | 29 | #include <sys/param.h> |
30 | #include <sys/systm.h> | | 30 | #include <sys/systm.h> |
31 | #include <sys/malloc.h> | | 31 | #include <sys/malloc.h> |
32 | #include <sys/mbuf.h> | | 32 | #include <sys/mbuf.h> |
33 | #include <sys/sysctl.h> | | 33 | #include <sys/sysctl.h> |
34 | #include <sys/errno.h> | | 34 | #include <sys/errno.h> |
35 | #include <sys/cprng.h> | | 35 | #include <sys/cprng.h> |
36 | #include <sys/module.h> | | 36 | #include <sys/module.h> |
37 | #include <sys/device.h> | | 37 | #include <sys/device.h> |
38 | | | 38 | |
39 | #ifdef _KERNEL_OPT | | 39 | #ifdef _KERNEL_OPT |
40 | #include "opt_ocf.h" | | 40 | #include "opt_ocf.h" |
41 | #endif | | 41 | #endif |
42 | | | 42 | |
43 | #include <opencrypto/cryptodev.h> | | 43 | #include <opencrypto/cryptodev.h> |
44 | #include <opencrypto/cryptosoft.h> | | 44 | #include <opencrypto/cryptosoft.h> |
45 | #include <opencrypto/xform.h> | | 45 | #include <opencrypto/xform.h> |
46 | | | 46 | |
47 | #include <opencrypto/cryptosoft_xform.c> | | 47 | #include <opencrypto/cryptosoft_xform.c> |
48 | | | 48 | |
49 | #include "ioconf.h" | | 49 | #include "ioconf.h" |
50 | | | 50 | |
51 | union authctx { | | 51 | union authctx { |
52 | MD5_CTX md5ctx; | | 52 | MD5_CTX md5ctx; |
53 | SHA1_CTX sha1ctx; | | 53 | SHA1_CTX sha1ctx; |
54 | RMD160_CTX rmd160ctx; | | 54 | RMD160_CTX rmd160ctx; |
55 | SHA256_CTX sha256ctx; | | 55 | SHA256_CTX sha256ctx; |
56 | SHA384_CTX sha384ctx; | | 56 | SHA384_CTX sha384ctx; |
57 | SHA512_CTX sha512ctx; | | 57 | SHA512_CTX sha512ctx; |
58 | aesxcbc_ctx aesxcbcctx; | | 58 | aesxcbc_ctx aesxcbcctx; |
59 | AES_GMAC_CTX aesgmacctx; | | 59 | AES_GMAC_CTX aesgmacctx; |
60 | }; | | 60 | }; |
61 | | | 61 | |
62 | struct swcr_data **swcr_sessions = NULL; | | 62 | struct swcr_data **swcr_sessions = NULL; |
63 | u_int32_t swcr_sesnum = 0; | | 63 | u_int32_t swcr_sesnum = 0; |
64 | int32_t swcr_id = -1; | | 64 | int32_t swcr_id = -1; |
65 | | | 65 | |
66 | #define COPYBACK(x, a, b, c, d) \ | | 66 | #define COPYBACK(x, a, b, c, d) \ |
67 | (x) == CRYPTO_BUF_MBUF ? m_copyback((struct mbuf *)a,b,c,d) \ | | 67 | (x) == CRYPTO_BUF_MBUF ? m_copyback((struct mbuf *)a,b,c,d) \ |
68 | : cuio_copyback((struct uio *)a,b,c,d) | | 68 | : cuio_copyback((struct uio *)a,b,c,d) |
69 | #define COPYDATA(x, a, b, c, d) \ | | 69 | #define COPYDATA(x, a, b, c, d) \ |
70 | (x) == CRYPTO_BUF_MBUF ? m_copydata((struct mbuf *)a,b,c,d) \ | | 70 | (x) == CRYPTO_BUF_MBUF ? m_copydata((struct mbuf *)a,b,c,d) \ |
71 | : cuio_copydata((struct uio *)a,b,c,d) | | 71 | : cuio_copydata((struct uio *)a,b,c,d) |
72 | | | 72 | |
73 | static int swcr_encdec(struct cryptodesc *, const struct swcr_data *, void *, int); | | 73 | static int swcr_encdec(struct cryptodesc *, const struct swcr_data *, void *, int); |
74 | static int swcr_compdec(struct cryptodesc *, const struct swcr_data *, void *, int, int *); | | 74 | static int swcr_compdec(struct cryptodesc *, const struct swcr_data *, void *, int, int *); |
75 | static int swcr_combined(struct cryptop *, int); | | 75 | static int swcr_combined(struct cryptop *, int); |
76 | static int swcr_process(void *, struct cryptop *, int); | | 76 | static int swcr_process(void *, struct cryptop *, int); |
77 | static int swcr_newsession(void *, u_int32_t *, struct cryptoini *); | | 77 | static int swcr_newsession(void *, u_int32_t *, struct cryptoini *); |
78 | static int swcr_freesession(void *, u_int64_t); | | 78 | static int swcr_freesession(void *, u_int64_t); |
79 | | | 79 | |
80 | static int swcryptoattach_internal(void); | | 80 | static int swcryptoattach_internal(void); |
81 | | | 81 | |
82 | /* | | 82 | /* |
83 | * Apply a symmetric encryption/decryption algorithm. | | 83 | * Apply a symmetric encryption/decryption algorithm. |
84 | */ | | 84 | */ |
85 | static int | | 85 | static int |
86 | swcr_encdec(struct cryptodesc *crd, const struct swcr_data *sw, void *bufv, | | 86 | swcr_encdec(struct cryptodesc *crd, const struct swcr_data *sw, void *bufv, |
87 | int outtype) | | 87 | int outtype) |
88 | { | | 88 | { |
89 | char *buf = bufv; | | 89 | char *buf = bufv; |
90 | unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat; | | 90 | unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat; |
91 | unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN]; | | 91 | unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN]; |
92 | const struct swcr_enc_xform *exf; | | 92 | const struct swcr_enc_xform *exf; |
93 | int i, k, j, blks, ivlen; | | 93 | int i, k, j, blks, ivlen; |
94 | int count, ind; | | 94 | int count, ind; |
95 | | | 95 | |
96 | exf = sw->sw_exf; | | 96 | exf = sw->sw_exf; |
97 | blks = exf->enc_xform->blocksize; | | 97 | blks = exf->enc_xform->blocksize; |
98 | ivlen = exf->enc_xform->ivsize; | | 98 | ivlen = exf->enc_xform->ivsize; |
99 | KASSERT(exf->reinit ? ivlen <= blks : ivlen == blks); | | 99 | KASSERT(exf->reinit ? ivlen <= blks : ivlen == blks); |
100 | | | 100 | |
101 | /* Check for non-padded data */ | | 101 | /* Check for non-padded data */ |
102 | if (crd->crd_len % blks) | | 102 | if (crd->crd_len % blks) |
103 | return EINVAL; | | 103 | return EINVAL; |
104 | | | 104 | |
105 | /* Initialize the IV */ | | 105 | /* Initialize the IV */ |
106 | if (crd->crd_flags & CRD_F_ENCRYPT) { | | 106 | if (crd->crd_flags & CRD_F_ENCRYPT) { |
107 | /* IV explicitly provided ? */ | | 107 | /* IV explicitly provided ? */ |
108 | if (crd->crd_flags & CRD_F_IV_EXPLICIT) { | | 108 | if (crd->crd_flags & CRD_F_IV_EXPLICIT) { |
109 | memcpy(iv, crd->crd_iv, ivlen); | | 109 | memcpy(iv, crd->crd_iv, ivlen); |
110 | if (exf->reinit) | | 110 | if (exf->reinit) |
111 | exf->reinit(sw->sw_kschedule, iv, 0); | | 111 | exf->reinit(sw->sw_kschedule, iv, 0); |
112 | } else if (exf->reinit) { | | 112 | } else if (exf->reinit) { |
113 | exf->reinit(sw->sw_kschedule, 0, iv); | | 113 | exf->reinit(sw->sw_kschedule, 0, iv); |
114 | } else { | | 114 | } else { |
115 | /* Get random IV */ | | 115 | /* Get random IV */ |
116 | for (i = 0; | | 116 | for (i = 0; |
117 | i + sizeof (u_int32_t) <= EALG_MAX_BLOCK_LEN; | | 117 | i + sizeof (u_int32_t) <= EALG_MAX_BLOCK_LEN; |
118 | i += sizeof (u_int32_t)) { | | 118 | i += sizeof (u_int32_t)) { |
119 | u_int32_t temp = cprng_fast32(); | | 119 | u_int32_t temp = cprng_fast32(); |
120 | | | 120 | |
121 | memcpy(iv + i, &temp, sizeof(u_int32_t)); | | 121 | memcpy(iv + i, &temp, sizeof(u_int32_t)); |
122 | } | | 122 | } |
123 | /* | | 123 | /* |
124 | * What if the block size is not a multiple | | 124 | * What if the block size is not a multiple |
125 | * of sizeof (u_int32_t), which is the size of | | 125 | * of sizeof (u_int32_t), which is the size of |
126 | * what arc4random() returns ? | | 126 | * what arc4random() returns ? |
127 | */ | | 127 | */ |
128 | if (EALG_MAX_BLOCK_LEN % sizeof (u_int32_t) != 0) { | | 128 | if (EALG_MAX_BLOCK_LEN % sizeof (u_int32_t) != 0) { |
129 | u_int32_t temp = cprng_fast32(); | | 129 | u_int32_t temp = cprng_fast32(); |
130 | | | 130 | |
131 | bcopy (&temp, iv + i, | | 131 | bcopy (&temp, iv + i, |
132 | EALG_MAX_BLOCK_LEN - i); | | 132 | EALG_MAX_BLOCK_LEN - i); |
133 | } | | 133 | } |
134 | } | | 134 | } |
135 | | | 135 | |
136 | /* Do we need to write the IV */ | | 136 | /* Do we need to write the IV */ |
137 | if (!(crd->crd_flags & CRD_F_IV_PRESENT)) { | | 137 | if (!(crd->crd_flags & CRD_F_IV_PRESENT)) { |
138 | COPYBACK(outtype, buf, crd->crd_inject, ivlen, iv); | | 138 | COPYBACK(outtype, buf, crd->crd_inject, ivlen, iv); |
139 | } | | 139 | } |
140 | | | 140 | |
141 | } else { /* Decryption */ | | 141 | } else { /* Decryption */ |
142 | /* IV explicitly provided ? */ | | 142 | /* IV explicitly provided ? */ |
143 | if (crd->crd_flags & CRD_F_IV_EXPLICIT) | | 143 | if (crd->crd_flags & CRD_F_IV_EXPLICIT) |
144 | memcpy(iv, crd->crd_iv, ivlen); | | 144 | memcpy(iv, crd->crd_iv, ivlen); |
145 | else { | | 145 | else { |
146 | /* Get IV off buf */ | | 146 | /* Get IV off buf */ |
147 | COPYDATA(outtype, buf, crd->crd_inject, ivlen, iv); | | 147 | COPYDATA(outtype, buf, crd->crd_inject, ivlen, iv); |
148 | } | | 148 | } |
149 | if (exf->reinit) | | 149 | if (exf->reinit) |
150 | exf->reinit(sw->sw_kschedule, iv, 0); | | 150 | exf->reinit(sw->sw_kschedule, iv, 0); |
151 | } | | 151 | } |
152 | | | 152 | |
153 | ivp = iv; | | 153 | ivp = iv; |
154 | | | 154 | |
155 | if (outtype == CRYPTO_BUF_CONTIG) { | | 155 | if (outtype == CRYPTO_BUF_CONTIG) { |
156 | if (exf->reinit) { | | 156 | if (exf->reinit) { |
157 | for (i = crd->crd_skip; | | 157 | for (i = crd->crd_skip; |
158 | i < crd->crd_skip + crd->crd_len; i += blks) { | | 158 | i < crd->crd_skip + crd->crd_len; i += blks) { |
159 | if (crd->crd_flags & CRD_F_ENCRYPT) { | | 159 | if (crd->crd_flags & CRD_F_ENCRYPT) { |
160 | exf->encrypt(sw->sw_kschedule, buf + i); | | 160 | exf->encrypt(sw->sw_kschedule, buf + i); |
161 | } else { | | 161 | } else { |
162 | exf->decrypt(sw->sw_kschedule, buf + i); | | 162 | exf->decrypt(sw->sw_kschedule, buf + i); |
163 | } | | 163 | } |
164 | } | | 164 | } |
165 | } else if (crd->crd_flags & CRD_F_ENCRYPT) { | | 165 | } else if (crd->crd_flags & CRD_F_ENCRYPT) { |
166 | for (i = crd->crd_skip; | | 166 | for (i = crd->crd_skip; |
167 | i < crd->crd_skip + crd->crd_len; i += blks) { | | 167 | i < crd->crd_skip + crd->crd_len; i += blks) { |
168 | /* XOR with the IV/previous block, as appropriate. */ | | 168 | /* XOR with the IV/previous block, as appropriate. */ |
169 | if (i == crd->crd_skip) | | 169 | if (i == crd->crd_skip) |
170 | for (k = 0; k < blks; k++) | | 170 | for (k = 0; k < blks; k++) |
171 | buf[i + k] ^= ivp[k]; | | 171 | buf[i + k] ^= ivp[k]; |
172 | else | | 172 | else |
173 | for (k = 0; k < blks; k++) | | 173 | for (k = 0; k < blks; k++) |
174 | buf[i + k] ^= buf[i + k - blks]; | | 174 | buf[i + k] ^= buf[i + k - blks]; |
175 | exf->encrypt(sw->sw_kschedule, buf + i); | | 175 | exf->encrypt(sw->sw_kschedule, buf + i); |
176 | } | | 176 | } |
177 | } else { /* Decrypt */ | | 177 | } else { /* Decrypt */ |
178 | /* | | 178 | /* |
179 | * Start at the end, so we don't need to keep the encrypted | | 179 | * Start at the end, so we don't need to keep the encrypted |
180 | * block as the IV for the next block. | | 180 | * block as the IV for the next block. |
181 | */ | | 181 | */ |
182 | for (i = crd->crd_skip + crd->crd_len - blks; | | 182 | for (i = crd->crd_skip + crd->crd_len - blks; |
183 | i >= crd->crd_skip; i -= blks) { | | 183 | i >= crd->crd_skip; i -= blks) { |
184 | exf->decrypt(sw->sw_kschedule, buf + i); | | 184 | exf->decrypt(sw->sw_kschedule, buf + i); |
185 | | | 185 | |
186 | /* XOR with the IV/previous block, as appropriate */ | | 186 | /* XOR with the IV/previous block, as appropriate */ |
187 | if (i == crd->crd_skip) | | 187 | if (i == crd->crd_skip) |
188 | for (k = 0; k < blks; k++) | | 188 | for (k = 0; k < blks; k++) |
189 | buf[i + k] ^= ivp[k]; | | 189 | buf[i + k] ^= ivp[k]; |
190 | else | | 190 | else |
191 | for (k = 0; k < blks; k++) | | 191 | for (k = 0; k < blks; k++) |
192 | buf[i + k] ^= buf[i + k - blks]; | | 192 | buf[i + k] ^= buf[i + k - blks]; |
193 | } | | 193 | } |
194 | } | | 194 | } |
195 | | | 195 | |
196 | return 0; | | 196 | return 0; |
197 | } else if (outtype == CRYPTO_BUF_MBUF) { | | 197 | } else if (outtype == CRYPTO_BUF_MBUF) { |
198 | struct mbuf *m = (struct mbuf *) buf; | | 198 | struct mbuf *m = (struct mbuf *) buf; |
199 | | | 199 | |
200 | /* Find beginning of data */ | | 200 | /* Find beginning of data */ |
201 | m = m_getptr(m, crd->crd_skip, &k); | | 201 | m = m_getptr(m, crd->crd_skip, &k); |
202 | if (m == NULL) | | 202 | if (m == NULL) |
203 | return EINVAL; | | 203 | return EINVAL; |
204 | | | 204 | |
205 | i = crd->crd_len; | | 205 | i = crd->crd_len; |
206 | | | 206 | |
207 | while (i > 0) { | | 207 | while (i > 0) { |
208 | /* | | 208 | /* |
209 | * If there's insufficient data at the end of | | 209 | * If there's insufficient data at the end of |
210 | * an mbuf, we have to do some copying. | | 210 | * an mbuf, we have to do some copying. |
211 | */ | | 211 | */ |
212 | if (m->m_len < k + blks && m->m_len != k) { | | 212 | if (m->m_len < k + blks && m->m_len != k) { |
213 | m_copydata(m, k, blks, blk); | | 213 | m_copydata(m, k, blks, blk); |
214 | | | 214 | |
215 | /* Actual encryption/decryption */ | | 215 | /* Actual encryption/decryption */ |
216 | if (exf->reinit) { | | 216 | if (exf->reinit) { |
217 | if (crd->crd_flags & CRD_F_ENCRYPT) { | | 217 | if (crd->crd_flags & CRD_F_ENCRYPT) { |
218 | exf->encrypt(sw->sw_kschedule, | | 218 | exf->encrypt(sw->sw_kschedule, |
219 | blk); | | 219 | blk); |
220 | } else { | | 220 | } else { |
221 | exf->decrypt(sw->sw_kschedule, | | 221 | exf->decrypt(sw->sw_kschedule, |
222 | blk); | | 222 | blk); |
223 | } | | 223 | } |
224 | } else if (crd->crd_flags & CRD_F_ENCRYPT) { | | 224 | } else if (crd->crd_flags & CRD_F_ENCRYPT) { |
225 | /* XOR with previous block */ | | 225 | /* XOR with previous block */ |
226 | for (j = 0; j < blks; j++) | | 226 | for (j = 0; j < blks; j++) |
227 | blk[j] ^= ivp[j]; | | 227 | blk[j] ^= ivp[j]; |
228 | | | 228 | |
229 | exf->encrypt(sw->sw_kschedule, blk); | | 229 | exf->encrypt(sw->sw_kschedule, blk); |
230 | | | 230 | |
231 | /* | | 231 | /* |
232 | * Keep encrypted block for XOR'ing | | 232 | * Keep encrypted block for XOR'ing |
233 | * with next block | | 233 | * with next block |
234 | */ | | 234 | */ |
235 | memcpy(iv, blk, blks); | | 235 | memcpy(iv, blk, blks); |
236 | ivp = iv; | | 236 | ivp = iv; |
237 | } else { /* decrypt */ | | 237 | } else { /* decrypt */ |
238 | /* | | 238 | /* |
239 | * Keep encrypted block for XOR'ing | | 239 | * Keep encrypted block for XOR'ing |
240 | * with next block | | 240 | * with next block |
241 | */ | | 241 | */ |
242 | if (ivp == iv) | | 242 | if (ivp == iv) |
243 | memcpy(piv, blk, blks); | | 243 | memcpy(piv, blk, blks); |
244 | else | | 244 | else |
245 | memcpy(iv, blk, blks); | | 245 | memcpy(iv, blk, blks); |
246 | | | 246 | |
247 | exf->decrypt(sw->sw_kschedule, blk); | | 247 | exf->decrypt(sw->sw_kschedule, blk); |
248 | | | 248 | |
249 | /* XOR with previous block */ | | 249 | /* XOR with previous block */ |
250 | for (j = 0; j < blks; j++) | | 250 | for (j = 0; j < blks; j++) |
251 | blk[j] ^= ivp[j]; | | 251 | blk[j] ^= ivp[j]; |
252 | | | 252 | |
253 | if (ivp == iv) | | 253 | if (ivp == iv) |
254 | memcpy(iv, piv, blks); | | 254 | memcpy(iv, piv, blks); |
255 | else | | 255 | else |
256 | ivp = iv; | | 256 | ivp = iv; |
257 | } | | 257 | } |
258 | | | 258 | |
259 | /* Copy back decrypted block */ | | 259 | /* Copy back decrypted block */ |
260 | m_copyback(m, k, blks, blk); | | 260 | m_copyback(m, k, blks, blk); |
261 | | | 261 | |
262 | /* Advance pointer */ | | 262 | /* Advance pointer */ |
263 | m = m_getptr(m, k + blks, &k); | | 263 | m = m_getptr(m, k + blks, &k); |
264 | if (m == NULL) | | 264 | if (m == NULL) |
265 | return EINVAL; | | 265 | return EINVAL; |
266 | | | 266 | |
267 | i -= blks; | | 267 | i -= blks; |
268 | | | 268 | |
269 | /* Could be done... */ | | 269 | /* Could be done... */ |
270 | if (i == 0) | | 270 | if (i == 0) |
271 | break; | | 271 | break; |
272 | } | | 272 | } |
273 | | | 273 | |
274 | /* Skip possibly empty mbufs */ | | 274 | /* Skip possibly empty mbufs */ |
275 | if (k == m->m_len) { | | 275 | if (k == m->m_len) { |
276 | for (m = m->m_next; m && m->m_len == 0; | | 276 | for (m = m->m_next; m && m->m_len == 0; |
277 | m = m->m_next) | | 277 | m = m->m_next) |
278 | ; | | 278 | ; |
279 | k = 0; | | 279 | k = 0; |
280 | } | | 280 | } |
281 | | | 281 | |
282 | /* Sanity check */ | | 282 | /* Sanity check */ |
283 | if (m == NULL) | | 283 | if (m == NULL) |
284 | return EINVAL; | | 284 | return EINVAL; |
285 | | | 285 | |
286 | /* | | 286 | /* |
287 | * Warning: idat may point to garbage here, but | | 287 | * Warning: idat may point to garbage here, but |
288 | * we only use it in the while() loop, only if | | 288 | * we only use it in the while() loop, only if |
289 | * there are indeed enough data. | | 289 | * there are indeed enough data. |
290 | */ | | 290 | */ |
291 | idat = mtod(m, unsigned char *) + k; | | 291 | idat = mtod(m, unsigned char *) + k; |
292 | | | 292 | |
293 | while (m->m_len >= k + blks && i > 0) { | | 293 | while (m->m_len >= k + blks && i > 0) { |
294 | if (exf->reinit) { | | 294 | if (exf->reinit) { |
295 | if (crd->crd_flags & CRD_F_ENCRYPT) { | | 295 | if (crd->crd_flags & CRD_F_ENCRYPT) { |
296 | exf->encrypt(sw->sw_kschedule, | | 296 | exf->encrypt(sw->sw_kschedule, |
297 | idat); | | 297 | idat); |
298 | } else { | | 298 | } else { |
299 | exf->decrypt(sw->sw_kschedule, | | 299 | exf->decrypt(sw->sw_kschedule, |
300 | idat); | | 300 | idat); |
301 | } | | 301 | } |
302 | } else if (crd->crd_flags & CRD_F_ENCRYPT) { | | 302 | } else if (crd->crd_flags & CRD_F_ENCRYPT) { |
303 | /* XOR with previous block/IV */ | | 303 | /* XOR with previous block/IV */ |
304 | for (j = 0; j < blks; j++) | | 304 | for (j = 0; j < blks; j++) |
305 | idat[j] ^= ivp[j]; | | 305 | idat[j] ^= ivp[j]; |
306 | | | 306 | |
307 | exf->encrypt(sw->sw_kschedule, idat); | | 307 | exf->encrypt(sw->sw_kschedule, idat); |
308 | ivp = idat; | | 308 | ivp = idat; |
309 | } else { /* decrypt */ | | 309 | } else { /* decrypt */ |
310 | /* | | 310 | /* |
311 | * Keep encrypted block to be used | | 311 | * Keep encrypted block to be used |
312 | * in next block's processing. | | 312 | * in next block's processing. |
313 | */ | | 313 | */ |
314 | if (ivp == iv) | | 314 | if (ivp == iv) |
315 | memcpy(piv, idat, blks); | | 315 | memcpy(piv, idat, blks); |
316 | else | | 316 | else |
317 | memcpy(iv, idat, blks); | | 317 | memcpy(iv, idat, blks); |
318 | | | 318 | |
319 | exf->decrypt(sw->sw_kschedule, idat); | | 319 | exf->decrypt(sw->sw_kschedule, idat); |
320 | | | 320 | |
321 | /* XOR with previous block/IV */ | | 321 | /* XOR with previous block/IV */ |
322 | for (j = 0; j < blks; j++) | | 322 | for (j = 0; j < blks; j++) |
323 | idat[j] ^= ivp[j]; | | 323 | idat[j] ^= ivp[j]; |
324 | | | 324 | |
325 | if (ivp == iv) | | 325 | if (ivp == iv) |
326 | memcpy(iv, piv, blks); | | 326 | memcpy(iv, piv, blks); |
327 | else | | 327 | else |
328 | ivp = iv; | | 328 | ivp = iv; |
329 | } | | 329 | } |
330 | | | 330 | |
331 | idat += blks; | | 331 | idat += blks; |
332 | k += blks; | | 332 | k += blks; |
333 | i -= blks; | | 333 | i -= blks; |
334 | } | | 334 | } |
335 | } | | 335 | } |
336 | | | 336 | |
337 | return 0; /* Done with mbuf encryption/decryption */ | | 337 | return 0; /* Done with mbuf encryption/decryption */ |
338 | } else if (outtype == CRYPTO_BUF_IOV) { | | 338 | } else if (outtype == CRYPTO_BUF_IOV) { |
339 | struct uio *uio = (struct uio *) buf; | | 339 | struct uio *uio = (struct uio *) buf; |
340 | | | 340 | |
341 | /* Find beginning of data */ | | 341 | /* Find beginning of data */ |
342 | count = crd->crd_skip; | | 342 | count = crd->crd_skip; |
343 | ind = cuio_getptr(uio, count, &k); | | 343 | ind = cuio_getptr(uio, count, &k); |
344 | if (ind == -1) | | 344 | if (ind == -1) |
345 | return EINVAL; | | 345 | return EINVAL; |
346 | | | 346 | |
347 | i = crd->crd_len; | | 347 | i = crd->crd_len; |
348 | | | 348 | |
349 | while (i > 0) { | | 349 | while (i > 0) { |
350 | /* | | 350 | /* |
351 | * If there's insufficient data at the end, | | 351 | * If there's insufficient data at the end, |
352 | * we have to do some copying. | | 352 | * we have to do some copying. |
353 | */ | | 353 | */ |
354 | if (uio->uio_iov[ind].iov_len < k + blks && | | 354 | if (uio->uio_iov[ind].iov_len < k + blks && |
355 | uio->uio_iov[ind].iov_len != k) { | | 355 | uio->uio_iov[ind].iov_len != k) { |
356 | cuio_copydata(uio, k, blks, blk); | | 356 | cuio_copydata(uio, k, blks, blk); |
357 | | | 357 | |
358 | /* Actual encryption/decryption */ | | 358 | /* Actual encryption/decryption */ |
359 | if (exf->reinit) { | | 359 | if (exf->reinit) { |
360 | if (crd->crd_flags & CRD_F_ENCRYPT) { | | 360 | if (crd->crd_flags & CRD_F_ENCRYPT) { |
361 | exf->encrypt(sw->sw_kschedule, | | 361 | exf->encrypt(sw->sw_kschedule, |
362 | blk); | | 362 | blk); |
363 | } else { | | 363 | } else { |
364 | exf->decrypt(sw->sw_kschedule, | | 364 | exf->decrypt(sw->sw_kschedule, |
365 | blk); | | 365 | blk); |
366 | } | | 366 | } |
367 | } else if (crd->crd_flags & CRD_F_ENCRYPT) { | | 367 | } else if (crd->crd_flags & CRD_F_ENCRYPT) { |
368 | /* XOR with previous block */ | | 368 | /* XOR with previous block */ |
369 | for (j = 0; j < blks; j++) | | 369 | for (j = 0; j < blks; j++) |
370 | blk[j] ^= ivp[j]; | | 370 | blk[j] ^= ivp[j]; |
371 | | | 371 | |
372 | exf->encrypt(sw->sw_kschedule, blk); | | 372 | exf->encrypt(sw->sw_kschedule, blk); |
373 | | | 373 | |
374 | /* | | 374 | /* |
375 | * Keep encrypted block for XOR'ing | | 375 | * Keep encrypted block for XOR'ing |
376 | * with next block | | 376 | * with next block |
377 | */ | | 377 | */ |
378 | memcpy(iv, blk, blks); | | 378 | memcpy(iv, blk, blks); |
379 | ivp = iv; | | 379 | ivp = iv; |
380 | } else { /* decrypt */ | | 380 | } else { /* decrypt */ |
381 | /* | | 381 | /* |
382 | * Keep encrypted block for XOR'ing | | 382 | * Keep encrypted block for XOR'ing |
383 | * with next block | | 383 | * with next block |
384 | */ | | 384 | */ |
385 | if (ivp == iv) | | 385 | if (ivp == iv) |
386 | memcpy(piv, blk, blks); | | 386 | memcpy(piv, blk, blks); |
387 | else | | 387 | else |
388 | memcpy(iv, blk, blks); | | 388 | memcpy(iv, blk, blks); |
389 | | | 389 | |
390 | exf->decrypt(sw->sw_kschedule, blk); | | 390 | exf->decrypt(sw->sw_kschedule, blk); |
391 | | | 391 | |
392 | /* XOR with previous block */ | | 392 | /* XOR with previous block */ |
393 | for (j = 0; j < blks; j++) | | 393 | for (j = 0; j < blks; j++) |
394 | blk[j] ^= ivp[j]; | | 394 | blk[j] ^= ivp[j]; |
395 | | | 395 | |
396 | if (ivp == iv) | | 396 | if (ivp == iv) |
397 | memcpy(iv, piv, blks); | | 397 | memcpy(iv, piv, blks); |
398 | else | | 398 | else |
399 | ivp = iv; | | 399 | ivp = iv; |
400 | } | | 400 | } |
401 | | | 401 | |
402 | /* Copy back decrypted block */ | | 402 | /* Copy back decrypted block */ |
403 | cuio_copyback(uio, k, blks, blk); | | 403 | cuio_copyback(uio, k, blks, blk); |
404 | | | 404 | |
405 | count += blks; | | 405 | count += blks; |
406 | | | 406 | |
407 | /* Advance pointer */ | | 407 | /* Advance pointer */ |
408 | ind = cuio_getptr(uio, count, &k); | | 408 | ind = cuio_getptr(uio, count, &k); |
409 | if (ind == -1) | | 409 | if (ind == -1) |
410 | return (EINVAL); | | 410 | return (EINVAL); |
411 | | | 411 | |
412 | i -= blks; | | 412 | i -= blks; |
413 | | | 413 | |
414 | /* Could be done... */ | | 414 | /* Could be done... */ |
415 | if (i == 0) | | 415 | if (i == 0) |
416 | break; | | 416 | break; |
417 | } | | 417 | } |
418 | | | 418 | |
419 | /* | | 419 | /* |
420 | * Warning: idat may point to garbage here, but | | 420 | * Warning: idat may point to garbage here, but |
421 | * we only use it in the while() loop, only if | | 421 | * we only use it in the while() loop, only if |
422 | * there are indeed enough data. | | 422 | * there are indeed enough data. |
423 | */ | | 423 | */ |
424 | idat = ((char *)uio->uio_iov[ind].iov_base) + k; | | 424 | idat = ((char *)uio->uio_iov[ind].iov_base) + k; |
425 | | | 425 | |
426 | while (uio->uio_iov[ind].iov_len >= k + blks && | | 426 | while (uio->uio_iov[ind].iov_len >= k + blks && |
427 | i > 0) { | | 427 | i > 0) { |
428 | if (exf->reinit) { | | 428 | if (exf->reinit) { |
429 | if (crd->crd_flags & CRD_F_ENCRYPT) { | | 429 | if (crd->crd_flags & CRD_F_ENCRYPT) { |
430 | exf->encrypt(sw->sw_kschedule, | | 430 | exf->encrypt(sw->sw_kschedule, |
431 | idat); | | 431 | idat); |
432 | } else { | | 432 | } else { |
433 | exf->decrypt(sw->sw_kschedule, | | 433 | exf->decrypt(sw->sw_kschedule, |
434 | idat); | | 434 | idat); |
435 | } | | 435 | } |
436 | } else if (crd->crd_flags & CRD_F_ENCRYPT) { | | 436 | } else if (crd->crd_flags & CRD_F_ENCRYPT) { |
437 | /* XOR with previous block/IV */ | | 437 | /* XOR with previous block/IV */ |
438 | for (j = 0; j < blks; j++) | | 438 | for (j = 0; j < blks; j++) |
439 | idat[j] ^= ivp[j]; | | 439 | idat[j] ^= ivp[j]; |
440 | | | 440 | |
441 | exf->encrypt(sw->sw_kschedule, idat); | | 441 | exf->encrypt(sw->sw_kschedule, idat); |
442 | ivp = idat; | | 442 | ivp = idat; |
443 | } else { /* decrypt */ | | 443 | } else { /* decrypt */ |
444 | /* | | 444 | /* |
445 | * Keep encrypted block to be used | | 445 | * Keep encrypted block to be used |
446 | * in next block's processing. | | 446 | * in next block's processing. |
447 | */ | | 447 | */ |
448 | if (ivp == iv) | | 448 | if (ivp == iv) |
449 | memcpy(piv, idat, blks); | | 449 | memcpy(piv, idat, blks); |
450 | else | | 450 | else |
451 | memcpy(iv, idat, blks); | | 451 | memcpy(iv, idat, blks); |
452 | | | 452 | |
453 | exf->decrypt(sw->sw_kschedule, idat); | | 453 | exf->decrypt(sw->sw_kschedule, idat); |
454 | | | 454 | |
455 | /* XOR with previous block/IV */ | | 455 | /* XOR with previous block/IV */ |
456 | for (j = 0; j < blks; j++) | | 456 | for (j = 0; j < blks; j++) |
457 | idat[j] ^= ivp[j]; | | 457 | idat[j] ^= ivp[j]; |
458 | | | 458 | |
459 | if (ivp == iv) | | 459 | if (ivp == iv) |
460 | memcpy(iv, piv, blks); | | 460 | memcpy(iv, piv, blks); |
461 | else | | 461 | else |
462 | ivp = iv; | | 462 | ivp = iv; |
463 | } | | 463 | } |
464 | | | 464 | |
465 | idat += blks; | | 465 | idat += blks; |
466 | count += blks; | | 466 | count += blks; |
467 | k += blks; | | 467 | k += blks; |
468 | i -= blks; | | 468 | i -= blks; |
469 | } | | 469 | } |
470 | } | | 470 | } |
471 | return 0; /* Done with mbuf encryption/decryption */ | | 471 | return 0; /* Done with mbuf encryption/decryption */ |
472 | } | | 472 | } |
473 | | | 473 | |
474 | /* Unreachable */ | | 474 | /* Unreachable */ |
475 | return EINVAL; | | 475 | return EINVAL; |
476 | } | | 476 | } |
477 | | | 477 | |
478 | /* | | 478 | /* |
479 | * Compute keyed-hash authenticator. | | 479 | * Compute keyed-hash authenticator. |
480 | */ | | 480 | */ |
481 | int | | 481 | int |
482 | swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd, | | 482 | swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd, |
483 | const struct swcr_data *sw, void *buf, int outtype) | | 483 | const struct swcr_data *sw, void *buf, int outtype) |
484 | { | | 484 | { |
485 | unsigned char aalg[AALG_MAX_RESULT_LEN]; | | 485 | unsigned char aalg[AALG_MAX_RESULT_LEN]; |
486 | const struct swcr_auth_hash *axf; | | 486 | const struct swcr_auth_hash *axf; |
487 | union authctx ctx; | | 487 | union authctx ctx; |
488 | int err; | | 488 | int err; |
489 | | | 489 | |
490 | if (sw->sw_ictx == 0) | | 490 | if (sw->sw_ictx == 0) |
491 | return EINVAL; | | 491 | return EINVAL; |
492 | | | 492 | |
493 | axf = sw->sw_axf; | | 493 | axf = sw->sw_axf; |
494 | | | 494 | |
495 | memcpy(&ctx, sw->sw_ictx, axf->ctxsize); | | 495 | memcpy(&ctx, sw->sw_ictx, axf->ctxsize); |
496 | | | 496 | |
497 | switch (outtype) { | | 497 | switch (outtype) { |
498 | case CRYPTO_BUF_CONTIG: | | 498 | case CRYPTO_BUF_CONTIG: |
499 | axf->Update(&ctx, (char *)buf + crd->crd_skip, crd->crd_len); | | 499 | axf->Update(&ctx, (char *)buf + crd->crd_skip, crd->crd_len); |
500 | break; | | 500 | break; |
501 | case CRYPTO_BUF_MBUF: | | 501 | case CRYPTO_BUF_MBUF: |
502 | err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len, | | 502 | err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len, |
503 | (int (*)(void*, void *, unsigned int)) axf->Update, | | 503 | (int (*)(void*, void *, unsigned int))(void *)axf->Update, |
504 | (void *) &ctx); | | 504 | (void *) &ctx); |
505 | if (err) | | 505 | if (err) |
506 | return err; | | 506 | return err; |
507 | break; | | 507 | break; |
508 | case CRYPTO_BUF_IOV: | | 508 | case CRYPTO_BUF_IOV: |
509 | err = cuio_apply((struct uio *) buf, crd->crd_skip, | | 509 | err = cuio_apply((struct uio *) buf, crd->crd_skip, |
510 | crd->crd_len, | | 510 | crd->crd_len, |
511 | (int (*)(void *, void *, unsigned int)) axf->Update, | | 511 | (int (*)(void *, void *, unsigned int))(void *)axf->Update, |
512 | (void *) &ctx); | | 512 | (void *) &ctx); |
513 | if (err) { | | 513 | if (err) { |
514 | return err; | | 514 | return err; |
515 | } | | 515 | } |
516 | break; | | 516 | break; |
517 | default: | | 517 | default: |
518 | return EINVAL; | | 518 | return EINVAL; |
519 | } | | 519 | } |
520 | | | 520 | |
521 | switch (sw->sw_alg) { | | 521 | switch (sw->sw_alg) { |
522 | case CRYPTO_MD5_HMAC: | | 522 | case CRYPTO_MD5_HMAC: |
523 | case CRYPTO_MD5_HMAC_96: | | 523 | case CRYPTO_MD5_HMAC_96: |
524 | case CRYPTO_SHA1_HMAC: | | 524 | case CRYPTO_SHA1_HMAC: |
525 | case CRYPTO_SHA1_HMAC_96: | | 525 | case CRYPTO_SHA1_HMAC_96: |
526 | case CRYPTO_SHA2_256_HMAC: | | 526 | case CRYPTO_SHA2_256_HMAC: |
527 | case CRYPTO_SHA2_384_HMAC: | | 527 | case CRYPTO_SHA2_384_HMAC: |
528 | case CRYPTO_SHA2_512_HMAC: | | 528 | case CRYPTO_SHA2_512_HMAC: |
529 | case CRYPTO_RIPEMD160_HMAC: | | 529 | case CRYPTO_RIPEMD160_HMAC: |
530 | case CRYPTO_RIPEMD160_HMAC_96: | | 530 | case CRYPTO_RIPEMD160_HMAC_96: |
531 | if (sw->sw_octx == NULL) | | 531 | if (sw->sw_octx == NULL) |
532 | return EINVAL; | | 532 | return EINVAL; |
533 | | | 533 | |
534 | axf->Final(aalg, &ctx); | | 534 | axf->Final(aalg, &ctx); |
535 | memcpy(&ctx, sw->sw_octx, axf->ctxsize); | | 535 | memcpy(&ctx, sw->sw_octx, axf->ctxsize); |
536 | axf->Update(&ctx, aalg, axf->auth_hash->hashsize); | | 536 | axf->Update(&ctx, aalg, axf->auth_hash->hashsize); |
537 | axf->Final(aalg, &ctx); | | 537 | axf->Final(aalg, &ctx); |
538 | break; | | 538 | break; |
539 | | | 539 | |
540 | case CRYPTO_MD5_KPDK: | | 540 | case CRYPTO_MD5_KPDK: |
541 | case CRYPTO_SHA1_KPDK: | | 541 | case CRYPTO_SHA1_KPDK: |
542 | if (sw->sw_octx == NULL) | | 542 | if (sw->sw_octx == NULL) |
543 | return EINVAL; | | 543 | return EINVAL; |
544 | | | 544 | |
545 | axf->Update(&ctx, sw->sw_octx, sw->sw_klen); | | 545 | axf->Update(&ctx, sw->sw_octx, sw->sw_klen); |
546 | axf->Final(aalg, &ctx); | | 546 | axf->Final(aalg, &ctx); |
547 | break; | | 547 | break; |
548 | | | 548 | |
549 | case CRYPTO_NULL_HMAC: | | 549 | case CRYPTO_NULL_HMAC: |
550 | case CRYPTO_MD5: | | 550 | case CRYPTO_MD5: |
551 | case CRYPTO_SHA1: | | 551 | case CRYPTO_SHA1: |
552 | case CRYPTO_AES_XCBC_MAC_96: | | 552 | case CRYPTO_AES_XCBC_MAC_96: |
553 | axf->Final(aalg, &ctx); | | 553 | axf->Final(aalg, &ctx); |
554 | break; | | 554 | break; |
555 | } | | 555 | } |
556 | | | 556 | |
557 | /* Inject the authentication data */ | | 557 | /* Inject the authentication data */ |
558 | switch (outtype) { | | 558 | switch (outtype) { |
559 | case CRYPTO_BUF_CONTIG: | | 559 | case CRYPTO_BUF_CONTIG: |
560 | (void)memcpy((char *)buf + crd->crd_inject, aalg, | | 560 | (void)memcpy((char *)buf + crd->crd_inject, aalg, |
561 | axf->auth_hash->authsize); | | 561 | axf->auth_hash->authsize); |
562 | break; | | 562 | break; |
563 | case CRYPTO_BUF_MBUF: | | 563 | case CRYPTO_BUF_MBUF: |
564 | m_copyback((struct mbuf *) buf, crd->crd_inject, | | 564 | m_copyback((struct mbuf *) buf, crd->crd_inject, |
565 | axf->auth_hash->authsize, aalg); | | 565 | axf->auth_hash->authsize, aalg); |
566 | break; | | 566 | break; |
567 | case CRYPTO_BUF_IOV: | | 567 | case CRYPTO_BUF_IOV: |
568 | memcpy(crp->crp_mac, aalg, axf->auth_hash->authsize); | | 568 | memcpy(crp->crp_mac, aalg, axf->auth_hash->authsize); |
569 | break; | | 569 | break; |
570 | default: | | 570 | default: |
571 | return EINVAL; | | 571 | return EINVAL; |
572 | } | | 572 | } |
573 | return 0; | | 573 | return 0; |
574 | } | | 574 | } |
575 | | | 575 | |
576 | /* | | 576 | /* |
577 | * Apply a combined encryption-authentication transformation | | 577 | * Apply a combined encryption-authentication transformation |
578 | */ | | 578 | */ |
static int
swcr_combined(struct cryptop *crp, int outtype)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;	/* uint32_t-aligned scratch block */
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
	struct swcr_data *sw, *swa, *swe = NULL;
	const struct swcr_auth_hash *axf = NULL;
	const struct swcr_enc_xform *exf = NULL;
	void *buf = (void *)crp->crp_buf;
	uint32_t *blkp;
	int i, blksz = 0, ivlen = 0, len;

	/*
	 * Walk the descriptor chain and pair up the cipher descriptor
	 * (crde/swe: AES-GCM or AES-GMAC) with the authentication
	 * descriptor (crda/swa: AES-{128,192,256}-GMAC).  Both must be
	 * present for a combined (AEAD) operation.
	 */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/* Find the session transform matching this descriptor. */
		for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
		     sw && sw->sw_alg != crd->crd_alg;
		     sw = sw->sw_next)
			;
		if (sw == NULL)
			return (EINVAL);

		switch (sw->sw_alg) {
		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
			swe = sw;
			crde = crd;
			exf = swe->sw_exf;
			ivlen = exf->enc_xform->ivsize;
			break;
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			swa = sw;
			crda = crd;
			axf = swa->sw_axf;
			if (swa->sw_ictx == 0)
				return (EINVAL);
			/* Start from the precomputed initial MAC context. */
			memcpy(&ctx, swa->sw_ictx, axf->ctxsize);
			blksz = axf->auth_hash->blocksize;
			break;
		default:
			return (EINVAL);
		}
	}
	if (crde == NULL || crda == NULL)
		return (EINVAL);
	/* Combined mode only supports mbuf and iovec buffers here. */
	if (outtype == CRYPTO_BUF_CONTIG)
		return (EINVAL);

	/* Initialize the IV */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT) {
			memcpy(iv, crde->crd_iv, ivlen);
			if (exf->reinit)
				exf->reinit(swe->sw_kschedule, iv, 0);
		} else if (exf->reinit)
			/* reinit generates the IV into iv[] itself */
			exf->reinit(swe->sw_kschedule, 0, iv);
		else
			cprng_fast(iv, ivlen);

		/* Do we need to write the IV */
		if (!(crde->crd_flags & CRD_F_IV_PRESENT))
			COPYBACK(outtype, buf, crde->crd_inject, ivlen, iv);

	} else {	/* Decryption */
			/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, ivlen);
		else {
			/* Get IV off buf */
			COPYDATA(outtype, buf, crde->crd_inject, ivlen, iv);
		}
		if (exf->reinit)
			exf->reinit(swe->sw_kschedule, iv, 0);
	}

	/* Supply MAC with IV */
	if (axf->Reinit)
		axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD (additional authenticated data) */
	for (i = 0; i < crda->crd_len; i += blksz) {
		len = MIN(crda->crd_len - i, blksz);
		COPYDATA(outtype, buf, crda->crd_skip + i, len, blk);
		axf->Update(&ctx, blk, len);
	}

	/*
	 * Do encryption/decryption with MAC, one cipher block at a time.
	 * Encrypt-then-MAC on the encrypt side; MAC-then-decrypt on the
	 * decrypt side, so the MAC always covers the ciphertext.
	 */
	for (i = 0; i < crde->crd_len; i += blksz) {
		len = MIN(crde->crd_len - i, blksz);
		if (len < blksz)
			/* zero-pad the final partial block */
			memset(blk, 0, blksz);
		COPYDATA(outtype, buf, crde->crd_skip + i, len, blk);
		if (crde->crd_flags & CRD_F_ENCRYPT) {
			exf->encrypt(swe->sw_kschedule, blk);
			axf->Update(&ctx, blk, len);
		} else {
			axf->Update(&ctx, blk, len);
			exf->decrypt(swe->sw_kschedule, blk);
		}
		COPYBACK(outtype, buf, crde->crd_skip + i, len, blk);
	}

	/* Do any required special finalization */
	switch (crda->crd_alg) {
	case CRYPTO_AES_128_GMAC:
	case CRYPTO_AES_192_GMAC:
	case CRYPTO_AES_256_GMAC:
		/*
		 * GHASH length block: big-endian bit lengths of the AAD
		 * (words 1) and of the ciphertext (word 3), per the GCM
		 * specification.
		 */
		memset(blk, 0, blksz);
		blkp = (uint32_t *)blk + 1;
		*blkp = htobe32(crda->crd_len * 8);
		blkp = (uint32_t *)blk + 3;
		*blkp = htobe32(crde->crd_len * 8);
		axf->Update(&ctx, blk, blksz);
		break;
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Inject the authentication data */
	if (outtype == CRYPTO_BUF_MBUF)
		COPYBACK(outtype, buf, crda->crd_inject, axf->auth_hash->authsize, aalg);
	else
		/* iovec buffers return the tag out-of-band in crp_mac */
		memcpy(crp->crp_mac, aalg, axf->auth_hash->authsize);

	return (0);
}
712 | | | 712 | |
713 | /* | | 713 | /* |
714 | * Apply a compression/decompression algorithm | | 714 | * Apply a compression/decompression algorithm |
715 | */ | | 715 | */ |
716 | static int | | 716 | static int |
717 | swcr_compdec(struct cryptodesc *crd, const struct swcr_data *sw, | | 717 | swcr_compdec(struct cryptodesc *crd, const struct swcr_data *sw, |
718 | void *buf, int outtype, int *res_size) | | 718 | void *buf, int outtype, int *res_size) |
719 | { | | 719 | { |
720 | u_int8_t *data, *out; | | 720 | u_int8_t *data, *out; |
721 | const struct swcr_comp_algo *cxf; | | 721 | const struct swcr_comp_algo *cxf; |
722 | int adj; | | 722 | int adj; |
723 | u_int32_t result; | | 723 | u_int32_t result; |
724 | | | 724 | |
725 | cxf = sw->sw_cxf; | | 725 | cxf = sw->sw_cxf; |
726 | | | 726 | |
727 | /* We must handle the whole buffer of data in one time | | 727 | /* We must handle the whole buffer of data in one time |
728 | * then if there is not all the data in the mbuf, we must | | 728 | * then if there is not all the data in the mbuf, we must |
729 | * copy in a buffer. | | 729 | * copy in a buffer. |
730 | */ | | 730 | */ |
731 | | | 731 | |
732 | data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT); | | 732 | data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT); |
733 | if (data == NULL) | | 733 | if (data == NULL) |
734 | return (EINVAL); | | 734 | return (EINVAL); |
735 | COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data); | | 735 | COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data); |
736 | | | 736 | |
737 | if (crd->crd_flags & CRD_F_COMP) | | 737 | if (crd->crd_flags & CRD_F_COMP) |
738 | result = cxf->compress(data, crd->crd_len, &out); | | 738 | result = cxf->compress(data, crd->crd_len, &out); |
739 | else | | 739 | else |
740 | result = cxf->decompress(data, crd->crd_len, &out, | | 740 | result = cxf->decompress(data, crd->crd_len, &out, |
741 | *res_size); | | 741 | *res_size); |
742 | | | 742 | |
743 | free(data, M_CRYPTO_DATA); | | 743 | free(data, M_CRYPTO_DATA); |
744 | if (result == 0) | | 744 | if (result == 0) |
745 | return EINVAL; | | 745 | return EINVAL; |
746 | | | 746 | |
747 | /* Copy back the (de)compressed data. m_copyback is | | 747 | /* Copy back the (de)compressed data. m_copyback is |
748 | * extending the mbuf as necessary. | | 748 | * extending the mbuf as necessary. |
749 | */ | | 749 | */ |
750 | *res_size = (int)result; | | 750 | *res_size = (int)result; |
751 | /* Check the compressed size when doing compression */ | | 751 | /* Check the compressed size when doing compression */ |
752 | if (crd->crd_flags & CRD_F_COMP && | | 752 | if (crd->crd_flags & CRD_F_COMP && |
753 | sw->sw_alg == CRYPTO_DEFLATE_COMP_NOGROW && | | 753 | sw->sw_alg == CRYPTO_DEFLATE_COMP_NOGROW && |
754 | result >= crd->crd_len) { | | 754 | result >= crd->crd_len) { |
755 | /* Compression was useless, we lost time */ | | 755 | /* Compression was useless, we lost time */ |
756 | free(out, M_CRYPTO_DATA); | | 756 | free(out, M_CRYPTO_DATA); |
757 | return 0; | | 757 | return 0; |
758 | } | | 758 | } |
759 | | | 759 | |
760 | COPYBACK(outtype, buf, crd->crd_skip, result, out); | | 760 | COPYBACK(outtype, buf, crd->crd_skip, result, out); |
761 | if (result < crd->crd_len) { | | 761 | if (result < crd->crd_len) { |
762 | adj = result - crd->crd_len; | | 762 | adj = result - crd->crd_len; |
763 | if (outtype == CRYPTO_BUF_MBUF) { | | 763 | if (outtype == CRYPTO_BUF_MBUF) { |
764 | m_adj((struct mbuf *)buf, adj); | | 764 | m_adj((struct mbuf *)buf, adj); |
765 | } | | 765 | } |
766 | /* Don't adjust the iov_len, it breaks the kmem_free */ | | 766 | /* Don't adjust the iov_len, it breaks the kmem_free */ |
767 | } | | 767 | } |
768 | free(out, M_CRYPTO_DATA); | | 768 | free(out, M_CRYPTO_DATA); |
769 | return 0; | | 769 | return 0; |
770 | } | | 770 | } |
771 | | | 771 | |
772 | /* | | 772 | /* |
773 | * Generate a new software session. | | 773 | * Generate a new software session. |
774 | */ | | 774 | */ |
/*
 * Generate a new software session.
 */
static int
swcr_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	const struct swcr_auth_hash *axf;
	const struct swcr_enc_xform *txf;
	const struct swcr_comp_algo *cxf;
	u_int32_t i;
	int k, error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	/* Look for a free slot; slot 0 is never handed out. */
	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	/* No free slot (or no table yet): allocate/grow the table. */
	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			return ENOBUFS;
		}

		memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));

		/* Copy existing sessions */
		if (swcr_sessions) {
			memcpy(swd, swcr_sessions,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}

	swd = &swcr_sessions[i];
	*sid = i;

	/*
	 * Build one swcr_data per cryptoini in the chain, linked via
	 * sw_next.  On any failure, swcr_freesession() tears down
	 * whatever was built so far.
	 */
	while (cri) {
		*swd = malloc(sizeof **swd, M_CRYPTO_DATA, M_NOWAIT);
		if (*swd == NULL) {
			swcr_freesession(NULL, i);
			return ENOBUFS;
		}
		memset(*swd, 0, sizeof(struct swcr_data));

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &swcr_enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &swcr_enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &swcr_enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &swcr_enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &swcr_enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &swcr_enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_CAMELLIA_CBC:
			txf = &swcr_enc_xform_camellia;
			goto enccommon;
		case CRYPTO_AES_CTR:
			txf = &swcr_enc_xform_aes_ctr;
			goto enccommon;
		case CRYPTO_AES_GCM_16:
			txf = &swcr_enc_xform_aes_gcm;
			goto enccommon;
		case CRYPTO_AES_GMAC:
			txf = &swcr_enc_xform_aes_gmac;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &swcr_enc_xform_null;
			goto enccommon;
		enccommon:
			/* Common cipher setup: expand the key schedule. */
			error = txf->setkey(&((*swd)->sw_kschedule),
					cri->cri_key, cri->cri_klen / 8);
			if (error) {
				swcr_freesession(NULL, i);
				return error;
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &swcr_auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_MD5_HMAC_96:
			axf = &swcr_auth_hash_hmac_md5_96;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &swcr_auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA1_HMAC_96:
			axf = &swcr_auth_hash_hmac_sha1_96;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &swcr_auth_hash_hmac_sha2_256;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &swcr_auth_hash_hmac_sha2_384;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &swcr_auth_hash_hmac_sha2_512;
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &swcr_auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &swcr_auth_hash_hmac_ripemd_160;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC_96:
			axf = &swcr_auth_hash_hmac_ripemd_160_96;
			goto authcommon;	/* leave this for safety */
		authcommon:
			/*
			 * HMAC setup: precompute the inner (key ^ ipad)
			 * and outer (key ^ opad) hash contexts so each
			 * request only hashes the message.
			 */
			(*swd)->sw_ictx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			/*
			 * The key is XOR-masked in place and restored
			 * afterwards; the three XOR loops below must stay
			 * in this exact order.
			 */
			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_IPAD_VAL;

			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_ictx, hmac_ipad_buffer,
			    axf->auth_hash->blocksize - (cri->cri_klen / 8));

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

			axf->Init((*swd)->sw_octx);
			axf->Update((*swd)->sw_octx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_octx, hmac_opad_buffer,
			    axf->auth_hash->blocksize - (cri->cri_klen / 8));

			/* Restore the caller's key to its original bytes. */
			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_OPAD_VAL;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &swcr_auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK: {
			unsigned char digest[SHA1_DIGEST_LENGTH];
			CTASSERT(SHA1_DIGEST_LENGTH >= MD5_DIGEST_LENGTH);
			axf = &swcr_auth_hash_key_sha1;
		auth2common:
			/* Keyed (KPDK) setup: hash-the-key context. */
			(*swd)->sw_ictx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			(*swd)->sw_octx = malloc(cri->cri_klen / 8, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			(*swd)->sw_klen = cri->cri_klen / 8;
			memcpy((*swd)->sw_octx, cri->cri_key, cri->cri_klen / 8);
			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			/* digest discarded; Update above primed the context */
			axf->Final(digest, (*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;
		}

		case CRYPTO_MD5:
			axf = &swcr_auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &swcr_auth_hash_sha1;
		auth3common:
			/* Plain (unkeyed) digest setup. */
			(*swd)->sw_ictx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_AES_XCBC_MAC_96:
			axf = &swcr_auth_hash_aes_xcbc_mac;
			goto auth4common;
		case CRYPTO_AES_128_GMAC:
			axf = &swcr_auth_hash_gmac_aes_128;
			goto auth4common;
		case CRYPTO_AES_192_GMAC:
			axf = &swcr_auth_hash_gmac_aes_192;
			goto auth4common;
		case CRYPTO_AES_256_GMAC:
			axf = &swcr_auth_hash_gmac_aes_256;
		auth4common:
			/* Keyed-MAC setup with an explicit Setkey hook. */
			(*swd)->sw_ictx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}
			axf->Init((*swd)->sw_ictx);
			axf->Setkey((*swd)->sw_ictx,
				cri->cri_key, cri->cri_klen / 8);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &swcr_comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;

		case CRYPTO_DEFLATE_COMP_NOGROW:
			cxf = &swcr_comp_algo_deflate_nogrow;
			(*swd)->sw_cxf = cxf;
			break;

		case CRYPTO_GZIP_COMP:
			cxf = &swcr_comp_algo_gzip;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession(NULL, i);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	return 0;
}
1049 | | | 1049 | |
1050 | /* | | 1050 | /* |
1051 | * Free a session. | | 1051 | * Free a session. |
1052 | */ | | 1052 | */ |
1053 | static int | | 1053 | static int |
1054 | swcr_freesession(void *arg, u_int64_t tid) | | 1054 | swcr_freesession(void *arg, u_int64_t tid) |
1055 | { | | 1055 | { |
1056 | struct swcr_data *swd; | | 1056 | struct swcr_data *swd; |
1057 | const struct swcr_enc_xform *txf; | | 1057 | const struct swcr_enc_xform *txf; |
1058 | const struct swcr_auth_hash *axf; | | 1058 | const struct swcr_auth_hash *axf; |
1059 | u_int32_t sid = ((u_int32_t) tid) & 0xffffffff; | | 1059 | u_int32_t sid = ((u_int32_t) tid) & 0xffffffff; |
1060 | | | 1060 | |
1061 | if (sid > swcr_sesnum || swcr_sessions == NULL || | | 1061 | if (sid > swcr_sesnum || swcr_sessions == NULL || |
1062 | swcr_sessions[sid] == NULL) | | 1062 | swcr_sessions[sid] == NULL) |
1063 | return EINVAL; | | 1063 | return EINVAL; |
1064 | | | 1064 | |
1065 | /* Silently accept and return */ | | 1065 | /* Silently accept and return */ |
1066 | if (sid == 0) | | 1066 | if (sid == 0) |
1067 | return 0; | | 1067 | return 0; |
1068 | | | 1068 | |
1069 | while ((swd = swcr_sessions[sid]) != NULL) { | | 1069 | while ((swd = swcr_sessions[sid]) != NULL) { |
1070 | swcr_sessions[sid] = swd->sw_next; | | 1070 | swcr_sessions[sid] = swd->sw_next; |
1071 | | | 1071 | |
1072 | switch (swd->sw_alg) { | | 1072 | switch (swd->sw_alg) { |
1073 | case CRYPTO_DES_CBC: | | 1073 | case CRYPTO_DES_CBC: |
1074 | case CRYPTO_3DES_CBC: | | 1074 | case CRYPTO_3DES_CBC: |
1075 | case CRYPTO_BLF_CBC: | | 1075 | case CRYPTO_BLF_CBC: |
1076 | case CRYPTO_CAST_CBC: | | 1076 | case CRYPTO_CAST_CBC: |
1077 | case CRYPTO_SKIPJACK_CBC: | | 1077 | case CRYPTO_SKIPJACK_CBC: |
1078 | case CRYPTO_RIJNDAEL128_CBC: | | 1078 | case CRYPTO_RIJNDAEL128_CBC: |
1079 | case CRYPTO_CAMELLIA_CBC: | | 1079 | case CRYPTO_CAMELLIA_CBC: |
1080 | case CRYPTO_AES_CTR: | | 1080 | case CRYPTO_AES_CTR: |
1081 | case CRYPTO_AES_GCM_16: | | 1081 | case CRYPTO_AES_GCM_16: |
1082 | case CRYPTO_AES_GMAC: | | 1082 | case CRYPTO_AES_GMAC: |
1083 | case CRYPTO_NULL_CBC: | | 1083 | case CRYPTO_NULL_CBC: |
1084 | txf = swd->sw_exf; | | 1084 | txf = swd->sw_exf; |
1085 | | | 1085 | |
1086 | if (swd->sw_kschedule) | | 1086 | if (swd->sw_kschedule) |
1087 | txf->zerokey(&(swd->sw_kschedule)); | | 1087 | txf->zerokey(&(swd->sw_kschedule)); |
1088 | break; | | 1088 | break; |
1089 | | | 1089 | |
1090 | case CRYPTO_MD5_HMAC: | | 1090 | case CRYPTO_MD5_HMAC: |
1091 | case CRYPTO_MD5_HMAC_96: | | 1091 | case CRYPTO_MD5_HMAC_96: |
1092 | case CRYPTO_SHA1_HMAC: | | 1092 | case CRYPTO_SHA1_HMAC: |
1093 | case CRYPTO_SHA1_HMAC_96: | | 1093 | case CRYPTO_SHA1_HMAC_96: |
1094 | case CRYPTO_SHA2_256_HMAC: | | 1094 | case CRYPTO_SHA2_256_HMAC: |
1095 | case CRYPTO_SHA2_384_HMAC: | | 1095 | case CRYPTO_SHA2_384_HMAC: |
1096 | case CRYPTO_SHA2_512_HMAC: | | 1096 | case CRYPTO_SHA2_512_HMAC: |
1097 | case CRYPTO_RIPEMD160_HMAC: | | 1097 | case CRYPTO_RIPEMD160_HMAC: |
1098 | case CRYPTO_RIPEMD160_HMAC_96: | | 1098 | case CRYPTO_RIPEMD160_HMAC_96: |
1099 | case CRYPTO_NULL_HMAC: | | 1099 | case CRYPTO_NULL_HMAC: |
1100 | axf = swd->sw_axf; | | 1100 | axf = swd->sw_axf; |
1101 | | | 1101 | |
1102 | if (swd->sw_ictx) { | | 1102 | if (swd->sw_ictx) { |
1103 | explicit_memset(swd->sw_ictx, 0, axf->ctxsize); | | 1103 | explicit_memset(swd->sw_ictx, 0, axf->ctxsize); |
1104 | free(swd->sw_ictx, M_CRYPTO_DATA); | | 1104 | free(swd->sw_ictx, M_CRYPTO_DATA); |
1105 | } | | 1105 | } |
1106 | if (swd->sw_octx) { | | 1106 | if (swd->sw_octx) { |
1107 | explicit_memset(swd->sw_octx, 0, axf->ctxsize); | | 1107 | explicit_memset(swd->sw_octx, 0, axf->ctxsize); |
1108 | free(swd->sw_octx, M_CRYPTO_DATA); | | 1108 | free(swd->sw_octx, M_CRYPTO_DATA); |
1109 | } | | 1109 | } |
1110 | break; | | 1110 | break; |
1111 | | | 1111 | |
1112 | case CRYPTO_MD5_KPDK: | | 1112 | case CRYPTO_MD5_KPDK: |
1113 | case CRYPTO_SHA1_KPDK: | | 1113 | case CRYPTO_SHA1_KPDK: |
1114 | axf = swd->sw_axf; | | 1114 | axf = swd->sw_axf; |
1115 | | | 1115 | |
1116 | if (swd->sw_ictx) { | | 1116 | if (swd->sw_ictx) { |
1117 | explicit_memset(swd->sw_ictx, 0, axf->ctxsize); | | 1117 | explicit_memset(swd->sw_ictx, 0, axf->ctxsize); |
1118 | free(swd->sw_ictx, M_CRYPTO_DATA); | | 1118 | free(swd->sw_ictx, M_CRYPTO_DATA); |
1119 | } | | 1119 | } |
1120 | if (swd->sw_octx) { | | 1120 | if (swd->sw_octx) { |
1121 | explicit_memset(swd->sw_octx, 0, swd->sw_klen); | | 1121 | explicit_memset(swd->sw_octx, 0, swd->sw_klen); |
1122 | free(swd->sw_octx, M_CRYPTO_DATA); | | 1122 | free(swd->sw_octx, M_CRYPTO_DATA); |
1123 | } | | 1123 | } |
1124 | break; | | 1124 | break; |
1125 | | | 1125 | |
1126 | case CRYPTO_MD5: | | 1126 | case CRYPTO_MD5: |
1127 | case CRYPTO_SHA1: | | 1127 | case CRYPTO_SHA1: |
1128 | case CRYPTO_AES_XCBC_MAC_96: | | 1128 | case CRYPTO_AES_XCBC_MAC_96: |
1129 | case CRYPTO_AES_128_GMAC: | | 1129 | case CRYPTO_AES_128_GMAC: |
1130 | case CRYPTO_AES_192_GMAC: | | 1130 | case CRYPTO_AES_192_GMAC: |
1131 | case CRYPTO_AES_256_GMAC: | | 1131 | case CRYPTO_AES_256_GMAC: |
1132 | axf = swd->sw_axf; | | 1132 | axf = swd->sw_axf; |
1133 | | | 1133 | |
1134 | if (swd->sw_ictx) { | | 1134 | if (swd->sw_ictx) { |
1135 | explicit_memset(swd->sw_ictx, 0, axf->ctxsize); | | 1135 | explicit_memset(swd->sw_ictx, 0, axf->ctxsize); |
1136 | free(swd->sw_ictx, M_CRYPTO_DATA); | | 1136 | free(swd->sw_ictx, M_CRYPTO_DATA); |
1137 | } | | 1137 | } |
1138 | break; | | 1138 | break; |
1139 | | | 1139 | |
1140 | case CRYPTO_DEFLATE_COMP: | | 1140 | case CRYPTO_DEFLATE_COMP: |
1141 | case CRYPTO_DEFLATE_COMP_NOGROW: | | 1141 | case CRYPTO_DEFLATE_COMP_NOGROW: |
1142 | case CRYPTO_GZIP_COMP: | | 1142 | case CRYPTO_GZIP_COMP: |
1143 | break; | | 1143 | break; |
1144 | } | | 1144 | } |
1145 | | | 1145 | |
1146 | free(swd, M_CRYPTO_DATA); | | 1146 | free(swd, M_CRYPTO_DATA); |
1147 | } | | 1147 | } |
1148 | return 0; | | 1148 | return 0; |
1149 | } | | 1149 | } |
1150 | | | 1150 | |
1151 | /* | | 1151 | /* |
1152 | * Process a software request. | | 1152 | * Process a software request. |
1153 | */ | | 1153 | */ |
1154 | static int | | 1154 | static int |
1155 | swcr_process(void *arg, struct cryptop *crp, int hint) | | 1155 | swcr_process(void *arg, struct cryptop *crp, int hint) |
1156 | { | | 1156 | { |
1157 | struct cryptodesc *crd; | | 1157 | struct cryptodesc *crd; |
1158 | struct swcr_data *sw; | | 1158 | struct swcr_data *sw; |
1159 | u_int32_t lid; | | 1159 | u_int32_t lid; |
1160 | int type; | | 1160 | int type; |
1161 | | | 1161 | |
1162 | /* Sanity check */ | | 1162 | /* Sanity check */ |
1163 | if (crp == NULL) | | 1163 | if (crp == NULL) |
1164 | return EINVAL; | | 1164 | return EINVAL; |
1165 | | | 1165 | |
1166 | if (crp->crp_desc == NULL || crp->crp_buf == NULL) { | | 1166 | if (crp->crp_desc == NULL || crp->crp_buf == NULL) { |
1167 | crp->crp_etype = EINVAL; | | 1167 | crp->crp_etype = EINVAL; |
1168 | goto done; | | 1168 | goto done; |
1169 | } | | 1169 | } |
1170 | | | 1170 | |
1171 | lid = crp->crp_sid & 0xffffffff; | | 1171 | lid = crp->crp_sid & 0xffffffff; |
1172 | if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) { | | 1172 | if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) { |
1173 | crp->crp_etype = ENOENT; | | 1173 | crp->crp_etype = ENOENT; |
1174 | goto done; | | 1174 | goto done; |
1175 | } | | 1175 | } |
1176 | | | 1176 | |
1177 | if (crp->crp_flags & CRYPTO_F_IMBUF) { | | 1177 | if (crp->crp_flags & CRYPTO_F_IMBUF) { |
1178 | type = CRYPTO_BUF_MBUF; | | 1178 | type = CRYPTO_BUF_MBUF; |
1179 | } else if (crp->crp_flags & CRYPTO_F_IOV) { | | 1179 | } else if (crp->crp_flags & CRYPTO_F_IOV) { |
1180 | type = CRYPTO_BUF_IOV; | | 1180 | type = CRYPTO_BUF_IOV; |
1181 | } else { | | 1181 | } else { |
1182 | type = CRYPTO_BUF_CONTIG; | | 1182 | type = CRYPTO_BUF_CONTIG; |
1183 | } | | 1183 | } |
1184 | | | 1184 | |
1185 | /* Go through crypto descriptors, processing as we go */ | | 1185 | /* Go through crypto descriptors, processing as we go */ |
1186 | for (crd = crp->crp_desc; crd; crd = crd->crd_next) { | | 1186 | for (crd = crp->crp_desc; crd; crd = crd->crd_next) { |
1187 | /* | | 1187 | /* |
1188 | * Find the crypto context. | | 1188 | * Find the crypto context. |
1189 | * | | 1189 | * |
1190 | * XXX Note that the logic here prevents us from having | | 1190 | * XXX Note that the logic here prevents us from having |
1191 | * XXX the same algorithm multiple times in a session | | 1191 | * XXX the same algorithm multiple times in a session |
1192 | * XXX (or rather, we can but it won't give us the right | | 1192 | * XXX (or rather, we can but it won't give us the right |
1193 | * XXX results). To do that, we'd need some way of differentiating | | 1193 | * XXX results). To do that, we'd need some way of differentiating |
1194 | * XXX between the various instances of an algorithm (so we can | | 1194 | * XXX between the various instances of an algorithm (so we can |
1195 | * XXX locate the correct crypto context). | | 1195 | * XXX locate the correct crypto context). |
1196 | */ | | 1196 | */ |
1197 | for (sw = swcr_sessions[lid]; | | 1197 | for (sw = swcr_sessions[lid]; |
1198 | sw && sw->sw_alg != crd->crd_alg; | | 1198 | sw && sw->sw_alg != crd->crd_alg; |
1199 | sw = sw->sw_next) | | 1199 | sw = sw->sw_next) |
1200 | ; | | 1200 | ; |
1201 | | | 1201 | |
1202 | /* No such context ? */ | | 1202 | /* No such context ? */ |
1203 | if (sw == NULL) { | | 1203 | if (sw == NULL) { |
1204 | crp->crp_etype = EINVAL; | | 1204 | crp->crp_etype = EINVAL; |
1205 | goto done; | | 1205 | goto done; |
1206 | } | | 1206 | } |
1207 | | | 1207 | |
1208 | switch (sw->sw_alg) { | | 1208 | switch (sw->sw_alg) { |
1209 | case CRYPTO_DES_CBC: | | 1209 | case CRYPTO_DES_CBC: |
1210 | case CRYPTO_3DES_CBC: | | 1210 | case CRYPTO_3DES_CBC: |
1211 | case CRYPTO_BLF_CBC: | | 1211 | case CRYPTO_BLF_CBC: |
1212 | case CRYPTO_CAST_CBC: | | 1212 | case CRYPTO_CAST_CBC: |
1213 | case CRYPTO_SKIPJACK_CBC: | | 1213 | case CRYPTO_SKIPJACK_CBC: |
1214 | case CRYPTO_RIJNDAEL128_CBC: | | 1214 | case CRYPTO_RIJNDAEL128_CBC: |
1215 | case CRYPTO_CAMELLIA_CBC: | | 1215 | case CRYPTO_CAMELLIA_CBC: |
1216 | case CRYPTO_AES_CTR: | | 1216 | case CRYPTO_AES_CTR: |
1217 | if ((crp->crp_etype = swcr_encdec(crd, sw, | | 1217 | if ((crp->crp_etype = swcr_encdec(crd, sw, |
1218 | crp->crp_buf, type)) != 0) | | 1218 | crp->crp_buf, type)) != 0) |
1219 | goto done; | | 1219 | goto done; |
1220 | break; | | 1220 | break; |
1221 | case CRYPTO_NULL_CBC: | | 1221 | case CRYPTO_NULL_CBC: |
1222 | crp->crp_etype = 0; | | 1222 | crp->crp_etype = 0; |
1223 | break; | | 1223 | break; |
1224 | case CRYPTO_MD5_HMAC: | | 1224 | case CRYPTO_MD5_HMAC: |
1225 | case CRYPTO_MD5_HMAC_96: | | 1225 | case CRYPTO_MD5_HMAC_96: |
1226 | case CRYPTO_SHA1_HMAC: | | 1226 | case CRYPTO_SHA1_HMAC: |
1227 | case CRYPTO_SHA1_HMAC_96: | | 1227 | case CRYPTO_SHA1_HMAC_96: |
1228 | case CRYPTO_SHA2_256_HMAC: | | 1228 | case CRYPTO_SHA2_256_HMAC: |
1229 | case CRYPTO_SHA2_384_HMAC: | | 1229 | case CRYPTO_SHA2_384_HMAC: |
1230 | case CRYPTO_SHA2_512_HMAC: | | 1230 | case CRYPTO_SHA2_512_HMAC: |
1231 | case CRYPTO_RIPEMD160_HMAC: | | 1231 | case CRYPTO_RIPEMD160_HMAC: |
1232 | case CRYPTO_RIPEMD160_HMAC_96: | | 1232 | case CRYPTO_RIPEMD160_HMAC_96: |
1233 | case CRYPTO_NULL_HMAC: | | 1233 | case CRYPTO_NULL_HMAC: |
1234 | case CRYPTO_MD5_KPDK: | | 1234 | case CRYPTO_MD5_KPDK: |
1235 | case CRYPTO_SHA1_KPDK: | | 1235 | case CRYPTO_SHA1_KPDK: |
1236 | case CRYPTO_MD5: | | 1236 | case CRYPTO_MD5: |
1237 | case CRYPTO_SHA1: | | 1237 | case CRYPTO_SHA1: |
1238 | case CRYPTO_AES_XCBC_MAC_96: | | 1238 | case CRYPTO_AES_XCBC_MAC_96: |
1239 | if ((crp->crp_etype = swcr_authcompute(crp, crd, sw, | | 1239 | if ((crp->crp_etype = swcr_authcompute(crp, crd, sw, |
1240 | crp->crp_buf, type)) != 0) | | 1240 | crp->crp_buf, type)) != 0) |
1241 | goto done; | | 1241 | goto done; |
1242 | break; | | 1242 | break; |
1243 | | | 1243 | |
1244 | case CRYPTO_AES_GCM_16: | | 1244 | case CRYPTO_AES_GCM_16: |
1245 | case CRYPTO_AES_GMAC: | | 1245 | case CRYPTO_AES_GMAC: |
1246 | case CRYPTO_AES_128_GMAC: | | 1246 | case CRYPTO_AES_128_GMAC: |
1247 | case CRYPTO_AES_192_GMAC: | | 1247 | case CRYPTO_AES_192_GMAC: |
1248 | case CRYPTO_AES_256_GMAC: | | 1248 | case CRYPTO_AES_256_GMAC: |
1249 | crp->crp_etype = swcr_combined(crp, type); | | 1249 | crp->crp_etype = swcr_combined(crp, type); |
1250 | goto done; | | 1250 | goto done; |
1251 | | | 1251 | |
1252 | case CRYPTO_DEFLATE_COMP: | | 1252 | case CRYPTO_DEFLATE_COMP: |
1253 | case CRYPTO_DEFLATE_COMP_NOGROW: | | 1253 | case CRYPTO_DEFLATE_COMP_NOGROW: |
1254 | case CRYPTO_GZIP_COMP: | | 1254 | case CRYPTO_GZIP_COMP: |
1255 | DPRINTF("compdec for %d\n", sw->sw_alg); | | 1255 | DPRINTF("compdec for %d\n", sw->sw_alg); |
1256 | if ((crp->crp_etype = swcr_compdec(crd, sw, | | 1256 | if ((crp->crp_etype = swcr_compdec(crd, sw, |
1257 | crp->crp_buf, type, &crp->crp_olen)) != 0) | | 1257 | crp->crp_buf, type, &crp->crp_olen)) != 0) |
1258 | goto done; | | 1258 | goto done; |
1259 | break; | | 1259 | break; |
1260 | | | 1260 | |
1261 | default: | | 1261 | default: |
1262 | /* Unknown/unsupported algorithm */ | | 1262 | /* Unknown/unsupported algorithm */ |
1263 | crp->crp_etype = EINVAL; | | 1263 | crp->crp_etype = EINVAL; |
1264 | goto done; | | 1264 | goto done; |
1265 | } | | 1265 | } |
1266 | } | | 1266 | } |
1267 | | | 1267 | |
1268 | done: | | 1268 | done: |
1269 | DPRINTF("request %p done\n", crp); | | 1269 | DPRINTF("request %p done\n", crp); |
1270 | crypto_done(crp); | | 1270 | crypto_done(crp); |
1271 | return 0; | | 1271 | return 0; |
1272 | } | | 1272 | } |
1273 | | | 1273 | |
1274 | static void | | 1274 | static void |
1275 | swcr_init(void) | | 1275 | swcr_init(void) |
1276 | { | | 1276 | { |
1277 | swcr_id = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE); | | 1277 | swcr_id = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE); |
1278 | if (swcr_id < 0) { | | 1278 | if (swcr_id < 0) { |
1279 | /* This should never happen */ | | 1279 | /* This should never happen */ |
1280 | panic("Software crypto device cannot initialize!"); | | 1280 | panic("Software crypto device cannot initialize!"); |
1281 | } | | 1281 | } |
1282 | | | 1282 | |
1283 | crypto_register(swcr_id, CRYPTO_DES_CBC, | | 1283 | crypto_register(swcr_id, CRYPTO_DES_CBC, |
1284 | 0, 0, swcr_newsession, swcr_freesession, swcr_process, NULL); | | 1284 | 0, 0, swcr_newsession, swcr_freesession, swcr_process, NULL); |
1285 | #define REGISTER(alg) \ | | 1285 | #define REGISTER(alg) \ |
1286 | crypto_register(swcr_id, alg, 0, 0, NULL, NULL, NULL, NULL) | | 1286 | crypto_register(swcr_id, alg, 0, 0, NULL, NULL, NULL, NULL) |
1287 | | | 1287 | |
1288 | REGISTER(CRYPTO_3DES_CBC); | | 1288 | REGISTER(CRYPTO_3DES_CBC); |
1289 | REGISTER(CRYPTO_BLF_CBC); | | 1289 | REGISTER(CRYPTO_BLF_CBC); |
1290 | REGISTER(CRYPTO_CAST_CBC); | | 1290 | REGISTER(CRYPTO_CAST_CBC); |
1291 | REGISTER(CRYPTO_SKIPJACK_CBC); | | 1291 | REGISTER(CRYPTO_SKIPJACK_CBC); |
1292 | REGISTER(CRYPTO_CAMELLIA_CBC); | | 1292 | REGISTER(CRYPTO_CAMELLIA_CBC); |
1293 | REGISTER(CRYPTO_AES_CTR); | | 1293 | REGISTER(CRYPTO_AES_CTR); |
1294 | REGISTER(CRYPTO_AES_GCM_16); | | 1294 | REGISTER(CRYPTO_AES_GCM_16); |
1295 | REGISTER(CRYPTO_AES_GMAC); | | 1295 | REGISTER(CRYPTO_AES_GMAC); |
1296 | REGISTER(CRYPTO_NULL_CBC); | | 1296 | REGISTER(CRYPTO_NULL_CBC); |
1297 | REGISTER(CRYPTO_MD5_HMAC); | | 1297 | REGISTER(CRYPTO_MD5_HMAC); |
1298 | REGISTER(CRYPTO_MD5_HMAC_96); | | 1298 | REGISTER(CRYPTO_MD5_HMAC_96); |
1299 | REGISTER(CRYPTO_SHA1_HMAC); | | 1299 | REGISTER(CRYPTO_SHA1_HMAC); |
1300 | REGISTER(CRYPTO_SHA1_HMAC_96); | | 1300 | REGISTER(CRYPTO_SHA1_HMAC_96); |
1301 | REGISTER(CRYPTO_SHA2_256_HMAC); | | 1301 | REGISTER(CRYPTO_SHA2_256_HMAC); |
1302 | REGISTER(CRYPTO_SHA2_384_HMAC); | | 1302 | REGISTER(CRYPTO_SHA2_384_HMAC); |
1303 | REGISTER(CRYPTO_SHA2_512_HMAC); | | 1303 | REGISTER(CRYPTO_SHA2_512_HMAC); |
1304 | REGISTER(CRYPTO_RIPEMD160_HMAC); | | 1304 | REGISTER(CRYPTO_RIPEMD160_HMAC); |
1305 | REGISTER(CRYPTO_RIPEMD160_HMAC_96); | | 1305 | REGISTER(CRYPTO_RIPEMD160_HMAC_96); |
1306 | REGISTER(CRYPTO_NULL_HMAC); | | 1306 | REGISTER(CRYPTO_NULL_HMAC); |
1307 | REGISTER(CRYPTO_MD5_KPDK); | | 1307 | REGISTER(CRYPTO_MD5_KPDK); |
1308 | REGISTER(CRYPTO_SHA1_KPDK); | | 1308 | REGISTER(CRYPTO_SHA1_KPDK); |
1309 | REGISTER(CRYPTO_MD5); | | 1309 | REGISTER(CRYPTO_MD5); |
1310 | REGISTER(CRYPTO_SHA1); | | 1310 | REGISTER(CRYPTO_SHA1); |
1311 | REGISTER(CRYPTO_AES_XCBC_MAC_96); | | 1311 | REGISTER(CRYPTO_AES_XCBC_MAC_96); |
1312 | REGISTER(CRYPTO_AES_128_GMAC); | | 1312 | REGISTER(CRYPTO_AES_128_GMAC); |
1313 | REGISTER(CRYPTO_AES_192_GMAC); | | 1313 | REGISTER(CRYPTO_AES_192_GMAC); |
1314 | REGISTER(CRYPTO_AES_256_GMAC); | | 1314 | REGISTER(CRYPTO_AES_256_GMAC); |
1315 | REGISTER(CRYPTO_RIJNDAEL128_CBC); | | 1315 | REGISTER(CRYPTO_RIJNDAEL128_CBC); |
1316 | REGISTER(CRYPTO_DEFLATE_COMP); | | 1316 | REGISTER(CRYPTO_DEFLATE_COMP); |
1317 | REGISTER(CRYPTO_DEFLATE_COMP_NOGROW); | | 1317 | REGISTER(CRYPTO_DEFLATE_COMP_NOGROW); |
1318 | REGISTER(CRYPTO_GZIP_COMP); | | 1318 | REGISTER(CRYPTO_GZIP_COMP); |
1319 | #undef REGISTER | | 1319 | #undef REGISTER |
1320 | } | | 1320 | } |
1321 | | | 1321 | |
1322 | | | 1322 | |
/*
 * Pseudo-device init routine for software crypto.
 */

void
swcryptoattach(int num)
{
	/* "num" is unused: only a single software driver instance exists. */

	/*
	 * swcrypto_attach() must run only after all CPUs have attached,
	 * because it reaches softint_establish() through the call path
	 *	swcr_init() => crypto_get_driverid() => crypto_init()
	 *	    => crypto_init0()
	 * If softint_establish() were called before the CPUs attach
	 * (i.e. while ncpu == 0), the soft interrupt handler would be
	 * established on CPU#0 only.
	 *
	 * Therefore, when built as a builtin module, swcrypto_attach()
	 * must be invoked from config_finalize(), not
	 * module_init_class().
	 */
	swcryptoattach_internal();
}
1343 | | | 1343 | |
1344 | void swcrypto_attach(device_t, device_t, void *); | | 1344 | void swcrypto_attach(device_t, device_t, void *); |
1345 | | | 1345 | |
1346 | void | | 1346 | void |
1347 | swcrypto_attach(device_t parent, device_t self, void *opaque) | | 1347 | swcrypto_attach(device_t parent, device_t self, void *opaque) |
1348 | { | | 1348 | { |
1349 | | | 1349 | |
1350 | swcr_init(); | | 1350 | swcr_init(); |
1351 | | | 1351 | |
1352 | if (!pmf_device_register(self, NULL, NULL)) | | 1352 | if (!pmf_device_register(self, NULL, NULL)) |
1353 | aprint_error_dev(self, "couldn't establish power handler\n"); | | 1353 | aprint_error_dev(self, "couldn't establish power handler\n"); |
1354 | } | | 1354 | } |
1355 | | | 1355 | |
1356 | int swcrypto_detach(device_t, int); | | 1356 | int swcrypto_detach(device_t, int); |
1357 | | | 1357 | |
1358 | int | | 1358 | int |
1359 | swcrypto_detach(device_t self, int flag) | | 1359 | swcrypto_detach(device_t self, int flag) |
1360 | { | | 1360 | { |
1361 | pmf_device_deregister(self); | | 1361 | pmf_device_deregister(self); |
1362 | if (swcr_id >= 0) | | 1362 | if (swcr_id >= 0) |
1363 | crypto_unregister_all(swcr_id); | | 1363 | crypto_unregister_all(swcr_id); |
1364 | return 0; | | 1364 | return 0; |
1365 | } | | 1365 | } |
1366 | | | 1366 | |
int swcrypto_match(device_t, cfdata_t, void *);

int
swcrypto_match(device_t parent, cfdata_t data, void *opaque)
{

	/* The software pseudo-device always matches. */
	return 1;
}
1375 | | | 1375 | |
/*
 * Module glue: swcrypto depends on the opencrypto framework and on
 * every transform library it may dispatch to.
 */
MODULE(MODULE_CLASS_DRIVER, swcrypto,
	"opencrypto,zlib,blowfish,des,cast128,camellia,skipjack");

CFDRIVER_DECL(swcrypto, DV_DULL, NULL);

CFATTACH_DECL2_NEW(swcrypto, 0, swcrypto_match, swcrypto_attach,
	swcrypto_detach, NULL, NULL, NULL);

/* Wildcard locators: the pseudo-device attaches at no particular bus. */
static int swcryptoloc[] = { -1, -1 };

/* cfdata for config_attach_pseudo(); terminated by an empty entry. */
static struct cfdata swcrypto_cfdata[] = {
	{
		.cf_name = "swcrypto",
		.cf_atname = "swcrypto",
		.cf_unit = 0,
		.cf_fstate = 0,
		.cf_loc = swcryptoloc,
		.cf_flags = 0,
		.cf_pspec = NULL,
	},
	{ NULL, NULL, 0, 0, NULL, 0, NULL }
};
1398 | | | 1398 | |
1399 | /* | | 1399 | /* |
1400 | * Internal attach routine. | | 1400 | * Internal attach routine. |
1401 | * Don't call before attached cpus. | | 1401 | * Don't call before attached cpus. |
1402 | */ | | 1402 | */ |
1403 | static int | | 1403 | static int |
1404 | swcryptoattach_internal(void) | | 1404 | swcryptoattach_internal(void) |
1405 | { | | 1405 | { |
1406 | int error; | | 1406 | int error; |
1407 | | | 1407 | |
1408 | error = config_cfdriver_attach(&swcrypto_cd); | | 1408 | error = config_cfdriver_attach(&swcrypto_cd); |
1409 | if (error) { | | 1409 | if (error) { |
1410 | return error; | | 1410 | return error; |
1411 | } | | 1411 | } |
1412 | | | 1412 | |
1413 | error = config_cfattach_attach(swcrypto_cd.cd_name, &swcrypto_ca); | | 1413 | error = config_cfattach_attach(swcrypto_cd.cd_name, &swcrypto_ca); |
1414 | if (error) { | | 1414 | if (error) { |
1415 | config_cfdriver_detach(&swcrypto_cd); | | 1415 | config_cfdriver_detach(&swcrypto_cd); |
1416 | aprint_error("%s: unable to register cfattach\n", | | 1416 | aprint_error("%s: unable to register cfattach\n", |
1417 | swcrypto_cd.cd_name); | | 1417 | swcrypto_cd.cd_name); |
1418 | | | 1418 | |
1419 | return error; | | 1419 | return error; |
1420 | } | | 1420 | } |
1421 | | | 1421 | |
1422 | error = config_cfdata_attach(swcrypto_cfdata, 1); | | 1422 | error = config_cfdata_attach(swcrypto_cfdata, 1); |
1423 | if (error) { | | 1423 | if (error) { |
1424 | config_cfattach_detach(swcrypto_cd.cd_name, | | 1424 | config_cfattach_detach(swcrypto_cd.cd_name, |
1425 | &swcrypto_ca); | | 1425 | &swcrypto_ca); |
1426 | config_cfdriver_detach(&swcrypto_cd); | | 1426 | config_cfdriver_detach(&swcrypto_cd); |
1427 | aprint_error("%s: unable to register cfdata\n", | | 1427 | aprint_error("%s: unable to register cfdata\n", |
1428 | swcrypto_cd.cd_name); | | 1428 | swcrypto_cd.cd_name); |
1429 | | | 1429 | |
1430 | return error; | | 1430 | return error; |
1431 | } | | 1431 | } |
1432 | | | 1432 | |
1433 | (void)config_attach_pseudo(swcrypto_cfdata); | | 1433 | (void)config_attach_pseudo(swcrypto_cfdata); |
1434 | | | 1434 | |
1435 | return 0; | | 1435 | return 0; |
1436 | } | | 1436 | } |
1437 | | | 1437 | |
1438 | static int | | 1438 | static int |
1439 | swcrypto_modcmd(modcmd_t cmd, void *arg) | | 1439 | swcrypto_modcmd(modcmd_t cmd, void *arg) |
1440 | { | | 1440 | { |
1441 | int error = 0; | | 1441 | int error = 0; |
1442 | | | 1442 | |
1443 | switch (cmd) { | | 1443 | switch (cmd) { |
1444 | case MODULE_CMD_INIT: | | 1444 | case MODULE_CMD_INIT: |
1445 | #ifdef _MODULE | | 1445 | #ifdef _MODULE |
1446 | error = swcryptoattach_internal(); | | 1446 | error = swcryptoattach_internal(); |
1447 | #endif | | 1447 | #endif |
1448 | return error; | | 1448 | return error; |
1449 | case MODULE_CMD_FINI: | | 1449 | case MODULE_CMD_FINI: |
1450 | #if 1 | | 1450 | #if 1 |
1451 | // XXX: Need to keep track if we are in use. | | 1451 | // XXX: Need to keep track if we are in use. |
1452 | return ENOTTY; | | 1452 | return ENOTTY; |
1453 | #else | | 1453 | #else |
1454 | error = config_cfdata_detach(swcrypto_cfdata); | | 1454 | error = config_cfdata_detach(swcrypto_cfdata); |
1455 | if (error) { | | 1455 | if (error) { |
1456 | return error; | | 1456 | return error; |
1457 | } | | 1457 | } |
1458 | | | 1458 | |
1459 | config_cfattach_detach(swcrypto_cd.cd_name, &swcrypto_ca); | | 1459 | config_cfattach_detach(swcrypto_cd.cd_name, &swcrypto_ca); |
1460 | config_cfdriver_detach(&swcrypto_cd); | | 1460 | config_cfdriver_detach(&swcrypto_cd); |
1461 | | | 1461 | |
1462 | return 0; | | 1462 | return 0; |
1463 | #endif | | 1463 | #endif |
1464 | default: | | 1464 | default: |
1465 | return ENOTTY; | | 1465 | return ENOTTY; |
1466 | } | | 1466 | } |
1467 | } | | 1467 | } |