Tue Jan 17 03:06:33 2012 UTC ()
leading whitespace too!


(jakllsch)
diff -r1.18 -r1.19 src/sys/arch/x86/x86/via_padlock.c

cvs diff -r1.18 -r1.19 src/sys/arch/x86/x86/via_padlock.c (switch to unified diff)

--- src/sys/arch/x86/x86/via_padlock.c 2012/01/17 03:01:39 1.18
+++ src/sys/arch/x86/x86/via_padlock.c 2012/01/17 03:06:33 1.19
@@ -1,685 +1,685 @@ @@ -1,685 +1,685 @@
1/* $OpenBSD: via.c,v 1.8 2006/11/17 07:47:56 tom Exp $ */ 1/* $OpenBSD: via.c,v 1.8 2006/11/17 07:47:56 tom Exp $ */
2/* $NetBSD: via_padlock.c,v 1.18 2012/01/17 03:01:39 jakllsch Exp $ */ 2/* $NetBSD: via_padlock.c,v 1.19 2012/01/17 03:06:33 jakllsch Exp $ */
3 3
4/*- 4/*-
5 * Copyright (c) 2003 Jason Wright 5 * Copyright (c) 2003 Jason Wright
6 * Copyright (c) 2003, 2004 Theo de Raadt 6 * Copyright (c) 2003, 2004 Theo de Raadt
7 * All rights reserved. 7 * All rights reserved.
8 * 8 *
9 * Permission to use, copy, modify, and distribute this software for any 9 * Permission to use, copy, modify, and distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above 10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies. 11 * copyright notice and this permission notice appear in all copies.
12 * 12 *
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 */ 20 */
21 21
22#include <sys/cdefs.h> 22#include <sys/cdefs.h>
23__KERNEL_RCSID(0, "$NetBSD: via_padlock.c,v 1.18 2012/01/17 03:01:39 jakllsch Exp $"); 23__KERNEL_RCSID(0, "$NetBSD: via_padlock.c,v 1.19 2012/01/17 03:06:33 jakllsch Exp $");
24 24
25#ifdef _KERNEL_OPT 25#ifdef _KERNEL_OPT
26# include "rnd.h" 26# include "rnd.h"
27# if NRND == 0 27# if NRND == 0
28# error padlock requires rnd pseudo-devices 28# error padlock requires rnd pseudo-devices
29# endif 29# endif
30#endif 30#endif
31 31
32#include <sys/param.h> 32#include <sys/param.h>
33#include <sys/systm.h> 33#include <sys/systm.h>
34#include <sys/signalvar.h> 34#include <sys/signalvar.h>
35#include <sys/kernel.h> 35#include <sys/kernel.h>
36#include <sys/device.h> 36#include <sys/device.h>
37#include <sys/module.h> 37#include <sys/module.h>
38#include <sys/rnd.h> 38#include <sys/rnd.h>
39#include <sys/malloc.h> 39#include <sys/malloc.h>
40#include <sys/mbuf.h> 40#include <sys/mbuf.h>
41#include <sys/cpu.h> 41#include <sys/cpu.h>
42#include <sys/rnd.h> 42#include <sys/rnd.h>
43#include <sys/cprng.h> 43#include <sys/cprng.h>
44 44
45#include <x86/specialreg.h> 45#include <x86/specialreg.h>
46 46
47#include <machine/cpufunc.h> 47#include <machine/cpufunc.h>
48#include <machine/cpuvar.h> 48#include <machine/cpuvar.h>
49 49
50#include <opencrypto/cryptodev.h> 50#include <opencrypto/cryptodev.h>
51#include <opencrypto/cryptosoft.h> 51#include <opencrypto/cryptosoft.h>
52#include <opencrypto/xform.h> 52#include <opencrypto/xform.h>
53#include <crypto/rijndael/rijndael.h> 53#include <crypto/rijndael/rijndael.h>
54 54
55#include <opencrypto/cryptosoft_xform.c> 55#include <opencrypto/cryptosoft_xform.c>
56 56
57#include <x86/via_padlock.h> 57#include <x86/via_padlock.h>
58 58
59static int via_padlock_match(device_t, cfdata_t, void *); 59static int via_padlock_match(device_t, cfdata_t, void *);
60static void via_padlock_attach(device_t, device_t, void *); 60static void via_padlock_attach(device_t, device_t, void *);
61static int via_padlock_detach(device_t, int); 61static int via_padlock_detach(device_t, int);
62static void via_padlock_attach_intr(device_t); 62static void via_padlock_attach_intr(device_t);
63 63
64CFATTACH_DECL_NEW( 64CFATTACH_DECL_NEW(
65 padlock, 65 padlock,
66 sizeof(struct via_padlock_softc), 66 sizeof(struct via_padlock_softc),
67 via_padlock_match, 67 via_padlock_match,
68 via_padlock_attach, 68 via_padlock_attach,
69 via_padlock_detach, 69 via_padlock_detach,
70 NULL 70 NULL
71); 71);
72 72
73int via_padlock_crypto_newsession(void *, uint32_t *, struct cryptoini *); 73int via_padlock_crypto_newsession(void *, uint32_t *, struct cryptoini *);
74int via_padlock_crypto_process(void *, struct cryptop *, int); 74int via_padlock_crypto_process(void *, struct cryptop *, int);
75int via_padlock_crypto_swauth(struct cryptop *, struct cryptodesc *, 75int via_padlock_crypto_swauth(struct cryptop *, struct cryptodesc *,
76 struct swcr_data *, void *); 76 struct swcr_data *, void *);
77int via_padlock_crypto_encdec(struct cryptop *, struct cryptodesc *, 77int via_padlock_crypto_encdec(struct cryptop *, struct cryptodesc *,
78 struct via_padlock_session *, struct via_padlock_softc *, void *); 78 struct via_padlock_session *, struct via_padlock_softc *, void *);
79int via_padlock_crypto_freesession(void *, uint64_t); 79int via_padlock_crypto_freesession(void *, uint64_t);
80static __inline void via_padlock_cbc(void *, void *, void *, void *, int, 80static __inline void via_padlock_cbc(void *, void *, void *, void *, int,
81 void *); 81 void *);
82 82
83static void 83static void
84via_c3_rnd(void *arg) 84via_c3_rnd(void *arg)
85{ 85{
86 struct via_padlock_softc *sc = arg; 86 struct via_padlock_softc *sc = arg;
87 87
88 unsigned int rv, creg0, len = VIAC3_RNG_BUFSIZ; 88 unsigned int rv, creg0, len = VIAC3_RNG_BUFSIZ;
89 static uint32_t buffer[VIAC3_RNG_BUFSIZ + 2]; /* XXX 2? */ 89 static uint32_t buffer[VIAC3_RNG_BUFSIZ + 2]; /* XXX 2? */
90 90
91 /* 91 /*
92 * Sadly, we have to monkey with the coprocessor enable and fault 92 * Sadly, we have to monkey with the coprocessor enable and fault
93 * registers, which are really for the FPU, in order to read 93 * registers, which are really for the FPU, in order to read
94 * from the RNG. 94 * from the RNG.
95 * 95 *
96 * Don't remove CR0_TS from the call below -- comments in the Linux 96 * Don't remove CR0_TS from the call below -- comments in the Linux
97 * driver indicate that the xstorerng instruction can generate 97 * driver indicate that the xstorerng instruction can generate
98 * spurious DNA faults though no FPU or SIMD state is changed 98 * spurious DNA faults though no FPU or SIMD state is changed
99 * even if such a fault is generated. 99 * even if such a fault is generated.
100 * 100 *
101 */ 101 */
102 kpreempt_disable(); 102 kpreempt_disable();
103 x86_disable_intr(); 103 x86_disable_intr();
104 creg0 = rcr0(); 104 creg0 = rcr0();
105 lcr0(creg0 & ~(CR0_EM|CR0_TS)); /* Permit access to SIMD/FPU path */ 105 lcr0(creg0 & ~(CR0_EM|CR0_TS)); /* Permit access to SIMD/FPU path */
106 /* 106 /*
107 * Collect the random data from the C3 RNG into our buffer. 107 * Collect the random data from the C3 RNG into our buffer.
108 * We turn on maximum whitening (is this actually desirable 108 * We turn on maximum whitening (is this actually desirable
109 * if we will feed the data to SHA1?) (%edx[0,1] = "11"). 109 * if we will feed the data to SHA1?) (%edx[0,1] = "11").
110 */ 110 */
111 __asm __volatile("rep xstorerng" 111 __asm __volatile("rep xstorerng"
112 : "=a" (rv) : "d" (3), "D" (buffer), 112 : "=a" (rv) : "d" (3), "D" (buffer),
113 "c" (len * sizeof(int)) : "memory", "cc"); 113 "c" (len * sizeof(int)) : "memory", "cc");
114 /* Put CR0 back how it was */ 114 /* Put CR0 back how it was */
115 lcr0(creg0); 115 lcr0(creg0);
116 x86_enable_intr(); 116 x86_enable_intr();
117 kpreempt_enable(); 117 kpreempt_enable();
118 rnd_add_data(&sc->sc_rnd_source, buffer, len * sizeof(int), 118 rnd_add_data(&sc->sc_rnd_source, buffer, len * sizeof(int),
119 len * sizeof(int)); 119 len * sizeof(int));
120 callout_reset(&sc->sc_rnd_co, sc->sc_rnd_hz, via_c3_rnd, sc); 120 callout_reset(&sc->sc_rnd_co, sc->sc_rnd_hz, via_c3_rnd, sc);
121} 121}
122 122
123static void 123static void
124via_c3_rnd_init(struct via_padlock_softc *sc) 124via_c3_rnd_init(struct via_padlock_softc *sc)
125{ 125{
126 sc->sc_rnd_attached = true; 126 sc->sc_rnd_attached = true;
127 127
128 if (hz >= 100) { 128 if (hz >= 100) {
129 sc->sc_rnd_hz = 10 * hz / 100; 129 sc->sc_rnd_hz = 10 * hz / 100;
130 } else { 130 } else {
131 sc->sc_rnd_hz = 10; 131 sc->sc_rnd_hz = 10;
132 } 132 }
133 /* See hifn7751.c re use of RND_FLAG_NO_ESTIMATE */ 133 /* See hifn7751.c re use of RND_FLAG_NO_ESTIMATE */
134 rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev), 134 rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
135 RND_TYPE_RNG, RND_FLAG_NO_ESTIMATE); 135 RND_TYPE_RNG, RND_FLAG_NO_ESTIMATE);
136 callout_init(&sc->sc_rnd_co, 0); 136 callout_init(&sc->sc_rnd_co, 0);
137 /* Call once to prime the pool early and set callout. */ 137 /* Call once to prime the pool early and set callout. */
138 via_c3_rnd(sc); 138 via_c3_rnd(sc);
139} 139}
140 140
141static void 141static void
142via_c3_ace_init(struct via_padlock_softc *sc) 142via_c3_ace_init(struct via_padlock_softc *sc)
143{ 143{
144 /* 144 /*
145 * There is no reason to call into the kernel to use this 145 * There is no reason to call into the kernel to use this
146 * driver from userspace, because the crypto instructions can 146 * driver from userspace, because the crypto instructions can
147 * be directly accessed there. Setting CRYPTOCAP_F_SOFTWARE 147 * be directly accessed there. Setting CRYPTOCAP_F_SOFTWARE
148 * has approximately the right semantics though the name is 148 * has approximately the right semantics though the name is
149 * confusing (however, consider that crypto via unprivileged 149 * confusing (however, consider that crypto via unprivileged
150 * instructions _is_ "just software" in some sense). 150 * instructions _is_ "just software" in some sense).
151 */ 151 */
152 sc->sc_cid = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE); 152 sc->sc_cid = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE);
153 if (sc->sc_cid < 0) { 153 if (sc->sc_cid < 0) {
154 aprint_error_dev(sc->sc_dev, 154 aprint_error_dev(sc->sc_dev,
155 "could not get a crypto driver ID\n"); 155 "could not get a crypto driver ID\n");
156 return; 156 return;
157 } 157 }
158 158
159 sc->sc_cid_attached = true; 159 sc->sc_cid_attached = true;
160 160
161 /* 161 /*
162 * Ask the opencrypto subsystem to register ourselves. Although 162 * Ask the opencrypto subsystem to register ourselves. Although
163 * we don't support hardware offloading for various HMAC algorithms, 163 * we don't support hardware offloading for various HMAC algorithms,
164 * we will handle them, because opencrypto prefers drivers that 164 * we will handle them, because opencrypto prefers drivers that
165 * support all requested algorithms. 165 * support all requested algorithms.
166 * 166 *
167 * 167 *
168 * XXX We should actually implement the HMAC modes this hardware 168 * XXX We should actually implement the HMAC modes this hardware
169 * XXX can accellerate (wrap its plain SHA1/SHA2 as HMAC) and 169 * XXX can accellerate (wrap its plain SHA1/SHA2 as HMAC) and
170 * XXX strongly consider removing those passed through to cryptosoft. 170 * XXX strongly consider removing those passed through to cryptosoft.
171 * XXX As it stands, we can "steal" sessions from drivers which could 171 * XXX As it stands, we can "steal" sessions from drivers which could
172 * XXX better accellerate them. 172 * XXX better accellerate them.
173 * 173 *
174 * XXX Note the ordering dependency between when this (or any 174 * XXX Note the ordering dependency between when this (or any
175 * XXX crypto driver) attaches and when cryptosoft does. We are 175 * XXX crypto driver) attaches and when cryptosoft does. We are
176 * XXX basically counting on the swcrypto pseudo-device to just 176 * XXX basically counting on the swcrypto pseudo-device to just
177 * XXX happen to attach last, or _it_ will steal every session 177 * XXX happen to attach last, or _it_ will steal every session
178 * XXX from _us_! 178 * XXX from _us_!
179 */ 179 */
180#define REGISTER(alg) \ 180#define REGISTER(alg) \
181 crypto_register(sc->sc_cid, alg, 0, 0, \ 181 crypto_register(sc->sc_cid, alg, 0, 0, \
182 via_padlock_crypto_newsession, via_padlock_crypto_freesession, \ 182 via_padlock_crypto_newsession, via_padlock_crypto_freesession, \
183 via_padlock_crypto_process, sc); 183 via_padlock_crypto_process, sc);
184 184
185 REGISTER(CRYPTO_AES_CBC); 185 REGISTER(CRYPTO_AES_CBC);
186 REGISTER(CRYPTO_MD5_HMAC_96); 186 REGISTER(CRYPTO_MD5_HMAC_96);
187 REGISTER(CRYPTO_MD5_HMAC); 187 REGISTER(CRYPTO_MD5_HMAC);
188 REGISTER(CRYPTO_SHA1_HMAC_96); 188 REGISTER(CRYPTO_SHA1_HMAC_96);
189 REGISTER(CRYPTO_SHA1_HMAC); 189 REGISTER(CRYPTO_SHA1_HMAC);
190 REGISTER(CRYPTO_RIPEMD160_HMAC_96); 190 REGISTER(CRYPTO_RIPEMD160_HMAC_96);
191 REGISTER(CRYPTO_RIPEMD160_HMAC); 191 REGISTER(CRYPTO_RIPEMD160_HMAC);
192 REGISTER(CRYPTO_SHA2_HMAC); 192 REGISTER(CRYPTO_SHA2_HMAC);
193} 193}
194 194
195int 195int
196via_padlock_crypto_newsession(void *arg, uint32_t *sidp, struct cryptoini *cri) 196via_padlock_crypto_newsession(void *arg, uint32_t *sidp, struct cryptoini *cri)
197{ 197{
198 struct cryptoini *c; 198 struct cryptoini *c;
199 struct via_padlock_softc *sc = arg; 199 struct via_padlock_softc *sc = arg;
200 struct via_padlock_session *ses = NULL; 200 struct via_padlock_session *ses = NULL;
201 const struct swcr_auth_hash *axf; 201 const struct swcr_auth_hash *axf;
202 struct swcr_data *swd; 202 struct swcr_data *swd;
203 int sesn, i, cw0; 203 int sesn, i, cw0;
204 204
205 KASSERT(sc != NULL /*, ("via_padlock_crypto_freesession: null softc")*/); 205 KASSERT(sc != NULL /*, ("via_padlock_crypto_freesession: null softc")*/);
206 if (sc == NULL || sidp == NULL || cri == NULL) 206 if (sc == NULL || sidp == NULL || cri == NULL)
207 return (EINVAL); 207 return (EINVAL);
208 208
209 if (sc->sc_sessions == NULL) { 209 if (sc->sc_sessions == NULL) {
210 ses = sc->sc_sessions = malloc(sizeof(*ses), M_DEVBUF, 210 ses = sc->sc_sessions = malloc(sizeof(*ses), M_DEVBUF,
211 M_NOWAIT); 211 M_NOWAIT);
212 if (ses == NULL) 212 if (ses == NULL)
213 return (ENOMEM); 213 return (ENOMEM);
214 sesn = 0; 214 sesn = 0;
215 sc->sc_nsessions = 1; 215 sc->sc_nsessions = 1;
216 } else { 216 } else {
217 for (sesn = 0; sesn < sc->sc_nsessions; sesn++) { 217 for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
218 if (sc->sc_sessions[sesn].ses_used == 0) { 218 if (sc->sc_sessions[sesn].ses_used == 0) {
219 ses = &sc->sc_sessions[sesn]; 219 ses = &sc->sc_sessions[sesn];
220 break; 220 break;
221 } 221 }
222 } 222 }
223 223
224 if (ses == NULL) { 224 if (ses == NULL) {
225 sesn = sc->sc_nsessions; 225 sesn = sc->sc_nsessions;
226 ses = malloc((sesn + 1) * sizeof(*ses), M_DEVBUF, 226 ses = malloc((sesn + 1) * sizeof(*ses), M_DEVBUF,
227 M_NOWAIT); 227 M_NOWAIT);
228 if (ses == NULL) 228 if (ses == NULL)
229 return (ENOMEM); 229 return (ENOMEM);
230 memcpy(ses, sc->sc_sessions, sesn * sizeof(*ses)); 230 memcpy(ses, sc->sc_sessions, sesn * sizeof(*ses));
231 memset(sc->sc_sessions, 0, sesn * sizeof(*ses)); 231 memset(sc->sc_sessions, 0, sesn * sizeof(*ses));
232 free(sc->sc_sessions, M_DEVBUF); 232 free(sc->sc_sessions, M_DEVBUF);
233 sc->sc_sessions = ses; 233 sc->sc_sessions = ses;
234 ses = &sc->sc_sessions[sesn]; 234 ses = &sc->sc_sessions[sesn];
235 sc->sc_nsessions++; 235 sc->sc_nsessions++;
236 } 236 }
237 } 237 }
238 238
239 memset(ses, 0, sizeof(*ses)); 239 memset(ses, 0, sizeof(*ses));
240 ses->ses_used = 1; 240 ses->ses_used = 1;
241 241
242 for (c = cri; c != NULL; c = c->cri_next) { 242 for (c = cri; c != NULL; c = c->cri_next) {
243 switch (c->cri_alg) { 243 switch (c->cri_alg) {
244 case CRYPTO_AES_CBC: 244 case CRYPTO_AES_CBC:
245 switch (c->cri_klen) { 245 switch (c->cri_klen) {
246 case 128: 246 case 128:
247 cw0 = C3_CRYPT_CWLO_KEY128; 247 cw0 = C3_CRYPT_CWLO_KEY128;
248 break; 248 break;
249 case 192: 249 case 192:
250 cw0 = C3_CRYPT_CWLO_KEY192; 250 cw0 = C3_CRYPT_CWLO_KEY192;
251 break; 251 break;
252 case 256: 252 case 256:
253 cw0 = C3_CRYPT_CWLO_KEY256; 253 cw0 = C3_CRYPT_CWLO_KEY256;
254 break; 254 break;
255 default: 255 default:
256 return (EINVAL); 256 return (EINVAL);
257 } 257 }
258 cw0 |= C3_CRYPT_CWLO_ALG_AES | 258 cw0 |= C3_CRYPT_CWLO_ALG_AES |
259 C3_CRYPT_CWLO_KEYGEN_SW | 259 C3_CRYPT_CWLO_KEYGEN_SW |
260 C3_CRYPT_CWLO_NORMAL; 260 C3_CRYPT_CWLO_NORMAL;
261 261
262 cprng_fast(ses->ses_iv, sizeof(ses->ses_iv)); 262 cprng_fast(ses->ses_iv, sizeof(ses->ses_iv));
263 ses->ses_klen = c->cri_klen; 263 ses->ses_klen = c->cri_klen;
264 ses->ses_cw0 = cw0; 264 ses->ses_cw0 = cw0;
265 265
266 /* Build expanded keys for both directions */ 266 /* Build expanded keys for both directions */
267 rijndaelKeySetupEnc(ses->ses_ekey, c->cri_key, 267 rijndaelKeySetupEnc(ses->ses_ekey, c->cri_key,
268 c->cri_klen); 268 c->cri_klen);
269 rijndaelKeySetupDec(ses->ses_dkey, c->cri_key, 269 rijndaelKeySetupDec(ses->ses_dkey, c->cri_key,
270 c->cri_klen); 270 c->cri_klen);
271 for (i = 0; i < 4 * (RIJNDAEL_MAXNR + 1); i++) { 271 for (i = 0; i < 4 * (RIJNDAEL_MAXNR + 1); i++) {
272 ses->ses_ekey[i] = ntohl(ses->ses_ekey[i]); 272 ses->ses_ekey[i] = ntohl(ses->ses_ekey[i]);
273 ses->ses_dkey[i] = ntohl(ses->ses_dkey[i]); 273 ses->ses_dkey[i] = ntohl(ses->ses_dkey[i]);
274 } 274 }
275 275
276 break; 276 break;
277 277
278 /* Use hashing implementations from the cryptosoft code. */ 278 /* Use hashing implementations from the cryptosoft code. */
279 case CRYPTO_MD5_HMAC: 279 case CRYPTO_MD5_HMAC:
280 axf = &swcr_auth_hash_hmac_md5; 280 axf = &swcr_auth_hash_hmac_md5;
281 goto authcommon; 281 goto authcommon;
282 case CRYPTO_MD5_HMAC_96: 282 case CRYPTO_MD5_HMAC_96:
283 axf = &swcr_auth_hash_hmac_md5_96; 283 axf = &swcr_auth_hash_hmac_md5_96;
284 goto authcommon; 284 goto authcommon;
285 case CRYPTO_SHA1_HMAC: 285 case CRYPTO_SHA1_HMAC:
286 axf = &swcr_auth_hash_hmac_sha1; 286 axf = &swcr_auth_hash_hmac_sha1;
287 goto authcommon; 287 goto authcommon;
288 case CRYPTO_SHA1_HMAC_96: 288 case CRYPTO_SHA1_HMAC_96:
289 axf = &swcr_auth_hash_hmac_sha1_96; 289 axf = &swcr_auth_hash_hmac_sha1_96;
290 goto authcommon; 290 goto authcommon;
291 case CRYPTO_RIPEMD160_HMAC: 291 case CRYPTO_RIPEMD160_HMAC:
292 axf = &swcr_auth_hash_hmac_ripemd_160; 292 axf = &swcr_auth_hash_hmac_ripemd_160;
293 goto authcommon; 293 goto authcommon;
294 case CRYPTO_RIPEMD160_HMAC_96: 294 case CRYPTO_RIPEMD160_HMAC_96:
295 axf = &swcr_auth_hash_hmac_ripemd_160_96; 295 axf = &swcr_auth_hash_hmac_ripemd_160_96;
296 goto authcommon; 296 goto authcommon;
297 case CRYPTO_SHA2_HMAC: 297 case CRYPTO_SHA2_HMAC:
298 if (cri->cri_klen == 256) 298 if (cri->cri_klen == 256)
299 axf = &swcr_auth_hash_hmac_sha2_256; 299 axf = &swcr_auth_hash_hmac_sha2_256;
300 else if (cri->cri_klen == 384) 300 else if (cri->cri_klen == 384)
301 axf = &swcr_auth_hash_hmac_sha2_384; 301 axf = &swcr_auth_hash_hmac_sha2_384;
302 else if (cri->cri_klen == 512) 302 else if (cri->cri_klen == 512)
303 axf = &swcr_auth_hash_hmac_sha2_512; 303 axf = &swcr_auth_hash_hmac_sha2_512;
304 else { 304 else {
305 return EINVAL; 305 return EINVAL;
306 } 306 }
307 authcommon: 307 authcommon:
308 swd = malloc(sizeof(struct swcr_data), M_CRYPTO_DATA, 308 swd = malloc(sizeof(struct swcr_data), M_CRYPTO_DATA,
309 M_NOWAIT|M_ZERO); 309 M_NOWAIT|M_ZERO);
310 if (swd == NULL) { 310 if (swd == NULL) {
311 via_padlock_crypto_freesession(sc, sesn); 311 via_padlock_crypto_freesession(sc, sesn);
312 return (ENOMEM); 312 return (ENOMEM);
313 } 313 }
314 ses->swd = swd; 314 ses->swd = swd;
315 315
316 swd->sw_ictx = malloc(axf->ctxsize, 316 swd->sw_ictx = malloc(axf->ctxsize,
317 M_CRYPTO_DATA, M_NOWAIT); 317 M_CRYPTO_DATA, M_NOWAIT);
318 if (swd->sw_ictx == NULL) { 318 if (swd->sw_ictx == NULL) {
319 via_padlock_crypto_freesession(sc, sesn); 319 via_padlock_crypto_freesession(sc, sesn);
320 return (ENOMEM); 320 return (ENOMEM);
321 } 321 }
322 322
323 swd->sw_octx = malloc(axf->ctxsize, 323 swd->sw_octx = malloc(axf->ctxsize,
324 M_CRYPTO_DATA, M_NOWAIT); 324 M_CRYPTO_DATA, M_NOWAIT);
325 if (swd->sw_octx == NULL) { 325 if (swd->sw_octx == NULL) {
326 via_padlock_crypto_freesession(sc, sesn); 326 via_padlock_crypto_freesession(sc, sesn);
327 return (ENOMEM); 327 return (ENOMEM);
328 } 328 }
329 329
330 for (i = 0; i < c->cri_klen / 8; i++) 330 for (i = 0; i < c->cri_klen / 8; i++)
331 c->cri_key[i] ^= HMAC_IPAD_VAL; 331 c->cri_key[i] ^= HMAC_IPAD_VAL;
332 332
333 axf->Init(swd->sw_ictx); 333 axf->Init(swd->sw_ictx);
334 axf->Update(swd->sw_ictx, c->cri_key, c->cri_klen / 8); 334 axf->Update(swd->sw_ictx, c->cri_key, c->cri_klen / 8);
335 axf->Update(swd->sw_ictx, hmac_ipad_buffer, 335 axf->Update(swd->sw_ictx, hmac_ipad_buffer,
336 HMAC_BLOCK_LEN - (c->cri_klen / 8)); 336 HMAC_BLOCK_LEN - (c->cri_klen / 8));
337 337
338 for (i = 0; i < c->cri_klen / 8; i++) 338 for (i = 0; i < c->cri_klen / 8; i++)
339 c->cri_key[i] ^= (HMAC_IPAD_VAL ^ 339 c->cri_key[i] ^= (HMAC_IPAD_VAL ^
340 HMAC_OPAD_VAL); 340 HMAC_OPAD_VAL);
341 341
342 axf->Init(swd->sw_octx); 342 axf->Init(swd->sw_octx);
343 axf->Update(swd->sw_octx, c->cri_key, c->cri_klen / 8); 343 axf->Update(swd->sw_octx, c->cri_key, c->cri_klen / 8);
344 axf->Update(swd->sw_octx, hmac_opad_buffer, 344 axf->Update(swd->sw_octx, hmac_opad_buffer,
345 HMAC_BLOCK_LEN - (c->cri_klen / 8)); 345 HMAC_BLOCK_LEN - (c->cri_klen / 8));
346 346
347 for (i = 0; i < c->cri_klen / 8; i++) 347 for (i = 0; i < c->cri_klen / 8; i++)
348 c->cri_key[i] ^= HMAC_OPAD_VAL; 348 c->cri_key[i] ^= HMAC_OPAD_VAL;
349 349
350 swd->sw_axf = axf; 350 swd->sw_axf = axf;
351 swd->sw_alg = c->cri_alg; 351 swd->sw_alg = c->cri_alg;
352 352
353 break; 353 break;
354 default: 354 default:
355 return (EINVAL); 355 return (EINVAL);
356 } 356 }
357 } 357 }
358 358
359 *sidp = VIAC3_SID(0, sesn); 359 *sidp = VIAC3_SID(0, sesn);
360 return (0); 360 return (0);
361} 361}
362 362
363int 363int
364via_padlock_crypto_freesession(void *arg, uint64_t tid) 364via_padlock_crypto_freesession(void *arg, uint64_t tid)
365{ 365{
366 struct via_padlock_softc *sc = arg; 366 struct via_padlock_softc *sc = arg;
367 struct swcr_data *swd; 367 struct swcr_data *swd;
368 const struct swcr_auth_hash *axf; 368 const struct swcr_auth_hash *axf;
369 int sesn; 369 int sesn;
370 uint32_t sid = ((uint32_t)tid) & 0xffffffff; 370 uint32_t sid = ((uint32_t)tid) & 0xffffffff;
371 371
372 KASSERT(sc != NULL /*, ("via_padlock_crypto_freesession: null softc")*/); 372 KASSERT(sc != NULL /*, ("via_padlock_crypto_freesession: null softc")*/);
373 if (sc == NULL) 373 if (sc == NULL)
374 return (EINVAL); 374 return (EINVAL);
375 375
376 sesn = VIAC3_SESSION(sid); 376 sesn = VIAC3_SESSION(sid);
377 if (sesn >= sc->sc_nsessions) 377 if (sesn >= sc->sc_nsessions)
378 return (EINVAL); 378 return (EINVAL);
379 379
380 if (sc->sc_sessions[sesn].swd) { 380 if (sc->sc_sessions[sesn].swd) {
381 swd = sc->sc_sessions[sesn].swd; 381 swd = sc->sc_sessions[sesn].swd;
382 axf = swd->sw_axf; 382 axf = swd->sw_axf;
383 383
384 if (swd->sw_ictx) { 384 if (swd->sw_ictx) {
385 memset(swd->sw_ictx, 0, axf->ctxsize); 385 memset(swd->sw_ictx, 0, axf->ctxsize);
386 free(swd->sw_ictx, M_CRYPTO_DATA); 386 free(swd->sw_ictx, M_CRYPTO_DATA);
387 } 387 }
388 if (swd->sw_octx) { 388 if (swd->sw_octx) {
389 memset(swd->sw_octx, 0, axf->ctxsize); 389 memset(swd->sw_octx, 0, axf->ctxsize);
390 free(swd->sw_octx, M_CRYPTO_DATA); 390 free(swd->sw_octx, M_CRYPTO_DATA);
391 } 391 }
392 free(swd, M_CRYPTO_DATA); 392 free(swd, M_CRYPTO_DATA);
393 } 393 }
394 394
395 memset(&sc->sc_sessions[sesn], 0, sizeof(sc->sc_sessions[sesn])); 395 memset(&sc->sc_sessions[sesn], 0, sizeof(sc->sc_sessions[sesn]));
396 return (0); 396 return (0);
397} 397}
398 398
399static __inline void 399static __inline void
400via_padlock_cbc(void *cw, void *src, void *dst, void *key, int rep, 400via_padlock_cbc(void *cw, void *src, void *dst, void *key, int rep,
401 void *iv) 401 void *iv)
402{ 402{
403 unsigned int creg0; 403 unsigned int creg0;
404 404
405 creg0 = rcr0(); /* Permit access to SIMD/FPU path */ 405 creg0 = rcr0(); /* Permit access to SIMD/FPU path */
406 lcr0(creg0 & ~(CR0_EM|CR0_TS)); 406 lcr0(creg0 & ~(CR0_EM|CR0_TS));
407 407
408 /* Do the deed */ 408 /* Do the deed */
409 __asm __volatile("pushfl; popfl"); /* force key reload */ 409 __asm __volatile("pushfl; popfl"); /* force key reload */
410 __asm __volatile(".byte 0xf3, 0x0f, 0xa7, 0xd0" : /* rep xcrypt-cbc */ 410 __asm __volatile(".byte 0xf3, 0x0f, 0xa7, 0xd0" : /* rep xcrypt-cbc */
411 : "a" (iv), "b" (key), "c" (rep), "d" (cw), "S" (src), "D" (dst) 411 : "a" (iv), "b" (key), "c" (rep), "d" (cw), "S" (src), "D" (dst)
412 : "memory", "cc"); 412 : "memory", "cc");
413 413
414 lcr0(creg0); 414 lcr0(creg0);
415} 415}
416 416
417int 417int
418via_padlock_crypto_swauth(struct cryptop *crp, struct cryptodesc *crd, 418via_padlock_crypto_swauth(struct cryptop *crp, struct cryptodesc *crd,
419 struct swcr_data *sw, void *buf) 419 struct swcr_data *sw, void *buf)
420{ 420{
421 int type; 421 int type;
422 422
423 if (crp->crp_flags & CRYPTO_F_IMBUF) 423 if (crp->crp_flags & CRYPTO_F_IMBUF)
424 type = CRYPTO_BUF_MBUF; 424 type = CRYPTO_BUF_MBUF;
425 else 425 else
426 type= CRYPTO_BUF_IOV; 426 type= CRYPTO_BUF_IOV;
427 427
428 return (swcr_authcompute(crp, crd, sw, buf, type)); 428 return (swcr_authcompute(crp, crd, sw, buf, type));
429} 429}
430 430
431int 431int
432via_padlock_crypto_encdec(struct cryptop *crp, struct cryptodesc *crd, 432via_padlock_crypto_encdec(struct cryptop *crp, struct cryptodesc *crd,
433 struct via_padlock_session *ses, struct via_padlock_softc *sc, void *buf) 433 struct via_padlock_session *ses, struct via_padlock_softc *sc, void *buf)
434{ 434{
435 uint32_t *key; 435 uint32_t *key;
436 int err = 0; 436 int err = 0;
437 437
438 if ((crd->crd_len % 16) != 0) { 438 if ((crd->crd_len % 16) != 0) {
439 err = EINVAL; 439 err = EINVAL;
440 return (err); 440 return (err);
441 } 441 }
442 442
443 sc->op_buf = malloc(crd->crd_len, M_DEVBUF, M_NOWAIT); 443 sc->op_buf = malloc(crd->crd_len, M_DEVBUF, M_NOWAIT);
444 if (sc->op_buf == NULL) { 444 if (sc->op_buf == NULL) {
445 err = ENOMEM; 445 err = ENOMEM;
446 return (err); 446 return (err);
447 } 447 }
448 448
449 if (crd->crd_flags & CRD_F_ENCRYPT) { 449 if (crd->crd_flags & CRD_F_ENCRYPT) {
450 sc->op_cw[0] = ses->ses_cw0 | C3_CRYPT_CWLO_ENCRYPT; 450 sc->op_cw[0] = ses->ses_cw0 | C3_CRYPT_CWLO_ENCRYPT;
451 key = ses->ses_ekey; 451 key = ses->ses_ekey;
452 if (crd->crd_flags & CRD_F_IV_EXPLICIT) 452 if (crd->crd_flags & CRD_F_IV_EXPLICIT)
453 memcpy(sc->op_iv, crd->crd_iv, 16); 453 memcpy(sc->op_iv, crd->crd_iv, 16);
454 else 454 else
455 memcpy(sc->op_iv, ses->ses_iv, 16); 455 memcpy(sc->op_iv, ses->ses_iv, 16);
456 456
457 if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) { 457 if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
458 if (crp->crp_flags & CRYPTO_F_IMBUF) 458 if (crp->crp_flags & CRYPTO_F_IMBUF)
459 m_copyback((struct mbuf *)crp->crp_buf, 459 m_copyback((struct mbuf *)crp->crp_buf,
460 crd->crd_inject, 16, sc->op_iv); 460 crd->crd_inject, 16, sc->op_iv);
461 else if (crp->crp_flags & CRYPTO_F_IOV) 461 else if (crp->crp_flags & CRYPTO_F_IOV)
462 cuio_copyback((struct uio *)crp->crp_buf, 462 cuio_copyback((struct uio *)crp->crp_buf,
463 crd->crd_inject, 16, sc->op_iv); 463 crd->crd_inject, 16, sc->op_iv);
464 else 464 else
465 memcpy((char *)crp->crp_buf + crd->crd_inject, 465 memcpy((char *)crp->crp_buf + crd->crd_inject,
466 sc->op_iv, 16); 466 sc->op_iv, 16);
467 } 467 }
468 } else { 468 } else {
469 sc->op_cw[0] = ses->ses_cw0 | C3_CRYPT_CWLO_DECRYPT; 469 sc->op_cw[0] = ses->ses_cw0 | C3_CRYPT_CWLO_DECRYPT;
470 key = ses->ses_dkey; 470 key = ses->ses_dkey;
471 if (crd->crd_flags & CRD_F_IV_EXPLICIT) 471 if (crd->crd_flags & CRD_F_IV_EXPLICIT)
472 memcpy(sc->op_iv, crd->crd_iv, 16); 472 memcpy(sc->op_iv, crd->crd_iv, 16);
473 else { 473 else {
474 if (crp->crp_flags & CRYPTO_F_IMBUF) 474 if (crp->crp_flags & CRYPTO_F_IMBUF)
475 m_copydata((struct mbuf *)crp->crp_buf, 475 m_copydata((struct mbuf *)crp->crp_buf,
476 crd->crd_inject, 16, sc->op_iv); 476 crd->crd_inject, 16, sc->op_iv);
477 else if (crp->crp_flags & CRYPTO_F_IOV) 477 else if (crp->crp_flags & CRYPTO_F_IOV)
478 cuio_copydata((struct uio *)crp->crp_buf, 478 cuio_copydata((struct uio *)crp->crp_buf,
479 crd->crd_inject, 16, sc->op_iv); 479 crd->crd_inject, 16, sc->op_iv);
480 else 480 else
481 memcpy(sc->op_iv, (char *)crp->crp_buf + 481 memcpy(sc->op_iv, (char *)crp->crp_buf +
482 crd->crd_inject, 16); 482 crd->crd_inject, 16);
483 } 483 }
484 } 484 }
485 485
486 if (crp->crp_flags & CRYPTO_F_IMBUF) 486 if (crp->crp_flags & CRYPTO_F_IMBUF)
487 m_copydata((struct mbuf *)crp->crp_buf, 487 m_copydata((struct mbuf *)crp->crp_buf,
488 crd->crd_skip, crd->crd_len, sc->op_buf); 488 crd->crd_skip, crd->crd_len, sc->op_buf);
489 else if (crp->crp_flags & CRYPTO_F_IOV) 489 else if (crp->crp_flags & CRYPTO_F_IOV)
490 cuio_copydata((struct uio *)crp->crp_buf, 490 cuio_copydata((struct uio *)crp->crp_buf,
491 crd->crd_skip, crd->crd_len, sc->op_buf); 491 crd->crd_skip, crd->crd_len, sc->op_buf);
492 else 492 else
493 memcpy(sc->op_buf, (char *)crp->crp_buf + crd->crd_skip, 493 memcpy(sc->op_buf, (char *)crp->crp_buf + crd->crd_skip,
494 crd->crd_len); 494 crd->crd_len);
495 495
496 sc->op_cw[1] = sc->op_cw[2] = sc->op_cw[3] = 0; 496 sc->op_cw[1] = sc->op_cw[2] = sc->op_cw[3] = 0;
497 via_padlock_cbc(&sc->op_cw, sc->op_buf, sc->op_buf, key, 497 via_padlock_cbc(&sc->op_cw, sc->op_buf, sc->op_buf, key,
498 crd->crd_len / 16, sc->op_iv); 498 crd->crd_len / 16, sc->op_iv);
499 499
500 if (crp->crp_flags & CRYPTO_F_IMBUF) 500 if (crp->crp_flags & CRYPTO_F_IMBUF)
501 m_copyback((struct mbuf *)crp->crp_buf, 501 m_copyback((struct mbuf *)crp->crp_buf,
502 crd->crd_skip, crd->crd_len, sc->op_buf); 502 crd->crd_skip, crd->crd_len, sc->op_buf);
503 else if (crp->crp_flags & CRYPTO_F_IOV) 503 else if (crp->crp_flags & CRYPTO_F_IOV)
504 cuio_copyback((struct uio *)crp->crp_buf, 504 cuio_copyback((struct uio *)crp->crp_buf,
505 crd->crd_skip, crd->crd_len, sc->op_buf); 505 crd->crd_skip, crd->crd_len, sc->op_buf);
506 else 506 else
507 memcpy((char *)crp->crp_buf + crd->crd_skip, sc->op_buf, 507 memcpy((char *)crp->crp_buf + crd->crd_skip, sc->op_buf,
508 crd->crd_len); 508 crd->crd_len);
509 509
510 /* copy out last block for use as next session IV */ 510 /* copy out last block for use as next session IV */
511 if (crd->crd_flags & CRD_F_ENCRYPT) { 511 if (crd->crd_flags & CRD_F_ENCRYPT) {
512 if (crp->crp_flags & CRYPTO_F_IMBUF) 512 if (crp->crp_flags & CRYPTO_F_IMBUF)
513 m_copydata((struct mbuf *)crp->crp_buf, 513 m_copydata((struct mbuf *)crp->crp_buf,
514 crd->crd_skip + crd->crd_len - 16, 16, 514 crd->crd_skip + crd->crd_len - 16, 16,
515 ses->ses_iv); 515 ses->ses_iv);
516 else if (crp->crp_flags & CRYPTO_F_IOV) 516 else if (crp->crp_flags & CRYPTO_F_IOV)
517 cuio_copydata((struct uio *)crp->crp_buf, 517 cuio_copydata((struct uio *)crp->crp_buf,
518 crd->crd_skip + crd->crd_len - 16, 16, 518 crd->crd_skip + crd->crd_len - 16, 16,
519 ses->ses_iv); 519 ses->ses_iv);
520 else 520 else
521 memcpy(ses->ses_iv, (char *)crp->crp_buf + 521 memcpy(ses->ses_iv, (char *)crp->crp_buf +
522 crd->crd_skip + crd->crd_len - 16, 16); 522 crd->crd_skip + crd->crd_len - 16, 16);
523 } 523 }
524 524
525 if (sc->op_buf != NULL) { 525 if (sc->op_buf != NULL) {
526 memset(sc->op_buf, 0, crd->crd_len); 526 memset(sc->op_buf, 0, crd->crd_len);
527 free(sc->op_buf, M_DEVBUF); 527 free(sc->op_buf, M_DEVBUF);
528 sc->op_buf = NULL; 528 sc->op_buf = NULL;
529 } 529 }
530 530
531 return (err); 531 return (err);
532} 532}
533 533
534int 534int
535via_padlock_crypto_process(void *arg, struct cryptop *crp, int hint) 535via_padlock_crypto_process(void *arg, struct cryptop *crp, int hint)
536{ 536{
537 struct via_padlock_softc *sc = arg; 537 struct via_padlock_softc *sc = arg;
538 struct via_padlock_session *ses; 538 struct via_padlock_session *ses;
539 struct cryptodesc *crd; 539 struct cryptodesc *crd;
540 int sesn, err = 0; 540 int sesn, err = 0;
541 541
542 KASSERT(sc != NULL /*, ("via_padlock_crypto_process: null softc")*/); 542 KASSERT(sc != NULL /*, ("via_padlock_crypto_process: null softc")*/);
543 if (crp == NULL || crp->crp_callback == NULL) { 543 if (crp == NULL || crp->crp_callback == NULL) {
544 err = EINVAL; 544 err = EINVAL;
545 goto out; 545 goto out;
546 } 546 }
547 547
548 sesn = VIAC3_SESSION(crp->crp_sid); 548 sesn = VIAC3_SESSION(crp->crp_sid);
549 if (sesn >= sc->sc_nsessions) { 549 if (sesn >= sc->sc_nsessions) {
550 err = EINVAL; 550 err = EINVAL;
551 goto out; 551 goto out;
552 } 552 }
553 ses = &sc->sc_sessions[sesn]; 553 ses = &sc->sc_sessions[sesn];
554 554
555 for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 555 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
556 switch (crd->crd_alg) { 556 switch (crd->crd_alg) {
557 case CRYPTO_AES_CBC: 557 case CRYPTO_AES_CBC:
558 if ((err = via_padlock_crypto_encdec(crp, crd, ses, 558 if ((err = via_padlock_crypto_encdec(crp, crd, ses,
559 sc, crp->crp_buf)) != 0) 559 sc, crp->crp_buf)) != 0)
560 goto out; 560 goto out;
561 break; 561 break;
562 562
563 case CRYPTO_MD5_HMAC: 563 case CRYPTO_MD5_HMAC:
564 case CRYPTO_SHA1_HMAC: 564 case CRYPTO_SHA1_HMAC:
565 case CRYPTO_RIPEMD160_HMAC: 565 case CRYPTO_RIPEMD160_HMAC:
566 case CRYPTO_SHA2_HMAC: 566 case CRYPTO_SHA2_HMAC:
567 if ((err = via_padlock_crypto_swauth(crp, crd, 567 if ((err = via_padlock_crypto_swauth(crp, crd,
568 ses->swd, crp->crp_buf)) != 0) 568 ses->swd, crp->crp_buf)) != 0)
569 goto out; 569 goto out;
570 break; 570 break;
571 571
572 default: 572 default:
573 err = EINVAL; 573 err = EINVAL;
574 goto out; 574 goto out;
575 } 575 }
576 } 576 }
577out: 577out:
578 crp->crp_etype = err; 578 crp->crp_etype = err;
579 crypto_done(crp); 579 crypto_done(crp);
580 return (err); 580 return (err);
581} 581}
582 582
583static int 583static int
584via_padlock_match(device_t parent, cfdata_t cf, void *opaque) 584via_padlock_match(device_t parent, cfdata_t cf, void *opaque)
585{ 585{
586 struct cpufeature_attach_args *cfaa = opaque; 586 struct cpufeature_attach_args *cfaa = opaque;
587 struct cpu_info *ci = cfaa->ci; 587 struct cpu_info *ci = cfaa->ci;
588 588
589 if (strcmp(cfaa->name, "padlock") != 0) 589 if (strcmp(cfaa->name, "padlock") != 0)
590 return 0; 590 return 0;
591 if ((cpu_feature[4] & (CPUID_VIA_HAS_ACE|CPUID_VIA_HAS_RNG)) == 0) 591 if ((cpu_feature[4] & (CPUID_VIA_HAS_ACE|CPUID_VIA_HAS_RNG)) == 0)
592 return 0; 592 return 0;
593 if ((ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY)) == 0) 593 if ((ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY)) == 0)
594 return 0; 594 return 0;
595 return 1; 595 return 1;
596} 596}
597 597
598static void 598static void
599via_padlock_attach(device_t parent, device_t self, void *opaque) 599via_padlock_attach(device_t parent, device_t self, void *opaque)
600{ 600{
601 struct via_padlock_softc *sc = device_private(self); 601 struct via_padlock_softc *sc = device_private(self);
602 602
603 sc->sc_dev = self; 603 sc->sc_dev = self;
604 604
605 aprint_naive("\n"); 605 aprint_naive("\n");
606 aprint_normal(": VIA PadLock\n"); 606 aprint_normal(": VIA PadLock\n");
607 607
608 pmf_device_register(self, NULL, NULL); 608 pmf_device_register(self, NULL, NULL);
609 609
610 config_interrupts(self, via_padlock_attach_intr); 610 config_interrupts(self, via_padlock_attach_intr);
611} 611}
612 612
613static void 613static void
614via_padlock_attach_intr(device_t self) 614via_padlock_attach_intr(device_t self)
615{ 615{
616 struct via_padlock_softc *sc = device_private(self); 616 struct via_padlock_softc *sc = device_private(self);
617 617
618 aprint_normal("%s:", device_xname(self)); 618 aprint_normal("%s:", device_xname(self));
619 if (cpu_feature[4] & CPUID_VIA_HAS_RNG) { 619 if (cpu_feature[4] & CPUID_VIA_HAS_RNG) {
620 via_c3_rnd_init(sc); 620 via_c3_rnd_init(sc);
621 aprint_normal(" RNG"); 621 aprint_normal(" RNG");
622 } 622 }
623 if (cpu_feature[4] & CPUID_VIA_HAS_ACE) { 623 if (cpu_feature[4] & CPUID_VIA_HAS_ACE) {
624 via_c3_ace_init(sc); 624 via_c3_ace_init(sc);
625 aprint_normal(" ACE"); 625 aprint_normal(" ACE");
626 } 626 }
627 aprint_normal("\n"); 627 aprint_normal("\n");
628} 628}
629 629
630static int 630static int
631via_padlock_detach(device_t self, int flags) 631via_padlock_detach(device_t self, int flags)
632{ 632{
633 struct via_padlock_softc *sc = device_private(self); 633 struct via_padlock_softc *sc = device_private(self);
634 634
635 if (sc->sc_rnd_attached) { 635 if (sc->sc_rnd_attached) {
636 callout_stop(&sc->sc_rnd_co); 636 callout_stop(&sc->sc_rnd_co);
637 callout_destroy(&sc->sc_rnd_co); 637 callout_destroy(&sc->sc_rnd_co);
638 rnd_detach_source(&sc->sc_rnd_source); 638 rnd_detach_source(&sc->sc_rnd_source);
639 sc->sc_rnd_attached = false; 639 sc->sc_rnd_attached = false;
640 } 640 }
641 if (sc->sc_cid_attached) { 641 if (sc->sc_cid_attached) {
642 crypto_unregister(sc->sc_cid, CRYPTO_AES_CBC); 642 crypto_unregister(sc->sc_cid, CRYPTO_AES_CBC);
643 crypto_unregister(sc->sc_cid, CRYPTO_MD5_HMAC_96); 643 crypto_unregister(sc->sc_cid, CRYPTO_MD5_HMAC_96);
644 crypto_unregister(sc->sc_cid, CRYPTO_MD5_HMAC); 644 crypto_unregister(sc->sc_cid, CRYPTO_MD5_HMAC);
645 crypto_unregister(sc->sc_cid, CRYPTO_SHA1_HMAC_96); 645 crypto_unregister(sc->sc_cid, CRYPTO_SHA1_HMAC_96);
646 crypto_unregister(sc->sc_cid, CRYPTO_SHA1_HMAC); 646 crypto_unregister(sc->sc_cid, CRYPTO_SHA1_HMAC);
647 crypto_unregister(sc->sc_cid, CRYPTO_RIPEMD160_HMAC_96); 647 crypto_unregister(sc->sc_cid, CRYPTO_RIPEMD160_HMAC_96);
648 crypto_unregister(sc->sc_cid, CRYPTO_RIPEMD160_HMAC); 648 crypto_unregister(sc->sc_cid, CRYPTO_RIPEMD160_HMAC);
649 crypto_unregister(sc->sc_cid, CRYPTO_SHA2_HMAC); 649 crypto_unregister(sc->sc_cid, CRYPTO_SHA2_HMAC);
650 sc->sc_cid_attached = false; 650 sc->sc_cid_attached = false;
651 } 651 }
652 652
653 pmf_device_deregister(self); 653 pmf_device_deregister(self);
654 654
655 return 0; 655 return 0;
656} 656}
657 657
/*
 * Declare the "padlock" driver-class kernel module.
 */
MODULE(MODULE_CLASS_DRIVER, padlock, NULL);

#ifdef _MODULE
/* Autoconf tables generated for the modular build. */
#include "ioconf.c"
#endif

664static int 664static int
665padlock_modcmd(modcmd_t cmd, void *opaque) 665padlock_modcmd(modcmd_t cmd, void *opaque)
666{ 666{
667 int error = 0; 667 int error = 0;
668 668
669 switch (cmd) { 669 switch (cmd) {
670 case MODULE_CMD_INIT: 670 case MODULE_CMD_INIT:
671#ifdef _MODULE 671#ifdef _MODULE
672 error = config_init_component(cfdriver_ioconf_padlock, 672 error = config_init_component(cfdriver_ioconf_padlock,
673 cfattach_ioconf_padlock, cfdata_ioconf_padlock); 673 cfattach_ioconf_padlock, cfdata_ioconf_padlock);
674#endif 674#endif
675 return error; 675 return error;
676 case MODULE_CMD_FINI: 676 case MODULE_CMD_FINI:
677#ifdef _MODULE 677#ifdef _MODULE
678 error = config_fini_component(cfdriver_ioconf_padlock, 678 error = config_fini_component(cfdriver_ioconf_padlock,
679 cfattach_ioconf_padlock, cfdata_ioconf_padlock); 679 cfattach_ioconf_padlock, cfdata_ioconf_padlock);
680#endif 680#endif
681 return error; 681 return error;
682 default: 682 default:
683 return ENOTTY; 683 return ENOTTY;
684 } 684 }
685} 685}