| @@ -1,836 +1,836 @@ | | | @@ -1,836 +1,836 @@ |
1 | /* | | 1 | /* |
2 | * Copyright 2017-2019 The OpenSSL Project Authors. All Rights Reserved. | | 2 | * Copyright 2017-2019 The OpenSSL Project Authors. All Rights Reserved. |
3 | * | | 3 | * |
4 | * Licensed under the OpenSSL license (the "License"). You may not use | | 4 | * Licensed under the OpenSSL license (the "License"). You may not use |
5 | * this file except in compliance with the License. You can obtain a copy | | 5 | * this file except in compliance with the License. You can obtain a copy |
6 | * in the file LICENSE in the source distribution or at | | 6 | * in the file LICENSE in the source distribution or at |
7 | * https://www.openssl.org/source/license.html | | 7 | * https://www.openssl.org/source/license.html |
8 | */ | | 8 | */ |
9 | | | 9 | |
10 | #include "e_os.h" | | 10 | #include "e_os.h" |
11 | #include <string.h> | | 11 | #include <string.h> |
12 | #include <sys/types.h> | | 12 | #include <sys/types.h> |
13 | #include <sys/stat.h> | | 13 | #include <sys/stat.h> |
14 | #include <fcntl.h> | | 14 | #include <fcntl.h> |
15 | #include <sys/ioctl.h> | | 15 | #include <sys/ioctl.h> |
16 | #include <unistd.h> | | 16 | #include <unistd.h> |
17 | #include <assert.h> | | 17 | #include <assert.h> |
18 | | | 18 | |
19 | #include <openssl/evp.h> | | 19 | #include <openssl/evp.h> |
20 | #include <openssl/err.h> | | 20 | #include <openssl/err.h> |
21 | #include <openssl/engine.h> | | 21 | #include <openssl/engine.h> |
22 | #include <openssl/objects.h> | | 22 | #include <openssl/objects.h> |
23 | #include <crypto/cryptodev.h> | | 23 | #include <crypto/cryptodev.h> |
24 | | | 24 | |
25 | #include "internal/engine.h" | | 25 | #include "internal/engine.h" |
26 | | | 26 | |
27 | /* #define ENGINE_DEVCRYPTO_DEBUG */ | | 27 | /* #define ENGINE_DEVCRYPTO_DEBUG */ |
28 | | | 28 | |
29 | #ifdef CRYPTO_ALGORITHM_MIN | | 29 | #ifdef CRYPTO_ALGORITHM_MIN |
30 | # define CHECK_BSD_STYLE_MACROS | | 30 | # define CHECK_BSD_STYLE_MACROS |
31 | #endif | | 31 | #endif |
32 | | | 32 | |
33 | /* | | 33 | /* |
34 | * ONE global file descriptor for all sessions. This allows operations | | 34 | * ONE global file descriptor for all sessions. This allows operations |
35 | * such as digest session data copying (see digest_copy()), but is also | | 35 | * such as digest session data copying (see digest_copy()), but is also |
36 | * saner... why re-open /dev/crypto for every session? | | 36 | * saner... why re-open /dev/crypto for every session? |
37 | */ | | 37 | */ |
38 | static int cfd; | | 38 | static int cfd; |
39 | | | 39 | |
40 | static int clean_devcrypto_session(struct session_op *sess) { | | 40 | static int clean_devcrypto_session(struct session_op *sess) { |
41 | if (ioctl(cfd, CIOCFSESSION, &sess->ses) < 0) { | | 41 | if (ioctl(cfd, CIOCFSESSION, &sess->ses) < 0) { |
42 | SYSerr(SYS_F_IOCTL, errno); | | 42 | SYSerr(SYS_F_IOCTL, errno); |
43 | return 0; | | 43 | return 0; |
44 | } | | 44 | } |
45 | memset(sess, 0, sizeof(struct session_op)); | | 45 | memset(sess, 0, sizeof(struct session_op)); |
46 | return 1; | | 46 | return 1; |
47 | } | | 47 | } |
48 | | | 48 | |
49 | /****************************************************************************** | | 49 | /****************************************************************************** |
50 | * | | 50 | * |
51 | * Ciphers | | 51 | * Ciphers |
52 | * | | 52 | * |
53 | * Because they all do the same basic operation, we have only one set of | | 53 | * Because they all do the same basic operation, we have only one set of |
54 | * method functions for them all to share, and a mapping table between | | 54 | * method functions for them all to share, and a mapping table between |
55 | * NIDs and cryptodev IDs, with all the necessary size data. | | 55 | * NIDs and cryptodev IDs, with all the necessary size data. |
56 | * | | 56 | * |
57 | *****/ | | 57 | *****/ |
58 | | | 58 | |
/* Per-EVP_CIPHER_CTX state for one cryptodev cipher session. */
struct cipher_ctx {
    struct session_op sess; /* kernel session; sess.ses == 0 means "none yet" */
    int op;                 /* COP_ENCRYPT or COP_DECRYPT */
    unsigned long mode;     /* EVP_CIPH_*_MODE */

    /* to handle ctr mode being a stream cipher */
    unsigned char partial[EVP_MAX_BLOCK_LENGTH]; /* buffered CTR keystream */
    unsigned int blocksize, num; /* block size; keystream bytes consumed */
};
68 | | | 68 | |
/*
 * Mapping table between OpenSSL cipher NIDs and cryptodev algorithm ids,
 * together with the size parameters needed to open a session.  Sizes are
 * in bytes; |devcryptoid| is the CRYPTO_* id handed to CIOCGSESSION.
 */
static const struct cipher_data_st {
    int nid;            /* OpenSSL NID for the cipher */
    int blocksize;      /* cipher block size */
    int keylen;         /* key length */
    int ivlen;          /* IV length (0 = no IV) */
    int flags;          /* EVP_CIPH_* mode flags */
    int devcryptoid;    /* cryptodev CRYPTO_* algorithm id */
} cipher_data[] = {
#ifndef OPENSSL_NO_DES
    { NID_des_cbc, 8, 8, 8, EVP_CIPH_CBC_MODE, CRYPTO_DES_CBC },
    { NID_des_ede3_cbc, 8, 24, 8, EVP_CIPH_CBC_MODE, CRYPTO_3DES_CBC },
#endif
#ifndef OPENSSL_NO_BF
    { NID_bf_cbc, 8, 16, 8, EVP_CIPH_CBC_MODE, CRYPTO_BLF_CBC },
#endif
#ifndef OPENSSL_NO_CAST
    { NID_cast5_cbc, 8, 16, 8, EVP_CIPH_CBC_MODE, CRYPTO_CAST_CBC },
#endif
    { NID_aes_128_cbc, 16, 128 / 8, 16, EVP_CIPH_CBC_MODE, CRYPTO_AES_CBC },
    { NID_aes_192_cbc, 16, 192 / 8, 16, EVP_CIPH_CBC_MODE, CRYPTO_AES_CBC },
    { NID_aes_256_cbc, 16, 256 / 8, 16, EVP_CIPH_CBC_MODE, CRYPTO_AES_CBC },
#ifndef OPENSSL_NO_RC4
    { NID_rc4, 1, 16, 0, EVP_CIPH_STREAM_CIPHER, CRYPTO_ARC4 },
#endif
#if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_AES_CTR)
    { NID_aes_128_ctr, 16, 128 / 8, 16, EVP_CIPH_CTR_MODE, CRYPTO_AES_CTR },
    { NID_aes_192_ctr, 16, 192 / 8, 16, EVP_CIPH_CTR_MODE, CRYPTO_AES_CTR },
    { NID_aes_256_ctr, 16, 256 / 8, 16, EVP_CIPH_CTR_MODE, CRYPTO_AES_CTR },
#endif
#if 0 /* Not yet supported */
    { NID_aes_128_xts, 16, 128 / 8 * 2, 16, EVP_CIPH_XTS_MODE, CRYPTO_AES_XTS },
    { NID_aes_256_xts, 16, 256 / 8 * 2, 16, EVP_CIPH_XTS_MODE, CRYPTO_AES_XTS },
#endif
#if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_AES_ECB)
    { NID_aes_128_ecb, 16, 128 / 8, 0, EVP_CIPH_ECB_MODE, CRYPTO_AES_ECB },
    { NID_aes_192_ecb, 16, 192 / 8, 0, EVP_CIPH_ECB_MODE, CRYPTO_AES_ECB },
    { NID_aes_256_ecb, 16, 256 / 8, 0, EVP_CIPH_ECB_MODE, CRYPTO_AES_ECB },
#endif
#if 0 /* Not yet supported */
    { NID_aes_128_gcm, 16, 128 / 8, 16, EVP_CIPH_GCM_MODE, CRYPTO_AES_GCM },
    { NID_aes_192_gcm, 16, 192 / 8, 16, EVP_CIPH_GCM_MODE, CRYPTO_AES_GCM },
    { NID_aes_256_gcm, 16, 256 / 8, 16, EVP_CIPH_GCM_MODE, CRYPTO_AES_GCM },
#endif
#ifndef OPENSSL_NO_CAMELLIA
    { NID_camellia_128_cbc, 16, 128 / 8, 16, EVP_CIPH_CBC_MODE,
      CRYPTO_CAMELLIA_CBC },
    { NID_camellia_192_cbc, 16, 192 / 8, 16, EVP_CIPH_CBC_MODE,
      CRYPTO_CAMELLIA_CBC },
    { NID_camellia_256_cbc, 16, 256 / 8, 16, EVP_CIPH_CBC_MODE,
      CRYPTO_CAMELLIA_CBC },
#endif
};
121 | | | 121 | |
122 | static size_t get_cipher_data_index(int nid) | | 122 | static size_t get_cipher_data_index(int nid) |
123 | { | | 123 | { |
124 | size_t i; | | 124 | size_t i; |
125 | | | 125 | |
126 | for (i = 0; i < OSSL_NELEM(cipher_data); i++) | | 126 | for (i = 0; i < OSSL_NELEM(cipher_data); i++) |
127 | if (nid == cipher_data[i].nid) | | 127 | if (nid == cipher_data[i].nid) |
128 | return i; | | 128 | return i; |
129 | | | 129 | |
130 | /* | | 130 | /* |
131 | * Code further down must make sure that only NIDs in the table above | | 131 | * Code further down must make sure that only NIDs in the table above |
132 | * are used. If any other NID reaches this function, there's a grave | | 132 | * are used. If any other NID reaches this function, there's a grave |
133 | * coding error further down. | | 133 | * coding error further down. |
134 | */ | | 134 | */ |
135 | assert("Code that never should be reached" == NULL); | | 135 | assert("Code that never should be reached" == NULL); |
136 | return -1; | | 136 | return -1; |
137 | } | | 137 | } |
138 | | | 138 | |
139 | static const struct cipher_data_st *get_cipher_data(int nid) | | 139 | static const struct cipher_data_st *get_cipher_data(int nid) |
140 | { | | 140 | { |
141 | return &cipher_data[get_cipher_data_index(nid)]; | | 141 | return &cipher_data[get_cipher_data_index(nid)]; |
142 | } | | 142 | } |
143 | | | 143 | |
144 | /* | | 144 | /* |
145 | * Following are the three necessary functions to map OpenSSL functionality | | 145 | * Following are the three necessary functions to map OpenSSL functionality |
146 | * with cryptodev. | | 146 | * with cryptodev. |
147 | */ | | 147 | */ |
148 | | | 148 | |
149 | static int cipher_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, | | 149 | static int cipher_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, |
150 | const unsigned char *iv, int enc) | | 150 | const unsigned char *iv, int enc) |
151 | { | | 151 | { |
152 | struct cipher_ctx *cipher_ctx = | | 152 | struct cipher_ctx *cipher_ctx = |
153 | (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); | | 153 | (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); |
154 | const struct cipher_data_st *cipher_d = | | 154 | const struct cipher_data_st *cipher_d = |
155 | get_cipher_data(EVP_CIPHER_CTX_nid(ctx)); | | 155 | get_cipher_data(EVP_CIPHER_CTX_nid(ctx)); |
156 | | | 156 | |
157 | /* cleanup a previous session */ | | 157 | /* cleanup a previous session */ |
158 | if (cipher_ctx->sess.ses != 0 && | | 158 | if (cipher_ctx->sess.ses != 0 && |
159 | clean_devcrypto_session(&cipher_ctx->sess) == 0) | | 159 | clean_devcrypto_session(&cipher_ctx->sess) == 0) |
160 | return 0; | | 160 | return 0; |
161 | | | 161 | |
162 | cipher_ctx->sess.cipher = cipher_d->devcryptoid; | | 162 | cipher_ctx->sess.cipher = cipher_d->devcryptoid; |
163 | cipher_ctx->sess.keylen = cipher_d->keylen; | | 163 | cipher_ctx->sess.keylen = cipher_d->keylen; |
164 | cipher_ctx->sess.key = (void *)key; | | 164 | cipher_ctx->sess.key = (void *)key; |
165 | cipher_ctx->op = enc ? COP_ENCRYPT : COP_DECRYPT; | | 165 | cipher_ctx->op = enc ? COP_ENCRYPT : COP_DECRYPT; |
166 | cipher_ctx->mode = cipher_d->flags & EVP_CIPH_MODE; | | 166 | cipher_ctx->mode = cipher_d->flags & EVP_CIPH_MODE; |
167 | cipher_ctx->blocksize = cipher_d->blocksize; | | 167 | cipher_ctx->blocksize = cipher_d->blocksize; |
168 | if (ioctl(cfd, CIOCGSESSION, &cipher_ctx->sess) < 0) { | | 168 | if (ioctl(cfd, CIOCGSESSION, &cipher_ctx->sess) < 0) { |
169 | SYSerr(SYS_F_IOCTL, errno); | | 169 | SYSerr(SYS_F_IOCTL, errno); |
170 | return 0; | | 170 | return 0; |
171 | } | | 171 | } |
172 | | | 172 | |
173 | return 1; | | 173 | return 1; |
174 | } | | 174 | } |
175 | | | 175 | |
/*
 * Encrypt or decrypt |inl| bytes from |in| into |out| with one CIOCCRYPT
 * ioctl on the session opened by cipher_init().
 *
 * The kernel is handed the EVP context's IV buffer directly.  When the
 * implementation supports COP_FLAG_WRITE_IV, the kernel updates that IV
 * itself; otherwise the next IV is reconstructed in userland after the
 * ioctl: for CBC it is the last ciphertext block, for CTR the counter in
 * the IV is advanced by the number of blocks processed.
 *
 * Returns 1 on success, 0 on ioctl failure or an unexpected cipher mode.
 */
static int cipher_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t inl)
{
    struct cipher_ctx *cipher_ctx =
        (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx);
    struct crypt_op cryp;
    unsigned char *iv = EVP_CIPHER_CTX_iv_noconst(ctx);
#if !defined(COP_FLAG_WRITE_IV)
    unsigned char saved_iv[EVP_MAX_IV_LENGTH];
    const unsigned char *ivptr;
    size_t nblocks, ivlen;
#endif

    memset(&cryp, 0, sizeof(cryp));
    cryp.ses = cipher_ctx->sess.ses;
    cryp.len = inl;
    cryp.src = (void *)in;
    cryp.dst = (void *)out;
    cryp.iv = (void *)iv;
    cryp.op = cipher_ctx->op;
#if !defined(COP_FLAG_WRITE_IV)
    cryp.flags = 0;

    ivlen = EVP_CIPHER_CTX_iv_length(ctx);
    if (ivlen > 0)
        switch (cipher_ctx->mode) {
        case EVP_CIPH_CBC_MODE:
            assert(inl >= ivlen);
            if (!EVP_CIPHER_CTX_encrypting(ctx)) {
                /*
                 * Save the last input (ciphertext) block before the ioctl:
                 * it becomes the next IV, and an in-place operation would
                 * otherwise overwrite it with plaintext.
                 */
                ivptr = in + inl - ivlen;
                memcpy(saved_iv, ivptr, ivlen);
            }
            break;

        case EVP_CIPH_CTR_MODE:
            break;

        default: /* should not happen */
            return 0;
        }
#else
    cryp.flags = COP_FLAG_WRITE_IV;
#endif

    if (ioctl(cfd, CIOCCRYPT, &cryp) < 0) {
        SYSerr(SYS_F_IOCTL, errno);
        return 0;
    }

#if !defined(COP_FLAG_WRITE_IV)
    if (ivlen > 0)
        switch (cipher_ctx->mode) {
        case EVP_CIPH_CBC_MODE:
            assert(inl >= ivlen);
            /* Next IV is the last ciphertext block of this pass. */
            if (EVP_CIPHER_CTX_encrypting(ctx))
                ivptr = out + inl - ivlen;
            else
                ivptr = saved_iv;

            memcpy(iv, ivptr, ivlen);
            break;

        case EVP_CIPH_CTR_MODE:
            /*
             * Advance the big-endian counter in the IV by the number of
             * blocks processed, rippling the carry from the last byte.
             */
            nblocks = (inl + cipher_ctx->blocksize - 1)
                      / cipher_ctx->blocksize;
            do {
                ivlen--;
                nblocks += iv[ivlen];
                iv[ivlen] = (uint8_t) nblocks;
                nblocks >>= 8;
            } while (ivlen);
            break;

        default: /* should not happen */
            return 0;
        }
#endif

    return 1;
}
256 | | | 256 | |
257 | static int ctr_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, | | 257 | static int ctr_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, |
258 | const unsigned char *in, size_t inl) | | 258 | const unsigned char *in, size_t inl) |
259 | { | | 259 | { |
260 | struct cipher_ctx *cipher_ctx = | | 260 | struct cipher_ctx *cipher_ctx = |
261 | (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); | | 261 | (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); |
262 | size_t nblocks, len; | | 262 | size_t nblocks, len; |
263 | | | 263 | |
264 | /* initial partial block */ | | 264 | /* initial partial block */ |
265 | while (cipher_ctx->num && inl) { | | 265 | while (cipher_ctx->num && inl) { |
266 | (*out++) = *(in++) ^ cipher_ctx->partial[cipher_ctx->num]; | | 266 | (*out++) = *(in++) ^ cipher_ctx->partial[cipher_ctx->num]; |
267 | --inl; | | 267 | --inl; |
268 | cipher_ctx->num = (cipher_ctx->num + 1) % cipher_ctx->blocksize; | | 268 | cipher_ctx->num = (cipher_ctx->num + 1) % cipher_ctx->blocksize; |
269 | } | | 269 | } |
270 | | | 270 | |
271 | /* full blocks */ | | 271 | /* full blocks */ |
272 | if (inl > (unsigned int) cipher_ctx->blocksize) { | | 272 | if (inl > (unsigned int) cipher_ctx->blocksize) { |
273 | nblocks = inl/cipher_ctx->blocksize; | | 273 | nblocks = inl/cipher_ctx->blocksize; |
274 | len = nblocks * cipher_ctx->blocksize; | | 274 | len = nblocks * cipher_ctx->blocksize; |
275 | if (cipher_do_cipher(ctx, out, in, len) < 1) | | 275 | if (cipher_do_cipher(ctx, out, in, len) < 1) |
276 | return 0; | | 276 | return 0; |
277 | inl -= len; | | 277 | inl -= len; |
278 | out += len; | | 278 | out += len; |
279 | in += len; | | 279 | in += len; |
280 | } | | 280 | } |
281 | | | 281 | |
282 | /* final partial block */ | | 282 | /* final partial block */ |
283 | if (inl) { | | 283 | if (inl) { |
284 | memset(cipher_ctx->partial, 0, cipher_ctx->blocksize); | | 284 | memset(cipher_ctx->partial, 0, cipher_ctx->blocksize); |
285 | if (cipher_do_cipher(ctx, cipher_ctx->partial, cipher_ctx->partial, | | 285 | if (cipher_do_cipher(ctx, cipher_ctx->partial, cipher_ctx->partial, |
286 | cipher_ctx->blocksize) < 1) | | 286 | cipher_ctx->blocksize) < 1) |
287 | return 0; | | 287 | return 0; |
288 | while (inl--) { | | 288 | while (inl--) { |
289 | out[cipher_ctx->num] = in[cipher_ctx->num] | | 289 | out[cipher_ctx->num] = in[cipher_ctx->num] |
290 | ^ cipher_ctx->partial[cipher_ctx->num]; | | 290 | ^ cipher_ctx->partial[cipher_ctx->num]; |
291 | cipher_ctx->num++; | | 291 | cipher_ctx->num++; |
292 | } | | 292 | } |
293 | } | | 293 | } |
294 | | | 294 | |
295 | return 1; | | 295 | return 1; |
296 | } | | 296 | } |
297 | | | 297 | |
298 | static int cipher_ctrl(EVP_CIPHER_CTX *ctx, int type, int p1, void* p2) | | 298 | static int cipher_ctrl(EVP_CIPHER_CTX *ctx, int type, int p1, void* p2) |
299 | { | | 299 | { |
300 | struct cipher_ctx *cipher_ctx = | | 300 | struct cipher_ctx *cipher_ctx = |
301 | (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); | | 301 | (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); |
302 | EVP_CIPHER_CTX *to_ctx = (EVP_CIPHER_CTX *)p2; | | 302 | EVP_CIPHER_CTX *to_ctx = (EVP_CIPHER_CTX *)p2; |
303 | struct cipher_ctx *to_cipher_ctx; | | 303 | struct cipher_ctx *to_cipher_ctx; |
304 | | | 304 | |
305 | switch (type) { | | 305 | switch (type) { |
306 | case EVP_CTRL_COPY: | | 306 | case EVP_CTRL_COPY: |
307 | if (cipher_ctx == NULL) | | 307 | if (cipher_ctx == NULL) |
308 | return 1; | | 308 | return 1; |
309 | /* when copying the context, a new session needs to be initialized */ | | 309 | /* when copying the context, a new session needs to be initialized */ |
310 | to_cipher_ctx = | | 310 | to_cipher_ctx = |
311 | (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(to_ctx); | | 311 | (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(to_ctx); |
312 | memset(&to_cipher_ctx->sess, 0, sizeof(to_cipher_ctx->sess)); | | 312 | memset(&to_cipher_ctx->sess, 0, sizeof(to_cipher_ctx->sess)); |
313 | return cipher_init(to_ctx, cipher_ctx->sess.key, EVP_CIPHER_CTX_iv(ctx), | | 313 | return cipher_init(to_ctx, cipher_ctx->sess.key, EVP_CIPHER_CTX_iv(ctx), |
314 | (cipher_ctx->op == COP_ENCRYPT)); | | 314 | (cipher_ctx->op == COP_ENCRYPT)); |
315 | | | 315 | |
316 | case EVP_CTRL_INIT: | | 316 | case EVP_CTRL_INIT: |
317 | memset(&cipher_ctx->sess, 0, sizeof(cipher_ctx->sess)); | | 317 | memset(&cipher_ctx->sess, 0, sizeof(cipher_ctx->sess)); |
318 | return 1; | | 318 | return 1; |
319 | | | 319 | |
320 | default: | | 320 | default: |
321 | break; | | 321 | break; |
322 | } | | 322 | } |
323 | | | 323 | |
324 | return -1; | | 324 | return -1; |
325 | } | | 325 | } |
326 | | | 326 | |
327 | static int cipher_cleanup(EVP_CIPHER_CTX *ctx) | | 327 | static int cipher_cleanup(EVP_CIPHER_CTX *ctx) |
328 | { | | 328 | { |
329 | struct cipher_ctx *cipher_ctx = | | 329 | struct cipher_ctx *cipher_ctx = |
330 | (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); | | 330 | (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); |
331 | | | 331 | |
332 | return clean_devcrypto_session(&cipher_ctx->sess); | | 332 | return clean_devcrypto_session(&cipher_ctx->sess); |
333 | } | | 333 | } |
334 | | | 334 | |
/*
 * Keep a table of known nids and associated methods.
 * Note that known_cipher_nids[] isn't necessarily indexed the same way as
 * cipher_data[] above, which known_cipher_methods[] is.  Both are filled
 * in by prepare_cipher_methods() and torn down by destroy_cipher_method().
 */
static int known_cipher_nids[OSSL_NELEM(cipher_data)];
static int known_cipher_nids_amount = -1; /* -1 indicates not yet initialised */
static EVP_CIPHER *known_cipher_methods[OSSL_NELEM(cipher_data)] = { NULL, };
343 | | | 343 | |
/*
 * Probe the kernel for every cipher in cipher_data[] and build an
 * EVP_CIPHER method for each one actually available, filling
 * known_cipher_methods[] (indexed like cipher_data[]) and
 * known_cipher_nids[] (densely packed, counted by
 * known_cipher_nids_amount).
 */
static void prepare_cipher_methods(void)
{
    size_t i;
    struct session_op sess;
    unsigned long cipher_mode;

    memset(&sess, 0, sizeof(sess));
    /* Dummy key material, long enough for any keylen in cipher_data[]. */
    sess.key = (void *)"01234567890123456789012345678901234567890123456789";

    for (i = 0, known_cipher_nids_amount = 0;
         i < OSSL_NELEM(cipher_data); i++) {

        /*
         * Check that the algo is really available by trying to open and
         * close a session.
         */
        sess.cipher = cipher_data[i].devcryptoid;
        sess.keylen = cipher_data[i].keylen;
        if (ioctl(cfd, CIOCGSESSION, &sess) < 0
            || ioctl(cfd, CIOCFSESSION, &sess.ses) < 0)
            continue;

        cipher_mode = cipher_data[i].flags & EVP_CIPH_MODE;

        /*
         * CTR is exposed to EVP as a stream cipher (blocksize 1) and gets
         * the ctr_do_cipher() wrapper; everything else uses the table
         * blocksize and cipher_do_cipher() directly.  On any setter
         * failure the half-built method is freed and the slot left NULL.
         */
        if ((known_cipher_methods[i] =
                 EVP_CIPHER_meth_new(cipher_data[i].nid,
                                     cipher_mode == EVP_CIPH_CTR_MODE ? 1 :
                                                    cipher_data[i].blocksize,
                                     cipher_data[i].keylen)) == NULL
            || !EVP_CIPHER_meth_set_iv_length(known_cipher_methods[i],
                                              cipher_data[i].ivlen)
            || !EVP_CIPHER_meth_set_flags(known_cipher_methods[i],
                                          cipher_data[i].flags
                                          | EVP_CIPH_CUSTOM_COPY
                                          | EVP_CIPH_CTRL_INIT
                                          | EVP_CIPH_FLAG_DEFAULT_ASN1)
            || !EVP_CIPHER_meth_set_init(known_cipher_methods[i], cipher_init)
            || !EVP_CIPHER_meth_set_do_cipher(known_cipher_methods[i],
                                     cipher_mode == EVP_CIPH_CTR_MODE ?
                                              ctr_do_cipher :
                                              cipher_do_cipher)
            || !EVP_CIPHER_meth_set_ctrl(known_cipher_methods[i], cipher_ctrl)
            || !EVP_CIPHER_meth_set_cleanup(known_cipher_methods[i],
                                            cipher_cleanup)
            || !EVP_CIPHER_meth_set_impl_ctx_size(known_cipher_methods[i],
                                                  sizeof(struct cipher_ctx))) {
            EVP_CIPHER_meth_free(known_cipher_methods[i]);
            known_cipher_methods[i] = NULL;
        } else {
            known_cipher_nids[known_cipher_nids_amount++] =
                cipher_data[i].nid;
        }
    }
}
398 | | | 398 | |
399 | static const EVP_CIPHER *get_cipher_method(int nid) | | 399 | static const EVP_CIPHER *get_cipher_method(int nid) |
400 | { | | 400 | { |
401 | size_t i = get_cipher_data_index(nid); | | 401 | size_t i = get_cipher_data_index(nid); |
402 | | | 402 | |
403 | if (i == (size_t)-1) | | 403 | if (i == (size_t)-1) |
404 | return NULL; | | 404 | return NULL; |
405 | return known_cipher_methods[i]; | | 405 | return known_cipher_methods[i]; |
406 | } | | 406 | } |
407 | | | 407 | |
408 | static int get_cipher_nids(const int **nids) | | 408 | static int get_cipher_nids(const int **nids) |
409 | { | | 409 | { |
410 | *nids = known_cipher_nids; | | 410 | *nids = known_cipher_nids; |
411 | return known_cipher_nids_amount; | | 411 | return known_cipher_nids_amount; |
412 | } | | 412 | } |
413 | | | 413 | |
414 | static void destroy_cipher_method(int nid) | | 414 | static void destroy_cipher_method(int nid) |
415 | { | | 415 | { |
416 | size_t i = get_cipher_data_index(nid); | | 416 | size_t i = get_cipher_data_index(nid); |
417 | | | 417 | |
418 | EVP_CIPHER_meth_free(known_cipher_methods[i]); | | 418 | EVP_CIPHER_meth_free(known_cipher_methods[i]); |
419 | known_cipher_methods[i] = NULL; | | 419 | known_cipher_methods[i] = NULL; |
420 | } | | 420 | } |
421 | | | 421 | |
422 | static void destroy_all_cipher_methods(void) | | 422 | static void destroy_all_cipher_methods(void) |
423 | { | | 423 | { |
424 | size_t i; | | 424 | size_t i; |
425 | | | 425 | |
426 | for (i = 0; i < OSSL_NELEM(cipher_data); i++) | | 426 | for (i = 0; i < OSSL_NELEM(cipher_data); i++) |
427 | destroy_cipher_method(cipher_data[i].nid); | | 427 | destroy_cipher_method(cipher_data[i].nid); |
428 | } | | 428 | } |
429 | | | 429 | |
430 | static int devcrypto_ciphers(ENGINE *e, const EVP_CIPHER **cipher, | | 430 | static int devcrypto_ciphers(ENGINE *e, const EVP_CIPHER **cipher, |
431 | const int **nids, int nid) | | 431 | const int **nids, int nid) |
432 | { | | 432 | { |
433 | if (cipher == NULL) | | 433 | if (cipher == NULL) |
434 | return get_cipher_nids(nids); | | 434 | return get_cipher_nids(nids); |
435 | | | 435 | |
436 | *cipher = get_cipher_method(nid); | | 436 | *cipher = get_cipher_method(nid); |
437 | | | 437 | |
438 | return *cipher != NULL; | | 438 | return *cipher != NULL; |
439 | } | | 439 | } |
440 | | | 440 | |
441 | /* | | 441 | /* |
442 | * We only support digests if the cryptodev implementation supports multiple | | 442 | * We only support digests if the cryptodev implementation supports multiple |
443 | * data updates and session copying. Otherwise, we would be forced to maintain | | 443 | * data updates and session copying. Otherwise, we would be forced to maintain |
444 | * a cache, which is perilous if there's a lot of data coming in (if someone | | 444 | * a cache, which is perilous if there's a lot of data coming in (if someone |
445 | * wants to checksum an OpenSSL tarball, for example). | | 445 | * wants to checksum an OpenSSL tarball, for example). |
446 | */ | | 446 | */ |
447 | #if defined(CIOCCPHASH) && defined(COP_FLAG_UPDATE) && defined(COP_FLAG_FINAL) | | 447 | #if defined(CIOCCPHASH) && defined(COP_FLAG_UPDATE) && defined(COP_FLAG_FINAL) |
448 | #define IMPLEMENT_DIGEST | | 448 | #define IMPLEMENT_DIGEST |
449 | | | 449 | |
450 | /****************************************************************************** | | 450 | /****************************************************************************** |
451 | * | | 451 | * |
452 | * Digests | | 452 | * Digests |
453 | * | | 453 | * |
454 | * Because they all do the same basic operation, we have only one set of | | 454 | * Because they all do the same basic operation, we have only one set of |
455 | * method functions for them all to share, and a mapping table between | | 455 | * method functions for them all to share, and a mapping table between |
456 | * NIDs and cryptodev IDs, with all the necessary size data. | | 456 | * NIDs and cryptodev IDs, with all the necessary size data. |
457 | * | | 457 | * |
458 | *****/ | | 458 | *****/ |
459 | | | 459 | |
/* Per-EVP_MD_CTX state: one cryptodev digest session per context. */
struct digest_ctx {
    struct session_op sess;     /* cryptodev session handle (CIOCGSESSION) */
    /* This signals that the init function was called, not that it succeeded. */
    int init_called;
};
465 | | | 465 | |
/*
 * Mapping between OpenSSL digest NIDs and cryptodev algorithm IDs, with
 * the block size and digest length needed to build the EVP_MD methods.
 * Entries are compiled in only when both OpenSSL and the cryptodev
 * headers support the algorithm.
 */
static const struct digest_data_st {
    int nid;                    /* OpenSSL NID of the digest */
    int blocksize;              /* input block size in bytes */
    int digestlen;              /* digest output length in bytes */
    int devcryptoid;            /* cryptodev CRYPTO_* algorithm id */
} digest_data[] = {
#ifndef OPENSSL_NO_MD5
    { NID_md5, /* MD5_CBLOCK */ 64, 16, CRYPTO_MD5 },
#endif
    { NID_sha1, SHA_CBLOCK, 20, CRYPTO_SHA1 },
#ifndef OPENSSL_NO_RMD160
# if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_RIPEMD160)
    { NID_ripemd160, /* RIPEMD160_CBLOCK */ 64, 20, CRYPTO_RIPEMD160 },
# endif
#endif
#if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_SHA2_224)
    { NID_sha224, SHA256_CBLOCK, 224 / 8, CRYPTO_SHA2_224 },
#endif
#if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_SHA2_256)
    { NID_sha256, SHA256_CBLOCK, 256 / 8, CRYPTO_SHA2_256 },
#endif
#if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_SHA2_384)
    { NID_sha384, SHA512_CBLOCK, 384 / 8, CRYPTO_SHA2_384 },
#endif
#if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_SHA2_512)
    { NID_sha512, SHA512_CBLOCK, 512 / 8, CRYPTO_SHA2_512 },
#endif
};
494 | | | 494 | |
495 | static size_t get_digest_data_index(int nid) | | 495 | static size_t get_digest_data_index(int nid) |
496 | { | | 496 | { |
497 | size_t i; | | 497 | size_t i; |
498 | | | 498 | |
499 | for (i = 0; i < OSSL_NELEM(digest_data); i++) | | 499 | for (i = 0; i < OSSL_NELEM(digest_data); i++) |
500 | if (nid == digest_data[i].nid) | | 500 | if (nid == digest_data[i].nid) |
501 | return i; | | 501 | return i; |
502 | | | 502 | |
503 | /* | | 503 | /* |
504 | * Code further down must make sure that only NIDs in the table above | | 504 | * Code further down must make sure that only NIDs in the table above |
505 | * are used. If any other NID reaches this function, there's a grave | | 505 | * are used. If any other NID reaches this function, there's a grave |
506 | * coding error further down. | | 506 | * coding error further down. |
507 | */ | | 507 | */ |
508 | assert("Code that never should be reached" == NULL); | | 508 | assert("Code that never should be reached" == NULL); |
509 | return -1; | | 509 | return -1; |
510 | } | | 510 | } |
511 | | | 511 | |
512 | static const struct digest_data_st *get_digest_data(int nid) | | 512 | static const struct digest_data_st *get_digest_data(int nid) |
513 | { | | 513 | { |
514 | return &digest_data[get_digest_data_index(nid)]; | | 514 | return &digest_data[get_digest_data_index(nid)]; |
515 | } | | 515 | } |
516 | | | 516 | |
517 | /* | | 517 | /* |
518 | * Following are the four necessary functions to map OpenSSL functionality | | 518 | * Following are the four necessary functions to map OpenSSL functionality |
519 | * with cryptodev. | | 519 | * with cryptodev. |
520 | */ | | 520 | */ |
521 | | | 521 | |
522 | static int digest_init(EVP_MD_CTX *ctx) | | 522 | static int digest_init(EVP_MD_CTX *ctx) |
523 | { | | 523 | { |
524 | struct digest_ctx *digest_ctx = | | 524 | struct digest_ctx *digest_ctx = |
525 | (struct digest_ctx *)EVP_MD_CTX_md_data(ctx); | | 525 | (struct digest_ctx *)EVP_MD_CTX_md_data(ctx); |
526 | const struct digest_data_st *digest_d = | | 526 | const struct digest_data_st *digest_d = |
527 | get_digest_data(EVP_MD_CTX_type(ctx)); | | 527 | get_digest_data(EVP_MD_CTX_type(ctx)); |
528 | | | 528 | |
529 | digest_ctx->init_called = 1; | | 529 | digest_ctx->init_called = 1; |
530 | | | 530 | |
531 | memset(&digest_ctx->sess, 0, sizeof(digest_ctx->sess)); | | 531 | memset(&digest_ctx->sess, 0, sizeof(digest_ctx->sess)); |
532 | digest_ctx->sess.mac = digest_d->devcryptoid; | | 532 | digest_ctx->sess.mac = digest_d->devcryptoid; |
533 | if (ioctl(cfd, CIOCGSESSION, &digest_ctx->sess) < 0) { | | 533 | if (ioctl(cfd, CIOCGSESSION, &digest_ctx->sess) < 0) { |
534 | SYSerr(SYS_F_IOCTL, errno); | | 534 | SYSerr(SYS_F_IOCTL, errno); |
535 | return 0; | | 535 | return 0; |
536 | } | | 536 | } |
537 | | | 537 | |
538 | return 1; | | 538 | return 1; |
539 | } | | 539 | } |
540 | | | 540 | |
541 | static int digest_op(struct digest_ctx *ctx, const void *src, size_t srclen, | | 541 | static int digest_op(struct digest_ctx *ctx, const void *src, size_t srclen, |
542 | void *res, unsigned int flags) | | 542 | void *res, unsigned int flags) |
543 | { | | 543 | { |
544 | struct crypt_op cryp; | | 544 | struct crypt_op cryp; |
545 | | | 545 | |
546 | memset(&cryp, 0, sizeof(cryp)); | | 546 | memset(&cryp, 0, sizeof(cryp)); |
547 | cryp.ses = ctx->sess.ses; | | 547 | cryp.ses = ctx->sess.ses; |
548 | cryp.len = srclen; | | 548 | cryp.len = srclen; |
549 | cryp.src = (void *)src; | | 549 | cryp.src = (void *)src; |
550 | cryp.dst = NULL; | | 550 | cryp.dst = NULL; |
551 | cryp.mac = res; | | 551 | cryp.mac = res; |
552 | cryp.flags = flags; | | 552 | cryp.flags = flags; |
553 | return ioctl(cfd, CIOCCRYPT, &cryp); | | 553 | return ioctl(cfd, CIOCCRYPT, &cryp); |
554 | } | | 554 | } |
555 | | | 555 | |
556 | static int digest_update(EVP_MD_CTX *ctx, const void *data, size_t count) | | 556 | static int digest_update(EVP_MD_CTX *ctx, const void *data, size_t count) |
557 | { | | 557 | { |
558 | struct digest_ctx *digest_ctx = | | 558 | struct digest_ctx *digest_ctx = |
559 | (struct digest_ctx *)EVP_MD_CTX_md_data(ctx); | | 559 | (struct digest_ctx *)EVP_MD_CTX_md_data(ctx); |
560 | | | 560 | |
561 | if (count == 0) | | 561 | if (count == 0) |
562 | return 1; | | 562 | return 1; |
563 | | | 563 | |
564 | if (digest_ctx == NULL) | | 564 | if (digest_ctx == NULL) |
565 | return 0; | | 565 | return 0; |
566 | | | 566 | |
567 | if (digest_op(digest_ctx, data, count, NULL, COP_FLAG_UPDATE) < 0) { | | 567 | if (digest_op(digest_ctx, data, count, NULL, COP_FLAG_UPDATE) < 0) { |
568 | SYSerr(SYS_F_IOCTL, errno); | | 568 | SYSerr(SYS_F_IOCTL, errno); |
569 | return 0; | | 569 | return 0; |
570 | } | | 570 | } |
571 | | | 571 | |
572 | return 1; | | 572 | return 1; |
573 | } | | 573 | } |
574 | | | 574 | |
575 | static int digest_final(EVP_MD_CTX *ctx, unsigned char *md) | | 575 | static int digest_final(EVP_MD_CTX *ctx, unsigned char *md) |
576 | { | | 576 | { |
577 | struct digest_ctx *digest_ctx = | | 577 | struct digest_ctx *digest_ctx = |
578 | (struct digest_ctx *)EVP_MD_CTX_md_data(ctx); | | 578 | (struct digest_ctx *)EVP_MD_CTX_md_data(ctx); |
579 | | | 579 | |
580 | if (md == NULL || digest_ctx == NULL) | | 580 | if (md == NULL || digest_ctx == NULL) |
581 | return 0; | | 581 | return 0; |
582 | if (digest_op(digest_ctx, NULL, 0, md, COP_FLAG_FINAL) < 0) { | | 582 | if (digest_op(digest_ctx, NULL, 0, md, COP_FLAG_FINAL) < 0) { |
583 | SYSerr(SYS_F_IOCTL, errno); | | 583 | SYSerr(SYS_F_IOCTL, errno); |
584 | return 0; | | 584 | return 0; |
585 | } | | 585 | } |
586 | | | 586 | |
587 | return 1; | | 587 | return 1; |
588 | } | | 588 | } |
589 | | | 589 | |
/*
 * EVP_MD copy: duplicate the in-kernel hash state from |from| into |to|
 * via the CIOCCPHASH ioctl.  A source context that was never initialised
 * has no session, so there is nothing to copy and we succeed trivially.
 * Returns 1 on success, 0 on failure.
 */
static int digest_copy(EVP_MD_CTX *to, const EVP_MD_CTX *from)
{
    struct digest_ctx *digest_from =
        (struct digest_ctx *)EVP_MD_CTX_md_data(from);
    struct digest_ctx *digest_to =
        (struct digest_ctx *)EVP_MD_CTX_md_data(to);
    struct cphash_op cphash;

    if (digest_from == NULL || digest_from->init_called != 1)
        return 1;

    /* Open a fresh session in the destination context first. */
    if (!digest_init(to)) {
        SYSerr(SYS_F_IOCTL, errno);
        return 0;
    }

    /* Ask the driver to copy hash state between the two sessions. */
    cphash.src_ses = digest_from->sess.ses;
    cphash.dst_ses = digest_to->sess.ses;
    if (ioctl(cfd, CIOCCPHASH, &cphash) < 0) {
        SYSerr(SYS_F_IOCTL, errno);
        return 0;
    }
    return 1;
}
614 | | | 614 | |
615 | static int digest_cleanup(EVP_MD_CTX *ctx) | | 615 | static int digest_cleanup(EVP_MD_CTX *ctx) |
616 | { | | 616 | { |
617 | struct digest_ctx *digest_ctx = | | 617 | struct digest_ctx *digest_ctx = |
618 | (struct digest_ctx *)EVP_MD_CTX_md_data(ctx); | | 618 | (struct digest_ctx *)EVP_MD_CTX_md_data(ctx); |
619 | | | 619 | |
620 | if (digest_ctx == NULL) | | 620 | if (digest_ctx == NULL) |
621 | return 1; | | 621 | return 1; |
622 | | | 622 | |
623 | return clean_devcrypto_session(&digest_ctx->sess); | | 623 | return clean_devcrypto_session(&digest_ctx->sess); |
624 | } | | 624 | } |
625 | | | 625 | |
/*
 * Probe whether digest_data[digest_data_index] is actually usable: the
 * driver must accept a session for the algorithm AND support copying
 * hash state between two sessions (CIOCCPHASH), which digest_copy()
 * depends on.  Returns 1 if usable, 0 otherwise.  Both probe sessions
 * are closed again before returning.
 */
static int devcrypto_test_digest(size_t digest_data_index)
{
    struct session_op sess1, sess2;
    struct cphash_op cphash;
    int ret=0;

    memset(&sess1, 0, sizeof(sess1));
    memset(&sess2, 0, sizeof(sess2));
    sess1.mac = digest_data[digest_data_index].devcryptoid;
    if (ioctl(cfd, CIOCGSESSION, &sess1) < 0)
        return 0;
    /* Make sure the driver is capable of hash state copy */
    sess2.mac = sess1.mac;
    if (ioctl(cfd, CIOCGSESSION, &sess2) >= 0) {
        cphash.src_ses = sess1.ses;
        cphash.dst_ses = sess2.ses;
        if (ioctl(cfd, CIOCCPHASH, &cphash) >= 0)
            ret = 1;
        /* Best-effort teardown of the probe sessions. */
        ioctl(cfd, CIOCFSESSION, &sess2.ses);
    }
    ioctl(cfd, CIOCFSESSION, &sess1.ses);
    return ret;
}
649 | | | 649 | |
650 | /* | | 650 | /* |
651 | * Keep a table of known nids and associated methods. | | 651 | * Keep a table of known nids and associated methods. |
652 | * Note that known_digest_nids[] isn't necessarily indexed the same way as | | 652 | * Note that known_digest_nids[] isn't necessarily indexed the same way as |
653 | * digest_data[] above, which known_digest_methods[] is. | | 653 | * digest_data[] above, which known_digest_methods[] is. |
654 | */ | | 654 | */ |
/* NIDs found usable by probing; filled in by prepare_digest_methods(). */
static int known_digest_nids[OSSL_NELEM(digest_data)];
static int known_digest_nids_amount = -1; /* -1 indicates not yet initialised */
/* EVP_MD methods, indexed like digest_data[]; unusable entries stay NULL. */
static EVP_MD *known_digest_methods[OSSL_NELEM(digest_data)] = { NULL, };
658 | | | 658 | |
/*
 * Probe every digest_data[] entry against the driver and build an EVP_MD
 * method for each usable one.  Fills known_digest_methods[] (indexed like
 * digest_data[]) and known_digest_nids[] (packed), and sets
 * known_digest_nids_amount.
 */
static void prepare_digest_methods(void)
{
    size_t i;

    for (i = 0, known_digest_nids_amount = 0; i < OSSL_NELEM(digest_data);
         i++) {

        /*
         * Check that the algo is usable
         */
        if (!devcrypto_test_digest(i))
            continue;

        /* Build the EVP_MD; on any setup failure, discard it entirely. */
        if ((known_digest_methods[i] = EVP_MD_meth_new(digest_data[i].nid,
                                                       NID_undef)) == NULL
            || !EVP_MD_meth_set_input_blocksize(known_digest_methods[i],
                                                digest_data[i].blocksize)
            || !EVP_MD_meth_set_result_size(known_digest_methods[i],
                                            digest_data[i].digestlen)
            || !EVP_MD_meth_set_init(known_digest_methods[i], digest_init)
            || !EVP_MD_meth_set_update(known_digest_methods[i], digest_update)
            || !EVP_MD_meth_set_final(known_digest_methods[i], digest_final)
            || !EVP_MD_meth_set_copy(known_digest_methods[i], digest_copy)
            || !EVP_MD_meth_set_cleanup(known_digest_methods[i], digest_cleanup)
            || !EVP_MD_meth_set_app_datasize(known_digest_methods[i],
                                             sizeof(struct digest_ctx))) {
            EVP_MD_meth_free(known_digest_methods[i]);
            known_digest_methods[i] = NULL;
        } else {
            known_digest_nids[known_digest_nids_amount++] = digest_data[i].nid;
        }
    }
}
692 | | | 692 | |
693 | static const EVP_MD *get_digest_method(int nid) | | 693 | static const EVP_MD *get_digest_method(int nid) |
694 | { | | 694 | { |
695 | size_t i = get_digest_data_index(nid); | | 695 | size_t i = get_digest_data_index(nid); |
696 | | | 696 | |
697 | if (i == (size_t)-1) | | 697 | if (i == (size_t)-1) |
698 | return NULL; | | 698 | return NULL; |
699 | return known_digest_methods[i]; | | 699 | return known_digest_methods[i]; |
700 | } | | 700 | } |
701 | | | 701 | |
702 | static int get_digest_nids(const int **nids) | | 702 | static int get_digest_nids(const int **nids) |
703 | { | | 703 | { |
704 | *nids = known_digest_nids; | | 704 | *nids = known_digest_nids; |
705 | return known_digest_nids_amount; | | 705 | return known_digest_nids_amount; |
706 | } | | 706 | } |
707 | | | 707 | |
708 | static void destroy_digest_method(int nid) | | 708 | static void destroy_digest_method(int nid) |
709 | { | | 709 | { |
710 | size_t i = get_digest_data_index(nid); | | 710 | size_t i = get_digest_data_index(nid); |
711 | | | 711 | |
712 | EVP_MD_meth_free(known_digest_methods[i]); | | 712 | EVP_MD_meth_free(known_digest_methods[i]); |
713 | known_digest_methods[i] = NULL; | | 713 | known_digest_methods[i] = NULL; |
714 | } | | 714 | } |
715 | | | 715 | |
716 | static void destroy_all_digest_methods(void) | | 716 | static void destroy_all_digest_methods(void) |
717 | { | | 717 | { |
718 | size_t i; | | 718 | size_t i; |
719 | | | 719 | |
720 | for (i = 0; i < OSSL_NELEM(digest_data); i++) | | 720 | for (i = 0; i < OSSL_NELEM(digest_data); i++) |
721 | destroy_digest_method(digest_data[i].nid); | | 721 | destroy_digest_method(digest_data[i].nid); |
722 | } | | 722 | } |
723 | | | 723 | |
724 | static int devcrypto_digests(ENGINE *e, const EVP_MD **digest, | | 724 | static int devcrypto_digests(ENGINE *e, const EVP_MD **digest, |
725 | const int **nids, int nid) | | 725 | const int **nids, int nid) |
726 | { | | 726 | { |
727 | if (digest == NULL) | | 727 | if (digest == NULL) |
728 | return get_digest_nids(nids); | | 728 | return get_digest_nids(nids); |
729 | | | 729 | |
730 | *digest = get_digest_method(nid); | | 730 | *digest = get_digest_method(nid); |
731 | | | 731 | |
732 | return *digest != NULL; | | 732 | return *digest != NULL; |
733 | } | | 733 | } |
734 | | | 734 | |
735 | #endif | | 735 | #endif |
736 | | | 736 | |
737 | /****************************************************************************** | | 737 | /****************************************************************************** |
738 | * | | 738 | * |
739 | * LOAD / UNLOAD | | 739 | * LOAD / UNLOAD |
740 | * | | 740 | * |
741 | *****/ | | 741 | *****/ |
742 | | | 742 | |
743 | static int devcrypto_unload(ENGINE *e) | | 743 | static int devcrypto_unload(ENGINE *e) |
744 | { | | 744 | { |
745 | destroy_all_cipher_methods(); | | 745 | destroy_all_cipher_methods(); |
746 | #ifdef IMPLEMENT_DIGEST | | 746 | #ifdef IMPLEMENT_DIGEST |
747 | destroy_all_digest_methods(); | | 747 | destroy_all_digest_methods(); |
748 | #endif | | 748 | #endif |
749 | | | 749 | |
750 | close(cfd); | | 750 | close(cfd); |
751 | | | 751 | |
752 | return 1; | | 752 | return 1; |
753 | } | | 753 | } |
754 | /* | | 754 | /* |
755 | * This engine is always built into libcrypto, so it doesn't offer any | | 755 | * This engine is always built into libcrypto, so it doesn't offer any |
756 | * ability to be dynamically loadable. | | 756 | * ability to be dynamically loadable. |
757 | */ | | 757 | */ |
758 | void engine_load_devcrypto_int() | | 758 | void engine_load_devcrypto_int() |
759 | { | | 759 | { |
760 | ENGINE *e = NULL; | | 760 | ENGINE *e = NULL; |
761 | | | 761 | |
762 | if ((cfd = open("/dev/crypto", O_RDWR, 0)) < 0) { | | 762 | if ((cfd = open("/dev/crypto", O_RDWR, 0)) < 0) { |
763 | #ifndef ENGINE_DEVCRYPTO_DEBUG | | 763 | #ifndef ENGINE_DEVCRYPTO_DEBUG |
764 | if (errno != ENOENT) | | 764 | if (errno != ENOENT && errno != ENXIO) |
765 | #endif | | 765 | #endif |
766 | fprintf(stderr, "Could not open /dev/crypto: %s\n", strerror(errno)); | | 766 | fprintf(stderr, "Could not open /dev/crypto: %s\n", strerror(errno)); |
767 | return; | | 767 | return; |
768 | } | | 768 | } |
769 | | | 769 | |
770 | if ((e = ENGINE_new()) == NULL | | 770 | if ((e = ENGINE_new()) == NULL |
771 | || !ENGINE_set_destroy_function(e, devcrypto_unload)) { | | 771 | || !ENGINE_set_destroy_function(e, devcrypto_unload)) { |
772 | ENGINE_free(e); | | 772 | ENGINE_free(e); |
773 | /* | | 773 | /* |
774 | * We know that devcrypto_unload() won't be called when one of the | | 774 | * We know that devcrypto_unload() won't be called when one of the |
775 | * above two calls have failed, so we close cfd explicitly here to | | 775 | * above two calls have failed, so we close cfd explicitly here to |
776 | * avoid leaking resources. | | 776 | * avoid leaking resources. |
777 | */ | | 777 | */ |
778 | close(cfd); | | 778 | close(cfd); |
779 | return; | | 779 | return; |
780 | } | | 780 | } |
781 | | | 781 | |
782 | prepare_cipher_methods(); | | 782 | prepare_cipher_methods(); |
783 | #ifdef IMPLEMENT_DIGEST | | 783 | #ifdef IMPLEMENT_DIGEST |
784 | prepare_digest_methods(); | | 784 | prepare_digest_methods(); |
785 | #endif | | 785 | #endif |
786 | | | 786 | |
787 | if (!ENGINE_set_id(e, "devcrypto") | | 787 | if (!ENGINE_set_id(e, "devcrypto") |
788 | || !ENGINE_set_name(e, "/dev/crypto engine") | | 788 | || !ENGINE_set_name(e, "/dev/crypto engine") |
789 | | | 789 | |
790 | /* | | 790 | /* |
791 | * Asymmetric ciphers aren't well supported with /dev/crypto. Among the BSD | | 791 | * Asymmetric ciphers aren't well supported with /dev/crypto. Among the BSD |
792 | * implementations, it seems to only exist in FreeBSD, and regarding the | | 792 | * implementations, it seems to only exist in FreeBSD, and regarding the |
793 | * parameters in its crypt_kop, the manual crypto(4) has this to say: | | 793 | * parameters in its crypt_kop, the manual crypto(4) has this to say: |
794 | * | | 794 | * |
795 | * The semantics of these arguments are currently undocumented. | | 795 | * The semantics of these arguments are currently undocumented. |
796 | * | | 796 | * |
797 | * Reading through the FreeBSD source code doesn't give much more than | | 797 | * Reading through the FreeBSD source code doesn't give much more than |
798 | * their CRK_MOD_EXP implementation for ubsec. | | 798 | * their CRK_MOD_EXP implementation for ubsec. |
799 | * | | 799 | * |
800 | * It doesn't look much better with cryptodev-linux. They have the crypt_kop | | 800 | * It doesn't look much better with cryptodev-linux. They have the crypt_kop |
801 | * structure as well as the command (CRK_*) in cryptodev.h, but no support | | 801 | * structure as well as the command (CRK_*) in cryptodev.h, but no support |
802 | * seems to be implemented at all for the moment. | | 802 | * seems to be implemented at all for the moment. |
803 | * | | 803 | * |
804 | * At the time of writing, it seems impossible to write proper support for | | 804 | * At the time of writing, it seems impossible to write proper support for |
805 | * FreeBSD's asym features without some very deep knowledge and access to | | 805 | * FreeBSD's asym features without some very deep knowledge and access to |
806 | * specific kernel modules. | | 806 | * specific kernel modules. |
807 | * | | 807 | * |
808 | * /Richard Levitte, 2017-05-11 | | 808 | * /Richard Levitte, 2017-05-11 |
809 | */ | | 809 | */ |
810 | #if 0 | | 810 | #if 0 |
811 | # ifndef OPENSSL_NO_RSA | | 811 | # ifndef OPENSSL_NO_RSA |
812 | || !ENGINE_set_RSA(e, devcrypto_rsa) | | 812 | || !ENGINE_set_RSA(e, devcrypto_rsa) |
813 | # endif | | 813 | # endif |
814 | # ifndef OPENSSL_NO_DSA | | 814 | # ifndef OPENSSL_NO_DSA |
815 | || !ENGINE_set_DSA(e, devcrypto_dsa) | | 815 | || !ENGINE_set_DSA(e, devcrypto_dsa) |
816 | # endif | | 816 | # endif |
817 | # ifndef OPENSSL_NO_DH | | 817 | # ifndef OPENSSL_NO_DH |
818 | || !ENGINE_set_DH(e, devcrypto_dh) | | 818 | || !ENGINE_set_DH(e, devcrypto_dh) |
819 | # endif | | 819 | # endif |
820 | # ifndef OPENSSL_NO_EC | | 820 | # ifndef OPENSSL_NO_EC |
821 | || !ENGINE_set_EC(e, devcrypto_ec) | | 821 | || !ENGINE_set_EC(e, devcrypto_ec) |
822 | # endif | | 822 | # endif |
823 | #endif | | 823 | #endif |
824 | || !ENGINE_set_ciphers(e, devcrypto_ciphers) | | 824 | || !ENGINE_set_ciphers(e, devcrypto_ciphers) |
825 | #ifdef IMPLEMENT_DIGEST | | 825 | #ifdef IMPLEMENT_DIGEST |
826 | || !ENGINE_set_digests(e, devcrypto_digests) | | 826 | || !ENGINE_set_digests(e, devcrypto_digests) |
827 | #endif | | 827 | #endif |
828 | ) { | | 828 | ) { |
829 | ENGINE_free(e); | | 829 | ENGINE_free(e); |
830 | return; | | 830 | return; |
831 | } | | 831 | } |
832 | | | 832 | |
833 | ENGINE_add(e); | | 833 | ENGINE_add(e); |
834 | ENGINE_free(e); /* Loose our local reference */ | | 834 | ENGINE_free(e); /* Loose our local reference */ |
835 | ERR_clear_error(); | | 835 | ERR_clear_error(); |
836 | } | | 836 | } |