diff options
Diffstat (limited to 'src/lib/libcrypto/evp/evp_cipher.c')
-rw-r--r-- | src/lib/libcrypto/evp/evp_cipher.c | 687 |
1 files changed, 687 insertions, 0 deletions
diff --git a/src/lib/libcrypto/evp/evp_cipher.c b/src/lib/libcrypto/evp/evp_cipher.c new file mode 100644 index 0000000000..3b38e18bf3 --- /dev/null +++ b/src/lib/libcrypto/evp/evp_cipher.c | |||
@@ -0,0 +1,687 @@ | |||
1 | /* $OpenBSD: evp_cipher.c,v 1.1 2023/12/29 05:57:24 tb Exp $ */ | ||
2 | /* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This package is an SSL implementation written | ||
6 | * by Eric Young (eay@cryptsoft.com). | ||
7 | * The implementation was written so as to conform with Netscapes SSL. | ||
8 | * | ||
9 | * This library is free for commercial and non-commercial use as long as | ||
10 | * the following conditions are aheared to. The following conditions | ||
11 | * apply to all code found in this distribution, be it the RC4, RSA, | ||
12 | * lhash, DES, etc., code; not just the SSL code. The SSL documentation | ||
13 | * included with this distribution is covered by the same copyright terms | ||
14 | * except that the holder is Tim Hudson (tjh@cryptsoft.com). | ||
15 | * | ||
16 | * Copyright remains Eric Young's, and as such any Copyright notices in | ||
17 | * the code are not to be removed. | ||
18 | * If this package is used in a product, Eric Young should be given attribution | ||
19 | * as the author of the parts of the library used. | ||
20 | * This can be in the form of a textual message at program startup or | ||
21 | * in documentation (online or textual) provided with the package. | ||
22 | * | ||
23 | * Redistribution and use in source and binary forms, with or without | ||
24 | * modification, are permitted provided that the following conditions | ||
25 | * are met: | ||
26 | * 1. Redistributions of source code must retain the copyright | ||
27 | * notice, this list of conditions and the following disclaimer. | ||
28 | * 2. Redistributions in binary form must reproduce the above copyright | ||
29 | * notice, this list of conditions and the following disclaimer in the | ||
30 | * documentation and/or other materials provided with the distribution. | ||
31 | * 3. All advertising materials mentioning features or use of this software | ||
32 | * must display the following acknowledgement: | ||
33 | * "This product includes cryptographic software written by | ||
34 | * Eric Young (eay@cryptsoft.com)" | ||
35 | * The word 'cryptographic' can be left out if the rouines from the library | ||
36 | * being used are not cryptographic related :-). | ||
37 | * 4. If you include any Windows specific code (or a derivative thereof) from | ||
38 | * the apps directory (application code) you must include an acknowledgement: | ||
39 | * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" | ||
40 | * | ||
41 | * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND | ||
42 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
43 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
44 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE | ||
45 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
46 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
47 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
48 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | ||
49 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | ||
50 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | ||
51 | * SUCH DAMAGE. | ||
52 | * | ||
53 | * The licence and distribution terms for any publically available version or | ||
54 | * derivative of this code cannot be changed. i.e. this code cannot simply be | ||
55 | * copied and put under another distribution licence | ||
56 | * [including the GNU Public Licence.] | ||
57 | */ | ||
58 | |||
59 | #include <limits.h> | ||
60 | #include <stdio.h> | ||
61 | #include <stdlib.h> | ||
62 | #include <string.h> | ||
63 | |||
64 | #include <openssl/opensslconf.h> | ||
65 | |||
66 | #include <openssl/err.h> | ||
67 | #include <openssl/evp.h> | ||
68 | |||
69 | #include "evp_local.h" | ||
70 | |||
71 | int | ||
72 | EVP_CipherInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, | ||
73 | const unsigned char *key, const unsigned char *iv, int enc) | ||
74 | { | ||
75 | return EVP_CipherInit_ex(ctx, cipher, NULL, key, iv, enc); | ||
76 | } | ||
77 | |||
/*
 * Initialize ctx with cipher, key and IV for encryption (enc == 1) or
 * decryption (enc == 0); enc == -1 keeps the context's current direction.
 * cipher, key and iv may each be NULL to retain the corresponding state
 * from an earlier call on the same ctx. The engine argument is ignored.
 * Returns 1 on success, 0 on error.
 */
int
EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, ENGINE *engine,
    const unsigned char *key, const unsigned char *iv, int enc)
{
	/* Normalize enc to 0 or 1; -1 means "keep the previous direction". */
	if (enc == -1)
		enc = ctx->encrypt;
	if (enc != 0)
		enc = 1;
	ctx->encrypt = enc;

	if (cipher == NULL && ctx->cipher == NULL) {
		EVPerror(EVP_R_NO_CIPHER_SET);
		return 0;
	}

	/*
	 * Set up cipher and context. Allocate cipher data and initialize ctx.
	 * On ctx reuse only retain encryption direction and key wrap flag.
	 */
	if (cipher != NULL) {
		unsigned long flags = ctx->flags;

		EVP_CIPHER_CTX_cleanup(ctx);
		/* cleanup() zeroed the whole ctx; restore what we keep. */
		ctx->encrypt = enc;
		ctx->flags = flags & EVP_CIPHER_CTX_FLAG_WRAP_ALLOW;

		ctx->cipher = cipher;
		ctx->key_len = cipher->key_len;

		if (ctx->cipher->ctx_size != 0) {
			ctx->cipher_data = calloc(1, ctx->cipher->ctx_size);
			if (ctx->cipher_data == NULL) {
				EVPerror(ERR_R_MALLOC_FAILURE);
				return 0;
			}
		}

		/* Some ciphers require a ctrl-based init before first use. */
		if ((ctx->cipher->flags & EVP_CIPH_CTRL_INIT) != 0) {
			if (!EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_INIT, 0, NULL)) {
				EVPerror(EVP_R_INITIALIZATION_ERROR);
				return 0;
			}
		}
	}

	/* Block sizes must be a power of 2 due to the use of block_mask. */
	if (ctx->cipher->block_size != 1 &&
	    ctx->cipher->block_size != 8 &&
	    ctx->cipher->block_size != 16) {
		EVPerror(EVP_R_BAD_BLOCK_LENGTH);
		return 0;
	}

	/* Key wrap mode must be explicitly allowed by the caller. */
	if ((ctx->flags & EVP_CIPHER_CTX_FLAG_WRAP_ALLOW) == 0 &&
	    EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_WRAP_MODE) {
		EVPerror(EVP_R_WRAP_MODE_NOT_ALLOWED);
		return 0;
	}

	/* Generic IV handling, unless the cipher manages its own IV. */
	if ((EVP_CIPHER_CTX_flags(ctx) & EVP_CIPH_CUSTOM_IV) == 0) {
		int iv_len;

		switch (EVP_CIPHER_CTX_mode(ctx)) {

		case EVP_CIPH_STREAM_CIPHER:
		case EVP_CIPH_ECB_MODE:
			/* No IV used in these modes. */
			break;

		case EVP_CIPH_CFB_MODE:
		case EVP_CIPH_OFB_MODE:

			ctx->num = 0;
			/* fall-through */

		case EVP_CIPH_CBC_MODE:
			iv_len = EVP_CIPHER_CTX_iv_length(ctx);
			if (iv_len < 0 || iv_len > sizeof(ctx->oiv)) {
				EVPerror(EVP_R_IV_TOO_LARGE);
				return 0;
			}
			/* Save original IV in oiv; working copy lives in iv. */
			if (iv != NULL)
				memcpy(ctx->oiv, iv, iv_len);
			memcpy(ctx->iv, ctx->oiv, iv_len);
			break;

		case EVP_CIPH_CTR_MODE:
			ctx->num = 0;
			iv_len = EVP_CIPHER_CTX_iv_length(ctx);
			if (iv_len < 0 || iv_len > sizeof(ctx->iv)) {
				EVPerror(EVP_R_IV_TOO_LARGE);
				return 0;
			}
			/* Don't reuse IV for CTR mode */
			if (iv != NULL)
				memcpy(ctx->iv, iv, iv_len);
			break;

		default:
			return 0;
			break;
		}
	}

	/* Run the cipher's key schedule if a key was supplied (or required). */
	if (key != NULL || (ctx->cipher->flags & EVP_CIPH_ALWAYS_CALL_INIT) != 0) {
		if (!ctx->cipher->init(ctx, key, iv, enc))
			return 0;
	}

	/* Reset streaming state for EVP_{Encrypt,Decrypt}Update(). */
	ctx->partial_len = 0;
	ctx->final_used = 0;

	return 1;
}
191 | |||
192 | int | ||
193 | EVP_CipherUpdate(EVP_CIPHER_CTX *ctx, unsigned char *out, int *out_len, | ||
194 | const unsigned char *in, int in_len) | ||
195 | { | ||
196 | if (ctx->encrypt) | ||
197 | return EVP_EncryptUpdate(ctx, out, out_len, in, in_len); | ||
198 | |||
199 | return EVP_DecryptUpdate(ctx, out, out_len, in, in_len); | ||
200 | } | ||
201 | |||
202 | int | ||
203 | EVP_CipherFinal(EVP_CIPHER_CTX *ctx, unsigned char *out, int *out_len) | ||
204 | { | ||
205 | if (ctx->encrypt) | ||
206 | return EVP_EncryptFinal_ex(ctx, out, out_len); | ||
207 | |||
208 | return EVP_DecryptFinal_ex(ctx, out, out_len); | ||
209 | } | ||
210 | |||
211 | int | ||
212 | EVP_CipherFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *out, int *out_len) | ||
213 | { | ||
214 | if (ctx->encrypt) | ||
215 | return EVP_EncryptFinal_ex(ctx, out, out_len); | ||
216 | |||
217 | return EVP_DecryptFinal_ex(ctx, out, out_len); | ||
218 | } | ||
219 | |||
220 | int | ||
221 | EVP_EncryptInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, | ||
222 | const unsigned char *key, const unsigned char *iv) | ||
223 | { | ||
224 | return EVP_CipherInit(ctx, cipher, key, iv, 1); | ||
225 | } | ||
226 | |||
227 | int | ||
228 | EVP_EncryptInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, ENGINE *engine, | ||
229 | const unsigned char *key, const unsigned char *iv) | ||
230 | { | ||
231 | return EVP_CipherInit_ex(ctx, cipher, NULL, key, iv, 1); | ||
232 | } | ||
233 | |||
234 | /* | ||
235 | * EVP_Cipher() is an implementation detail of EVP_Cipher{Update,Final}(). | ||
236 | * Behavior depends on EVP_CIPH_FLAG_CUSTOM_CIPHER being set on ctx->cipher. | ||
237 | * | ||
238 | * If the flag is set, do_cipher() operates in update mode if in != NULL and | ||
239 | * in final mode if in == NULL. It returns the number of bytes written to out | ||
240 | * (which may be 0) or -1 on error. | ||
241 | * | ||
242 | * If the flag is not set, do_cipher() assumes properly aligned data and that | ||
243 | * padding is handled correctly by the caller. Most do_cipher() methods will | ||
244 | * silently produce garbage and succeed. Returns 1 on success, 0 on error. | ||
245 | */ | ||
246 | int | ||
247 | EVP_Cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, const unsigned char *in, | ||
248 | unsigned int in_len) | ||
249 | { | ||
250 | return ctx->cipher->do_cipher(ctx, out, in, in_len); | ||
251 | } | ||
252 | |||
253 | static int | ||
254 | evp_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, int *out_len, | ||
255 | const unsigned char *in, int in_len) | ||
256 | { | ||
257 | int len; | ||
258 | |||
259 | *out_len = 0; | ||
260 | |||
261 | if (in_len < 0) | ||
262 | return 0; | ||
263 | |||
264 | if ((ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) != 0) { | ||
265 | if ((len = ctx->cipher->do_cipher(ctx, out, in, in_len)) < 0) | ||
266 | return 0; | ||
267 | |||
268 | *out_len = len; | ||
269 | return 1; | ||
270 | } | ||
271 | |||
272 | if (!ctx->cipher->do_cipher(ctx, out, in, in_len)) | ||
273 | return 0; | ||
274 | |||
275 | *out_len = in_len; | ||
276 | |||
277 | return 1; | ||
278 | } | ||
279 | |||
/*
 * Encrypt in_len bytes from in into out, buffering any trailing partial
 * block in ctx->buf until enough data arrives to complete it. *out_len is
 * set to the number of bytes written, which may differ from in_len by up to
 * one block in either direction. Returns 1 on success, 0 on error.
 */
int
EVP_EncryptUpdate(EVP_CIPHER_CTX *ctx, unsigned char *out, int *out_len,
    const unsigned char *in, int in_len)
{
	const int block_size = ctx->cipher->block_size;
	const int block_mask = block_size - 1;
	int partial_len = ctx->partial_len;
	int len = 0, total_len = 0;

	*out_len = 0;

	/* block_mask arithmetic below requires a power-of-2 block size. */
	if ((block_size & block_mask) != 0)
		return 0;

	if (in_len < 0)
		return 0;

	/* CCM needs a zero-length call to pass the total length through. */
	if (in_len == 0 && EVP_CIPHER_mode(ctx->cipher) != EVP_CIPH_CCM_MODE)
		return 1;

	/* Custom ciphers do their own buffering. */
	if ((ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) != 0)
		return evp_cipher(ctx, out, out_len, in, in_len);

	/* Fast path: no buffered bytes and input is block aligned. */
	if (partial_len == 0 && (in_len & block_mask) == 0)
		return evp_cipher(ctx, out, out_len, in, in_len);

	if (partial_len < 0 || partial_len >= block_size ||
	    block_size > sizeof(ctx->buf)) {
		EVPerror(EVP_R_BAD_BLOCK_LENGTH);
		return 0;
	}

	/* Try to complete the buffered partial block first. */
	if (partial_len > 0) {
		int partial_needed;

		/* Still not a full block: stash the input and succeed. */
		if ((partial_needed = block_size - partial_len) > in_len) {
			memcpy(&ctx->buf[partial_len], in, in_len);
			ctx->partial_len += in_len;
			return 1;
		}

		/*
		 * Once the first partial_needed bytes from in are processed,
		 * the number of multiples of block_size of data remaining is
		 * (in_len - partial_needed) & ~block_mask. Ensure that this
		 * plus the block processed from ctx->buf doesn't overflow.
		 */
		if (((in_len - partial_needed) & ~block_mask) > INT_MAX - block_size) {
			EVPerror(EVP_R_TOO_LARGE);
			return 0;
		}
		memcpy(&ctx->buf[partial_len], in, partial_needed);

		len = 0;
		if (!evp_cipher(ctx, out, &len, ctx->buf, block_size))
			return 0;
		total_len = len;

		in_len -= partial_needed;
		in += partial_needed;
		out += len;
	}

	/* Encrypt the block-aligned middle of the input in one call. */
	partial_len = in_len & block_mask;
	if ((in_len -= partial_len) > 0) {
		if (INT_MAX - in_len < total_len)
			return 0;
		len = 0;
		if (!evp_cipher(ctx, out, &len, in, in_len))
			return 0;
		if (INT_MAX - len < total_len)
			return 0;
		total_len += len;
	}

	/* Buffer any leftover tail for the next Update/Final call. */
	if ((ctx->partial_len = partial_len) > 0)
		memcpy(ctx->buf, &in[in_len], partial_len);

	*out_len = total_len;

	return 1;
}
362 | |||
363 | int | ||
364 | EVP_EncryptFinal(EVP_CIPHER_CTX *ctx, unsigned char *out, int *out_len) | ||
365 | { | ||
366 | return EVP_EncryptFinal_ex(ctx, out, out_len); | ||
367 | } | ||
368 | |||
/*
 * Finalize encryption: apply PKCS#7-style padding to the buffered partial
 * block (unless padding is disabled) and encrypt it into out. *out_len is
 * set to the number of bytes written (0 or one block). Returns 1 on
 * success, 0 on error.
 */
int
EVP_EncryptFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *out, int *out_len)
{
	const int block_size = ctx->cipher->block_size;
	int partial_len = ctx->partial_len;
	int pad;

	*out_len = 0;

	/* Custom ciphers handle their own finalization (in == NULL). */
	if ((ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) != 0)
		return evp_cipher(ctx, out, out_len, NULL, 0);

	if (partial_len < 0 || partial_len >= block_size ||
	    block_size > sizeof(ctx->buf)) {
		EVPerror(EVP_R_BAD_BLOCK_LENGTH);
		return 0;
	}
	/* Stream ciphers never buffer, so there is nothing to flush. */
	if (block_size == 1)
		return 1;

	/* Without padding, the total input must have been block aligned. */
	if ((ctx->flags & EVP_CIPH_NO_PADDING) != 0) {
		if (partial_len != 0) {
			EVPerror(EVP_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH);
			return 0;
		}
		return 1;
	}

	/* PKCS#7: fill the rest of the block with the pad length itself. */
	pad = block_size - partial_len;
	memset(&ctx->buf[partial_len], pad, pad);

	return evp_cipher(ctx, out, out_len, ctx->buf, block_size);
}
402 | |||
403 | int | ||
404 | EVP_DecryptInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, | ||
405 | const unsigned char *key, const unsigned char *iv) | ||
406 | { | ||
407 | return EVP_CipherInit(ctx, cipher, key, iv, 0); | ||
408 | } | ||
409 | |||
410 | int | ||
411 | EVP_DecryptInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, ENGINE *engine, | ||
412 | const unsigned char *key, const unsigned char *iv) | ||
413 | { | ||
414 | return EVP_CipherInit_ex(ctx, cipher, NULL, key, iv, 0); | ||
415 | } | ||
416 | |||
/*
 * Decrypt in_len bytes from in into out. When padding is enabled, the most
 * recently decrypted full block is withheld in ctx->final and only released
 * on the next call, so that EVP_DecryptFinal_ex() can strip its padding.
 * *out_len is set to the number of bytes written. Returns 1 on success,
 * 0 on error.
 */
int
EVP_DecryptUpdate(EVP_CIPHER_CTX *ctx, unsigned char *out, int *out_len,
    const unsigned char *in, int in_len)
{
	const int block_size = ctx->cipher->block_size;
	const int block_mask = block_size - 1;
	int len = 0, total_len = 0;

	*out_len = 0;

	/* block_mask arithmetic below requires a power-of-2 block size. */
	if ((block_size & block_mask) != 0)
		return 0;

	if (in_len < 0)
		return 0;

	/* CCM needs a zero-length call to pass the total length through. */
	if (in_len == 0 && EVP_CIPHER_mode(ctx->cipher) != EVP_CIPH_CCM_MODE)
		return 1;

	/* Custom ciphers do their own buffering and padding. */
	if ((ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) != 0)
		return evp_cipher(ctx, out, out_len, in, in_len);

	/*
	 * Without padding there is no block to hold back; the buffering
	 * logic is then the same as for encryption.
	 */
	if ((ctx->flags & EVP_CIPH_NO_PADDING) != 0)
		return EVP_EncryptUpdate(ctx, out, out_len, in, in_len);

	if (block_size > sizeof(ctx->final)) {
		EVPerror(EVP_R_BAD_BLOCK_LENGTH);
		return 0;
	}

	/* Release the block withheld by the previous call. */
	if (ctx->final_used) {
		/*
		 * final_used is only set if partial_len is 0. Therefore the
		 * output from EVP_EncryptUpdate() is in_len & ~block_mask.
		 * Ensure (in_len & ~block_mask) + block_size doesn't overflow.
		 */
		if ((in_len & ~block_mask) > INT_MAX - block_size) {
			EVPerror(EVP_R_TOO_LARGE);
			return 0;
		}
		memcpy(out, ctx->final, block_size);
		out += block_size;
		total_len = block_size;
	}

	ctx->final_used = 0;

	/* The core block processing is shared with the encrypt path. */
	len = 0;
	if (!EVP_EncryptUpdate(ctx, out, &len, in, in_len))
		return 0;

	/* Keep copy of last block if a multiple of block_size was decrypted. */
	if (block_size > 1 && ctx->partial_len == 0) {
		if (len < block_size)
			return 0;
		len -= block_size;
		memcpy(ctx->final, &out[len], block_size);
		ctx->final_used = 1;
	}

	if (len > INT_MAX - total_len)
		return 0;
	total_len += len;

	*out_len = total_len;

	return 1;
}
485 | |||
486 | int | ||
487 | EVP_DecryptFinal(EVP_CIPHER_CTX *ctx, unsigned char *out, int *out_len) | ||
488 | { | ||
489 | return EVP_DecryptFinal_ex(ctx, out, out_len); | ||
490 | } | ||
491 | |||
/*
 * Finalize decryption: verify the PKCS#7-style padding in the withheld
 * final block (see EVP_DecryptUpdate()) and copy the unpadded plaintext
 * to out. *out_len is set to the number of plaintext bytes (0 to
 * block_size - 1). Returns 1 on success, 0 on error.
 */
int
EVP_DecryptFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *out, int *out_len)
{
	const int block_size = ctx->cipher->block_size;
	int partial_len = ctx->partial_len;
	int i, pad, plain_len;

	*out_len = 0;

	/* Custom ciphers handle their own finalization (in == NULL). */
	if ((ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) != 0)
		return evp_cipher(ctx, out, out_len, NULL, 0);

	/* Without padding, the total input must have been block aligned. */
	if ((ctx->flags & EVP_CIPH_NO_PADDING) != 0) {
		if (partial_len != 0) {
			EVPerror(EVP_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH);
			return 0;
		}
		return 1;
	}

	/* Stream ciphers have no padding to strip. */
	if (block_size == 1)
		return 1;

	/* Padded input must be block aligned and a final block withheld. */
	if (partial_len != 0 || !ctx->final_used) {
		EVPerror(EVP_R_WRONG_FINAL_BLOCK_LENGTH);
		return 0;
	}

	if (block_size > sizeof(ctx->final)) {
		EVPerror(EVP_R_BAD_BLOCK_LENGTH);
		return 0;
	}

	/*
	 * PKCS#7: the last byte gives the pad length; all pad bytes must
	 * equal it.
	 *
	 * NOTE(review): this check returns early on the first mismatch, so
	 * it is not constant-time; whether that matters depends on how
	 * callers expose decryption failures — confirm for new uses.
	 */
	pad = ctx->final[block_size - 1];
	if (pad <= 0 || pad > block_size) {
		EVPerror(EVP_R_BAD_DECRYPT);
		return 0;
	}
	plain_len = block_size - pad;
	for (i = plain_len; i < block_size; i++) {
		if (ctx->final[i] != pad) {
			EVPerror(EVP_R_BAD_DECRYPT);
			return 0;
		}
	}

	memcpy(out, ctx->final, plain_len);
	*out_len = plain_len;

	return 1;
}
543 | |||
544 | EVP_CIPHER_CTX * | ||
545 | EVP_CIPHER_CTX_new(void) | ||
546 | { | ||
547 | return calloc(1, sizeof(EVP_CIPHER_CTX)); | ||
548 | } | ||
549 | |||
550 | void | ||
551 | EVP_CIPHER_CTX_free(EVP_CIPHER_CTX *ctx) | ||
552 | { | ||
553 | if (ctx == NULL) | ||
554 | return; | ||
555 | |||
556 | EVP_CIPHER_CTX_cleanup(ctx); | ||
557 | |||
558 | free(ctx); | ||
559 | } | ||
560 | |||
561 | void | ||
562 | EVP_CIPHER_CTX_init(EVP_CIPHER_CTX *ctx) | ||
563 | { | ||
564 | memset(ctx, 0, sizeof(EVP_CIPHER_CTX)); | ||
565 | } | ||
566 | |||
567 | int | ||
568 | EVP_CIPHER_CTX_reset(EVP_CIPHER_CTX *a) | ||
569 | { | ||
570 | return EVP_CIPHER_CTX_cleanup(a); | ||
571 | } | ||
572 | |||
573 | int | ||
574 | EVP_CIPHER_CTX_cleanup(EVP_CIPHER_CTX *c) | ||
575 | { | ||
576 | if (c->cipher != NULL) { | ||
577 | /* XXX - Avoid leaks, so ignore return value of cleanup()... */ | ||
578 | if (c->cipher->cleanup != NULL) | ||
579 | c->cipher->cleanup(c); | ||
580 | if (c->cipher_data != NULL) | ||
581 | explicit_bzero(c->cipher_data, c->cipher->ctx_size); | ||
582 | } | ||
583 | |||
584 | /* XXX - store size of cipher_data so we can always freezero(). */ | ||
585 | free(c->cipher_data); | ||
586 | |||
587 | explicit_bzero(c, sizeof(EVP_CIPHER_CTX)); | ||
588 | |||
589 | return 1; | ||
590 | } | ||
591 | |||
592 | int | ||
593 | EVP_CIPHER_CTX_set_key_length(EVP_CIPHER_CTX *c, int keylen) | ||
594 | { | ||
595 | if (c->cipher->flags & EVP_CIPH_CUSTOM_KEY_LENGTH) | ||
596 | return EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_SET_KEY_LENGTH, | ||
597 | keylen, NULL); | ||
598 | if (c->key_len == keylen) | ||
599 | return 1; | ||
600 | if (keylen > 0 && (c->cipher->flags & EVP_CIPH_VARIABLE_LENGTH)) { | ||
601 | c->key_len = keylen; | ||
602 | return 1; | ||
603 | } | ||
604 | EVPerror(EVP_R_INVALID_KEY_LENGTH); | ||
605 | return 0; | ||
606 | } | ||
607 | |||
608 | int | ||
609 | EVP_CIPHER_CTX_set_padding(EVP_CIPHER_CTX *ctx, int pad) | ||
610 | { | ||
611 | if (pad) | ||
612 | ctx->flags &= ~EVP_CIPH_NO_PADDING; | ||
613 | else | ||
614 | ctx->flags |= EVP_CIPH_NO_PADDING; | ||
615 | return 1; | ||
616 | } | ||
617 | |||
618 | int | ||
619 | EVP_CIPHER_CTX_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void *ptr) | ||
620 | { | ||
621 | int ret; | ||
622 | |||
623 | if (!ctx->cipher) { | ||
624 | EVPerror(EVP_R_NO_CIPHER_SET); | ||
625 | return 0; | ||
626 | } | ||
627 | |||
628 | if (!ctx->cipher->ctrl) { | ||
629 | EVPerror(EVP_R_CTRL_NOT_IMPLEMENTED); | ||
630 | return 0; | ||
631 | } | ||
632 | |||
633 | ret = ctx->cipher->ctrl(ctx, type, arg, ptr); | ||
634 | if (ret == -1) { | ||
635 | EVPerror(EVP_R_CTRL_OPERATION_NOT_IMPLEMENTED); | ||
636 | return 0; | ||
637 | } | ||
638 | return ret; | ||
639 | } | ||
640 | |||
641 | int | ||
642 | EVP_CIPHER_CTX_rand_key(EVP_CIPHER_CTX *ctx, unsigned char *key) | ||
643 | { | ||
644 | if (ctx->cipher->flags & EVP_CIPH_RAND_KEY) | ||
645 | return EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_RAND_KEY, 0, key); | ||
646 | arc4random_buf(key, ctx->key_len); | ||
647 | return 1; | ||
648 | } | ||
649 | |||
/*
 * Deep-copy the cipher context in into out: the struct is copied wholesale,
 * cipher_data is duplicated, and ciphers with EVP_CIPH_CUSTOM_COPY get a
 * chance to fix up internal pointers via their ctrl hook. Any prior state
 * in out is cleaned up first. Returns 1 on success, 0 on error.
 */
int
EVP_CIPHER_CTX_copy(EVP_CIPHER_CTX *out, const EVP_CIPHER_CTX *in)
{
	if (in == NULL || in->cipher == NULL) {
		EVPerror(EVP_R_INPUT_NOT_INITIALIZED);
		return 0;
	}

	/*
	 * After the memcpy(), out->cipher_data aliases in->cipher_data until
	 * it is replaced by the fresh allocation below.
	 */
	EVP_CIPHER_CTX_cleanup(out);
	memcpy(out, in, sizeof *out);

	if (in->cipher_data && in->cipher->ctx_size) {
		out->cipher_data = calloc(1, in->cipher->ctx_size);
		if (out->cipher_data == NULL) {
			EVPerror(ERR_R_MALLOC_FAILURE);
			return 0;
		}
		memcpy(out->cipher_data, in->cipher_data, in->cipher->ctx_size);
	}

	if (in->cipher->flags & EVP_CIPH_CUSTOM_COPY) {
		if (!in->cipher->ctrl((EVP_CIPHER_CTX *)in, EVP_CTRL_COPY,
		    0, out)) {
			/*
			 * If the custom copy control failed, assume that there
			 * may still be pointers copied in the cipher_data that
			 * we do not own. This may result in a leak from a bad
			 * custom copy control, but that's preferable to a
			 * double free...
			 */
			freezero(out->cipher_data, in->cipher->ctx_size);
			out->cipher_data = NULL;
			return 0;
		}
	}

	return 1;
}