summaryrefslogtreecommitdiff
path: root/src/lib
diff options
context:
space:
mode:
authortb <>2023-07-20 16:36:06 +0000
committertb <>2023-07-20 16:36:06 +0000
commitffc366ad631823ca313e68ca9a54544739bce0c8 (patch)
treedc0fc379632be1703e99e20edee64079a8279ae5 /src/lib
parent7b660744be840ee9a7e28176e6a379962d6e2332 (diff)
downloadopenbsd-ffc366ad631823ca313e68ca9a54544739bce0c8.tar.gz
openbsd-ffc366ad631823ca313e68ca9a54544739bce0c8.tar.bz2
openbsd-ffc366ad631823ca313e68ca9a54544739bce0c8.zip
Remove some ancient cruft that hasn't been used in ages
discussed with jsing
Diffstat (limited to 'src/lib')
-rw-r--r--  src/lib/libcrypto/Makefile              |    4
-rw-r--r--  src/lib/libcrypto/engine/eng_aesni.c    |  563
-rw-r--r--  src/lib/libcrypto/engine/eng_padlock.c  | 1128
-rw-r--r--  src/lib/libcrypto/engine/eng_padlock.ec |    1
4 files changed, 1 insertion, 1695 deletions
diff --git a/src/lib/libcrypto/Makefile b/src/lib/libcrypto/Makefile
index 44e5900863..8ec9b1b3d8 100644
--- a/src/lib/libcrypto/Makefile
+++ b/src/lib/libcrypto/Makefile
@@ -1,4 +1,4 @@
1# $OpenBSD: Makefile,v 1.137 2023/07/07 06:10:14 jsing Exp $ 1# $OpenBSD: Makefile,v 1.138 2023/07/20 16:36:06 tb Exp $
2 2
3LIB= crypto 3LIB= crypto
4LIBREBUILD=y 4LIBREBUILD=y
@@ -386,8 +386,6 @@ SRCS+= tb_pkmeth.c
386SRCS+= tb_rand.c 386SRCS+= tb_rand.c
387SRCS+= tb_rsa.c 387SRCS+= tb_rsa.c
388SRCS+= tb_store.c 388SRCS+= tb_store.c
389# XXX unnecessary? handled in EVP now...
390# SRCS+= eng_aesni.c # local addition
391 389
392# err/ 390# err/
393SRCS+= err.c 391SRCS+= err.c
diff --git a/src/lib/libcrypto/engine/eng_aesni.c b/src/lib/libcrypto/engine/eng_aesni.c
deleted file mode 100644
index e08edcf346..0000000000
--- a/src/lib/libcrypto/engine/eng_aesni.c
+++ /dev/null
@@ -1,563 +0,0 @@
1/* $OpenBSD: eng_aesni.c,v 1.14 2023/07/20 15:08:12 tb Exp $ */
2/*
3 * Support for Intel AES-NI instruction set
4 * Author: Huang Ying <ying.huang@intel.com>
5 *
6 * Intel AES-NI is a new set of Single Instruction Multiple Data
7 * (SIMD) instructions that are going to be introduced in the next
8 * generation of Intel processor, as of 2009. These instructions
9 * enable fast and secure data encryption and decryption, using the
10 * Advanced Encryption Standard (AES), defined by FIPS Publication
11 * number 197. The architecture introduces six instructions that
12 * offer full hardware support for AES. Four of them support high
13 * performance data encryption and decryption, and the other two
14 * instructions support the AES key expansion procedure.
15 *
16 * The white paper can be downloaded from:
17 * http://softwarecommunity.intel.com/isn/downloads/intelavx/AES-Instructions-Set_WP.pdf
18 *
19 * This file is based on engines/e_padlock.c
20 */
21
22/* ====================================================================
23 * Copyright (c) 1999-2001 The OpenSSL Project. All rights reserved.
24 *
25 * Redistribution and use in source and binary forms, with or without
26 * modification, are permitted provided that the following conditions
27 * are met:
28 *
29 * 1. Redistributions of source code must retain the above copyright
30 * notice, this list of conditions and the following disclaimer.
31 *
32 * 2. Redistributions in binary form must reproduce the above copyright
33 * notice, this list of conditions and the following disclaimer in
34 * the documentation and/or other materials provided with the
35 * distribution.
36 *
37 * 3. All advertising materials mentioning features or use of this
38 * software must display the following acknowledgment:
39 * "This product includes software developed by the OpenSSL Project
40 * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
41 *
42 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
43 * endorse or promote products derived from this software without
44 * prior written permission. For written permission, please contact
45 * licensing@OpenSSL.org.
46 *
47 * 5. Products derived from this software may not be called "OpenSSL"
48 * nor may "OpenSSL" appear in their names without prior written
49 * permission of the OpenSSL Project.
50 *
51 * 6. Redistributions of any form whatsoever must retain the following
52 * acknowledgment:
53 * "This product includes software developed by the OpenSSL Project
54 * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
57 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
59 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
60 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
61 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
62 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
63 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
64 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
65 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
67 * OF THE POSSIBILITY OF SUCH DAMAGE.
68 * ====================================================================
69 *
70 * This product includes cryptographic software written by Eric Young
71 * (eay@cryptsoft.com). This product includes software written by Tim
72 * Hudson (tjh@cryptsoft.com).
73 *
74 */
75
76#include <stdio.h>
77
78#include <openssl/opensslconf.h>
79
80#if !defined(OPENSSL_NO_HW) && !defined(OPENSSL_NO_HW_AES_NI) && !defined(OPENSSL_NO_AES)
81
82#include <openssl/aes.h>
83#include <openssl/engine.h>
84#include <openssl/err.h>
85#include <openssl/evp.h>
86
87#include "evp_local.h"
88
89/* AES-NI is available *ONLY* on some x86 CPUs. Not only that it
90 doesn't exist elsewhere, but it even can't be compiled on other
91 platforms! */
92#undef COMPILE_HW_AESNI
93#if (defined(__x86_64) || defined(__x86_64__) || \
94 defined(_M_AMD64) || defined(_M_X64) || \
95 defined(OPENSSL_IA32_SSE2)) && !defined(OPENSSL_NO_ASM) && !defined(__i386__)
96#define COMPILE_HW_AESNI
97#include "x86_arch.h"
98#endif
99static ENGINE *ENGINE_aesni(void);
100
101void ENGINE_load_aesni(void)
102{
103/* On non-x86 CPUs it just returns. */
104#ifdef COMPILE_HW_AESNI
105 ENGINE *toadd = ENGINE_aesni();
106 if (toadd == NULL)
107 return;
108 ENGINE_add(toadd);
109 ENGINE_register_complete(toadd);
110 ENGINE_free(toadd);
111 ERR_clear_error();
112#endif
113}
114
115#ifdef COMPILE_HW_AESNI
116int aesni_set_encrypt_key(const unsigned char *userKey, int bits,
117 AES_KEY *key);
118int aesni_set_decrypt_key(const unsigned char *userKey, int bits,
119 AES_KEY *key);
120
121void aesni_encrypt(const unsigned char *in, unsigned char *out,
122 const AES_KEY *key);
123void aesni_decrypt(const unsigned char *in, unsigned char *out,
124 const AES_KEY *key);
125
126void aesni_ecb_encrypt(const unsigned char *in, unsigned char *out,
127 size_t length, const AES_KEY *key, int enc);
128void aesni_cbc_encrypt(const unsigned char *in, unsigned char *out,
129 size_t length, const AES_KEY *key, unsigned char *ivec, int enc);
130
131/* Function for ENGINE detection and control */
132static int aesni_init(ENGINE *e);
133
134/* Cipher Stuff */
135static int aesni_ciphers(ENGINE *e, const EVP_CIPHER **cipher,
136 const int **nids, int nid);
137
138#define AESNI_MIN_ALIGN 16
139#define AESNI_ALIGN(x) \
140 ((void *)(((unsigned long)(x)+AESNI_MIN_ALIGN-1)&~(AESNI_MIN_ALIGN-1)))
141
142/* Engine names */
143static const char aesni_id[] = "aesni",
144 aesni_name[] = "Intel AES-NI engine",
145 no_aesni_name[] = "Intel AES-NI engine (no-aesni)";
146
147
148/* The input and output encrypted as though 128bit cfb mode is being
149 * used. The extra state information to record how much of the
150 * 128bit block we have used is contained in *num;
151 */
152static void
153aesni_cfb128_encrypt(const unsigned char *in, unsigned char *out,
154 unsigned int len, const void *key, unsigned char ivec[16], int *num,
155 int enc)
156{
157 unsigned int n;
158 size_t l = 0;
159
160 n = *num;
161
162 if (enc) {
163#if !defined(OPENSSL_SMALL_FOOTPRINT)
164 if (16%sizeof(size_t) == 0) do { /* always true actually */
165 while (n && len) {
166 *(out++) = ivec[n] ^= *(in++);
167 --len;
168 n = (n + 1) % 16;
169 }
170 while (len >= 16) {
171 aesni_encrypt(ivec, ivec, key);
172 for (n = 0; n < 16; n += sizeof(size_t)) {
173 *(size_t*)(out + n) =
174 *(size_t*)(ivec + n) ^= *(size_t*)(in + n);
175 }
176 len -= 16;
177 out += 16;
178 in += 16;
179 }
180 n = 0;
181 if (len) {
182 aesni_encrypt(ivec, ivec, key);
183 while (len--) {
184 out[n] = ivec[n] ^= in[n];
185 ++n;
186 }
187 }
188 *num = n;
189 return;
190 } while (0);
191 /* the rest would be commonly eliminated by x86* compiler */
192#endif
193 while (l < len) {
194 if (n == 0) {
195 aesni_encrypt(ivec, ivec, key);
196 }
197 out[l] = ivec[n] ^= in[l];
198 ++l;
199 n = (n + 1) % 16;
200 }
201 *num = n;
202 } else {
203#if !defined(OPENSSL_SMALL_FOOTPRINT)
204 if (16%sizeof(size_t) == 0) do { /* always true actually */
205 while (n && len) {
206 unsigned char c;
207 *(out++) = ivec[n] ^ (c = *(in++));
208 ivec[n] = c;
209 --len;
210 n = (n + 1) % 16;
211 }
212 while (len >= 16) {
213 aesni_encrypt(ivec, ivec, key);
214 for (n = 0; n < 16; n += sizeof(size_t)) {
215 size_t t = *(size_t*)(in + n);
216 *(size_t*)(out + n) = *(size_t*)(ivec + n) ^ t;
217 *(size_t*)(ivec + n) = t;
218 }
219 len -= 16;
220 out += 16;
221 in += 16;
222 }
223 n = 0;
224 if (len) {
225 aesni_encrypt(ivec, ivec, key);
226 while (len--) {
227 unsigned char c;
228 out[n] = ivec[n] ^ (c = in[n]);
229 ivec[n] = c;
230 ++n;
231 }
232 }
233 *num = n;
234 return;
235 } while (0);
236 /* the rest would be commonly eliminated by x86* compiler */
237#endif
238 while (l < len) {
239 unsigned char c;
240 if (n == 0) {
241 aesni_encrypt(ivec, ivec, key);
242 }
243 out[l] = ivec[n] ^ (c = in[l]);
244 ivec[n] = c;
245 ++l;
246 n = (n + 1) % 16;
247 }
248 *num = n;
249 }
250}
251
252/* The input and output encrypted as though 128bit ofb mode is being
253 * used. The extra state information to record how much of the
254 * 128bit block we have used is contained in *num;
255 */
256static void
257aesni_ofb128_encrypt(const unsigned char *in, unsigned char *out,
258 unsigned int len, const void *key, unsigned char ivec[16], int *num)
259{
260 unsigned int n;
261 size_t l = 0;
262
263 n = *num;
264
265#if !defined(OPENSSL_SMALL_FOOTPRINT)
266 if (16%sizeof(size_t) == 0) do { /* always true actually */
267 while (n && len) {
268 *(out++) = *(in++) ^ ivec[n];
269 --len;
270 n = (n + 1) % 16;
271 }
272 while (len >= 16) {
273 aesni_encrypt(ivec, ivec, key);
274 for (n = 0; n < 16; n += sizeof(size_t))
275 *(size_t*)(out + n) =
276 *(size_t*)(in + n) ^ *(size_t*)(ivec + n);
277 len -= 16;
278 out += 16;
279 in += 16;
280 }
281 n = 0;
282 if (len) {
283 aesni_encrypt(ivec, ivec, key);
284 while (len--) {
285 out[n] = in[n] ^ ivec[n];
286 ++n;
287 }
288 }
289 *num = n;
290 return;
291 } while (0);
292 /* the rest would be commonly eliminated by x86* compiler */
293#endif
294 while (l < len) {
295 if (n == 0) {
296 aesni_encrypt(ivec, ivec, key);
297 }
298 out[l] = in[l] ^ ivec[n];
299 ++l;
300 n = (n + 1) % 16;
301 }
302
303 *num = n;
304}
305/* ===== Engine "management" functions ===== */
306
307/* Prepare the ENGINE structure for registration */
308static int
309aesni_bind_helper(ENGINE *e)
310{
311 int engage;
312
313 engage = (OPENSSL_cpu_caps() & CPUCAP_MASK_AESNI) != 0;
314
315 /* Register everything or return with an error */
316 if (!ENGINE_set_id(e, aesni_id) ||
317 !ENGINE_set_name(e, engage ? aesni_name : no_aesni_name) ||
318 !ENGINE_set_init_function(e, aesni_init) ||
319 (engage && !ENGINE_set_ciphers (e, aesni_ciphers)))
320 return 0;
321
322 /* Everything looks good */
323 return 1;
324}
325
326/* Constructor */
327static ENGINE *
328ENGINE_aesni(void)
329{
330 ENGINE *eng = ENGINE_new();
331
332 if (!eng) {
333 return NULL;
334 }
335
336 if (!aesni_bind_helper(eng)) {
337 ENGINE_free(eng);
338 return NULL;
339 }
340
341 return eng;
342}
343
344/* Check availability of the engine */
345static int
346aesni_init(ENGINE *e)
347{
348 return 1;
349}
350
351#if defined(NID_aes_128_cfb128) && ! defined (NID_aes_128_cfb)
352#define NID_aes_128_cfb NID_aes_128_cfb128
353#endif
354
355#if defined(NID_aes_128_ofb128) && ! defined (NID_aes_128_ofb)
356#define NID_aes_128_ofb NID_aes_128_ofb128
357#endif
358
359#if defined(NID_aes_192_cfb128) && ! defined (NID_aes_192_cfb)
360#define NID_aes_192_cfb NID_aes_192_cfb128
361#endif
362
363#if defined(NID_aes_192_ofb128) && ! defined (NID_aes_192_ofb)
364#define NID_aes_192_ofb NID_aes_192_ofb128
365#endif
366
367#if defined(NID_aes_256_cfb128) && ! defined (NID_aes_256_cfb)
368#define NID_aes_256_cfb NID_aes_256_cfb128
369#endif
370
371#if defined(NID_aes_256_ofb128) && ! defined (NID_aes_256_ofb)
372#define NID_aes_256_ofb NID_aes_256_ofb128
373#endif
374
375/* List of supported ciphers. */
376static int aesni_cipher_nids[] = {
377 NID_aes_128_ecb,
378 NID_aes_128_cbc,
379 NID_aes_128_cfb,
380 NID_aes_128_ofb,
381
382 NID_aes_192_ecb,
383 NID_aes_192_cbc,
384 NID_aes_192_cfb,
385 NID_aes_192_ofb,
386
387 NID_aes_256_ecb,
388 NID_aes_256_cbc,
389 NID_aes_256_cfb,
390 NID_aes_256_ofb,
391};
392static int aesni_cipher_nids_num =
393 (sizeof(aesni_cipher_nids) / sizeof(aesni_cipher_nids[0]));
394
395typedef struct {
396 AES_KEY ks;
397 unsigned int _pad1[3];
398} AESNI_KEY;
399
400static int
401aesni_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *user_key,
402 const unsigned char *iv, int enc)
403{
404 int ret;
405 AES_KEY *key = AESNI_ALIGN(ctx->cipher_data);
406
407 if ((ctx->cipher->flags & EVP_CIPH_MODE) == EVP_CIPH_CFB_MODE ||
408 (ctx->cipher->flags & EVP_CIPH_MODE) == EVP_CIPH_OFB_MODE ||
409 enc)
410 ret = aesni_set_encrypt_key(user_key, ctx->key_len * 8, key);
411 else
412 ret = aesni_set_decrypt_key(user_key, ctx->key_len * 8, key);
413
414 if (ret < 0) {
415 EVPerror(EVP_R_AES_KEY_SETUP_FAILED);
416 return 0;
417 }
418
419 return 1;
420}
421
422static int
423aesni_cipher_ecb(EVP_CIPHER_CTX *ctx, unsigned char *out,
424 const unsigned char *in, size_t inl)
425{
426 AES_KEY *key = AESNI_ALIGN(ctx->cipher_data);
427
428 aesni_ecb_encrypt(in, out, inl, key, ctx->encrypt);
429 return 1;
430}
431
432static int
433aesni_cipher_cbc(EVP_CIPHER_CTX *ctx, unsigned char *out,
434 const unsigned char *in, size_t inl)
435{
436 AES_KEY *key = AESNI_ALIGN(ctx->cipher_data);
437
438 aesni_cbc_encrypt(in, out, inl, key, ctx->iv, ctx->encrypt);
439 return 1;
440}
441
442static int
443aesni_cipher_cfb(EVP_CIPHER_CTX *ctx, unsigned char *out,
444 const unsigned char *in, size_t inl)
445{
446 AES_KEY *key = AESNI_ALIGN(ctx->cipher_data);
447
448 aesni_cfb128_encrypt(in, out, inl, key, ctx->iv, &ctx->num,
449 ctx->encrypt);
450 return 1;
451}
452
453static int
454aesni_cipher_ofb(EVP_CIPHER_CTX *ctx, unsigned char *out,
455 const unsigned char *in, size_t inl)
456{
457 AES_KEY *key = AESNI_ALIGN(ctx->cipher_data);
458
459 aesni_ofb128_encrypt(in, out, inl, key, ctx->iv, &ctx->num);
460 return 1;
461}
462
463#define AES_BLOCK_SIZE 16
464
465#define EVP_CIPHER_block_size_ECB AES_BLOCK_SIZE
466#define EVP_CIPHER_block_size_CBC AES_BLOCK_SIZE
467#define EVP_CIPHER_block_size_OFB 1
468#define EVP_CIPHER_block_size_CFB 1
469
470/* Declaring so many ciphers by hand would be a pain.
471 Instead introduce a bit of preprocessor magic :-) */
472#define DECLARE_AES_EVP(ksize,lmode,umode) \
473static const EVP_CIPHER aesni_##ksize##_##lmode = { \
474 NID_aes_##ksize##_##lmode, \
475 EVP_CIPHER_block_size_##umode, \
476 ksize / 8, \
477 AES_BLOCK_SIZE, \
478 0 | EVP_CIPH_##umode##_MODE, \
479 aesni_init_key, \
480 aesni_cipher_##lmode, \
481 NULL, \
482 sizeof(AESNI_KEY), \
483 EVP_CIPHER_set_asn1_iv, \
484 EVP_CIPHER_get_asn1_iv, \
485 NULL, \
486 NULL \
487}
488
489DECLARE_AES_EVP(128, ecb, ECB);
490DECLARE_AES_EVP(128, cbc, CBC);
491DECLARE_AES_EVP(128, cfb, CFB);
492DECLARE_AES_EVP(128, ofb, OFB);
493
494DECLARE_AES_EVP(192, ecb, ECB);
495DECLARE_AES_EVP(192, cbc, CBC);
496DECLARE_AES_EVP(192, cfb, CFB);
497DECLARE_AES_EVP(192, ofb, OFB);
498
499DECLARE_AES_EVP(256, ecb, ECB);
500DECLARE_AES_EVP(256, cbc, CBC);
501DECLARE_AES_EVP(256, cfb, CFB);
502DECLARE_AES_EVP(256, ofb, OFB);
503
504static int
505aesni_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid)
506{
507 /* No specific cipher => return a list of supported nids ... */
508 if (!cipher) {
509 *nids = aesni_cipher_nids;
510 return aesni_cipher_nids_num;
511 }
512
513 /* ... or the requested "cipher" otherwise */
514 switch (nid) {
515 case NID_aes_128_ecb:
516 *cipher = &aesni_128_ecb;
517 break;
518 case NID_aes_128_cbc:
519 *cipher = &aesni_128_cbc;
520 break;
521 case NID_aes_128_cfb:
522 *cipher = &aesni_128_cfb;
523 break;
524 case NID_aes_128_ofb:
525 *cipher = &aesni_128_ofb;
526 break;
527
528 case NID_aes_192_ecb:
529 *cipher = &aesni_192_ecb;
530 break;
531 case NID_aes_192_cbc:
532 *cipher = &aesni_192_cbc;
533 break;
534 case NID_aes_192_cfb:
535 *cipher = &aesni_192_cfb;
536 break;
537 case NID_aes_192_ofb:
538 *cipher = &aesni_192_ofb;
539 break;
540
541 case NID_aes_256_ecb:
542 *cipher = &aesni_256_ecb;
543 break;
544 case NID_aes_256_cbc:
545 *cipher = &aesni_256_cbc;
546 break;
547 case NID_aes_256_cfb:
548 *cipher = &aesni_256_cfb;
549 break;
550 case NID_aes_256_ofb:
551 *cipher = &aesni_256_ofb;
552 break;
553
554 default:
555 /* Sorry, we don't support this NID */
556 *cipher = NULL;
557 return 0;
558 }
559 return 1;
560}
561
562#endif /* COMPILE_HW_AESNI */
563#endif /* !defined(OPENSSL_NO_HW) && !defined(OPENSSL_NO_HW_AESNI) && !defined(OPENSSL_NO_AES) */
diff --git a/src/lib/libcrypto/engine/eng_padlock.c b/src/lib/libcrypto/engine/eng_padlock.c
deleted file mode 100644
index 3ff6df24e2..0000000000
--- a/src/lib/libcrypto/engine/eng_padlock.c
+++ /dev/null
@@ -1,1128 +0,0 @@
1/* $OpenBSD: eng_padlock.c,v 1.18 2023/07/20 15:08:12 tb Exp $ */
2/*
3 * Support for VIA PadLock Advanced Cryptography Engine (ACE)
4 * Written by Michal Ludvig <michal@logix.cz>
5 * http://www.logix.cz/michal
6 *
7 * Big thanks to Andy Polyakov for a help with optimization,
8 * assembler fixes, port to MS Windows and a lot of other
9 * valuable work on this engine!
10 */
11
12/* ====================================================================
13 * Copyright (c) 1999-2001 The OpenSSL Project. All rights reserved.
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 *
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 *
22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in
24 * the documentation and/or other materials provided with the
25 * distribution.
26 *
27 * 3. All advertising materials mentioning features or use of this
28 * software must display the following acknowledgment:
29 * "This product includes software developed by the OpenSSL Project
30 * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
31 *
32 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
33 * endorse or promote products derived from this software without
34 * prior written permission. For written permission, please contact
35 * licensing@OpenSSL.org.
36 *
37 * 5. Products derived from this software may not be called "OpenSSL"
38 * nor may "OpenSSL" appear in their names without prior written
39 * permission of the OpenSSL Project.
40 *
41 * 6. Redistributions of any form whatsoever must retain the following
42 * acknowledgment:
43 * "This product includes software developed by the OpenSSL Project
44 * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
45 *
46 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
47 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
49 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
50 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
51 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
52 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
53 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
55 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
56 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
57 * OF THE POSSIBILITY OF SUCH DAMAGE.
58 * ====================================================================
59 *
60 * This product includes cryptographic software written by Eric Young
61 * (eay@cryptsoft.com). This product includes software written by Tim
62 * Hudson (tjh@cryptsoft.com).
63 *
64 */
65
66#include <stdio.h>
67#include <string.h>
68
69#include <openssl/opensslconf.h>
70
71#include <openssl/crypto.h>
72#include <openssl/engine.h>
73#include <openssl/evp.h>
74#ifndef OPENSSL_NO_AES
75#include <openssl/aes.h>
76#endif
77#include <openssl/err.h>
78
79#ifndef OPENSSL_NO_HW
80#ifndef OPENSSL_NO_HW_PADLOCK
81
82/* Attempt to have a single source for both 0.9.7 and 0.9.8 :-) */
83#if (OPENSSL_VERSION_NUMBER >= 0x00908000L)
84# ifndef OPENSSL_NO_DYNAMIC_ENGINE
85# define DYNAMIC_ENGINE
86# endif
87#elif (OPENSSL_VERSION_NUMBER >= 0x00907000L)
88# ifdef ENGINE_DYNAMIC_SUPPORT
89# define DYNAMIC_ENGINE
90# endif
91#else
92# error "Only OpenSSL >= 0.9.7 is supported"
93#endif
94
95/* VIA PadLock AES is available *ONLY* on some x86 CPUs.
96 Not only that it doesn't exist elsewhere, but it
97 even can't be compiled on other platforms!
98
99 In addition, because of the heavy use of inline assembler,
100 compiler choice is limited to GCC and Microsoft C. */
101#undef COMPILE_HW_PADLOCK
102#if !defined(OPENSSL_NO_INLINE_ASM)
103# if (defined(__GNUC__) && (defined(__i386__) || defined(__i386)))
104# define COMPILE_HW_PADLOCK
105# endif
106#endif
107
108#ifdef OPENSSL_NO_DYNAMIC_ENGINE
109#ifdef COMPILE_HW_PADLOCK
110static ENGINE *ENGINE_padlock(void);
111#endif
112
113void
114ENGINE_load_padlock(void)
115{
116/* On non-x86 CPUs it just returns. */
117#ifdef COMPILE_HW_PADLOCK
118 ENGINE *toadd = ENGINE_padlock();
119
120 if (toadd == NULL)
121 return;
122 ENGINE_add(toadd);
123 ENGINE_free(toadd);
124 ERR_clear_error();
125#endif
126}
127
128#endif
129
130#ifdef COMPILE_HW_PADLOCK
131/* We do these includes here to avoid header problems on platforms that
132 do not have the VIA padlock anyway... */
133#include <stdlib.h>
134#if defined(__GNUC__)
135# ifndef alloca
136# define alloca(s) __builtin_alloca(s)
137# endif
138#endif
139
140/* Function for ENGINE detection and control */
141static int padlock_available(void);
142static int padlock_init(ENGINE *e);
143
144/* RNG Stuff */
145static RAND_METHOD padlock_rand;
146
147/* Cipher Stuff */
148#ifndef OPENSSL_NO_AES
149static int padlock_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid);
150#endif
151
152/* Engine names */
153static const char *padlock_id = "padlock";
154static char padlock_name[100];
155
156/* Available features */
157static int padlock_use_ace = 0; /* Advanced Cryptography Engine */
158static int padlock_use_rng = 0; /* Random Number Generator */
159#ifndef OPENSSL_NO_AES
160static int padlock_aes_align_required = 1;
161#endif
162
163/* ===== Engine "management" functions ===== */
164
165/* Prepare the ENGINE structure for registration */
166static int
167padlock_bind_helper(ENGINE *e)
168{
169 /* Check available features */
170 padlock_available();
171
172 /*
173 * RNG is currently disabled for reasons discussed in commentary just
174 * before padlock_rand_bytes function.
175 */
176 padlock_use_rng = 0;
177
178 /* Generate a nice engine name with available features */
179 (void) snprintf(padlock_name, sizeof(padlock_name),
180 "VIA PadLock (%s, %s)",
181 padlock_use_rng ? "RNG" : "no-RNG",
182 padlock_use_ace ? "ACE" : "no-ACE");
183
184 /* Register everything or return with an error */
185 if (!ENGINE_set_id(e, padlock_id) ||
186 !ENGINE_set_name(e, padlock_name) ||
187 !ENGINE_set_init_function(e, padlock_init) ||
188#ifndef OPENSSL_NO_AES
189 (padlock_use_ace && !ENGINE_set_ciphers (e, padlock_ciphers)) ||
190#endif
191 (padlock_use_rng && !ENGINE_set_RAND (e, &padlock_rand))) {
192 return 0;
193 }
194
195 /* Everything looks good */
196 return 1;
197}
198
199#ifdef OPENSSL_NO_DYNAMIC_ENGINE
200
201/* Constructor */
202static ENGINE *
203ENGINE_padlock(void)
204{
205 ENGINE *eng = ENGINE_new();
206
207 if (eng == NULL)
208 return NULL;
209
210 if (!padlock_bind_helper(eng)) {
211 ENGINE_free(eng);
212 return NULL;
213 }
214
215 return eng;
216}
217
218#endif
219
220/* Check availability of the engine */
221static int
222padlock_init(ENGINE *e)
223{
224 return (padlock_use_rng || padlock_use_ace);
225}
226
227/* This stuff is needed if this ENGINE is being compiled into a self-contained
228 * shared-library.
229 */
230#ifdef DYNAMIC_ENGINE
231static int
232padlock_bind_fn(ENGINE *e, const char *id)
233{
234 if (id && (strcmp(id, padlock_id) != 0)) {
235 return 0;
236 }
237
238 if (!padlock_bind_helper(e)) {
239 return 0;
240 }
241
242 return 1;
243}
244
245IMPLEMENT_DYNAMIC_CHECK_FN()
246IMPLEMENT_DYNAMIC_BIND_FN (padlock_bind_fn)
247#endif /* DYNAMIC_ENGINE */
248
249/* ===== Here comes the "real" engine ===== */
250
251#ifndef OPENSSL_NO_AES
252/* Some AES-related constants */
253#define AES_BLOCK_SIZE 16
254#define AES_KEY_SIZE_128 16
255#define AES_KEY_SIZE_192 24
256#define AES_KEY_SIZE_256 32
257
258/* Here we store the status information relevant to the
259 current context. */
260/* BIG FAT WARNING:
261 * Inline assembler in PADLOCK_XCRYPT_ASM()
262 * depends on the order of items in this structure.
263 * Don't blindly modify, reorder, etc!
264 */
265struct padlock_cipher_data {
266 unsigned char iv[AES_BLOCK_SIZE]; /* Initialization vector */
267 union {
268 unsigned int pad[4];
269 struct {
270 int rounds : 4;
271 int dgst : 1; /* n/a in C3 */
272 int align : 1; /* n/a in C3 */
273 int ciphr : 1; /* n/a in C3 */
274 unsigned int keygen : 1;
275 int interm : 1;
276 unsigned int encdec : 1;
277 int ksize : 2;
278 } b;
279 } cword; /* Control word */
280 AES_KEY ks; /* Encryption key */
281};
282
283/*
284 * Essentially this variable belongs in thread local storage.
285 * Having this variable global on the other hand can only cause
286 * few bogus key reloads [if any at all on single-CPU system],
287 * so we accept the penalty...
288 */
289static volatile struct padlock_cipher_data *padlock_saved_context;
290#endif
291
292/*
293 * =======================================================
294 * Inline assembler section(s).
295 * =======================================================
296 * Order of arguments is chosen to facilitate Windows port
297 * using __fastcall calling convention. If you wish to add
298 * more routines, keep in mind that first __fastcall
299 * argument is passed in %ecx and second - in %edx.
300 * =======================================================
301 */
302#if defined(__GNUC__) && __GNUC__>=2
303/*
304 * As for excessive "push %ebx"/"pop %ebx" found all over.
305 * When generating position-independent code GCC won't let
306 * us use "b" in assembler templates nor even respect "ebx"
307 * in "clobber description." Therefore the trouble...
308 */
309
310/* Helper function - check if a CPUID instruction
311 is available on this CPU */
312static int
313padlock_insn_cpuid_available(void)
314{
315 int result = -1;
316
317 /* We're checking if the bit #21 of EFLAGS
318 can be toggled. If yes = CPUID is available. */
319 asm volatile (
320 "pushf\n"
321 "popl %%eax\n"
322 "xorl $0x200000, %%eax\n"
323 "movl %%eax, %%ecx\n"
324 "andl $0x200000, %%ecx\n"
325 "pushl %%eax\n"
326 "popf\n"
327 "pushf\n"
328 "popl %%eax\n"
329 "andl $0x200000, %%eax\n"
330 "xorl %%eax, %%ecx\n"
331 "movl %%ecx, %0\n"
332 : "=r" (result) : : "eax", "ecx");
333
334 return (result == 0);
335}
336
337/* Load supported features of the CPU to see if
338 the PadLock is available. */
339static int
340padlock_available(void)
341{
342 char vendor_string[16];
343 unsigned int eax, edx;
344
345 /* First check if the CPUID instruction is available at all... */
346 if (! padlock_insn_cpuid_available())
347 return 0;
348
349 /* Are we running on the Centaur (VIA) CPU? */
350 eax = 0x00000000;
351 vendor_string[12] = 0;
352 asm volatile (
353 "pushl %%ebx\n"
354 "cpuid\n"
355 "movl %%ebx,(%%edi)\n"
356 "movl %%edx,4(%%edi)\n"
357 "movl %%ecx,8(%%edi)\n"
358 "popl %%ebx"
359 : "+a"(eax) : "D"(vendor_string) : "ecx", "edx");
360 if (strcmp(vendor_string, "CentaurHauls") != 0)
361 return 0;
362
363 /* Check for Centaur Extended Feature Flags presence */
364 eax = 0xC0000000;
365 asm volatile ("pushl %%ebx; cpuid; popl %%ebx"
366 : "+a"(eax) : : "ecx", "edx");
367 if (eax < 0xC0000001)
368 return 0;
369
370 /* Read the Centaur Extended Feature Flags */
371 eax = 0xC0000001;
372 asm volatile ("pushl %%ebx; cpuid; popl %%ebx"
373 : "+a"(eax), "=d"(edx) : : "ecx");
374
375 /* Fill up some flags */
376 padlock_use_ace = ((edx & (0x3 << 6)) == (0x3 << 6));
377 padlock_use_rng = ((edx & (0x3 << 2)) == (0x3 << 2));
378
379 return padlock_use_ace + padlock_use_rng;
380}
381
382#ifndef OPENSSL_NO_AES
383/* Our own htonl()/ntohl() */
384static inline void
385padlock_bswapl(AES_KEY *ks)
386{
387 size_t i = sizeof(ks->rd_key)/sizeof(ks->rd_key[0]);
388 unsigned int *key = ks->rd_key;
389
390 while (i--) {
391 asm volatile ("bswapl %0" : "+r"(*key));
392 key++;
393 }
394}
395#endif
396
397/* Force key reload from memory to the CPU microcode.
398 Loading EFLAGS from the stack clears EFLAGS[30]
399 which does the trick. */
400static inline void
401padlock_reload_key(void)
402{
403 asm volatile ("pushfl; popfl");
404}
405
406#ifndef OPENSSL_NO_AES
407/*
408 * This is heuristic key context tracing. At first one
409 * believes that one should use atomic swap instructions,
410 * but it's not actually necessary. Point is that if
411 * padlock_saved_context was changed by another thread
412 * after we've read it and before we compare it with cdata,
413 * our key *shall* be reloaded upon thread context switch
414 * and we are therefore set in either case...
415 */
416static inline void
417padlock_verify_context(struct padlock_cipher_data *cdata)
418{
419 asm volatile (
420 "pushfl\n"
421 " btl $30,(%%esp)\n"
422 " jnc 1f\n"
423 " cmpl %2,%1\n"
424 " je 1f\n"
425 " popfl\n"
426 " subl $4,%%esp\n"
427 "1: addl $4,%%esp\n"
428 " movl %2,%0"
429 :"+m"(padlock_saved_context)
430 : "r"(padlock_saved_context), "r"(cdata) : "cc");
431}
432
433/* Template for padlock_xcrypt_* modes */
434/* BIG FAT WARNING:
435 * The offsets used with 'leal' instructions
436 * describe items of the 'padlock_cipher_data'
437 * structure.
438 */
439#define PADLOCK_XCRYPT_ASM(name,rep_xcrypt) \
440static inline void *name(size_t cnt, \
441 struct padlock_cipher_data *cdata, \
442 void *out, const void *inp) \
443{ void *iv; \
444 asm volatile ( "pushl %%ebx\n" \
445 " leal 16(%0),%%edx\n" \
446 " leal 32(%0),%%ebx\n" \
447 rep_xcrypt "\n" \
448 " popl %%ebx" \
449 : "=a"(iv), "=c"(cnt), "=D"(out), "=S"(inp) \
450 : "0"(cdata), "1"(cnt), "2"(out), "3"(inp) \
451 : "edx", "cc", "memory"); \
452 return iv; \
453}
454
455/* Generate all functions with appropriate opcodes */
456PADLOCK_XCRYPT_ASM(padlock_xcrypt_ecb, ".byte 0xf3,0x0f,0xa7,0xc8") /* rep xcryptecb */
457PADLOCK_XCRYPT_ASM(padlock_xcrypt_cbc, ".byte 0xf3,0x0f,0xa7,0xd0") /* rep xcryptcbc */
458PADLOCK_XCRYPT_ASM(padlock_xcrypt_cfb, ".byte 0xf3,0x0f,0xa7,0xe0") /* rep xcryptcfb */
459PADLOCK_XCRYPT_ASM(padlock_xcrypt_ofb, ".byte 0xf3,0x0f,0xa7,0xe8") /* rep xcryptofb */
460#endif
461
/* The RNG call itself */
/*
 * Execute the PadLock 'xstore' RNG instruction with EDI = addr
 * (destination buffer) and EDX = edx_in (quality-control word).
 * Returns the status word the instruction leaves in EAX;
 * padlock_rand_bytes() below decodes the byte count and failure
 * bits from it.
 */
static inline unsigned int
padlock_xstore(void *addr, unsigned int edx_in)
{
	unsigned int eax_out;

	asm volatile (".byte 0x0f,0xa7,0xc0"	/* xstore */
	    : "=a"(eax_out),"=m"(*(unsigned *)addr)
	    : "D"(addr), "d" (edx_in)
	    );

	return eax_out;
}
475
476/* Why not inline 'rep movsd'? I failed to find information on what
477 * value in Direction Flag one can expect and consequently have to
478 * apply "better-safe-than-sorry" approach and assume "undefined."
479 * I could explicitly clear it and restore the original value upon
480 * return from padlock_aes_cipher, but it's presumably too much
481 * trouble for too little gain...
482 *
483 * In case you wonder 'rep xcrypt*' instructions above are *not*
484 * affected by the Direction Flag and pointers advance toward
485 * larger addresses unconditionally.
486 */
/*
 * Copy n bytes from src to dst using long-sized loads and stores and
 * return dst (as unsigned char *, matching callers' pointer
 * arithmetic).  See the comment above for why this exists instead of
 * an inline 'rep movsd'.
 *
 * The previous do/while(--n) underflowed its unsigned counter and
 * looped (almost) forever when n < sizeof(long), and silently dropped
 * the tail when n was not a multiple of sizeof(long).  All current
 * callers pass non-zero multiples of AES_BLOCK_SIZE, for which the
 * behavior is unchanged, but handle both cases defensively.
 */
static inline unsigned char *
padlock_memcpy(void *dst, const void *src, size_t n)
{
	long *d = dst;
	const long *s = src;
	size_t words = n / sizeof(*d);

	/* Bulk copy, one long at a time. */
	while (words-- > 0)
		*d++ = *s++;

	/* Copy any remaining tail bytes. */
	if ((n %= sizeof(*d)) != 0) {
		unsigned char *db = (unsigned char *)d;
		const unsigned char *sb = (const unsigned char *)s;

		while (n-- > 0)
			*db++ = *sb++;
	}

	return dst;
}
499#endif
500
501/* ===== AES encryption/decryption ===== */
502#ifndef OPENSSL_NO_AES
503
/*
 * Compatibility aliases: OpenSSL names the 128-bit-feedback CFB/OFB
 * NIDs with a "128" suffix; map them to the short names used in the
 * cipher tables below when the short names are not already defined.
 */
#if defined(NID_aes_128_cfb128) && ! defined (NID_aes_128_cfb)
#define NID_aes_128_cfb NID_aes_128_cfb128
#endif

#if defined(NID_aes_128_ofb128) && ! defined (NID_aes_128_ofb)
#define NID_aes_128_ofb NID_aes_128_ofb128
#endif

#if defined(NID_aes_192_cfb128) && ! defined (NID_aes_192_cfb)
#define NID_aes_192_cfb NID_aes_192_cfb128
#endif

#if defined(NID_aes_192_ofb128) && ! defined (NID_aes_192_ofb)
#define NID_aes_192_ofb NID_aes_192_ofb128
#endif

#if defined(NID_aes_256_cfb128) && ! defined (NID_aes_256_cfb)
#define NID_aes_256_cfb NID_aes_256_cfb128
#endif

#if defined(NID_aes_256_ofb128) && ! defined (NID_aes_256_ofb)
#define NID_aes_256_ofb NID_aes_256_ofb128
#endif
527
/* List of supported ciphers: AES-128/192/256 in ECB, CBC, CFB and OFB,
 * returned to the ENGINE framework by padlock_ciphers() below. */
static int padlock_cipher_nids[] = {
	NID_aes_128_ecb,
	NID_aes_128_cbc,
	NID_aes_128_cfb,
	NID_aes_128_ofb,

	NID_aes_192_ecb,
	NID_aes_192_cbc,
	NID_aes_192_cfb,
	NID_aes_192_ofb,

	NID_aes_256_ecb,
	NID_aes_256_cbc,
	NID_aes_256_cfb,
	NID_aes_256_ofb,
};
/* Number of entries in padlock_cipher_nids. */
static int padlock_cipher_nids_num = (sizeof(padlock_cipher_nids)/
sizeof(padlock_cipher_nids[0]));
547
/* Function prototypes ... */
static int padlock_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
    const unsigned char *iv, int enc);
static int padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
    const unsigned char *in, size_t nbytes);

/* Round ptr up to the next 16-byte boundary (the xcrypt instructions
 * operate on 16-byte-aligned data). */
#define NEAREST_ALIGNED(ptr) ( (unsigned char *)(ptr) +	\
	( (0x10 - ((size_t)(ptr) & 0x0F)) & 0x0F ) )
/* cipher_data is over-allocated by 16 bytes (see DECLARE_AES_EVP) so a
 * 16-byte-aligned struct padlock_cipher_data always fits inside it. */
#define ALIGNED_CIPHER_DATA(ctx) ((struct padlock_cipher_data *)\
	NEAREST_ALIGNED(ctx->cipher_data))

/* Per-mode EVP block sizes: the block-cipher modes report the AES
 * block size, the stream-like CFB/OFB modes report 1. */
#define EVP_CIPHER_block_size_ECB	AES_BLOCK_SIZE
#define EVP_CIPHER_block_size_CBC	AES_BLOCK_SIZE
#define EVP_CIPHER_block_size_OFB	1
#define EVP_CIPHER_block_size_CFB	1
563
/* Declaring so many ciphers by hand would be a pain.
   Instead introduce a bit of preprocessor magic :-) */
/*
 * Expands to a static const EVP_CIPHER named
 * padlock_aes_<ksize>_<lmode> wired to padlock_aes_init_key /
 * padlock_aes_cipher.  ctx_size is sizeof(struct padlock_cipher_data)
 * + 16 so that ALIGNED_CIPHER_DATA() can place a 16-byte-aligned
 * engine state anywhere inside cipher_data.
 */
#define DECLARE_AES_EVP(ksize,lmode,umode)	\
static const EVP_CIPHER padlock_aes_##ksize##_##lmode = {	\
	NID_aes_##ksize##_##lmode,		\
	EVP_CIPHER_block_size_##umode,		\
	AES_KEY_SIZE_##ksize,			\
	AES_BLOCK_SIZE,				\
	0 | EVP_CIPH_##umode##_MODE,		\
	padlock_aes_init_key,			\
	padlock_aes_cipher,			\
	NULL,					\
	sizeof(struct padlock_cipher_data) + 16,	\
	EVP_CIPHER_set_asn1_iv,			\
	EVP_CIPHER_get_asn1_iv,			\
	NULL,					\
	NULL					\
}

DECLARE_AES_EVP(128, ecb, ECB);
DECLARE_AES_EVP(128, cbc, CBC);
DECLARE_AES_EVP(128, cfb, CFB);
DECLARE_AES_EVP(128, ofb, OFB);

DECLARE_AES_EVP(192, ecb, ECB);
DECLARE_AES_EVP(192, cbc, CBC);
DECLARE_AES_EVP(192, cfb, CFB);
DECLARE_AES_EVP(192, ofb, OFB);

DECLARE_AES_EVP(256, ecb, ECB);
DECLARE_AES_EVP(256, cbc, CBC);
DECLARE_AES_EVP(256, cfb, CFB);
DECLARE_AES_EVP(256, ofb, OFB);
597
598static int
599padlock_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid)
600{
601 /* No specific cipher => return a list of supported nids ... */
602 if (!cipher) {
603 *nids = padlock_cipher_nids;
604 return padlock_cipher_nids_num;
605 }
606
607 /* ... or the requested "cipher" otherwise */
608 switch (nid) {
609 case NID_aes_128_ecb:
610 *cipher = &padlock_aes_128_ecb;
611 break;
612 case NID_aes_128_cbc:
613 *cipher = &padlock_aes_128_cbc;
614 break;
615 case NID_aes_128_cfb:
616 *cipher = &padlock_aes_128_cfb;
617 break;
618 case NID_aes_128_ofb:
619 *cipher = &padlock_aes_128_ofb;
620 break;
621 case NID_aes_192_ecb:
622 *cipher = &padlock_aes_192_ecb;
623 break;
624 case NID_aes_192_cbc:
625 *cipher = &padlock_aes_192_cbc;
626 break;
627 case NID_aes_192_cfb:
628 *cipher = &padlock_aes_192_cfb;
629 break;
630 case NID_aes_192_ofb:
631 *cipher = &padlock_aes_192_ofb;
632 break;
633 case NID_aes_256_ecb:
634 *cipher = &padlock_aes_256_ecb;
635 break;
636 case NID_aes_256_cbc:
637 *cipher = &padlock_aes_256_cbc;
638 break;
639 case NID_aes_256_cfb:
640 *cipher = &padlock_aes_256_cfb;
641 break;
642 case NID_aes_256_ofb:
643 *cipher = &padlock_aes_256_ofb;
644 break;
645 default:
646 /* Sorry, we don't support this NID */
647 *cipher = NULL;
648 return 0;
649 }
650
651 return 1;
652}
653
654/* Prepare the encryption key for PadLock usage */
655static int
656padlock_aes_init_key (EVP_CIPHER_CTX *ctx, const unsigned char *key,
657 const unsigned char *iv, int enc)
658{
659 struct padlock_cipher_data *cdata;
660 int key_len = EVP_CIPHER_CTX_key_length(ctx) * 8;
661
662 if (key == NULL)
663 return 0; /* ERROR */
664
665 cdata = ALIGNED_CIPHER_DATA(ctx);
666 memset(cdata, 0, sizeof(struct padlock_cipher_data));
667
668 /* Prepare Control word. */
669 if (EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_OFB_MODE)
670 cdata->cword.b.encdec = 0;
671 else
672 cdata->cword.b.encdec = (ctx->encrypt == 0);
673 cdata->cword.b.rounds = 10 + (key_len - 128) / 32;
674 cdata->cword.b.ksize = (key_len - 128) / 64;
675
676 switch (key_len) {
677 case 128:
678 /* PadLock can generate an extended key for
679 AES128 in hardware */
680 memcpy(cdata->ks.rd_key, key, AES_KEY_SIZE_128);
681 cdata->cword.b.keygen = 0;
682 break;
683
684 case 192:
685 case 256:
686 /* Generate an extended AES key in software.
687 Needed for AES192/AES256 */
688 /* Well, the above applies to Stepping 8 CPUs
689 and is listed as hardware errata. They most
690 likely will fix it at some point and then
691 a check for stepping would be due here. */
692 if (EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_CFB_MODE ||
693 EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_OFB_MODE ||
694 enc)
695 AES_set_encrypt_key(key, key_len, &cdata->ks);
696 else
697 AES_set_decrypt_key(key, key_len, &cdata->ks);
698#ifndef AES_ASM
699 /* OpenSSL C functions use byte-swapped extended key. */
700 padlock_bswapl(&cdata->ks);
701#endif
702 cdata->cword.b.keygen = 1;
703 break;
704
705 default:
706 /* ERROR */
707 return 0;
708 }
709
710 /*
711 * This is done to cover for cases when user reuses the
712 * context for new key. The catch is that if we don't do
713 * this, padlock_eas_cipher might proceed with old key...
714 */
715 padlock_reload_key ();
716
717 return 1;
718}
719
/*
 * Simplified version of padlock_aes_cipher() used when
 * 1) both input and output buffers are at aligned addresses.
 * or when
 * 2) running on a newer CPU that doesn't require aligned buffers.
 * Processes all of nbytes in a single xcrypt call per mode.
 */
static int
padlock_aes_cipher_omnivorous(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
    const unsigned char *in_arg, size_t nbytes)
{
	struct padlock_cipher_data *cdata;
	void *iv;

	cdata = ALIGNED_CIPHER_DATA(ctx);
	padlock_verify_context(cdata);

	switch (EVP_CIPHER_CTX_mode(ctx)) {
	case EVP_CIPH_ECB_MODE:
		/* ECB carries no IV. */
		padlock_xcrypt_ecb(nbytes / AES_BLOCK_SIZE, cdata,
		    out_arg, in_arg);
		break;

	case EVP_CIPH_CBC_MODE:
		/* Seed the engine IV from the context; the xcrypt call
		 * returns a pointer to the final IV, which goes back
		 * into the context for the next call. */
		memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
		iv = padlock_xcrypt_cbc(nbytes / AES_BLOCK_SIZE, cdata,
		    out_arg, in_arg);
		memcpy(ctx->iv, iv, AES_BLOCK_SIZE);
		break;

	case EVP_CIPH_CFB_MODE:
		/* Same IV in/out protocol as CBC. */
		memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
		iv = padlock_xcrypt_cfb(nbytes / AES_BLOCK_SIZE, cdata,
		    out_arg, in_arg);
		memcpy(ctx->iv, iv, AES_BLOCK_SIZE);
		break;

	case EVP_CIPH_OFB_MODE:
		/* OFB updates the IV in place in cdata->iv; no return
		 * pointer is used. */
		memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
		padlock_xcrypt_ofb(nbytes / AES_BLOCK_SIZE, cdata,
		    out_arg, in_arg);
		memcpy(ctx->iv, cdata->iv, AES_BLOCK_SIZE);
		break;

	default:
		return 0;
	}

	/* Don't leave IV material behind in the engine state. */
	memset(cdata->iv, 0, AES_BLOCK_SIZE);

	return 1;
}
771
#ifndef PADLOCK_CHUNK
# define PADLOCK_CHUNK	512	/* Must be a power of 2 larger than 16 */
#endif
#if PADLOCK_CHUNK<16 || PADLOCK_CHUNK&(PADLOCK_CHUNK-1)
# error "insane PADLOCK_CHUNK..."
#endif

/* Re-align the arguments to 16-Bytes boundaries and run the
   encryption function itself. This function is not AES-specific.
   Works in PADLOCK_CHUNK-sized pieces, bouncing misaligned data
   through a stack buffer (scrubbed before return). */
static int
padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
    const unsigned char *in_arg, size_t nbytes)
{
	struct padlock_cipher_data *cdata;
	const void *inp;
	unsigned char *out;
	void *iv;
	int inp_misaligned, out_misaligned, realign_in_loop;
	size_t chunk, allocated = 0;

	/* ctx->num is maintained in byte-oriented modes,
	   such as CFB and OFB...
	   Finish any partial keystream block left over from the
	   previous call before touching the hardware. */
	if ((chunk = ctx->num)) {
		/* borrow chunk variable */
		unsigned char *ivp = ctx->iv;

		switch (EVP_CIPHER_CTX_mode(ctx)) {
		case EVP_CIPH_CFB_MODE:
			if (chunk >= AES_BLOCK_SIZE)
				return 0; /* bogus value */

			if (ctx->encrypt)
				while (chunk < AES_BLOCK_SIZE && nbytes != 0) {
					ivp[chunk] = *(out_arg++) = *(in_arg++) ^ ivp[chunk];
					chunk++, nbytes--;
				}
			else
				while (chunk < AES_BLOCK_SIZE && nbytes != 0) {
					unsigned char c = *(in_arg++);
					*(out_arg++) = c ^ ivp[chunk];
					ivp[chunk++] = c, nbytes--;
				}

			ctx->num = chunk % AES_BLOCK_SIZE;
			break;
		case EVP_CIPH_OFB_MODE:
			if (chunk >= AES_BLOCK_SIZE)
				return 0; /* bogus value */

			while (chunk < AES_BLOCK_SIZE && nbytes != 0) {
				*(out_arg++) = *(in_arg++) ^ ivp[chunk];
				chunk++, nbytes--;
			}

			ctx->num = chunk % AES_BLOCK_SIZE;
			break;
		}
	}

	if (nbytes == 0)
		return 1;
#if 0
	if (nbytes % AES_BLOCK_SIZE)
		return 0; /* are we expected to do tail processing? */
#else
	/* nbytes is always multiple of AES_BLOCK_SIZE in ECB and CBC
	   modes and arbitrary value in byte-oriented modes, such as
	   CFB and OFB... */
#endif

	/* VIA promises CPUs that won't require alignment in the future.
	   For now padlock_aes_align_required is initialized to 1 and
	   the condition is never met... */
	/* C7 core is capable to manage unaligned input in non-ECB[!]
	   mode, but performance penalties appear to be approximately
	   same as for software alignment below or ~3x. They promise to
	   improve it in the future, but for now we can just as well
	   pretend that it can only handle aligned input... */
	if (!padlock_aes_align_required && (nbytes % AES_BLOCK_SIZE) == 0)
		return padlock_aes_cipher_omnivorous(ctx, out_arg, in_arg,
		    nbytes);

	inp_misaligned = (((size_t)in_arg) & 0x0F);
	out_misaligned = (((size_t)out_arg) & 0x0F);

	/* Note that even if output is aligned and input not,
	 * I still prefer to loop instead of copy the whole
	 * input and then encrypt in one stroke. This is done
	 * in order to improve L1 cache utilization... */
	realign_in_loop = out_misaligned|inp_misaligned;

	if (!realign_in_loop && (nbytes % AES_BLOCK_SIZE) == 0)
		return padlock_aes_cipher_omnivorous(ctx, out_arg, in_arg,
		    nbytes);

	/* this takes one "if" out of the loops: the first chunk is the
	   (possibly short) remainder, all later chunks are full-size */
	chunk = nbytes;
	chunk %= PADLOCK_CHUNK;
	if (chunk == 0)
		chunk = PADLOCK_CHUNK;

	if (out_misaligned) {
		/* optimize for small input: the bounce buffer only
		   needs to hold the largest chunk, not all of nbytes */
		allocated = (chunk < nbytes ? PADLOCK_CHUNK : nbytes);
		out = alloca(0x10 + allocated);
		out = NEAREST_ALIGNED(out);
	} else
		out = out_arg;

	cdata = ALIGNED_CIPHER_DATA(ctx);
	padlock_verify_context(cdata);

	switch (EVP_CIPHER_CTX_mode(ctx)) {
	case EVP_CIPH_ECB_MODE:
		do {
			if (inp_misaligned)
				inp = padlock_memcpy(out, in_arg, chunk);
			else
				inp = in_arg;
			in_arg += chunk;

			padlock_xcrypt_ecb(chunk / AES_BLOCK_SIZE, cdata,
			    out, inp);

			if (out_misaligned)
				out_arg = padlock_memcpy(out_arg, out, chunk) +
				    chunk;
			else
				out = out_arg += chunk;

			nbytes -= chunk;
			chunk = PADLOCK_CHUNK;
		} while (nbytes);
		break;

	case EVP_CIPH_CBC_MODE:
		/* The goto enters the loop mid-body: on the first pass
		   cdata->iv is already seeded and chunk already holds
		   the (short) first chunk size. */
		memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
		goto cbc_shortcut;
		do {
			if (iv != cdata->iv)
				memcpy(cdata->iv, iv, AES_BLOCK_SIZE);
			chunk = PADLOCK_CHUNK;
		cbc_shortcut: /* optimize for small input */
			if (inp_misaligned)
				inp = padlock_memcpy(out, in_arg, chunk);
			else
				inp = in_arg;
			in_arg += chunk;

			iv = padlock_xcrypt_cbc(chunk / AES_BLOCK_SIZE, cdata,
			    out, inp);

			if (out_misaligned)
				out_arg = padlock_memcpy(out_arg, out, chunk) +
				    chunk;
			else
				out = out_arg += chunk;
		} while (nbytes -= chunk);
		memcpy(ctx->iv, iv, AES_BLOCK_SIZE);
		break;

	case EVP_CIPH_CFB_MODE:
		/* Whole blocks go through the hardware; any sub-block
		   tail is handled after cfb_skiploop. */
		memcpy (iv = cdata->iv, ctx->iv, AES_BLOCK_SIZE);
		chunk &= ~(AES_BLOCK_SIZE - 1);
		if (chunk)
			goto cfb_shortcut;
		else
			goto cfb_skiploop;
		do {
			if (iv != cdata->iv)
				memcpy(cdata->iv, iv, AES_BLOCK_SIZE);
			chunk = PADLOCK_CHUNK;
		cfb_shortcut: /* optimize for small input */
			if (inp_misaligned)
				inp = padlock_memcpy(out, in_arg, chunk);
			else
				inp = in_arg;
			in_arg += chunk;

			iv = padlock_xcrypt_cfb(chunk / AES_BLOCK_SIZE, cdata,
			    out, inp);

			if (out_misaligned)
				out_arg = padlock_memcpy(out_arg, out, chunk) +
				    chunk;
			else
				out = out_arg += chunk;

			nbytes -= chunk;
		} while (nbytes >= AES_BLOCK_SIZE);

cfb_skiploop:
		if (nbytes) {
			unsigned char *ivp = cdata->iv;

			if (iv != ivp) {
				memcpy(ivp, iv, AES_BLOCK_SIZE);
				iv = ivp;
			}
			ctx->num = nbytes;
			/* Generate one keystream block by ECB-encrypting
			   the IV in place (always in encrypt direction,
			   hence the encdec toggle), then XOR the tail
			   bytes by hand as in the ctx->num path above. */
			if (cdata->cword.b.encdec) {
				cdata->cword.b.encdec = 0;
				padlock_reload_key();
				padlock_xcrypt_ecb(1, cdata, ivp, ivp);
				cdata->cword.b.encdec = 1;
				padlock_reload_key();
				while (nbytes) {
					unsigned char c = *(in_arg++);
					*(out_arg++) = c ^ *ivp;
					*(ivp++) = c, nbytes--;
				}
			} else {
				padlock_reload_key();
				padlock_xcrypt_ecb(1, cdata, ivp, ivp);
				padlock_reload_key();
				while (nbytes) {
					*ivp = *(out_arg++) = *(in_arg++) ^ *ivp;
					ivp++, nbytes--;
				}
			}
		}

		memcpy(ctx->iv, iv, AES_BLOCK_SIZE);
		break;

	case EVP_CIPH_OFB_MODE:
		memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
		chunk &= ~(AES_BLOCK_SIZE - 1);
		if (chunk) do {
			if (inp_misaligned)
				inp = padlock_memcpy(out, in_arg, chunk);
			else
				inp = in_arg;
			in_arg += chunk;

			padlock_xcrypt_ofb(chunk / AES_BLOCK_SIZE, cdata,
			    out, inp);

			if (out_misaligned)
				out_arg = padlock_memcpy(out_arg, out, chunk) +
				    chunk;
			else
				out = out_arg += chunk;

			nbytes -= chunk;
			chunk = PADLOCK_CHUNK;
		} while (nbytes >= AES_BLOCK_SIZE);

		if (nbytes) {
			unsigned char *ivp = cdata->iv;

			/* Sub-block tail: one keystream block, XORed
			   by hand; ctx->num records the position. */
			ctx->num = nbytes;
			padlock_reload_key(); /* empirically found */
			padlock_xcrypt_ecb(1, cdata, ivp, ivp);
			padlock_reload_key(); /* empirically found */
			while (nbytes) {
				*(out_arg++) = *(in_arg++) ^ *ivp;
				ivp++, nbytes--;
			}
		}

		memcpy(ctx->iv, cdata->iv, AES_BLOCK_SIZE);
		break;

	default:
		return 0;
	}

	/* Clean the realign buffer if it was used */
	if (out_misaligned) {
		volatile unsigned long *p = (void *)out;
		size_t n = allocated/sizeof(*p);
		while (n--)
			*p++ = 0;
	}

	memset(cdata->iv, 0, AES_BLOCK_SIZE);

	return 1;
}
1052
1053#endif /* OPENSSL_NO_AES */
1054
1055/* ===== Random Number Generator ===== */
1056/*
1057 * This code is not engaged. The reason is that it does not comply
1058 * with recommendations for VIA RNG usage for secure applications
1059 * (posted at http://www.via.com.tw/en/viac3/c3.jsp) nor does it
1060 * provide meaningful error control...
1061 */
1062/* Wrapper that provides an interface between the API and
1063 the raw PadLock RNG */
/*
 * Fill `output` with `count` bytes from the PadLock hardware RNG.
 * Returns 1 on success, 0 if the RNG is disabled or reports a
 * self-test failure.  The xstore status word is decoded as follows:
 * bit 6 = RNG enabled, bits 10..14 = DC bias / Raw Bits / String
 * Filter failures, bits 0..4 = number of bytes stored.
 */
static int
padlock_rand_bytes(unsigned char *output, int count)
{
	unsigned int status, scratch;

	/* Bulk phase: pull 8 bytes at a time straight into the
	 * caller's buffer (quality word 0). */
	while (count >= 8) {
		status = padlock_xstore(output, 0);
		if ((status & (1 << 6)) == 0)
			return 0;	/* RNG disabled */
		/* DC bias, Raw Bits and String Filter failure bits */
		if ((status & (0x1F << 10)) != 0)
			return 0;
		switch (status & 0x1F) {
		case 0:
			continue;	/* no data yet, retry... */
		case 8:
			break;		/* got a full 8-byte block */
		default:
			return 0;	/* fatal failure... */
		}
		output += 8;
		count -= 8;
	}

	/* Tail phase: single bytes via a scratch word (quality
	 * word 3). */
	while (count > 0) {
		status = padlock_xstore(&scratch, 3);
		if ((status & (1 << 6)) == 0)
			return 0;	/* RNG disabled */
		/* DC bias, Raw Bits and String Filter failure bits */
		if ((status & (0x1F << 10)) != 0)
			return 0;
		switch (status & 0x1F) {
		case 0:
			continue;	/* no data yet, retry... */
		case 1:
			break;		/* got one byte */
		default:
			return 0;	/* fatal failure... */
		}
		*output++ = (unsigned char)scratch;
		count--;
	}

	/* Scrub leftover random material (volatile so the store is
	 * not optimized away). */
	*(volatile unsigned int *)&scratch = 0;

	return 1;
}
1101
/* Dummy but necessary function: RAND_METHOD status hook.  The
 * hardware RNG needs no seeding, so always report "seeded". */
static int
padlock_rand_status(void)
{
	return 1;
}
1108
/* Prepare structure for registration: both true- and pseudo-random
 * requests are served by the same hardware path; seed/add/cleanup
 * hooks are intentionally left NULL. */
static RAND_METHOD padlock_rand = {
	.bytes = padlock_rand_bytes,
	.pseudorand = padlock_rand_bytes,
	.status = padlock_rand_status
};
1115
1116#else /* !COMPILE_HW_PADLOCK */
1117#ifndef OPENSSL_NO_DYNAMIC_ENGINE
/* Stub for dynamic-engine builds on platforms without PadLock
 * support: refuse to bind so the ENGINE loader skips this module. */
extern int bind_engine(ENGINE *e, const char *id, const dynamic_fns *fns);
extern int
bind_engine(ENGINE *e, const char *id, const dynamic_fns *fns) {
	return 0;
}
IMPLEMENT_DYNAMIC_CHECK_FN()
1124#endif
1125#endif /* COMPILE_HW_PADLOCK */
1126
1127#endif /* !OPENSSL_NO_HW_PADLOCK */
1128#endif /* !OPENSSL_NO_HW */
diff --git a/src/lib/libcrypto/engine/eng_padlock.ec b/src/lib/libcrypto/engine/eng_padlock.ec
deleted file mode 100644
index a0e7cbd60d..0000000000
--- a/src/lib/libcrypto/engine/eng_padlock.ec
+++ /dev/null
@@ -1 +0,0 @@
1L PADLOCK eng_padlock_err.h eng_padlock_err.c