path: root/src/lib/libcrypto/sha/sha1.c
author     cvs2svn <admin@example.com>    2025-04-14 17:32:06 +0000
committer  cvs2svn <admin@example.com>    2025-04-14 17:32:06 +0000
commit     eb8dd9dca1228af0cd132f515509051ecfabf6f6 (patch)
tree       edb6da6af7e865d488dc1a29309f1e1ec226e603 /src/lib/libcrypto/sha/sha1.c
parent     247f0352e0ed72a4f476db9dc91f4d982bc83eb2 (diff)
download   openbsd-tb_20250414.tar.gz / openbsd-tb_20250414.tar.bz2 / openbsd-tb_20250414.zip

This commit was manufactured by cvs2git to create tag 'tb_20250414'.
Diffstat (limited to 'src/lib/libcrypto/sha/sha1.c')
-rw-r--r--   src/lib/libcrypto/sha/sha1.c   518

1 file changed, 0 insertions, 518 deletions
diff --git a/src/lib/libcrypto/sha/sha1.c b/src/lib/libcrypto/sha/sha1.c
deleted file mode 100644
index ab05709818..0000000000
--- a/src/lib/libcrypto/sha/sha1.c
+++ /dev/null
@@ -1,518 +0,0 @@
/* $OpenBSD: sha1.c,v 1.16 2025/02/14 12:01:58 jsing Exp $ */
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
 * All rights reserved.
 *
 * This package is an SSL implementation written
 * by Eric Young (eay@cryptsoft.com).
 * The implementation was written so as to conform with Netscapes SSL.
 *
 * This library is free for commercial and non-commercial use as long as
 * the following conditions are aheared to.  The following conditions
 * apply to all code found in this distribution, be it the RC4, RSA,
 * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
 * included with this distribution is covered by the same copyright terms
 * except that the holder is Tim Hudson (tjh@cryptsoft.com).
 *
 * Copyright remains Eric Young's, and as such any Copyright notices in
 * the code are not to be removed.
 * If this package is used in a product, Eric Young should be given attribution
 * as the author of the parts of the library used.
 * This can be in the form of a textual message at program startup or
 * in documentation (online or textual) provided with the package.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    "This product includes cryptographic software written by
 *     Eric Young (eay@cryptsoft.com)"
 *    The word 'cryptographic' can be left out if the rouines from the library
 *    being used are not cryptographic related :-).
 * 4. If you include any Windows specific code (or a derivative thereof) from
 *    the apps directory (application code) you must include an acknowledgement:
 *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
 *
 * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The licence and distribution terms for any publically available version or
 * derivative of this code cannot be changed.  i.e. this code cannot simply be
 * copied and put under another distribution licence
 * [including the GNU Public Licence.]
 */

#include <stdlib.h>
#include <string.h>

#include <openssl/opensslconf.h>

#include <openssl/crypto.h>
#include <openssl/sha.h>

#include "crypto_internal.h"

#if !defined(OPENSSL_NO_SHA1) && !defined(OPENSSL_NO_SHA)

/* Ensure that SHA_LONG and uint32_t are equivalent sizes. */
CTASSERT(sizeof(SHA_LONG) == sizeof(uint32_t));

void sha1_block_data_order(SHA_CTX *ctx, const void *p, size_t num);
void sha1_block_generic(SHA_CTX *ctx, const void *p, size_t num);

#ifndef HAVE_SHA1_BLOCK_GENERIC
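/*
 * Generic C implementation. Ch, Parity and Maj are the SHA-1 logical
 * functions f_t from FIPS 180-4: Ch is used in rounds 0-19, Parity in
 * rounds 20-39 and 60-79, and Maj in rounds 40-59.
 */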
static inline SHA_LONG
Ch(SHA_LONG x, SHA_LONG y, SHA_LONG z)
{
	return (x & y) ^ (~x & z);
}

static inline SHA_LONG
Parity(SHA_LONG x, SHA_LONG y, SHA_LONG z)
{
	return x ^ y ^ z;
}

static inline SHA_LONG
Maj(SHA_LONG x, SHA_LONG y, SHA_LONG z)
{
	return (x & y) ^ (x & z) ^ (y & z);
}

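/*
 * Message schedule recurrence (FIPS 180-4):
 * W_t = ROTL1(W_{t-3} ^ W_{t-8} ^ W_{t-14} ^ W_{t-16}).
 * The schedule is kept as a sliding window of sixteen words and updated
 * in place.
 */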
static inline void
sha1_msg_schedule_update(SHA_LONG *W0, SHA_LONG W2, SHA_LONG W8, SHA_LONG W13)
{
	*W0 = crypto_rol_u32(W13 ^ W8 ^ W2 ^ *W0, 1);
}

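/*
 * Each round computes T = ROTL5(a) + f(b, c, d) + e + K_t + W_t and then
 * rotates the working variables. The four round helpers differ only in the
 * logical function f and the constant K_t used for each group of twenty
 * rounds.
 */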
static inline void
sha1_round1(SHA_LONG *a, SHA_LONG *b, SHA_LONG *c, SHA_LONG *d, SHA_LONG *e,
    SHA_LONG Wt)
{
	SHA_LONG Kt, T;

	Kt = 0x5a827999UL;
	T = crypto_rol_u32(*a, 5) + Ch(*b, *c, *d) + *e + Kt + Wt;

	*e = *d;
	*d = *c;
	*c = crypto_rol_u32(*b, 30);
	*b = *a;
	*a = T;
}

static inline void
sha1_round2(SHA_LONG *a, SHA_LONG *b, SHA_LONG *c, SHA_LONG *d, SHA_LONG *e,
    SHA_LONG Wt)
{
	SHA_LONG Kt, T;

	Kt = 0x6ed9eba1UL;
	T = crypto_rol_u32(*a, 5) + Parity(*b, *c, *d) + *e + Kt + Wt;

	*e = *d;
	*d = *c;
	*c = crypto_rol_u32(*b, 30);
	*b = *a;
	*a = T;
}

static inline void
sha1_round3(SHA_LONG *a, SHA_LONG *b, SHA_LONG *c, SHA_LONG *d, SHA_LONG *e,
    SHA_LONG Wt)
{
	SHA_LONG Kt, T;

	Kt = 0x8f1bbcdcUL;
	T = crypto_rol_u32(*a, 5) + Maj(*b, *c, *d) + *e + Kt + Wt;

	*e = *d;
	*d = *c;
	*c = crypto_rol_u32(*b, 30);
	*b = *a;
	*a = T;
}

static inline void
sha1_round4(SHA_LONG *a, SHA_LONG *b, SHA_LONG *c, SHA_LONG *d, SHA_LONG *e,
    SHA_LONG Wt)
{
	SHA_LONG Kt, T;

	Kt = 0xca62c1d6UL;
	T = crypto_rol_u32(*a, 5) + Parity(*b, *c, *d) + *e + Kt + Wt;

	*e = *d;
	*d = *c;
	*c = crypto_rol_u32(*b, 30);
	*b = *a;
	*a = T;
}

void
sha1_block_generic(SHA_CTX *ctx, const void *_in, size_t num)
{
	const uint8_t *in = _in;
	const SHA_LONG *in32;
	unsigned int a, b, c, d, e;
	unsigned int X0, X1, X2, X3, X4, X5, X6, X7,
	    X8, X9, X10, X11, X12, X13, X14, X15;

	while (num--) {
		a = ctx->h0;
		b = ctx->h1;
		c = ctx->h2;
		d = ctx->h3;
		e = ctx->h4;

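		/* Load the 64 byte block as 16 big endian 32 bit words. */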
		if ((size_t)in % 4 == 0) {
			/* Input is 32 bit aligned. */
			in32 = (const SHA_LONG *)in;
			X0 = be32toh(in32[0]);
			X1 = be32toh(in32[1]);
			X2 = be32toh(in32[2]);
			X3 = be32toh(in32[3]);
			X4 = be32toh(in32[4]);
			X5 = be32toh(in32[5]);
			X6 = be32toh(in32[6]);
			X7 = be32toh(in32[7]);
			X8 = be32toh(in32[8]);
			X9 = be32toh(in32[9]);
			X10 = be32toh(in32[10]);
			X11 = be32toh(in32[11]);
			X12 = be32toh(in32[12]);
			X13 = be32toh(in32[13]);
			X14 = be32toh(in32[14]);
			X15 = be32toh(in32[15]);
		} else {
			/* Input is not 32 bit aligned. */
			X0 = crypto_load_be32toh(&in[0 * 4]);
			X1 = crypto_load_be32toh(&in[1 * 4]);
			X2 = crypto_load_be32toh(&in[2 * 4]);
			X3 = crypto_load_be32toh(&in[3 * 4]);
			X4 = crypto_load_be32toh(&in[4 * 4]);
			X5 = crypto_load_be32toh(&in[5 * 4]);
			X6 = crypto_load_be32toh(&in[6 * 4]);
			X7 = crypto_load_be32toh(&in[7 * 4]);
			X8 = crypto_load_be32toh(&in[8 * 4]);
			X9 = crypto_load_be32toh(&in[9 * 4]);
			X10 = crypto_load_be32toh(&in[10 * 4]);
			X11 = crypto_load_be32toh(&in[11 * 4]);
			X12 = crypto_load_be32toh(&in[12 * 4]);
			X13 = crypto_load_be32toh(&in[13 * 4]);
			X14 = crypto_load_be32toh(&in[14 * 4]);
			X15 = crypto_load_be32toh(&in[15 * 4]);
		}
		in += SHA_CBLOCK;

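		/*
		 * Eighty rounds, fully unrolled, in five groups of sixteen;
		 * the sixteen schedule words are expanded in place between
		 * groups.
		 */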
		sha1_round1(&a, &b, &c, &d, &e, X0);
		sha1_round1(&a, &b, &c, &d, &e, X1);
		sha1_round1(&a, &b, &c, &d, &e, X2);
		sha1_round1(&a, &b, &c, &d, &e, X3);
		sha1_round1(&a, &b, &c, &d, &e, X4);
		sha1_round1(&a, &b, &c, &d, &e, X5);
		sha1_round1(&a, &b, &c, &d, &e, X6);
		sha1_round1(&a, &b, &c, &d, &e, X7);
		sha1_round1(&a, &b, &c, &d, &e, X8);
		sha1_round1(&a, &b, &c, &d, &e, X9);
		sha1_round1(&a, &b, &c, &d, &e, X10);
		sha1_round1(&a, &b, &c, &d, &e, X11);
		sha1_round1(&a, &b, &c, &d, &e, X12);
		sha1_round1(&a, &b, &c, &d, &e, X13);
		sha1_round1(&a, &b, &c, &d, &e, X14);
		sha1_round1(&a, &b, &c, &d, &e, X15);

		sha1_msg_schedule_update(&X0, X2, X8, X13);
		sha1_msg_schedule_update(&X1, X3, X9, X14);
		sha1_msg_schedule_update(&X2, X4, X10, X15);
		sha1_msg_schedule_update(&X3, X5, X11, X0);
		sha1_msg_schedule_update(&X4, X6, X12, X1);
		sha1_msg_schedule_update(&X5, X7, X13, X2);
		sha1_msg_schedule_update(&X6, X8, X14, X3);
		sha1_msg_schedule_update(&X7, X9, X15, X4);
		sha1_msg_schedule_update(&X8, X10, X0, X5);
		sha1_msg_schedule_update(&X9, X11, X1, X6);
		sha1_msg_schedule_update(&X10, X12, X2, X7);
		sha1_msg_schedule_update(&X11, X13, X3, X8);
		sha1_msg_schedule_update(&X12, X14, X4, X9);
		sha1_msg_schedule_update(&X13, X15, X5, X10);
		sha1_msg_schedule_update(&X14, X0, X6, X11);
		sha1_msg_schedule_update(&X15, X1, X7, X12);

		sha1_round1(&a, &b, &c, &d, &e, X0);
		sha1_round1(&a, &b, &c, &d, &e, X1);
		sha1_round1(&a, &b, &c, &d, &e, X2);
		sha1_round1(&a, &b, &c, &d, &e, X3);
		sha1_round2(&a, &b, &c, &d, &e, X4);
		sha1_round2(&a, &b, &c, &d, &e, X5);
		sha1_round2(&a, &b, &c, &d, &e, X6);
		sha1_round2(&a, &b, &c, &d, &e, X7);
		sha1_round2(&a, &b, &c, &d, &e, X8);
		sha1_round2(&a, &b, &c, &d, &e, X9);
		sha1_round2(&a, &b, &c, &d, &e, X10);
		sha1_round2(&a, &b, &c, &d, &e, X11);
		sha1_round2(&a, &b, &c, &d, &e, X12);
		sha1_round2(&a, &b, &c, &d, &e, X13);
		sha1_round2(&a, &b, &c, &d, &e, X14);
		sha1_round2(&a, &b, &c, &d, &e, X15);

		sha1_msg_schedule_update(&X0, X2, X8, X13);
		sha1_msg_schedule_update(&X1, X3, X9, X14);
		sha1_msg_schedule_update(&X2, X4, X10, X15);
		sha1_msg_schedule_update(&X3, X5, X11, X0);
		sha1_msg_schedule_update(&X4, X6, X12, X1);
		sha1_msg_schedule_update(&X5, X7, X13, X2);
		sha1_msg_schedule_update(&X6, X8, X14, X3);
		sha1_msg_schedule_update(&X7, X9, X15, X4);
		sha1_msg_schedule_update(&X8, X10, X0, X5);
		sha1_msg_schedule_update(&X9, X11, X1, X6);
		sha1_msg_schedule_update(&X10, X12, X2, X7);
		sha1_msg_schedule_update(&X11, X13, X3, X8);
		sha1_msg_schedule_update(&X12, X14, X4, X9);
		sha1_msg_schedule_update(&X13, X15, X5, X10);
		sha1_msg_schedule_update(&X14, X0, X6, X11);
		sha1_msg_schedule_update(&X15, X1, X7, X12);

		sha1_round2(&a, &b, &c, &d, &e, X0);
		sha1_round2(&a, &b, &c, &d, &e, X1);
		sha1_round2(&a, &b, &c, &d, &e, X2);
		sha1_round2(&a, &b, &c, &d, &e, X3);
		sha1_round2(&a, &b, &c, &d, &e, X4);
		sha1_round2(&a, &b, &c, &d, &e, X5);
		sha1_round2(&a, &b, &c, &d, &e, X6);
		sha1_round2(&a, &b, &c, &d, &e, X7);
		sha1_round3(&a, &b, &c, &d, &e, X8);
		sha1_round3(&a, &b, &c, &d, &e, X9);
		sha1_round3(&a, &b, &c, &d, &e, X10);
		sha1_round3(&a, &b, &c, &d, &e, X11);
		sha1_round3(&a, &b, &c, &d, &e, X12);
		sha1_round3(&a, &b, &c, &d, &e, X13);
		sha1_round3(&a, &b, &c, &d, &e, X14);
		sha1_round3(&a, &b, &c, &d, &e, X15);

		sha1_msg_schedule_update(&X0, X2, X8, X13);
		sha1_msg_schedule_update(&X1, X3, X9, X14);
		sha1_msg_schedule_update(&X2, X4, X10, X15);
		sha1_msg_schedule_update(&X3, X5, X11, X0);
		sha1_msg_schedule_update(&X4, X6, X12, X1);
		sha1_msg_schedule_update(&X5, X7, X13, X2);
		sha1_msg_schedule_update(&X6, X8, X14, X3);
		sha1_msg_schedule_update(&X7, X9, X15, X4);
		sha1_msg_schedule_update(&X8, X10, X0, X5);
		sha1_msg_schedule_update(&X9, X11, X1, X6);
		sha1_msg_schedule_update(&X10, X12, X2, X7);
		sha1_msg_schedule_update(&X11, X13, X3, X8);
		sha1_msg_schedule_update(&X12, X14, X4, X9);
		sha1_msg_schedule_update(&X13, X15, X5, X10);
		sha1_msg_schedule_update(&X14, X0, X6, X11);
		sha1_msg_schedule_update(&X15, X1, X7, X12);

		sha1_round3(&a, &b, &c, &d, &e, X0);
		sha1_round3(&a, &b, &c, &d, &e, X1);
		sha1_round3(&a, &b, &c, &d, &e, X2);
		sha1_round3(&a, &b, &c, &d, &e, X3);
		sha1_round3(&a, &b, &c, &d, &e, X4);
		sha1_round3(&a, &b, &c, &d, &e, X5);
		sha1_round3(&a, &b, &c, &d, &e, X6);
		sha1_round3(&a, &b, &c, &d, &e, X7);
		sha1_round3(&a, &b, &c, &d, &e, X8);
		sha1_round3(&a, &b, &c, &d, &e, X9);
		sha1_round3(&a, &b, &c, &d, &e, X10);
		sha1_round3(&a, &b, &c, &d, &e, X11);
		sha1_round4(&a, &b, &c, &d, &e, X12);
		sha1_round4(&a, &b, &c, &d, &e, X13);
		sha1_round4(&a, &b, &c, &d, &e, X14);
		sha1_round4(&a, &b, &c, &d, &e, X15);

		sha1_msg_schedule_update(&X0, X2, X8, X13);
		sha1_msg_schedule_update(&X1, X3, X9, X14);
		sha1_msg_schedule_update(&X2, X4, X10, X15);
		sha1_msg_schedule_update(&X3, X5, X11, X0);
		sha1_msg_schedule_update(&X4, X6, X12, X1);
		sha1_msg_schedule_update(&X5, X7, X13, X2);
		sha1_msg_schedule_update(&X6, X8, X14, X3);
		sha1_msg_schedule_update(&X7, X9, X15, X4);
		sha1_msg_schedule_update(&X8, X10, X0, X5);
		sha1_msg_schedule_update(&X9, X11, X1, X6);
		sha1_msg_schedule_update(&X10, X12, X2, X7);
		sha1_msg_schedule_update(&X11, X13, X3, X8);
		sha1_msg_schedule_update(&X12, X14, X4, X9);
		sha1_msg_schedule_update(&X13, X15, X5, X10);
		sha1_msg_schedule_update(&X14, X0, X6, X11);
		sha1_msg_schedule_update(&X15, X1, X7, X12);

		sha1_round4(&a, &b, &c, &d, &e, X0);
		sha1_round4(&a, &b, &c, &d, &e, X1);
		sha1_round4(&a, &b, &c, &d, &e, X2);
		sha1_round4(&a, &b, &c, &d, &e, X3);
		sha1_round4(&a, &b, &c, &d, &e, X4);
		sha1_round4(&a, &b, &c, &d, &e, X5);
		sha1_round4(&a, &b, &c, &d, &e, X6);
		sha1_round4(&a, &b, &c, &d, &e, X7);
		sha1_round4(&a, &b, &c, &d, &e, X8);
		sha1_round4(&a, &b, &c, &d, &e, X9);
		sha1_round4(&a, &b, &c, &d, &e, X10);
		sha1_round4(&a, &b, &c, &d, &e, X11);
		sha1_round4(&a, &b, &c, &d, &e, X12);
		sha1_round4(&a, &b, &c, &d, &e, X13);
		sha1_round4(&a, &b, &c, &d, &e, X14);
		sha1_round4(&a, &b, &c, &d, &e, X15);

		ctx->h0 += a;
		ctx->h1 += b;
		ctx->h2 += c;
		ctx->h3 += d;
		ctx->h4 += e;
	}
}
#endif

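/*
 * When no architecture specific sha1_block_data_order() is provided
 * (HAVE_SHA1_BLOCK_DATA_ORDER undefined), fall back to the generic C
 * implementation above.
 */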
#ifndef HAVE_SHA1_BLOCK_DATA_ORDER
void
sha1_block_data_order(SHA_CTX *ctx, const void *_in, size_t num)
{
	sha1_block_generic(ctx, _in, num);
}
#endif

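/*
 * SHA1_Init() sets the five chaining values to the initial hash values
 * H(0) specified in FIPS 180-4.
 */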
int
SHA1_Init(SHA_CTX *c)
{
	memset(c, 0, sizeof(*c));

	c->h0 = 0x67452301UL;
	c->h1 = 0xefcdab89UL;
	c->h2 = 0x98badcfeUL;
	c->h3 = 0x10325476UL;
	c->h4 = 0xc3d2e1f0UL;

	return 1;
}
LCRYPTO_ALIAS(SHA1_Init);

int
SHA1_Update(SHA_CTX *c, const void *data_, size_t len)
{
	const unsigned char *data = data_;
	unsigned char *p;
	SHA_LONG l;
	size_t n;

	if (len == 0)
		return 1;

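	/* Update the 64 bit message length (in bits), kept split across Nh:Nl. */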
	l = (c->Nl + (((SHA_LONG)len) << 3)) & 0xffffffffUL;
	/* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
	 * Wei Dai <weidai@eskimo.com> for pointing it out. */
	if (l < c->Nl) /* overflow */
		c->Nh++;
	c->Nh += (SHA_LONG)(len >> 29); /* might cause compiler warning on 16-bit */
	c->Nl = l;

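	/*
	 * If a previous call left a partially filled block in c->data, top it
	 * up and process it before handling full blocks directly from the
	 * caller's buffer.
	 */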
	n = c->num;
	if (n != 0) {
		p = (unsigned char *)c->data;

		if (len >= SHA_CBLOCK || len + n >= SHA_CBLOCK) {
			memcpy(p + n, data, SHA_CBLOCK - n);
			sha1_block_data_order(c, p, 1);
			n = SHA_CBLOCK - n;
			data += n;
			len -= n;
			c->num = 0;
			memset(p, 0, SHA_CBLOCK); /* keep it zeroed */
		} else {
			memcpy(p + n, data, len);
			c->num += (unsigned int)len;
			return 1;
		}
	}

	n = len / SHA_CBLOCK;
	if (n > 0) {
		sha1_block_data_order(c, data, n);
		n *= SHA_CBLOCK;
		data += n;
		len -= n;
	}

	if (len != 0) {
		p = (unsigned char *)c->data;
		c->num = (unsigned int)len;
		memcpy(p, data, len);
	}
	return 1;
}
LCRYPTO_ALIAS(SHA1_Update);

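/*
 * SHA1_Transform() compresses a single 64 byte block into the state without
 * updating the length counters.
 */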
void
SHA1_Transform(SHA_CTX *c, const unsigned char *data)
{
	sha1_block_data_order(c, data, 1);
}
LCRYPTO_ALIAS(SHA1_Transform);

int
SHA1_Final(unsigned char *md, SHA_CTX *c)
{
	unsigned char *p = (unsigned char *)c->data;
	size_t n = c->num;

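	/*
	 * Pad the final block: append the 0x80 terminator, zero fill, and end
	 * with the 64 bit big endian message length in bits. If the terminator
	 * leaves no room for the length, an extra block is processed first.
	 */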
	p[n] = 0x80; /* there is always room for one */
	n++;

	if (n > (SHA_CBLOCK - 8)) {
		memset(p + n, 0, SHA_CBLOCK - n);
		n = 0;
		sha1_block_data_order(c, p, 1);
	}

	memset(p + n, 0, SHA_CBLOCK - 8 - n);
	c->data[SHA_LBLOCK - 2] = htobe32(c->Nh);
	c->data[SHA_LBLOCK - 1] = htobe32(c->Nl);

	sha1_block_data_order(c, p, 1);
	c->num = 0;
	memset(p, 0, SHA_CBLOCK);

	crypto_store_htobe32(&md[0 * 4], c->h0);
	crypto_store_htobe32(&md[1 * 4], c->h1);
	crypto_store_htobe32(&md[2 * 4], c->h2);
	crypto_store_htobe32(&md[3 * 4], c->h3);
	crypto_store_htobe32(&md[4 * 4], c->h4);

	return 1;
}
LCRYPTO_ALIAS(SHA1_Final);

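/*
 * SHA1() is the one-shot convenience wrapper: init, update, final, then
 * scrub the context. md must point to a writable buffer of at least
 * SHA_DIGEST_LENGTH (20) bytes; no static fallback buffer is provided here.
 */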
unsigned char *
SHA1(const unsigned char *d, size_t n, unsigned char *md)
{
	SHA_CTX c;

	if (!SHA1_Init(&c))
		return NULL;
	SHA1_Update(&c, d, n);
	SHA1_Final(md, &c);

	explicit_bzero(&c, sizeof(c));

	return (md);
}
LCRYPTO_ALIAS(SHA1);

#endif