path: root/src/lib/libcrypto/rand/md_rand.c
author	beck <>	2001-08-01 19:51:17 +0000
committer	beck <>	2001-08-01 19:51:17 +0000
commit	4b790f68539c49ef91f5e82506c2624900c92106 (patch)
tree	d14ff9f79630cb04ad006cd2730646f44f62d786	/src/lib/libcrypto/rand/md_rand.c
parent	edbfd6c7e91e15e92ef0df548474ac76b6dddca0 (diff)
download	openbsd-4b790f68539c49ef91f5e82506c2624900c92106.tar.gz
	openbsd-4b790f68539c49ef91f5e82506c2624900c92106.tar.bz2
	openbsd-4b790f68539c49ef91f5e82506c2624900c92106.zip
merge openssl 0.9.6b-engine
Note that this is a maintenance release; APIs appear *not* to have changed. As such, I have only increased the minor number on these libraries.
Diffstat (limited to 'src/lib/libcrypto/rand/md_rand.c')
-rw-r--r--	src/lib/libcrypto/rand/md_rand.c	99
1 file changed, 66 insertions(+), 33 deletions(-)
diff --git a/src/lib/libcrypto/rand/md_rand.c b/src/lib/libcrypto/rand/md_rand.c
index ae57570608..04b9d695b0 100644
--- a/src/lib/libcrypto/rand/md_rand.c
+++ b/src/lib/libcrypto/rand/md_rand.c
@@ -141,10 +141,11 @@ static long md_count[2]={0,0};
 static double entropy=0;
 static int initialized=0;
 
-/* This should be set to 1 only when ssleay_rand_add() is called inside
-   an already locked state, so it doesn't try to lock and thereby cause
-   a hang. And it should always be reset back to 0 before unlocking. */
-static int add_do_not_lock=0;
+static unsigned int crypto_lock_rand = 0; /* may be set only when a thread
+                                           * holds CRYPTO_LOCK_RAND
+                                           * (to prevent double locking) */
+static unsigned long locking_thread = 0; /* valid iff crypto_lock_rand is set */
+
 
 #ifdef PREDICT
 int rand_predictable=0;
@@ -191,6 +192,7 @@ static void ssleay_rand_add(const void *buf, int num, double add)
 	long md_c[2];
 	unsigned char local_md[MD_DIGEST_LENGTH];
 	MD_CTX m;
+	int do_not_lock;
 
 	/*
 	 * (Based on the rand(3) manpage)
@@ -207,7 +209,10 @@ static void ssleay_rand_add(const void *buf, int num, double add)
 	 * hash function.
 	 */
 
-	if (!add_do_not_lock) CRYPTO_w_lock(CRYPTO_LOCK_RAND);
+	/* check if we already have the lock */
+	do_not_lock = crypto_lock_rand && (locking_thread == CRYPTO_thread_id());
+
+	if (!do_not_lock) CRYPTO_w_lock(CRYPTO_LOCK_RAND);
 	st_idx=state_index;
 
 	/* use our own copies of the counters so that even
@@ -239,7 +244,7 @@ static void ssleay_rand_add(const void *buf, int num, double add)
 
 	md_count[1] += (num / MD_DIGEST_LENGTH) + (num % MD_DIGEST_LENGTH > 0);
 
-	if (!add_do_not_lock) CRYPTO_w_unlock(CRYPTO_LOCK_RAND);
+	if (!do_not_lock) CRYPTO_w_unlock(CRYPTO_LOCK_RAND);
 
 	for (i=0; i<num; i+=MD_DIGEST_LENGTH)
 		{
@@ -281,7 +286,7 @@ static void ssleay_rand_add(const void *buf, int num, double add)
 		}
 	memset((char *)&m,0,sizeof(m));
 
-	if (!add_do_not_lock) CRYPTO_w_lock(CRYPTO_LOCK_RAND);
+	if (!do_not_lock) CRYPTO_w_lock(CRYPTO_LOCK_RAND);
 	/* Don't just copy back local_md into md -- this could mean that
 	 * other thread's seeding remains without effect (except for
 	 * the incremented counter). By XORing it we keep at least as
@@ -292,7 +297,7 @@ static void ssleay_rand_add(const void *buf, int num, double add)
 		}
 	if (entropy < ENTROPY_NEEDED) /* stop counting when we have enough */
 		entropy += add;
-	if (!add_do_not_lock) CRYPTO_w_unlock(CRYPTO_LOCK_RAND);
+	if (!do_not_lock) CRYPTO_w_unlock(CRYPTO_LOCK_RAND);
 
 #if !defined(THREADS) && !defined(WIN32)
 	assert(md_c[1] == md_count[1]);
@@ -340,28 +345,31 @@ static int ssleay_rand_bytes(unsigned char *buf, int num)
 	 *
 	 * For each group of 10 bytes (or less), we do the following:
 	 *
-	 * Input into the hash function the top 10 bytes from the
-	 * local 'md' (which is initialized from the global 'md'
-	 * before any bytes are generated), the bytes that are
-	 * to be overwritten by the random bytes, and bytes from the
-	 * 'state' (incrementing looping index). From this digest output
-	 * (which is kept in 'md'), the top (up to) 10 bytes are
-	 * returned to the caller and the bottom (up to) 10 bytes are xored
-	 * into the 'state'.
+	 * Input into the hash function the local 'md' (which is initialized from
+	 * the global 'md' before any bytes are generated), the bytes that are to
+	 * be overwritten by the random bytes, and bytes from the 'state'
+	 * (incrementing looping index). From this digest output (which is kept
+	 * in 'md'), the top (up to) 10 bytes are returned to the caller and the
+	 * bottom 10 bytes are xored into the 'state'.
+	 *
 	 * Finally, after we have finished 'num' random bytes for the
 	 * caller, 'count' (which is incremented) and the local and global 'md'
 	 * are fed into the hash function and the results are kept in the
 	 * global 'md'.
 	 */
 
-	if (!initialized)
-		RAND_poll();
-
 	CRYPTO_w_lock(CRYPTO_LOCK_RAND);
-	add_do_not_lock = 1;	/* Since we call ssleay_rand_add while in
-				   this locked state. */
 
-	initialized = 1;
+	/* prevent ssleay_rand_bytes() from trying to obtain the lock again */
+	crypto_lock_rand = 1;
+	locking_thread = CRYPTO_thread_id();
+
+	if (!initialized)
+		{
+		RAND_poll();
+		initialized = 1;
+		}
+
 	if (!stirred_pool)
 		do_stir_pool = 1;
 
@@ -387,11 +395,11 @@ static int ssleay_rand_bytes(unsigned char *buf, int num)
 
 	if (do_stir_pool)
 		{
-		/* Our output function chains only half of 'md', so we better
-		 * make sure that the required entropy gets 'evenly distributed'
-		 * through 'state', our randomness pool. The input function
-		 * (ssleay_rand_add) chains all of 'md', which makes it more
-		 * suitable for this purpose.
+		/* In the output function only half of 'md' remains secret,
+		 * so we better make sure that the required entropy gets
+		 * 'evenly distributed' through 'state', our randomness pool.
+		 * The input function (ssleay_rand_add) chains all of 'md',
+		 * which makes it more suitable for this purpose.
 		 */
 
 		int n = STATE_SIZE; /* so that the complete pool gets accessed */
@@ -425,8 +433,9 @@ static int ssleay_rand_bytes(unsigned char *buf, int num)
 
 	md_count[0] += 1;
 
-	add_do_not_lock = 0;	/* If this would ever be forgotten, we can
-				   expect any evil god to eat our souls. */
+	/* before unlocking, we must clear 'crypto_lock_rand' */
+	crypto_lock_rand = 0;
+	locking_thread = 0;
 	CRYPTO_w_unlock(CRYPTO_LOCK_RAND);
 
 	while (num > 0)
@@ -492,11 +501,12 @@ static int ssleay_rand_bytes(unsigned char *buf, int num)
 static int ssleay_rand_pseudo_bytes(unsigned char *buf, int num)
 	{
 	int ret;
+	unsigned long err;
 
 	ret = RAND_bytes(buf, num);
 	if (ret == 0)
 		{
-		long err = ERR_peek_error();
+		err = ERR_peek_error();
 		if (ERR_GET_LIB(err) == ERR_LIB_RAND &&
 		    ERR_GET_REASON(err) == RAND_R_PRNG_NOT_SEEDED)
 			(void)ERR_get_error();
@@ -507,14 +517,37 @@ static int ssleay_rand_pseudo_bytes(unsigned char *buf, int num)
 static int ssleay_rand_status(void)
 	{
 	int ret;
+	int do_not_lock;
 
+	/* check if we already have the lock
+	 * (could happen if a RAND_poll() implementation calls RAND_status()) */
+	do_not_lock = crypto_lock_rand && (locking_thread == CRYPTO_thread_id());
+
+	if (!do_not_lock)
+		{
+		CRYPTO_w_lock(CRYPTO_LOCK_RAND);
+
+		/* prevent ssleay_rand_bytes() from trying to obtain the lock again */
+		crypto_lock_rand = 1;
+		locking_thread = CRYPTO_thread_id();
+		}
+
 	if (!initialized)
+		{
 		RAND_poll();
+		initialized = 1;
+		}
 
-	CRYPTO_w_lock(CRYPTO_LOCK_RAND);
-	initialized = 1;
 	ret = entropy >= ENTROPY_NEEDED;
-	CRYPTO_w_unlock(CRYPTO_LOCK_RAND);
 
+	if (!do_not_lock)
+		{
+		/* before unlocking, we must clear 'crypto_lock_rand' */
+		crypto_lock_rand = 0;
+		locking_thread = 0;
+
+		CRYPTO_w_unlock(CRYPTO_LOCK_RAND);
+		}
+
 	return ret;
 	}
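
The heart of this merge is the replacement of the global 'add_do_not_lock' flag with the pair 'crypto_lock_rand'/'locking_thread': a function that may be re-entered while CRYPTO_LOCK_RAND is held (ssleay_rand_add(), ssleay_rand_status()) now checks whether the *current* thread is the one already holding the lock before trying to take it again. Below is a minimal, hedged sketch of that re-entrancy-guard pattern in plain C with pthreads; the names rand_lock, lock_is_held, lock_owner, seed_pool and get_bytes are illustrative stand-ins, not the real CRYPTO_* API.

/*
 * Sketch only (not OpenSSL code): a global flag records that some thread
 * holds the lock, together with that thread's id; a routine that may be
 * called back while the lock is held skips locking when the caller is
 * already the owner, avoiding the self-deadlock the old flag worked around.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rand_lock = PTHREAD_MUTEX_INITIALIZER;
static int lock_is_held = 0;   /* analogue of crypto_lock_rand */
static pthread_t lock_owner;   /* analogue of locking_thread   */

static void seed_pool(void)    /* analogue of ssleay_rand_add() */
	{
	/* check if we already have the lock */
	int do_not_lock = lock_is_held && pthread_equal(lock_owner, pthread_self());

	if (!do_not_lock) pthread_mutex_lock(&rand_lock);
	/* ... mix new input into the pool here ... */
	if (!do_not_lock) pthread_mutex_unlock(&rand_lock);
	}

static void get_bytes(void)    /* analogue of ssleay_rand_bytes() */
	{
	pthread_mutex_lock(&rand_lock);
	lock_is_held = 1;          /* let callees skip re-locking */
	lock_owner = pthread_self();

	seed_pool();               /* re-entrant call: does not deadlock */

	lock_is_held = 0;          /* must be cleared before unlocking */
	pthread_mutex_unlock(&rand_lock);
	}

int main(void)
	{
	get_bytes();
	puts("no deadlock");
	return 0;
	}

The clear-before-unlock ordering in get_bytes() mirrors the patch's own comment ("before unlocking, we must clear 'crypto_lock_rand'"): once the lock is released, another thread may immediately acquire it and set the flag for itself, so resetting the flag afterwards could clobber that thread's bookkeeping.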