author	otto <>	2009-12-16 08:23:53 +0000
committer	otto <>	2009-12-16 08:23:53 +0000
commit	adcff29d3eb8a9b8623e2fc0f4f8554a1a1c9763 (patch)
tree	3bee0f84f22b0c662f7377607d46ccae3064a6f9 /src
parent	0ac52e6e4d02749d85405ea08400d4b7e6db2ca1 (diff)
download	openbsd-adcff29d3eb8a9b8623e2fc0f4f8554a1a1c9763.tar.gz
	openbsd-adcff29d3eb8a9b8623e2fc0f4f8554a1a1c9763.tar.bz2
	openbsd-adcff29d3eb8a9b8623e2fc0f4f8554a1a1c9763.zip
save calls to arc4random() by using a nibble at a time; not because
arc4random() is slow, but because it induces getpid() calls; this also saves a bit on stirring effort
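The idea, roughly: each buffered random byte now serves two requests, low nibble first, then high nibble, so the 512-byte rbytes[] pool yields 1024 values per arc4random_buf() refill instead of 512, and the refill (which is what triggers the getpid() check mentioned above) happens half as often. A minimal standalone sketch of that splitting follows; the names pool, pool_used and get_nibble are invented for illustration, and the committed rbytes_init()/getrnibble() in the diff below are the authoritative version.

	/*
	 * Illustrative sketch only, not the committed code: split each buffered
	 * random byte into two 4-bit values, refilling the pool with a single
	 * arc4random_buf() call once all nibbles have been handed out.
	 */
	#include <stdlib.h>	/* arc4random_buf() on OpenBSD */

	static unsigned char pool[512];
	static size_t pool_used = 2 * sizeof(pool);	/* force a refill on first use */

	static unsigned char
	get_nibble(void)
	{
		unsigned char b;

		if (pool_used >= 2 * sizeof(pool)) {	/* all 1024 nibbles consumed */
			arc4random_buf(pool, sizeof(pool));
			pool_used = 0;
		}
		b = pool[pool_used / 2];
		return (pool_used++ & 1) ? b >> 4 : b & 0xf;	/* low nibble first */
	}
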
Diffstat (limited to 'src')
-rw-r--r--	src/lib/libc/stdlib/malloc.c	51
1 file changed, 27 insertions(+), 24 deletions(-)
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index b3fa447ceb..9bdea862f2 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: malloc.c,v 1.122 2009/12/07 18:47:38 miod Exp $ */
+/* $OpenBSD: malloc.c,v 1.123 2009/12/16 08:23:53 otto Exp $ */
 /*
  * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net>
  *
@@ -66,7 +66,7 @@
 
 #define MALLOC_MAXCHUNK (1 << (MALLOC_PAGESHIFT-1))
 #define MALLOC_MAXCACHE 256
-#define MALLOC_DELAYED_CHUNKS 16 /* should be power of 2 */
+#define MALLOC_DELAYED_CHUNKS 15 /* max of getrnibble() */
 /*
  * When the P option is active, we move allocations between half a page
  * and a whole page towards the end, subject to alignment constraints.
@@ -112,7 +112,7 @@ struct dir_info {
 	/* free pages cache */
 	struct region_info free_regions[MALLOC_MAXCACHE];
 	/* delayed free chunk slots */
-	void *delayed_chunks[MALLOC_DELAYED_CHUNKS];
+	void *delayed_chunks[MALLOC_DELAYED_CHUNKS + 1];
 #ifdef MALLOC_STATS
 	size_t inserts;
 	size_t insert_collisions;
@@ -185,9 +185,9 @@ static int malloc_active; /* status of malloc */
 static size_t malloc_guarded; /* bytes used for guards */
 static size_t malloc_used; /* bytes allocated */
 
-static size_t rbytesused; /* random bytes used */
+static size_t rnibblesused; /* random nibbles used */
 static u_char rbytes[512]; /* random bytes */
-static u_char getrbyte(void);
+static u_char getrnibble(void);
 
 extern char *__progname;
 
@@ -380,6 +380,24 @@ wrterror(char *p)
 	abort();
 }
 
+static void
+rbytes_init(void)
+{
+	arc4random_buf(rbytes, sizeof(rbytes));
+	rnibblesused = 0;
+}
+
+static inline u_char
+getrnibble(void)
+{
+	u_char x;
+
+	if (rnibblesused >= 2 * sizeof(rbytes))
+		rbytes_init();
+	x = rbytes[rnibblesused++ / 2];
+	return (rnibblesused & 1 ? x & 0xf : x >> 4);
+}
+
 /*
  * Cache maintenance. We keep at most malloc_cache pages cached.
  * If the cache is becoming full, unmap pages in the cache for real,
@@ -410,7 +428,7 @@ unmap(struct dir_info *d, void *p, size_t sz)
 	rsz = mopts.malloc_cache - d->free_regions_size;
 	if (psz > rsz)
 		tounmap = psz - rsz;
-	offset = getrbyte();
+	offset = getrnibble();
 	for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) {
 		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
 		if (r->p != NULL) {
@@ -491,7 +509,7 @@ map(struct dir_info *d, size_t sz, int zero_fill)
 		/* zero fill not needed */
 		return p;
 	}
-	offset = getrbyte();
+	offset = getrnibble();
 	for (i = 0; i < mopts.malloc_cache; i++) {
 		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
 		if (r->p != NULL) {
@@ -538,21 +556,6 @@ map(struct dir_info *d, size_t sz, int zero_fill)
 	return p;
 }
 
-static void
-rbytes_init(void)
-{
-	arc4random_buf(rbytes, sizeof(rbytes));
-	rbytesused = 0;
-}
-
-static u_char
-getrbyte(void)
-{
-	if (rbytesused >= sizeof(rbytes))
-		rbytes_init();
-	return rbytes[rbytesused++];
-}
-
 /*
  * Initialize a dir_info, which should have been cleared by caller
  */
@@ -1012,7 +1015,7 @@ malloc_bytes(struct dir_info *d, size_t size)
 	}
 
 	/* advance a random # of positions */
-	i = (getrbyte() & (MALLOC_DELAYED_CHUNKS - 1)) % bp->free;
+	i = getrnibble() % bp->free;
 	while (i > 0) {
 		u += u;
 		k++;
@@ -1275,7 +1278,7 @@ ofree(void *p)
 	if (mopts.malloc_junk && sz > 0)
 		memset(p, SOME_FREEJUNK, sz);
 	if (!mopts.malloc_freeprot) {
-		i = getrbyte() & (MALLOC_DELAYED_CHUNKS - 1);
+		i = getrnibble();
 		tmp = p;
 		p = g_pool->delayed_chunks[i];
 		g_pool->delayed_chunks[i] = tmp;
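
A closing note on the sizing change above: getrnibble() returns 0..15, so MALLOC_DELAYED_CHUNKS is redefined as the maximum nibble value (15) rather than a power of two, delayed_chunks[] gains one slot to keep 16 entries, and the old "& (MALLOC_DELAYED_CHUNKS - 1)" masking in malloc_bytes() and ofree() is no longer needed. A small hypothetical illustration of that invariant follows; swap_delayed() is an invented name and not part of the commit, which simply performs this swap inline in ofree().

	#include <assert.h>

	#define MALLOC_DELAYED_CHUNKS 15	/* max value of getrnibble() */

	static void *delayed_chunks[MALLOC_DELAYED_CHUNKS + 1];	/* 16 slots, indices 0..15 */

	/*
	 * Mock of the delayed-free swap in ofree(): a nibble is already a valid
	 * index, so no mask is applied before using it.
	 */
	static void *
	swap_delayed(unsigned char nib, void *p)
	{
		void *old;

		assert(nib <= MALLOC_DELAYED_CHUNKS);	/* holds for any nibble */
		old = delayed_chunks[nib];
		delayed_chunks[nib] = p;
		return old;	/* the chunk that actually gets freed later */
	}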