summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorotto <>2008-10-03 18:44:29 +0000
committerotto <>2008-10-03 18:44:29 +0000
commit5fe1692dea0332215b3fa1426d3b5365fe537f83 (patch)
treeaff25d998463a83917b55d709e069e6efa16592a
parente9d8e1f43896f8752171ff643f6493d7d037d8ea (diff)
downloadopenbsd-5fe1692dea0332215b3fa1426d3b5365fe537f83.tar.gz
openbsd-5fe1692dea0332215b3fa1426d3b5365fe537f83.tar.bz2
openbsd-5fe1692dea0332215b3fa1426d3b5365fe537f83.zip
when increasing the size of a larger-than-a-page allocation, try
mapping a region next to the existing one first; there's a pretty high chance there's a hole there we can use; ok deraadt@ tedu@
-rw-r--r--src/lib/libc/stdlib/malloc.c44
1 files changed, 41 insertions, 3 deletions
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index 1bd5c9f89c..a205253fd4 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,4 +1,4 @@
1/* $OpenBSD: malloc.c,v 1.99 2008/10/03 18:42:45 otto Exp $ */ 1/* $OpenBSD: malloc.c,v 1.100 2008/10/03 18:44:29 otto Exp $ */
2/* 2/*
3 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net> 3 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net>
4 * 4 *
@@ -78,6 +78,9 @@
78#define MMAP(sz) mmap(NULL, (size_t)(sz), PROT_READ | PROT_WRITE, \ 78#define MMAP(sz) mmap(NULL, (size_t)(sz), PROT_READ | PROT_WRITE, \
79 MAP_ANON | MAP_PRIVATE, -1, (off_t) 0) 79 MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)
80 80
81#define MMAPA(a,sz) mmap((a), (size_t)(sz), PROT_READ | PROT_WRITE, \
82 MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)
83
81struct region_info { 84struct region_info {
82 void *p; /* page; low bits used to mark chunks */ 85 void *p; /* page; low bits used to mark chunks */
83 uintptr_t size; /* size for pages, or chunk_info pointer */ 86 uintptr_t size; /* size for pages, or chunk_info pointer */
@@ -440,6 +443,27 @@ unmap(struct dir_info *d, void *p, size_t sz)
440 wrtwarning("malloc cache overflow"); 443 wrtwarning("malloc cache overflow");
441} 444}
442 445
446static void
447zapcacheregion(struct dir_info *d, void *p)
448{
449 u_int i;
450 struct region_info *r;
451 size_t rsz;
452
453 for (i = 0; i < malloc_cache; i++) {
454 r = &d->free_regions[i];
455 if (r->p == p) {
456 rsz = r->size << MALLOC_PAGESHIFT;
457 if (munmap(r->p, rsz))
458 wrterror("munmap");
459 r->p = NULL;
460 d->free_regions_size -= r->size;
461 r->size = 0;
462 malloc_used -= rsz;
463 }
464 }
465}
466
443static void * 467static void *
444map(struct dir_info *d, size_t sz, int zero_fill) 468map(struct dir_info *d, size_t sz, int zero_fill)
445{ 469{
@@ -1277,7 +1301,21 @@ orealloc(void *p, size_t newsz)
1277 size_t roldsz = PAGEROUND(goldsz); 1301 size_t roldsz = PAGEROUND(goldsz);
1278 size_t rnewsz = PAGEROUND(gnewsz); 1302 size_t rnewsz = PAGEROUND(gnewsz);
1279 1303
1280 if (rnewsz < roldsz) { 1304 if (rnewsz > roldsz) {
1305 if (!malloc_guard) {
1306 zapcacheregion(&g_pool, p + roldsz);
1307 q = MMAPA(p + roldsz, rnewsz - roldsz);
1308 if (q == p + roldsz) {
1309 malloc_used += rnewsz - roldsz;
1310 if (malloc_junk)
1311 memset(q, SOME_JUNK,
1312 rnewsz - roldsz);
1313 r->size = newsz;
1314 return p;
1315 } else if (q != MAP_FAILED)
1316 munmap(q, rnewsz - roldsz);
1317 }
1318 } else if (rnewsz < roldsz) {
1281 if (malloc_guard) { 1319 if (malloc_guard) {
1282 if (mprotect((char *)p + roldsz - malloc_guard, 1320 if (mprotect((char *)p + roldsz - malloc_guard,
1283 malloc_guard, PROT_READ | PROT_WRITE)) 1321 malloc_guard, PROT_READ | PROT_WRITE))
@@ -1289,7 +1327,7 @@ orealloc(void *p, size_t newsz)
1289 unmap(&g_pool, (char *)p + rnewsz, roldsz - rnewsz); 1327 unmap(&g_pool, (char *)p + rnewsz, roldsz - rnewsz);
1290 r->size = gnewsz; 1328 r->size = gnewsz;
1291 return p; 1329 return p;
1292 } else if (rnewsz == roldsz) { 1330 } else {
1293 if (newsz > oldsz && malloc_junk) 1331 if (newsz > oldsz && malloc_junk)
1294 memset((char *)p + newsz, SOME_JUNK, 1332 memset((char *)p + newsz, SOME_JUNK,
1295 rnewsz - malloc_guard - newsz); 1333 rnewsz - malloc_guard - newsz);