summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author    miod <>  2024-03-30 07:50:39 +0000
committer miod <>  2024-03-30 07:50:39 +0000
commit  483e9a55e66d452ef6ee33193cb8233c6f9325b0 (patch)
tree    22787ebcbc3d15feac1ece2a9d1b1d32cfd39569
parent  2214bce1841d01c1c38687ff7bed8aa3bc1ad5d7 (diff)
downloadopenbsd-483e9a55e66d452ef6ee33193cb8233c6f9325b0.tar.gz
openbsd-483e9a55e66d452ef6ee33193cb8233c6f9325b0.tar.bz2
openbsd-483e9a55e66d452ef6ee33193cb8233c6f9325b0.zip
In _malloc_init(), round up the region being mprotected RW to the malloc
page size, rather than relying upon mprotect to round up to the actual
mmu page size.

This repairs malloc operation on systems where the malloc page size
(1 << _MAX_PAGE_SHIFT) is larger than the mmu page size.

ok otto@
-rw-r--r--  src/lib/libc/stdlib/malloc.c | 22
1 file changed, 11 insertions, 11 deletions
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index b5eb212227..17ba5fb127 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,4 +1,4 @@
1/* $OpenBSD: malloc.c,v 1.295 2023/12/19 06:59:28 otto Exp $ */ 1/* $OpenBSD: malloc.c,v 1.296 2024/03/30 07:50:39 miod Exp $ */
2/* 2/*
3 * Copyright (c) 2008, 2010, 2011, 2016, 2023 Otto Moerbeek <otto@drijf.net> 3 * Copyright (c) 2008, 2010, 2011, 2016, 2023 Otto Moerbeek <otto@drijf.net>
4 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org> 4 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
@@ -1428,7 +1428,7 @@ _malloc_init(int from_rthreads)
1428 } 1428 }
1429 if (!mopts.malloc_canary) { 1429 if (!mopts.malloc_canary) {
1430 char *p; 1430 char *p;
1431 size_t sz, d_avail; 1431 size_t sz, roundup_sz, d_avail;
1432 1432
1433 omalloc_init(); 1433 omalloc_init();
1434 /* 1434 /*
@@ -1436,20 +1436,20 @@ _malloc_init(int from_rthreads)
1436 * randomise offset inside the page at which the dir_infos 1436 * randomise offset inside the page at which the dir_infos
1437 * lay (subject to alignment by 1 << MALLOC_MINSHIFT) 1437 * lay (subject to alignment by 1 << MALLOC_MINSHIFT)
1438 */ 1438 */
1439 sz = mopts.malloc_mutexes * sizeof(*d) + 2 * MALLOC_PAGESIZE; 1439 sz = mopts.malloc_mutexes * sizeof(*d);
1440 if ((p = MMAPNONE(sz, 0)) == MAP_FAILED) 1440 roundup_sz = (sz + MALLOC_PAGEMASK) & ~MALLOC_PAGEMASK;
1441 if ((p = MMAPNONE(roundup_sz + 2 * MALLOC_PAGESIZE, 0)) ==
1442 MAP_FAILED)
1441 wrterror(NULL, "malloc_init mmap1 failed"); 1443 wrterror(NULL, "malloc_init mmap1 failed");
1442 if (mprotect(p + MALLOC_PAGESIZE, mopts.malloc_mutexes * 1444 if (mprotect(p + MALLOC_PAGESIZE, roundup_sz,
1443 sizeof(*d), PROT_READ | PROT_WRITE)) 1445 PROT_READ | PROT_WRITE))
1444 wrterror(NULL, "malloc_init mprotect1 failed"); 1446 wrterror(NULL, "malloc_init mprotect1 failed");
1445 if (mimmutable(p, sz)) 1447 if (mimmutable(p, roundup_sz + 2 * MALLOC_PAGESIZE))
1446 wrterror(NULL, "malloc_init mimmutable1 failed"); 1448 wrterror(NULL, "malloc_init mimmutable1 failed");
1447 d_avail = (((mopts.malloc_mutexes * sizeof(*d) + 1449 d_avail = (roundup_sz - sz) >> MALLOC_MINSHIFT;
1448 MALLOC_PAGEMASK) & ~MALLOC_PAGEMASK) -
1449 (mopts.malloc_mutexes * sizeof(*d))) >> MALLOC_MINSHIFT;
1450 d = (struct dir_info *)(p + MALLOC_PAGESIZE + 1450 d = (struct dir_info *)(p + MALLOC_PAGESIZE +
1451 (arc4random_uniform(d_avail) << MALLOC_MINSHIFT)); 1451 (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));
1452 STATS_ADD(d[1].malloc_used, sz); 1452 STATS_ADD(d[1].malloc_used, roundup_sz + 2 * MALLOC_PAGESIZE);
1453 for (i = 0; i < mopts.malloc_mutexes; i++) 1453 for (i = 0; i < mopts.malloc_mutexes; i++)
1454 mopts.malloc_pool[i] = &d[i]; 1454 mopts.malloc_pool[i] = &d[i];
1455 mopts.internal_funcs = 1; 1455 mopts.internal_funcs = 1;