| author | otto <> | 2022-12-27 17:31:09 +0000 | 
|---|---|---|
| committer | otto <> | 2022-12-27 17:31:09 +0000 | 
| commit | 729a541ba9157f5173003981ee57625e240b5a6f (patch) | |
| tree | b1094551be34331b7e0022577741d36d2c9e6b5f /src/lib/libc/stdlib/malloc.c | |
| parent | 782cd9477d454b4316ad7055e02ac2c645235d94 (diff) | |
| download | openbsd-729a541ba9157f5173003981ee57625e240b5a6f.tar.gz, openbsd-729a541ba9157f5173003981ee57625e240b5a6f.tar.bz2, openbsd-729a541ba9157f5173003981ee57625e240b5a6f.zip | |
Change the way malloc_init() works so that the main data structures
can be made immutable to provide extra protection.  Also init pools
on-demand: only pools that are actually used are initialized.
Tested by many
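
The extra protection referred to above comes from OpenBSD's mimmutable(2): once a mapping is marked immutable, its permissions and existence can no longer be changed, so the allocator's bookkeeping can no longer be re-mprotect()ed or remapped after initialization. Below is a minimal sketch of the guard-page-plus-immutable pattern the new `_malloc_init()` uses; `PAGE_SZ`, `struct bookkeeping`, and `alloc_guarded_immutable` are illustrative stand-ins, not the real malloc internals:

```c
#include <sys/mman.h>
#include <stddef.h>
#include <err.h>

/*
 * Sketch: place a structure between two guard pages and seal the
 * whole mapping with the OpenBSD-only mimmutable(2).  PAGE_SZ and
 * struct bookkeeping are made-up stand-ins for MALLOC_PAGESIZE and
 * dir_info.
 */
#define PAGE_SZ 4096

struct bookkeeping { char data[256]; };

static struct bookkeeping *
alloc_guarded_immutable(void)
{
	size_t sz = 3 * PAGE_SZ;	/* guard + interior + guard */
	char *p;

	/* PROT_NONE first: the two guard pages stay inaccessible. */
	p = mmap(NULL, sz, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	/* Grant read/write on the interior page only. */
	if (mprotect(p + PAGE_SZ, PAGE_SZ, PROT_READ | PROT_WRITE) == -1)
		err(1, "mprotect");
	/* Seal: no further mprotect()/munmap() on this range. */
	if (mimmutable(p, sz) == -1)
		err(1, "mimmutable");
	return (struct bookkeeping *)(p + PAGE_SZ);
}
```

Note that immutable does not mean read-only: the interior stays writable, but nobody can widen, revoke, or unmap the mapping afterwards. The `malloc_readonly` options structure additionally gets mprotect(PROT_READ) before being sealed, as the diff below shows.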
Diffstat (limited to 'src/lib/libc/stdlib/malloc.c')
| -rw-r--r-- | src/lib/libc/stdlib/malloc.c | 131 | 
1 file changed, 66 insertions(+), 65 deletions(-)
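
The on-demand part shows up in `omalloc_poolinit()` and `omalloc_grow()` below: a pool now starts with `regions_total == 0` and no region table at all, and the table is only created by `omalloc_grow()` on the first `insert()`. A rough stand-alone sketch of that grow-on-first-use logic follows; `struct table`, `INITIAL_SLOTS`, and the helpers are hypothetical simplifications (the real table is a hash keyed by page address, not an append-only array):

```c
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for dir_info's region-table bookkeeping. */
struct table {
	void	**slots;
	size_t	total;		/* capacity; stays 0 until first use */
	size_t	free;		/* unused slots */
};

#define INITIAL_SLOTS 512	/* plays the role of MALLOC_INITIAL_REGIONS */

static int
grow(struct table *t)
{
	/* First use allocates the initial table; afterwards double it. */
	size_t newtotal = t->total == 0 ? INITIAL_SLOTS : t->total * 2;
	void **p = calloc(newtotal, sizeof(*p));

	if (p == NULL)
		return 1;
	if (t->total > 0) {	/* nothing to copy or free on first use */
		memcpy(p, t->slots, t->total * sizeof(*p));
		free(t->slots);
	}
	t->free += newtotal - t->total;
	t->total = newtotal;
	t->slots = p;
	return 0;
}

static int
insert(struct table *t, void *v)
{
	/* Grow when under a quarter free -- or on the very first use. */
	if (t->free * 4 < t->total || t->total == 0) {
		if (grow(t))
			return 1;
	}
	/* Append at the first unused slot; the real code hashes. */
	t->slots[t->total - t->free] = v;
	t->free--;
	return 0;
}
```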
```diff
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index a0ee04f821..99249b24cb 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: malloc.c,v 1.275 2022/10/14 04:38:39 deraadt Exp $	*/
+/*	$OpenBSD: malloc.c,v 1.276 2022/12/27 17:31:09 otto Exp $	*/
 /*
  * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net>
  * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
@@ -142,6 +142,7 @@ struct dir_info {
 	int	malloc_junk;		/* junk fill? */
 	int	mmap_flag;		/* extra flag for mmap */
 	int	mutex;
+	int	malloc_mt;		/* multi-threaded mode? */
 	/* lists of free chunk info structs */
 	struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
 	/* lists of chunks with free slots */
@@ -181,8 +182,6 @@ struct dir_info {
 #endif /* MALLOC_STATS */
 	u_int32_t canary2;
 };
-#define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
-			~MALLOC_PAGEMASK)
 
 static void unmap(struct dir_info *d, void *p, size_t sz, size_t clear);
 
@@ -208,7 +207,6 @@ struct malloc_readonly {
 	/* Main bookkeeping information */
 	struct dir_info *malloc_pool[_MALLOC_MUTEXES];
 	u_int	malloc_mutexes;		/* how much in actual use? */
-	int	malloc_mt;		/* multi-threaded mode? */
 	int	malloc_freecheck;	/* Extensive double free check */
 	int	malloc_freeunmap;	/* mprotect free pages PROT_NONE? */
 	int	def_malloc_junk;	/* junk fill? */
@@ -258,7 +256,7 @@ static void malloc_exit(void);
 static inline void
 _MALLOC_LEAVE(struct dir_info *d)
 {
-	if (mopts.malloc_mt) {
+	if (d->malloc_mt) {
 		d->active--;
 		_MALLOC_UNLOCK(d->mutex);
 	}
@@ -267,7 +265,7 @@ _MALLOC_LEAVE(struct dir_info *d)
 static inline void
 _MALLOC_ENTER(struct dir_info *d)
 {
-	if (mopts.malloc_mt) {
+	if (d->malloc_mt) {
 		_MALLOC_LOCK(d->mutex);
 		d->active++;
 	}
@@ -292,7 +290,7 @@ hash(void *p)
 static inline struct dir_info *
 getpool(void)
 {
-	if (!mopts.malloc_mt)
+	if (mopts.malloc_pool[1] == NULL || !mopts.malloc_pool[1]->malloc_mt)
 		return mopts.malloc_pool[1];
 	else	/* first one reserved for special pool */
 		return mopts.malloc_pool[1 + TIB_GET()->tib_tid %
@@ -497,46 +495,22 @@ omalloc_init(void)
 }
 
 static void
-omalloc_poolinit(struct dir_info **dp, int mmap_flag)
+omalloc_poolinit(struct dir_info *d, int mmap_flag)
 {
-	char *p;
-	size_t d_avail, regioninfo_size;
-	struct dir_info *d;
 	int i, j;
 
-	/*
-	 * Allocate dir_info with a guard page on either side. Also
-	 * randomise offset inside the page at which the dir_info
-	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
-	 */
-	if ((p = MMAPNONE(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2), mmap_flag)) ==
-	    MAP_FAILED)
-		wrterror(NULL, "malloc init mmap failed");
-	mprotect(p + MALLOC_PAGESIZE, DIR_INFO_RSZ, PROT_READ | PROT_WRITE);
-	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
-	d = (struct dir_info *)(p + MALLOC_PAGESIZE +
-	    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));
-
-	rbytes_init(d);
-	d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS;
-	regioninfo_size = d->regions_total * sizeof(struct region_info);
-	d->r = MMAP(regioninfo_size, mmap_flag);
-	if (d->r == MAP_FAILED) {
-		d->regions_total = 0;
-		wrterror(NULL, "malloc init mmap failed");
-	}
+	d->r = NULL;
+	d->rbytesused = sizeof(d->rbytes);
+	d->regions_free = d->regions_total = 0;
 	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
 		LIST_INIT(&d->chunk_info_list[i]);
 		for (j = 0; j < MALLOC_CHUNK_LISTS; j++)
 			LIST_INIT(&d->chunk_dir[i][j]);
 	}
-	STATS_ADD(d->malloc_used, regioninfo_size + 3 * MALLOC_PAGESIZE);
 	d->mmap_flag = mmap_flag;
 	d->malloc_junk = mopts.def_malloc_junk;
 	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
 	d->canary2 = ~d->canary1;
-
-	*dp = d;
 }
 
 static int
@@ -551,7 +525,8 @@ omalloc_grow(struct dir_info *d)
 	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
 		return 1;
 
-	newtotal = d->regions_total * 2;
+	newtotal = d->regions_total == 0 ? MALLOC_INITIAL_REGIONS :
+	    d->regions_total * 2;
 	newsize = PAGEROUND(newtotal * sizeof(struct region_info));
 	mask = newtotal - 1;
 
@@ -576,10 +551,12 @@ omalloc_grow(struct dir_info *d)
 		}
 	}
 
-	oldpsz = PAGEROUND(d->regions_total * sizeof(struct region_info));
-	/* clear to avoid meta info ending up in the cache */
-	unmap(d, d->r, oldpsz, oldpsz);
-	d->regions_free += d->regions_total;
+	if (d->regions_total > 0) {
+		oldpsz = PAGEROUND(d->regions_total * sizeof(struct region_info));
+		/* clear to avoid meta info ending up in the cache */
+		unmap(d, d->r, oldpsz, oldpsz);
+	}
+	d->regions_free += newtotal - d->regions_total;
 	d->regions_total = newtotal;
 	d->r = p;
 	return 0;
@@ -596,7 +573,7 @@ insert(struct dir_info *d, void *p, size_t sz, void *f)
 	size_t mask;
 	void *q;
 
-	if (d->regions_free * 4 < d->regions_total) {
+	if (d->regions_free * 4 < d->regions_total || d->regions_total == 0) {
 		if (omalloc_grow(d))
 			return 1;
 	}
@@ -628,6 +605,8 @@ find(struct dir_info *d, void *p)
 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
 	    d->canary1 != ~d->canary2)
 		wrterror(d, "internal struct corrupt");
+	if (d->r == NULL)
+		return NULL;
 	p = MASK_POINTER(p);
 	index = hash(p) & mask;
 	r = d->r[index].p;
@@ -1300,18 +1279,50 @@ _malloc_init(int from_rthreads)
 		_MALLOC_UNLOCK(1);
 		return;
 	}
-	if (!mopts.malloc_canary)
+	if (!mopts.malloc_canary) {
+		char *p;
+		size_t sz, d_avail;
+
 		omalloc_init();
+		/*
+		 * Allocate dir_infos with a guard page on either side. Also
+		 * randomise offset inside the page at which the dir_infos
+		 * lay (subject to alignment by 1 << MALLOC_MINSHIFT)
+		 */
+		sz = mopts.malloc_mutexes * sizeof(*d) + 2 * MALLOC_PAGESIZE;
+		if ((p = MMAPNONE(sz, 0)) == MAP_FAILED)
+			wrterror(NULL, "malloc_init mmap1 failed");
+		if (mprotect(p + MALLOC_PAGESIZE, mopts.malloc_mutexes * sizeof(*d),
+		    PROT_READ | PROT_WRITE))
+			wrterror(NULL, "malloc_init mprotect1 failed");
+		if (mimmutable(p, sz))
+			wrterror(NULL, "malloc_init mimmutable1 failed");
+		d_avail = (((mopts.malloc_mutexes * sizeof(*d) + MALLOC_PAGEMASK) &
+		    ~MALLOC_PAGEMASK) - (mopts.malloc_mutexes * sizeof(*d))) >>
+		    MALLOC_MINSHIFT;
+		d = (struct dir_info *)(p + MALLOC_PAGESIZE +
+		    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));
+		STATS_ADD(d[1].malloc_used, sz);
+		for (i = 0; i < mopts.malloc_mutexes; i++)
+			mopts.malloc_pool[i] = &d[i];
+		mopts.internal_funcs = 1;
+		if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0) {
+			if (mprotect(&malloc_readonly, sizeof(malloc_readonly),
+			    PROT_READ))
+				wrterror(NULL, "malloc_init mprotect r/o failed");
+			if (mimmutable(&malloc_readonly, sizeof(malloc_readonly)))
+				wrterror(NULL, "malloc_init mimmutable r/o failed");
+		}
+	}
 
 	nmutexes = from_rthreads ? mopts.malloc_mutexes : 2;
-	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
-		mprotect(&malloc_readonly, sizeof(malloc_readonly),
-		    PROT_READ | PROT_WRITE);
 	for (i = 0; i < nmutexes; i++) {
-		if (mopts.malloc_pool[i])
+		d = mopts.malloc_pool[i];
+		d->malloc_mt = from_rthreads;
+		if (d->canary1 == ~d->canary2)
 			continue;
 		if (i == 0) {
-			omalloc_poolinit(&d, MAP_CONCEAL);
+			omalloc_poolinit(d, MAP_CONCEAL);
 			d->malloc_junk = 2;
 			d->bigcache_size = 0;
 			for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++)
@@ -1319,7 +1330,7 @@ _malloc_init(int from_rthreads)
 		} else {
 			size_t sz = 0;
 
-			omalloc_poolinit(&d, 0);
+			omalloc_poolinit(d, 0);
 			d->malloc_junk = mopts.def_malloc_junk;
 			d->bigcache_size = mopts.def_maxcache;
 			for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++) {
@@ -1332,7 +1343,9 @@ _malloc_init(int from_rthreads)
 				void *p = MMAP(sz, 0);
 				if (p == MAP_FAILED)
 					wrterror(NULL,
-					    "malloc init mmap failed");
+					    "malloc_init mmap2 failed");
+				if (mimmutable(p, sz))
+					wrterror(NULL, "malloc_init mimmutable2 failed");
 				for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++) {
 					d->smallcache[j].pages = p;
 					p = (char *)p + d->smallcache[j].max *
@@ -1342,20 +1355,8 @@ _malloc_init(int from_rthreads)
 			}
 		}
 		d->mutex = i;
-		mopts.malloc_pool[i] = d;
 	}
 
-	if (from_rthreads)
-		mopts.malloc_mt = 1;
-	else
-		mopts.internal_funcs = 1;
-
-	/*
-	 * Options have been set and will never be reset.
-	 * Prevent further tampering with them.
-	 */
-	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
-		mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);
 	_MALLOC_UNLOCK(1);
 }
 DEF_STRONG(_malloc_init);
@@ -1420,7 +1421,7 @@ findpool(void *p, struct dir_info *argpool, struct dir_info **foundpool,
 	if (r == NULL) {
 		u_int i, nmutexes;
 
-		nmutexes = mopts.malloc_mt ? mopts.malloc_mutexes : 2;
+		nmutexes = mopts.malloc_pool[1]->malloc_mt ? mopts.malloc_mutexes : 2;
 		STATS_INC(pool->other_pool);
 		for (i = 1; i < nmutexes; i++) {
 			u_int j = (argpool->mutex + i) & (nmutexes - 1);
@@ -2332,7 +2333,7 @@ malloc_dump1(int fd, int poolno, struct dir_info *d)
 	dprintf(fd, "Malloc dir of %s pool %d at %p\n", __progname, poolno, d);
 	if (d == NULL)
 		return;
-	dprintf(fd, "J=%d Fl=%x\n", d->malloc_junk, d->mmap_flag);
+	dprintf(fd, "MT=%d J=%d Fl=%x\n", d->malloc_mt, d->malloc_junk, d->mmap_flag);
 	dprintf(fd, "Region slots free %zu/%zu\n",
 	    d->regions_free, d->regions_total);
 	dprintf(fd, "Finds %zu/%zu\n", d->finds, d->find_collisions);
@@ -2421,9 +2422,9 @@ malloc_exit(void)
 	if (fd != -1) {
 		dprintf(fd, "******** Start dump %s *******\n", __progname);
 		dprintf(fd,
-		    "MT=%d M=%u I=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u "
+		    "M=%u I=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u "
 		    "G=%zu\n",
-		    mopts.malloc_mt, mopts.malloc_mutexes,
+		    mopts.malloc_mutexes,
 		    mopts.internal_funcs, mopts.malloc_freecheck,
 		    mopts.malloc_freeunmap, mopts.def_malloc_junk,
 		    mopts.malloc_realloc, mopts.malloc_xmalloc,
```
