diff options
Diffstat (limited to 'src/lj_alloc.c')
-rw-r--r-- | src/lj_alloc.c | 83 |
1 file changed, 38 insertions, 45 deletions
diff --git a/src/lj_alloc.c b/src/lj_alloc.c index 70ca1e3b..bf2ae847 100644 --- a/src/lj_alloc.c +++ b/src/lj_alloc.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "lj_def.h" | 31 | #include "lj_def.h" |
32 | #include "lj_arch.h" | 32 | #include "lj_arch.h" |
33 | #include "lj_alloc.h" | 33 | #include "lj_alloc.h" |
34 | #include "lj_prng.h" | ||
34 | 35 | ||
35 | #ifndef LUAJIT_USE_SYSMALLOC | 36 | #ifndef LUAJIT_USE_SYSMALLOC |
36 | 37 | ||
@@ -140,7 +141,7 @@ static void init_mmap(void) | |||
140 | #define INIT_MMAP() init_mmap() | 141 | #define INIT_MMAP() init_mmap() |
141 | 142 | ||
142 | /* Win64 32 bit MMAP via NtAllocateVirtualMemory. */ | 143 | /* Win64 32 bit MMAP via NtAllocateVirtualMemory. */ |
143 | static void *CALL_MMAP(size_t size) | 144 | static void *mmap_plain(size_t size) |
144 | { | 145 | { |
145 | DWORD olderr = GetLastError(); | 146 | DWORD olderr = GetLastError(); |
146 | void *ptr = NULL; | 147 | void *ptr = NULL; |
@@ -164,7 +165,7 @@ static void *direct_mmap(size_t size) | |||
164 | #else | 165 | #else |
165 | 166 | ||
166 | /* Win32 MMAP via VirtualAlloc */ | 167 | /* Win32 MMAP via VirtualAlloc */ |
167 | static void *CALL_MMAP(size_t size) | 168 | static void *mmap_plain(size_t size) |
168 | { | 169 | { |
169 | DWORD olderr = GetLastError(); | 170 | DWORD olderr = GetLastError(); |
170 | void *ptr = LJ_WIN_VALLOC(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); | 171 | void *ptr = LJ_WIN_VALLOC(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); |
@@ -184,7 +185,8 @@ static void *direct_mmap(size_t size) | |||
184 | 185 | ||
185 | #endif | 186 | #endif |
186 | 187 | ||
187 | #define DIRECT_MMAP(size) direct_mmap(size) | 188 | #define CALL_MMAP(prng, size) mmap_plain(size) |
189 | #define DIRECT_MMAP(prng, size) direct_mmap(size) | ||
188 | 190 | ||
189 | /* This function supports releasing coalesced segments */ | 191 | /* This function supports releasing coalesced segments */ |
190 | static int CALL_MUNMAP(void *ptr, size_t size) | 192 | static int CALL_MUNMAP(void *ptr, size_t size) |
@@ -228,30 +230,10 @@ static int CALL_MUNMAP(void *ptr, size_t size) | |||
228 | 230 | ||
229 | #define LJ_ALLOC_MMAP_PROBE_LOWER ((uintptr_t)0x4000) | 231 | #define LJ_ALLOC_MMAP_PROBE_LOWER ((uintptr_t)0x4000) |
230 | 232 | ||
231 | /* No point in a giant ifdef mess. Just try to open /dev/urandom. | 233 | static void *mmap_probe(PRNGState *rs, size_t size) |
232 | ** It doesn't really matter if this fails, since we get some ASLR bits from | ||
233 | ** every unsuitable allocation, too. And we prefer linear allocation, anyway. | ||
234 | */ | ||
235 | #include <fcntl.h> | ||
236 | #include <unistd.h> | ||
237 | |||
238 | static uintptr_t mmap_probe_seed(void) | ||
239 | { | ||
240 | uintptr_t val; | ||
241 | int fd = open("/dev/urandom", O_RDONLY); | ||
242 | if (fd != -1) { | ||
243 | int ok = ((size_t)read(fd, &val, sizeof(val)) == sizeof(val)); | ||
244 | (void)close(fd); | ||
245 | if (ok) return val; | ||
246 | } | ||
247 | return 1; /* Punt. */ | ||
248 | } | ||
249 | |||
250 | static void *mmap_probe(size_t size) | ||
251 | { | 234 | { |
252 | /* Hint for next allocation. Doesn't need to be thread-safe. */ | 235 | /* Hint for next allocation. Doesn't need to be thread-safe. */ |
253 | static uintptr_t hint_addr = 0; | 236 | static uintptr_t hint_addr = 0; |
254 | static uintptr_t hint_prng = 0; | ||
255 | int olderr = errno; | 237 | int olderr = errno; |
256 | int retry; | 238 | int retry; |
257 | for (retry = 0; retry < LJ_ALLOC_MMAP_PROBE_MAX; retry++) { | 239 | for (retry = 0; retry < LJ_ALLOC_MMAP_PROBE_MAX; retry++) { |
@@ -283,15 +265,8 @@ static void *mmap_probe(size_t size) | |||
283 | } | 265 | } |
284 | } | 266 | } |
285 | /* Finally, try pseudo-random probing. */ | 267 | /* Finally, try pseudo-random probing. */ |
286 | if (LJ_UNLIKELY(hint_prng == 0)) { | 268 | do { |
287 | hint_prng = mmap_probe_seed(); | 269 | hint_addr = lj_prng_u64(rs) & (((uintptr_t)1<<LJ_ALLOC_MBITS)-LJ_PAGESIZE); |
288 | } | ||
289 | /* The unsuitable address we got has some ASLR PRNG bits. */ | ||
290 | hint_addr ^= addr & ~((uintptr_t)(LJ_PAGESIZE-1)); | ||
291 | do { /* The PRNG itself is very weak, but see above. */ | ||
292 | hint_prng = hint_prng * 1103515245 + 12345; | ||
293 | hint_addr ^= hint_prng * (uintptr_t)LJ_PAGESIZE; | ||
294 | hint_addr &= (((uintptr_t)1 << LJ_ALLOC_MBITS)-1); | ||
295 | } while (hint_addr < LJ_ALLOC_MMAP_PROBE_LOWER); | 270 | } while (hint_addr < LJ_ALLOC_MMAP_PROBE_LOWER); |
296 | } | 271 | } |
297 | errno = olderr; | 272 | errno = olderr; |
@@ -308,12 +283,16 @@ static void *mmap_probe(size_t size) | |||
308 | #define LJ_ALLOC_MMAP32_START ((uintptr_t)0) | 283 | #define LJ_ALLOC_MMAP32_START ((uintptr_t)0) |
309 | #endif | 284 | #endif |
310 | 285 | ||
286 | #if LJ_ALLOC_MMAP_PROBE | ||
287 | static void *mmap_map32(PRNGState *rs, size_t size) | ||
288 | #else | ||
311 | static void *mmap_map32(size_t size) | 289 | static void *mmap_map32(size_t size) |
290 | #endif | ||
312 | { | 291 | { |
313 | #if LJ_ALLOC_MMAP_PROBE | 292 | #if LJ_ALLOC_MMAP_PROBE |
314 | static int fallback = 0; | 293 | static int fallback = 0; |
315 | if (fallback) | 294 | if (fallback) |
316 | return mmap_probe(size); | 295 | return mmap_probe(rs, size); |
317 | #endif | 296 | #endif |
318 | { | 297 | { |
319 | int olderr = errno; | 298 | int olderr = errno; |
@@ -323,7 +302,7 @@ static void *mmap_map32(size_t size) | |||
323 | #if LJ_ALLOC_MMAP_PROBE | 302 | #if LJ_ALLOC_MMAP_PROBE |
324 | if (ptr == MFAIL) { | 303 | if (ptr == MFAIL) { |
325 | fallback = 1; | 304 | fallback = 1; |
326 | return mmap_probe(size); | 305 | return mmap_probe(rs, size); |
327 | } | 306 | } |
328 | #endif | 307 | #endif |
329 | return ptr; | 308 | return ptr; |
@@ -333,17 +312,22 @@ static void *mmap_map32(size_t size) | |||
333 | #endif | 312 | #endif |
334 | 313 | ||
335 | #if LJ_ALLOC_MMAP32 | 314 | #if LJ_ALLOC_MMAP32 |
336 | #define CALL_MMAP(size) mmap_map32(size) | 315 | #if LJ_ALLOC_MMAP_PROBE |
316 | #define CALL_MMAP(prng, size) mmap_map32(prng, size) | ||
317 | #else | ||
318 | #define CALL_MMAP(prng, size) mmap_map32(size) | ||
319 | #endif | ||
337 | #elif LJ_ALLOC_MMAP_PROBE | 320 | #elif LJ_ALLOC_MMAP_PROBE |
338 | #define CALL_MMAP(size) mmap_probe(size) | 321 | #define CALL_MMAP(prng, size) mmap_probe(prng, size) |
339 | #else | 322 | #else |
340 | static void *CALL_MMAP(size_t size) | 323 | static void *mmap_plain(size_t size) |
341 | { | 324 | { |
342 | int olderr = errno; | 325 | int olderr = errno; |
343 | void *ptr = mmap(NULL, size, MMAP_PROT, MMAP_FLAGS, -1, 0); | 326 | void *ptr = mmap(NULL, size, MMAP_PROT, MMAP_FLAGS, -1, 0); |
344 | errno = olderr; | 327 | errno = olderr; |
345 | return ptr; | 328 | return ptr; |
346 | } | 329 | } |
330 | #define CALL_MMAP(prng, size) mmap_plain(size) | ||
347 | #endif | 331 | #endif |
348 | 332 | ||
349 | #if LJ_64 && !LJ_GC64 && ((defined(__FreeBSD__) && __FreeBSD__ < 10) || defined(__FreeBSD_kernel__)) && !LJ_TARGET_PS4 | 333 | #if LJ_64 && !LJ_GC64 && ((defined(__FreeBSD__) && __FreeBSD__ < 10) || defined(__FreeBSD_kernel__)) && !LJ_TARGET_PS4 |
@@ -396,7 +380,7 @@ static void *CALL_MREMAP_(void *ptr, size_t osz, size_t nsz, int flags) | |||
396 | #endif | 380 | #endif |
397 | 381 | ||
398 | #ifndef DIRECT_MMAP | 382 | #ifndef DIRECT_MMAP |
399 | #define DIRECT_MMAP(s) CALL_MMAP(s) | 383 | #define DIRECT_MMAP(prng, s) CALL_MMAP(prng, s) |
400 | #endif | 384 | #endif |
401 | 385 | ||
402 | #ifndef CALL_MREMAP | 386 | #ifndef CALL_MREMAP |
@@ -555,6 +539,7 @@ struct malloc_state { | |||
555 | mchunkptr smallbins[(NSMALLBINS+1)*2]; | 539 | mchunkptr smallbins[(NSMALLBINS+1)*2]; |
556 | tbinptr treebins[NTREEBINS]; | 540 | tbinptr treebins[NTREEBINS]; |
557 | msegment seg; | 541 | msegment seg; |
542 | PRNGState *prng; | ||
558 | }; | 543 | }; |
559 | 544 | ||
560 | typedef struct malloc_state *mstate; | 545 | typedef struct malloc_state *mstate; |
@@ -837,11 +822,11 @@ static int has_segment_link(mstate m, msegmentptr ss) | |||
837 | 822 | ||
838 | /* ----------------------- Direct-mmapping chunks ----------------------- */ | 823 | /* ----------------------- Direct-mmapping chunks ----------------------- */ |
839 | 824 | ||
840 | static void *direct_alloc(size_t nb) | 825 | static void *direct_alloc(mstate m, size_t nb) |
841 | { | 826 | { |
842 | size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); | 827 | size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); |
843 | if (LJ_LIKELY(mmsize > nb)) { /* Check for wrap around 0 */ | 828 | if (LJ_LIKELY(mmsize > nb)) { /* Check for wrap around 0 */ |
844 | char *mm = (char *)(DIRECT_MMAP(mmsize)); | 829 | char *mm = (char *)(DIRECT_MMAP(m->prng, mmsize)); |
845 | if (mm != CMFAIL) { | 830 | if (mm != CMFAIL) { |
846 | size_t offset = align_offset(chunk2mem(mm)); | 831 | size_t offset = align_offset(chunk2mem(mm)); |
847 | size_t psize = mmsize - offset - DIRECT_FOOT_PAD; | 832 | size_t psize = mmsize - offset - DIRECT_FOOT_PAD; |
@@ -853,6 +838,7 @@ static void *direct_alloc(size_t nb) | |||
853 | return chunk2mem(p); | 838 | return chunk2mem(p); |
854 | } | 839 | } |
855 | } | 840 | } |
841 | UNUSED(m); | ||
856 | return NULL; | 842 | return NULL; |
857 | } | 843 | } |
858 | 844 | ||
@@ -1001,7 +987,7 @@ static void *alloc_sys(mstate m, size_t nb) | |||
1001 | 987 | ||
1002 | /* Directly map large chunks */ | 988 | /* Directly map large chunks */ |
1003 | if (LJ_UNLIKELY(nb >= DEFAULT_MMAP_THRESHOLD)) { | 989 | if (LJ_UNLIKELY(nb >= DEFAULT_MMAP_THRESHOLD)) { |
1004 | void *mem = direct_alloc(nb); | 990 | void *mem = direct_alloc(m, nb); |
1005 | if (mem != 0) | 991 | if (mem != 0) |
1006 | return mem; | 992 | return mem; |
1007 | } | 993 | } |
@@ -1010,7 +996,7 @@ static void *alloc_sys(mstate m, size_t nb) | |||
1010 | size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE; | 996 | size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE; |
1011 | size_t rsize = granularity_align(req); | 997 | size_t rsize = granularity_align(req); |
1012 | if (LJ_LIKELY(rsize > nb)) { /* Fail if wraps around zero */ | 998 | if (LJ_LIKELY(rsize > nb)) { /* Fail if wraps around zero */ |
1013 | char *mp = (char *)(CALL_MMAP(rsize)); | 999 | char *mp = (char *)(CALL_MMAP(m->prng, rsize)); |
1014 | if (mp != CMFAIL) { | 1000 | if (mp != CMFAIL) { |
1015 | tbase = mp; | 1001 | tbase = mp; |
1016 | tsize = rsize; | 1002 | tsize = rsize; |
@@ -1237,12 +1223,13 @@ static void *tmalloc_small(mstate m, size_t nb) | |||
1237 | 1223 | ||
1238 | /* ----------------------------------------------------------------------- */ | 1224 | /* ----------------------------------------------------------------------- */ |
1239 | 1225 | ||
1240 | void *lj_alloc_create(void) | 1226 | void *lj_alloc_create(PRNGState *rs) |
1241 | { | 1227 | { |
1242 | size_t tsize = DEFAULT_GRANULARITY; | 1228 | size_t tsize = DEFAULT_GRANULARITY; |
1243 | char *tbase; | 1229 | char *tbase; |
1244 | INIT_MMAP(); | 1230 | INIT_MMAP(); |
1245 | tbase = (char *)(CALL_MMAP(tsize)); | 1231 | UNUSED(rs); |
1232 | tbase = (char *)(CALL_MMAP(rs, tsize)); | ||
1246 | if (tbase != CMFAIL) { | 1233 | if (tbase != CMFAIL) { |
1247 | size_t msize = pad_request(sizeof(struct malloc_state)); | 1234 | size_t msize = pad_request(sizeof(struct malloc_state)); |
1248 | mchunkptr mn; | 1235 | mchunkptr mn; |
@@ -1261,6 +1248,12 @@ void *lj_alloc_create(void) | |||
1261 | return NULL; | 1248 | return NULL; |
1262 | } | 1249 | } |
1263 | 1250 | ||
1251 | void lj_alloc_setprng(void *msp, PRNGState *rs) | ||
1252 | { | ||
1253 | mstate ms = (mstate)msp; | ||
1254 | ms->prng = rs; | ||
1255 | } | ||
1256 | |||
1264 | void lj_alloc_destroy(void *msp) | 1257 | void lj_alloc_destroy(void *msp) |
1265 | { | 1258 | { |
1266 | mstate ms = (mstate)msp; | 1259 | mstate ms = (mstate)msp; |