Diffstat (limited to 'src/lj_mcode.c')

 src/lj_mcode.c | 57 ++++++++++++++++++++-------------------------------------
 1 file changed, 20 insertions(+), 37 deletions(-)
diff --git a/src/lj_mcode.c b/src/lj_mcode.c
index 02ade1d4..a5153b25 100644
--- a/src/lj_mcode.c
+++ b/src/lj_mcode.c
@@ -14,6 +14,7 @@
 #include "lj_mcode.h"
 #include "lj_trace.h"
 #include "lj_dispatch.h"
+#include "lj_prng.h"
 #endif
 #if LJ_HASJIT || LJ_HASFFI
 #include "lj_vm.h"
@@ -44,7 +45,7 @@ void lj_mcode_sync(void *start, void *end)
   sys_icache_invalidate(start, (char *)end-(char *)start);
 #elif LJ_TARGET_PPC
   lj_vm_cachesync(start, end);
-#elif defined(__GNUC__)
+#elif defined(__GNUC__) || defined(__clang__)
   __clear_cache(start, end);
 #else
 #error "Missing builtin to flush instruction cache"
@@ -66,8 +67,8 @@ void lj_mcode_sync(void *start, void *end)
 
 static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, DWORD prot)
 {
-  void *p = VirtualAlloc((void *)hint, sz,
+  void *p = LJ_WIN_VALLOC((void *)hint, sz,
                          MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, prot);
   if (!p && !hint)
     lj_trace_err(J, LJ_TRERR_MCODEAL);
   return p;
@@ -82,7 +83,7 @@ static void mcode_free(jit_State *J, void *p, size_t sz)
 static int mcode_setprot(void *p, size_t sz, DWORD prot)
 {
   DWORD oprot;
-  return !VirtualProtect(p, sz, prot, &oprot);
+  return !LJ_WIN_VPROTECT(p, sz, prot, &oprot);
 }
 
 #elif LJ_TARGET_POSIX
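Note: the two hunks above swap the raw VirtualAlloc/VirtualProtect calls for the LJ_WIN_VALLOC and LJ_WIN_VPROTECT macros, which are defined outside this file. A minimal sketch of how such wrappers could look, assuming they exist to route UWP builds (see the LJ_TARGET_UWP check further down) to the *FromApp variants; the real definitions may differ:

    /* Hypothetical sketch only -- not the actual LuaJIT definitions. */
    #include <windows.h>

    #if LJ_TARGET_UWP
    #define LJ_WIN_VALLOC(addr, sz, type, prot) \
      VirtualAllocFromApp((addr), (sz), (type), (prot))
    #define LJ_WIN_VPROTECT(addr, sz, prot, oprot) \
      VirtualProtectFromApp((addr), (sz), (prot), (oprot))
    #else
    #define LJ_WIN_VALLOC(addr, sz, type, prot) \
      VirtualAlloc((addr), (sz), (type), (prot))
    #define LJ_WIN_VPROTECT(addr, sz, prot, oprot) \
      VirtualProtect((addr), (sz), (prot), (oprot))
    #endif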
@@ -118,52 +119,34 @@ static int mcode_setprot(void *p, size_t sz, int prot)
   return mprotect(p, sz, prot);
 }
 
-#elif LJ_64
-
-#error "Missing OS support for explicit placement of executable memory"
-
 #else
 
-/* Fallback allocator. This will fail if memory is not executable by default. */
-#define LUAJIT_UNPROTECT_MCODE
-#define MCPROT_RW	0
-#define MCPROT_RX	0
-#define MCPROT_RWX	0
-
-static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
-{
-  UNUSED(hint); UNUSED(prot);
-  return lj_mem_new(J->L, sz);
-}
-
-static void mcode_free(jit_State *J, void *p, size_t sz)
-{
-  lj_mem_free(J2G(J), p, sz);
-}
+#error "Missing OS support for explicit placement of executable memory"
 
 #endif
 
 /* -- MCode area protection ----------------------------------------------- */
 
-/* Define this ONLY if page protection twiddling becomes a bottleneck. */
-#ifdef LUAJIT_UNPROTECT_MCODE
+#if LUAJIT_SECURITY_MCODE == 0
 
-/* It's generally considered to be a potential security risk to have
+/* Define this ONLY if page protection twiddling becomes a bottleneck.
+**
+** It's generally considered to be a potential security risk to have
 ** pages with simultaneous write *and* execute access in a process.
 **
 ** Do not even think about using this mode for server processes or
-** apps handling untrusted external data (such as a browser).
+** apps handling untrusted external data.
 **
 ** The security risk is not in LuaJIT itself -- but if an adversary finds
-** any *other* flaw in your C application logic, then any RWX memory page
-** simplifies writing an exploit considerably.
+** any *other* flaw in your C application logic, then any RWX memory pages
+** simplify writing an exploit considerably.
 */
 #define MCPROT_GEN	MCPROT_RWX
 #define MCPROT_RUN	MCPROT_RWX
 
 static void mcode_protect(jit_State *J, int prot)
 {
-  UNUSED(J); UNUSED(prot);
+  UNUSED(J); UNUSED(prot); UNUSED(mcode_setprot);
 }
 
 #else
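Note: the hunk above retires the LUAJIT_UNPROTECT_MCODE define in favour of the LUAJIT_SECURITY_MCODE build option: 0 keeps machine-code pages RWX, any other value flips each area between writable (while emitting code) and executable (while running it). A self-contained sketch of that write-xor-execute pattern on POSIX, for illustration only, not LuaJIT's actual allocator:

    #include <stdint.h>
    #include <string.h>
    #include <sys/mman.h>

    /* Map a page RW, copy generated code into it, then flip it to RX. */
    static void *emit_and_seal(const uint8_t *code, size_t len)
    {
      size_t sz = 4096;
      void *p;
      if (len > sz) return NULL;
      p = mmap(NULL, sz, PROT_READ|PROT_WRITE,
               MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED) return NULL;
      memcpy(p, code, len);                        /* generate while writable */
      if (mprotect(p, sz, PROT_READ|PROT_EXEC)) {  /* seal before executing */
        munmap(p, sz);
        return NULL;
      }
      return p;
    }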
@@ -221,8 +204,8 @@ static void *mcode_alloc(jit_State *J, size_t sz)
   */
 #if LJ_TARGET_MIPS
   /* Use the middle of the 256MB-aligned region. */
-  uintptr_t target = ((uintptr_t)(void *)lj_vm_exit_handler & 0xf0000000u) +
-                     0x08000000u;
+  uintptr_t target = ((uintptr_t)(void *)lj_vm_exit_handler &
+                      ~(uintptr_t)0x0fffffffu) + 0x08000000u;
 #else
   uintptr_t target = (uintptr_t)(void *)lj_vm_exit_handler & ~(uintptr_t)0xffff;
 #endif
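Note: the old MIPS expression masked the exit handler address with the 32-bit constant 0xf0000000u, which discards the upper bits of a 64-bit address; masking with ~(uintptr_t)0x0fffffffu keeps them, so the target stays inside the handler's own 256MB-aligned region on MIPS64 as well. A tiny worked example with a made-up handler address:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
      uintptr_t h = (uintptr_t)0xffdeadbeefULL;  /* hypothetical 64-bit address */
      uintptr_t t_old = (h & 0xf0000000u) + 0x08000000u;
      uintptr_t t_new = (h & ~(uintptr_t)0x0fffffffu) + 0x08000000u;
      /* Prints 0xd8000000 0xffd8000000: the old mask dropped bits 32..63. */
      printf("%#" PRIxPTR " %#" PRIxPTR "\n", t_old, t_new);
      return 0;
    }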
@@ -242,7 +225,7 @@ static void *mcode_alloc(jit_State *J, size_t sz)
     }
     /* Next try probing 64K-aligned pseudo-random addresses. */
     do {
-      hint = LJ_PRNG_BITS(J, LJ_TARGET_JUMPRANGE-16) << 16;
+      hint = lj_prng_u64(&J2G(J)->prng) & ((1u<<LJ_TARGET_JUMPRANGE)-0x10000);
     } while (!(hint + sz < range+range));
     hint = target + hint - range;
   }
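Note: the probe hint now comes from the global PRNG state declared in the newly included lj_prng.h. Masking a 64-bit random value with (1u<<LJ_TARGET_JUMPRANGE)-0x10000 zeroes the low 16 bits and all bits at or above the jump range, giving the same 64K-aligned hint the old LJ_PRNG_BITS shift produced. A standalone sketch of the masking, with a stand-in generator rather than lj_prng_u64:

    #include <stdint.h>

    #define JUMPRANGE 31  /* stand-in for LJ_TARGET_JUMPRANGE */

    /* Stand-in for lj_prng_u64(): any 64-bit generator works for the sketch. */
    static uint64_t prng_u64_stub(uint64_t *state)
    {
      *state = *state * 6364136223846793005ULL + 1442695040888963407ULL;
      return *state;
    }

    /* 64K-aligned pseudo-random offset below 2^JUMPRANGE, as in the new hunk. */
    static uintptr_t random_hint(uint64_t *state)
    {
      return (uintptr_t)(prng_u64_stub(state) & ((1u << JUMPRANGE) - 0x10000));
    }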
@@ -255,7 +238,7 @@ static void *mcode_alloc(jit_State *J, size_t sz)
 /* All memory addresses are reachable by relative jumps. */
 static void *mcode_alloc(jit_State *J, size_t sz)
 {
-#ifdef __OpenBSD__
+#if defined(__OpenBSD__) || LJ_TARGET_UWP
   /* Allow better executable memory allocation for OpenBSD W^X mode. */
   void *p = mcode_alloc_at(J, 0, sz, MCPROT_RUN);
   if (p && mcode_setprot(p, sz, MCPROT_GEN)) {
@@ -331,7 +314,7 @@ void lj_mcode_abort(jit_State *J)
 /* Set/reset protection to allow patching of MCode areas. */
 MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish)
 {
-#ifdef LUAJIT_UNPROTECT_MCODE
+#if LUAJIT_SECURITY_MCODE == 0
   UNUSED(J); UNUSED(ptr); UNUSED(finish);
   return NULL;
 #else
@@ -351,7 +334,7 @@ MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish)
   /* Otherwise search through the list of MCode areas. */
   for (;;) {
     mc = ((MCLink *)mc)->next;
-    lua_assert(mc != NULL);
+    lj_assertJ(mc != NULL, "broken MCode area chain");
     if (ptr >= mc && ptr < (MCode *)((char *)mc + ((MCLink *)mc)->size)) {
       if (LJ_UNLIKELY(mcode_setprot(mc, ((MCLink *)mc)->size, MCPROT_GEN)))
         mcode_protfail(J);
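Note: plain lua_assert calls are replaced by lj_assertJ, which carries the jit_State context and a human-readable message. A rough sketch of the shape such a macro could take, purely as an assumption about the wiring; the real definition lives in LuaJIT's headers and may differ:

    /* Hypothetical sketch only. */
    #ifdef LUA_USE_ASSERT
    #define lj_assertJ(c, ...) \
      do { \
        if (!(c)) \
          lj_assert_fail(J2G(J), __FILE__, __LINE__, __func__, __VA_ARGS__); \
      } while (0)
    #else
    #define lj_assertJ(c, ...) ((void)0)
    #endif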