Diffstat (limited to '')
-rw-r--r-- | CHANGES | 751 | ||||
-rw-r--r-- | Makefile | 2 | ||||
-rw-r--r-- | deep_test/deep_test.args.json | 16 | ||||
-rw-r--r-- | deep_test/deep_test.c | 273 | ||||
-rw-r--r-- | deep_test/deep_test.cpp | 259 | ||||
-rw-r--r-- | deep_test/deep_test.vcxproj | 44 | ||||
-rw-r--r-- | deep_test/deep_test.vcxproj.filters | 14 | ||||
-rw-r--r-- | deep_test/deep_test.vcxproj.user | 3 | ||||
-rw-r--r-- | docs/index.html | 358 | ||||
-rw-r--r-- | lanes-4.0.0-0.rockspec (renamed from lanes-3.17.0-0.rockspec) | 24 | ||||
-rw-r--r-- | make-vc.cmd | 4 | ||||
-rw-r--r-- | src/Makefile | 2 | ||||
-rw-r--r-- | src/cancel.c | 302 | ||||
-rw-r--r-- | src/cancel.cpp | 282 | ||||
-rw-r--r-- | src/cancel.h | 67 | ||||
-rw-r--r-- | src/compat.cpp (renamed from src/compat.c) | 25 | ||||
-rw-r--r-- | src/compat.h | 38 | ||||
-rw-r--r-- | src/deep.cpp (renamed from src/deep.c) | 326 | ||||
-rw-r--r-- | src/deep.h | 73 | ||||
-rw-r--r-- | src/keeper.c | 862 | ||||
-rw-r--r-- | src/keeper.cpp | 884 | ||||
-rw-r--r-- | src/keeper.h | 82 | ||||
-rw-r--r-- | src/lanes.c | 2147 | ||||
-rw-r--r-- | src/lanes.cpp | 2054 | ||||
-rw-r--r-- | src/lanes.h | 26 | ||||
-rw-r--r-- | src/lanes.lua | 7 | ||||
-rw-r--r-- | src/lanes_private.h | 122 | ||||
-rw-r--r-- | src/lanesconf.h | 17 | ||||
-rw-r--r-- | src/linda.c | 948 | ||||
-rw-r--r-- | src/linda.cpp | 1025 | ||||
-rw-r--r-- | src/macros_and_utils.h | 220 | ||||
-rw-r--r-- | src/platform.h | 5 | ||||
-rw-r--r-- | src/state.cpp (renamed from src/state.c) | 253 | ||||
-rw-r--r-- | src/state.h | 23 | ||||
-rw-r--r-- | src/threading.c | 1041 | ||||
-rw-r--r-- | src/threading.cpp | 448 | ||||
-rw-r--r-- | src/threading.h | 278 | ||||
-rw-r--r-- | src/threading_osx.h | 15 | ||||
-rw-r--r-- | src/tools.cpp (renamed from src/tools.c) | 1219 | ||||
-rw-r--r-- | src/tools.h | 56 | ||||
-rw-r--r-- | src/uniquekey.h | 83 | ||||
-rw-r--r-- | src/universe.c | 75 | ||||
-rw-r--r-- | src/universe.cpp | 106 | ||||
-rw-r--r-- | src/universe.h | 177 | ||||
-rw-r--r-- | tests/cancel.lua | 17 | ||||
-rw-r--r-- | tests/keeper.lua | 2 | ||||
-rw-r--r-- | tests/perftest.lua | 74 |
47 files changed, 6940 insertions, 8189 deletions
@@ -1,753 +1,6 @@ | |||
1 | CHANGES: | 1 | CHANGES: |
2 | 2 | ||
3 | CHANGE 160: BGe 11-Apr-24 | 3 | CHANGE 1: BGe 9-Apr-24 |
4 | * add manual control over GC behavior in keeper states | 4 | * reset changelog, next entry will list API changes since last C-implementation. |
5 | * update a bunch of test scripts | ||
6 | * minor internal fixes | ||
7 | * probably the last C implementation update, unless a critical bug creeps up | ||
8 | * internal version bumped to 3.17.0 | ||
9 | |||
10 | CHANGE 159: BGe 19-Mar-24 | ||
11 | * fix small internal issue when hitting the timeout on thread kill during thread_cancel() in the pthread implementation | ||
12 | |||
13 | CHANGE 158: BGe 22-Feb-24 | ||
14 | * naive luajit detection in PUC-Lua-based builds, and vice-versa to detect mismatches | ||
15 | * internal version bumped to 3.16.3 | ||
16 | |||
17 | CHANGE 157: Mitalie 17-Aug-23 | ||
18 | * Prevent crash on linux as non-root | ||
19 | * internal version bumped to 3.16.2 | ||
20 | |||
21 | CHANGE 156: BGe 9-Aug-23 | ||
22 | * new configuration option .internal_allocator to help LuaJIT users. | ||
23 | * internal version bumped to 3.16.1 | ||
24 | |||
25 | CHANGE 155: BGe 28-Jul-23 | ||
26 | * tweaks to Linux thread priority management: do nothing if not super-user; if super-user, do nothing when no priority is provided (instead of trying to force a priority when LINUX_SCHED_RR is defined). | ||
27 | |||
28 | CHANGE 154: eligovision 1-Mar-22 | ||
29 | * Fix 3-parametrized __lanesclone | ||
30 | |||
31 | CHANGE 153: BGe 17-Feb-22 | ||
32 | * NEVER use allocator obtained from lua_getallocf to allocate stuff manually when compiling for LuaJIT | ||
33 | |||
34 | CHANGE 152: BGe 7-Feb-22 | ||
35 | * bumped version to 3.16.0 | ||
36 | * __lanesclone is now called only once with 3 parameters dest, source, size -> BREAKS CUSTOM DEEP USERDATA API | ||
37 | |||
38 | CHANGE 151: BGe 7-Feb-22 | ||
39 | * bumped version to 3.15.2 | ||
40 | * Lanes no longer relies on malloc/free for internal allocations, but uses the primary alloc function from the master Lua state | ||
41 | |||
42 | CHANGE 150: BGe 22-Sep-21 | ||
43 | * fix require() wrapper to return all values returned by original require() | ||
44 | |||
45 | CHANGE 149: BGe 8-Jul-21 | ||
46 | * bumped version to 3.15.1 | ||
47 | * fix function transfer with lua_dump for Lua 5.4 failing for functions big enough to necessitate a buffer reallocation | ||
48 | |||
49 | CHANGE 148: BGe 23-Jun-21 | ||
50 | * __lanesclone now receives the original as light userdata the first time it is called -> BREAKS CUSTOM DEEP USERDATA API | ||
51 | |||
52 | CHANGE 147: BGe 16-Jun-21 | ||
53 | * changed lanes.threads() output so that several lanes with the same name don't clobber each other in the result table -> BREAKS API | ||
54 | * bumped version to 3.15 because of the API change | ||
55 | |||
56 | CHANGE 146: BGe 26-Apr-19 | ||
57 | * lane:cancel() rework (see doc). | ||
58 | * opt.cancelstep is gone, hook is installed by lane:cancel() if requested | ||
59 | |||
60 | CHANGE 145: BGe 28-Nov-18 | ||
61 | * more code refactoring | ||
62 | * don't test __lanesignore for POD types (-> slightly faster when transferring lots of data) | ||
63 | |||
64 | CHANGE 144: BGe 28-Nov-18 | ||
65 | * some code refactoring | ||
66 | |||
67 | CHANGE 143: BGe 27-Nov-18 | ||
68 | * Lua 5.4 support | ||
69 | * __lanesclone and lanes.nameof support userdata uservalue(s) | ||
70 | |||
71 | CHANGE 142: BGe 26-Nov-18 | ||
72 | * Version is available in public header | ||
73 | |||
74 | CHANGE 141: BGe 25-Nov-18 | ||
75 | * protect_allocator configure option is gone, long live allocator (more embedders-friendly) | ||
76 | |||
77 | CHANGE 140: BGe 22-Nov-18 | ||
78 | * Raise an error instead of crashing when attempting to transfer a non-deep full userdata | ||
79 | |||
80 | CHANGE 139: BGe 21-Nov-18 | ||
81 | * more DEBUGSPEW | ||
82 | |||
83 | CHANGE 138: BGe 19-Nov-18 | ||
84 | * Registry access code utility macros | ||
85 | * CONFIG_REGKEY and LOOKUP_REGKEY are now lightuserdata instead of strings | ||
86 | * Stack checking debug macros improvements | ||
87 | |||
88 | CHANGE 137: BGe 15-Nov-18 | ||
89 | * Deep userdata must embed DeepPrelude to save an allocation (also changes Deep protocol) | ||
90 | |||
91 | CHANGE 136: BGe 15-Nov-18 | ||
92 | * split linda code in a separate file | ||
93 | * rockspec for version v3.13.0 | ||
94 | |||
95 | CHANGE 135: BGe 11-Nov-18 | ||
96 | * fix a bunch of compilation warnings | ||
97 | |||
98 | CHANGE 134: BGe 3-Dec-13 | ||
99 | * new API lanes.set_thread_affinity() | ||
100 | * set_debug_threadname implemented with win32 pthread | ||
101 | |||
102 | CHANGE 133: BGe 8-Nov-18 | ||
103 | * Make sure any linda operation that can raise an error won't ever leave a mutex unreleased | ||
104 | * lane:join() now returns nil, "timeout" in case of timeout | ||
105 | |||
106 | CHANGE 132: BGe 7-Nov-18 | ||
107 | * __lanesclone mechanism should actually work now | ||
108 | |||
109 | CHANGE 131: BGe 7-Nov-18 | ||
110 | * Fix potential crash at application shutdown when deep userdata were created before Lanes is required | ||
111 | |||
112 | CHANGE 130: BGe 2-Nov-18 | ||
113 | * always duplicate the config structure in new lanes even when no libraries are initialized by the generator | ||
114 | (fixes an internal error trying to call on_state_create in a lane without any libs loaded) | ||
115 | |||
116 | CHANGE 129: BGe 2-Nov-18 | ||
117 | * Bumped version to 3.13 | ||
118 | * fix error when autodetecting protect_allocator when running under LuaJIT | ||
119 | |||
120 | CHANGE 128: BGe 31-Oct-18 | ||
121 | * Better default value autodetection for protect_allocator setting | ||
122 | |||
123 | CHANGE 127: BGe 30-Oct-18 | ||
124 | * restrict internal light userdata constants to 47 significant bits when building against LuaJIT-x64 | ||
125 | |||
126 | CHANGE 126: BGe 29-Oct-18 | ||
127 | * Add deep user data cloning support | ||
128 | |||
129 | CHANGE 125: BGe 25-Oct-18 | ||
130 | * Fix Lanes build by reorganizing types around a bit | ||
131 | |||
132 | CHANGE 124: BGe 9-Jul-18 | ||
133 | * Fix a stack overflow when copying large tables with verbose_errors option enabled | ||
134 | * Support for integer formatting in verbose errors | ||
135 | |||
136 | CHANGE 123: BGe 2-Aug-17 | ||
137 | * added support for user-provided __gc in deep userdata | ||
138 | * more complete deep userdata sample | ||
139 | |||
140 | CHANGE 122: BGe 1-Aug-17 | ||
141 | * fix crash trying to use a deep-aware module while not requiring Lanes | ||
142 | * bumped version to 3.12 | ||
143 | |||
144 | CHANGE 121: BGe 13-Jun-17 | ||
145 | * no longer internally assert when an error message is not a string | ||
146 | |||
147 | CHANGE 120: BGe 5-Jun-17 | ||
148 | * new API function lanes.register( "name", module) to manually register a module table after it was required | ||
149 | * Transferring registered module tables will link the equivalent in the destination state instead of cloning it | ||
150 | * bumped version to 3.11 | ||
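A minimal sketch (not part of this diff) of the lanes.register() usage described in this entry; the module name 'mymod' is illustrative:

    local lanes = require "lanes".configure()
    local mymod = require "mymod"        -- hypothetical module exporting C functions
    lanes.register("mymod", mymod)       -- manual registration after the regular require()
    -- functions from mymod sent to a lane now resolve to the lane-side module
    -- instead of being cloned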
151 | |||
152 | CHANGE 119: BGe 10-May-17 | ||
153 | * Fixed some compilation warnings | ||
154 | * Improved LuaJIT support | ||
155 | |||
156 | CHANGE 118: trukanduk 21-Nov-16 | ||
157 | * bumped version to 3.10.1 | ||
158 | * objects with a metatable that contains __lanesignore are skipped during data transfers | ||
159 | |||
160 | CHANGE 117: mpeterv 21-Nov-16 | ||
161 | * Fix an implicit number-to-string conversion | ||
162 | |||
163 | CHANGE 116: BGe, mpeterv 27-Apr-15 | ||
164 | * bumped version to 3.10.0 | ||
165 | * segfault fixed in LG_lane_new | ||
166 | * Lua 5.3 support | ||
167 | |||
168 | CHANGE 115: BGe 18-Sep-14 | ||
169 | * bumped version to 3.9.7 | ||
170 | * new function lanes.sleep() | ||
171 | |||
172 | CHANGE 114: BGe 8-Jul-14 | ||
173 | * Postponed _G scan for function lookup database to after on_state_create invocation | ||
174 | * Fixed a crash when USE_DEBUG_SPEW == 1 | ||
175 | |||
176 | CHANGE 113: BGe 17-Jun-14 | ||
177 | * bumped version to 3.9.6 | ||
178 | * separate deep userdata code in a dedicated file to allow external modules to implement Lanes-compatible deep userdata without requiring a binary dependency against the Lanes module | ||
179 | because of this, the linda_id function (eDO_metatable) must push 2 values on the stack: a metatable and a deep version string obtained from luaG_pushdeepversion() | ||
180 | |||
181 | CHANGE 112 BGe 16-May-14 | ||
182 | * bumped version to 3.9.5 | ||
183 | * fix linda.__towatch to return non-nil when the linda is empty | ||
184 | * lanes.gen() error reporting improvements | ||
185 | |||
186 | CHANGE 111 BGe 24-Apr-14 | ||
187 | * fixed linda:send() possibly returning an undefined value | ||
188 | |||
189 | CHANGE 110 Stepets 20-Apr-14 | ||
190 | * fix LuaJIT detection issues | ||
191 | |||
192 | CHANGE 109 BGe 03-Apr-14 | ||
193 | * moved some Lua-version compatibility code in separate source files | ||
194 | |||
195 | CHANGE 108: BGe 20-Mar-14 | ||
196 | * bumped version to 3.9.4 | ||
197 | * set_finalizer throws an error if provided finalizer isn't a function | ||
198 | * fix error handling when the error doesn't generate an error handler call (IOW, all errors but LUA_ERRRUN) | ||
199 | * provide callstack if LUA_ERRRUN occurs inside a finalizer | ||
200 | |||
201 | CHANGE 107: BGe 19-Mar-14 | ||
202 | * Make sure we don't mutex-wrap require() more than once, just in case | ||
203 | |||
204 | CHANGE 106: BGe 17-Mar-14 | ||
205 | * Fixed crash when using protect_allocator option | ||
206 | |||
207 | CHANGE 105: BGe 27-Feb-14 | ||
208 | * Bumped version to 3.9.3 | ||
209 | * new exposed variable linda.null that exposes the internal NIL_SENTINEL marker | ||
210 | * linda:send() interprets send key linda.null as authorization to silently send a single nil when not provided with anything to send | ||
211 | (useful when sending results of a function that can return nothing) | ||
212 | |||
213 | CHANGE 104: BGe 25-Feb-14 | ||
214 | * Bumped version to 3.9.2 | ||
215 | * Internal rework: the whole Lanes engine now works "per universe" to allow concurrent Lanes execution in more than one embedded master state | ||
216 | * this universe is a full userdata created in the master state, selfdestruct_gc is the __gc for this userdata | ||
217 | * most of what was initialized only once is now per-universe | ||
218 | * Fixed potential crashes at deinit if problems occur during keeper state initialisation | ||
219 | * Fixed require() not always serialized properly | ||
220 | * Raise an error instead of crashing on deep userdata prelude memory allocation failure | ||
221 | * Added forgotten mutex deinitialisation at universe shutdown | ||
222 | |||
223 | CHANGE 103: BGe 24-Feb-14 | ||
224 | * Fix lookup database table not being created when it should if Lanes is required in more than one Lua master state | ||
225 | |||
226 | CHANGE 102: BGe 18-Feb-14 | ||
227 | * raise an error instead of dereferencing a NULL pointer on deep userdata creation and lane struct creation | ||
228 | |||
229 | CHANGE 101: BGe 18-Feb-14 | ||
230 | * version 3.9.1 | ||
231 | * removed some legacy keeper deinit dead code | ||
232 | * keeper array is allocated with master state's alloc function instead of malloc()/free() | ||
233 | * prevent application crash when specifying a very large number of keepers in the configuration options | ||
234 | * any error occurring during one-time inits is raised outside the one-time mutex-protected code region | ||
235 | |||
236 | CHANGE 100: BGe 17-Feb-14 | ||
237 | * lanes.linda() accepts an optional integer group to give control on keeper state repartition | ||
238 | |||
239 | CHANGE 99: BGe 17-Feb-14 | ||
240 | * version 3.9.0 | ||
241 | * keepers now require "package", receive package.path & package.cpath, and call on_state_create() if it is a C function | ||
242 | * changed the deep public API (improved deep idfunc signature, renamed luaG_deep_userdata to luaG_newdeepuserdata) | ||
243 | * if an error occurs while copying a deep userdata, don't raise inside the keeper state | ||
244 | * fixed situations where raised errors could lead to memory leaks (deep gc) | ||
245 | |||
246 | CHANGE 98: BGe 13-Feb-14 | ||
247 | * version 3.8.5 | ||
248 | * linda:limit() returns lanes.cancel_error on a limited linda | ||
249 | * lanes.genlock() and lanes.genatomic() support cancelled lindas by returning lanes.cancel_error whenever appropriate | ||
250 | * fixed a possible Lua stack overflow when calling linda:dump() | ||
251 | * fixed cases where linda:send() and linda:receive() would not return lanes.cancel_error when they should | ||
252 | |||
253 | CHANGE 97: BGe 10-Feb-14 | ||
254 | * version 3.8.4 | ||
255 | * new API linda:cancel("read"|"write"|"both"|"none") | ||
256 | * all linda operations return lanes.cancel_error on a cancelled linda | ||
257 | * raised an internal string length so that longer linda names are fully output before truncation applies when doing tostring( linda) | ||
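A minimal sketch (not part of this diff) of the linda cancellation API introduced in this entry; the slot name and value are illustrative:

    local lanes = require "lanes".configure()
    local linda = lanes.linda()
    linda:cancel("both")                 -- reject further read and write operations
    local ok = linda:send("slot", 42)    -- returns lanes.cancel_error instead of true
    linda:cancel("none")                 -- lift the cancellation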
258 | |||
259 | CHANGE 96: BGe 24-Jan-14 | ||
260 | * another Lua stack overflow fix when sending complex function through lindas or as lane body | ||
261 | |||
262 | CHANGE 95: BGe 22-Jan-14 | ||
263 | * version 3.8.3 | ||
264 | * fixed a possible Lua stack overflow when sending complex function through lindas or as lane body | ||
265 | * experimental: lanes.nameof() scans the registry if a regular search didn't yield anything interesting | ||
266 | * fixed lanes.nameof() misbehaving when encountering a LUA_TTHREAD object | ||
267 | |||
268 | CHANGE 94: BGe 22-Jan-14 | ||
269 | * version 3.8.2 | ||
270 | * new lane launcher option gc_cb to set a callback that is invoked when a lane is garbage collected | ||
271 | * Fix more invalid memory accesses when fetching the name of a joined lane with lanes:threads() (because its lua_State is closed) | ||
272 | |||
273 | CHANGE 93: BGe 20-Jan-14 | ||
274 | * slightly improve linda performance when the producer/consumer scenario leaves the key empty | ||
275 | |||
276 | CHANGE 92: BGe 20-Jan-14 | ||
277 | * version 3.8.1 | ||
278 | * new function lane:get_debug_threadname() | ||
279 | * Fix invalid memory accesses when fetching the name of a joined lane with lanes:threads() (because its lua_State is closed) | ||
280 | * use luaL_newmetatable() to create the metatable for lane objects | ||
281 | * prevent malicious code from crashing by calling lane methods without passing the lane as first argument (raise an error instead) | ||
282 | * set_debug_threadname() is no longer registered in the function lookup databases because it holds a C pointer as upvalue and it might crash if used maliciously | ||
283 | |||
284 | CHANGE 91: BGe 20-Jan-14 | ||
285 | * version 3.8.0 | ||
286 | * linda:set() accepts multiple values to set in the specified slot | ||
287 | * linda:get() accepts an optional count to peek several values at once | ||
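A short sketch (not part of this diff) of the extended linda:set()/linda:get() signatures from this entry, assuming linda:get() returns the peeked values directly:

    local lanes = require "lanes".configure()
    local linda = lanes.linda()
    linda:set("slot", 1, 2, 3)             -- store several values in the slot at once
    local v1, v2 = linda:get("slot", 2)    -- peek the first two values without consuming them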
288 | |||
289 | CHANGE 90: BGe 16-Jan-14 | ||
290 | * version 3.7.8 | ||
291 | * lane:cancel() now accepts a boolean second argument when soft cancelling (negative timeout) to wake the thread if necessary | ||
292 | * if a blocked linda send() or receive() call is interrupted by a cancellation request, | ||
293 | it returns CANCEL_ERROR so that this case can be differentiated from a simple timeout | ||
294 | * fixed WIN32 THREAD_CREATE() wrong _beginthreadex() error detection | ||
295 | * fatal WIN32 threading errors retrieve and output the error description string with FormatMessage() | ||
296 | * fixed missing lanes.set_singlethreaded | ||
297 | * fixed perftest.lua | ||
298 | * added test/cancel.lua | ||
299 | |||
300 | CHANGE 89: BGe 09-Jan-14 | ||
301 | * version 3.7.7 | ||
302 | * fix crash when calling linda:count() on unknown keys | ||
303 | * purge key storage with linda:set( key, nil) on an unlimited key to reduce memory usage with lots of keys | ||
304 | * linda:limit() wakes write-blocked threads if necessary when the new limit enables writes to occur again | ||
305 | * linda:set() wakes write-blocked threads if necessary if the operation created some room to write into | ||
306 | |||
307 | CHANGE 88: BGe 06-Jan-14 | ||
308 | * version 3.7.6 | ||
309 | * if config.on_state_create() is a C function, call it by direct C closure reconstruction in newly created states | ||
310 | |||
311 | CHANGE 87: BGe 20-Dec-13 | ||
312 | * version 3.7.5 | ||
313 | * fixed a crash that can occur at shutdown when an object stored inside a keeper state performs a linda operation on a linda making use of another keeper | ||
314 | * new setting demote_full_userdata to select between light userdata demotion or raising an error when attempting to transfer a non-deep full userdata | ||
315 | |||
316 | CHANGE 86: BGe 3-Dec-13 | ||
317 | * version 3.7.4 | ||
318 | * internal refactoring of pthread priority management code | ||
319 | * new API lanes.set_thread_priority() | ||
320 | |||
321 | CHANGE 85: BGe 28-Nov-13 | ||
322 | * version 3.7.3 | ||
323 | * set pthread thread cancel type to PTHREAD_CANCEL_ASYNCHRONOUS | ||
324 | * lane_h:cancel() accepts a 3rd timeout argument used when waiting for actual thread termination (hitting the timeout raises an error) | ||
325 | * added PROPAGATE_ALLOCF macro to select state creation mode (lua_newstate or luaL_newstate) | ||
326 | |||
327 | CHANGE 84: BGe 18-Nov-13 | ||
328 | * Fix a deadlock when GCing during a linda operation. | ||
329 | * Fix a compilation warning about an unused variable | ||
330 | * Get rid of uintptr_t to remove dependency on stdint.h | ||
331 | * Fix internal error at lane creation when the generator doesn't open any base library | ||
332 | |||
333 | CHANGE 83: BGe 16-Nov-13 | ||
334 | * version 3.7.2 | ||
335 | * Fixed function returned by lanes.genlock() not handling numeric keys properly when releasing the lock | ||
336 | * Enable lanes.genlock() to attempt lock with an optional "try" mode | ||
337 | * make EnableCrashingOnCrashes a one-time operation | ||
338 | |||
339 | CHANGE 82: BGe 13-Nov-13 | ||
340 | * Fix a case where an error could be raised inside a keeper state | ||
341 | |||
342 | CHANGE 81: BGe 07-Nov-13 | ||
343 | * Make set_finalizer(), set_debug_threadname(), cancel_test() and set_error_reporting() transferable from lane to lane | ||
344 | * Improved some DEBUGSPEW output | ||
345 | |||
346 | CHANGE 80: BGe 06-Nov-13 | ||
347 | * Fix a few compilation warnings about uninitialized variables | ||
348 | * Fixed a bad extern variable declaration resulting in multiple instances (crashes the Pelles-C build) | ||
349 | |||
350 | CHANGE 79: BGe 04-Nov-13 | ||
351 | * Fix lanes.nameof() crashing when encountering a light userdata | ||
352 | |||
353 | CHANGE 78: BGe 25-Oct-13 | ||
354 | * Fix windows build not exporting public 'deep' API | ||
355 | * Don't call on_state_create in keeper states, as it is no longer necessary | ||
356 | * Remove inclusion of stdint.h | ||
357 | * Fix windows build for WINVER > 0x400 | ||
358 | |||
359 | CHANGE 77: BGe 22-Oct-13 | ||
360 | * version 3.7.1 | ||
361 | * errors inside finalizers generate a full stack just like any other error | ||
362 | |||
363 | CHANGE 76: BGe 10-Oct-13 | ||
364 | * version 3.7.0 | ||
365 | * fix lanes.threads() not being available in a lane where lanes.configure() settings didn't contain track_lanes although the initial configure() call did. | ||
366 | |||
367 | CHANGE 75: BGe 7-Oct-13 | ||
368 | * require "lanes".configure() sequence is only necessary at the first require "lanes". | ||
369 | |||
370 | CHANGE 74: BGe 7-Oct-13 | ||
371 | * fix a crash at application shutdown where in some situations we could deinitialize the protected allocator mutex while a lane was still using it. | ||
372 | |||
373 | CHANGE 73: BGe 4-Oct-13 | ||
374 | * fix timers broken by change 69 | ||
375 | |||
376 | CHANGE 72: BGe 3-Oct-13 | ||
377 | * bugfix: no longer create a global named "lanes.core" inside lanes having "*" as library list | ||
378 | |||
379 | CHANGE 71: BGe 30-Sept-13 | ||
380 | * version 3.6.6 | ||
381 | * properly handle cases when a Lua C module is a C function | ||
382 | |||
383 | CHANGE 70: BGe 27-Sept-13 | ||
384 | * no longer call core.configure with dummy params when requiring lanes more than once (fixes potential multithreading issues with LuaJIT allocator) | ||
385 | * EnableCrashingOnCrashes() is now active on Win32 debug builds | ||
386 | * fixed some comments in code | ||
387 | |||
388 | CHANGE 69: BGe 26-Sept-13 | ||
389 | * version 3.6.5 | ||
390 | * Reduce memory footprint, simplify module order setup in conjunction with Lanes, and send over native functions a bit faster as well | ||
391 | * Lanes no longer has to internally require modules inside the keeper states because they no longer need a lookup database | ||
392 | the lookup name is stored as-is and actually converted in the destination state | ||
393 | * optimisation: bypass cache when sending native functions over | ||
394 | * removed all the KEEPER_MODEL_LUA code, as it can no longer work anyway | ||
395 | |||
396 | CHANGE 68: BGe 24-Sept-13 | ||
397 | * version 3.6.4 | ||
398 | * Fix possible application hang at shutdown if a keeper state referenced a linda. | ||
399 | |||
400 | CHANGE 67: BGe 2-Aug-13 | ||
401 | * version 3.6.3 | ||
402 | * lane:cancel(<negative-timeout>) only causes cancel_test() to return true but won't interrupt execution of the lane during linda operations | ||
403 | |||
404 | CHANGE 66: BGe 31-Jul-13 | ||
405 | * more explicit errors when trying to transfer unknown source functions (with new configure option verbose_errors) | ||
406 | |||
407 | CHANGE 65: BGe 23-Jul-13 | ||
408 | * default options wrap allocator around a mutex when run by LuaJIT | ||
409 | |||
410 | CHANGE 64: BGe 20-Jul-13 | ||
411 | * WIN32 builds against pre-Vista versions no longer use PulseEvent to fix occasional hangs when a wake event is missed | ||
412 | |||
413 | CHANGE 63: BGe 20-May-13 | ||
414 | * version 3.6.2 | ||
415 | * WIN32 builds use condition variables instead of PulseEvent() when available. | ||
416 | * first steps toward fixing make-vc.cmd | ||
417 | |||
418 | CHANGE 62: BGe 05-Apr-13 | ||
419 | * version 3.6.1 | ||
420 | * function lookup database population keeps the 'smaller' name in case of multiple hits, to remove the no-LUA_COMPAT_ALL restriction on Lua5.2 builds | ||
421 | |||
422 | CHANGE 61: BGe 14-Mar-13 | ||
423 | * version 3.6.0 | ||
424 | * protect_allocator is an API change -> version bump | ||
425 | * bugfix: allocator protection should be done once per primary Lua state, not just once the very first time Lanes is required | ||
426 | |||
427 | CHANGE 60: BGe 13-Mar-13 | ||
428 | * version 3.5.2 | ||
429 | * stricter validation of with_timers config option: validator was accepting any non-boolean value | ||
430 | * new configuration option protect_allocator for VMs with thread unsafe allocators (such as LuaJIT) | ||
431 | * removed some obsolete bits of dead code | ||
432 | |||
433 | CHANGE 59: BGe 12-Feb-13 | ||
434 | * version 3.5.1 | ||
435 | * new lanes.h header and API call luaopen_lanes_embedded() for embedders | ||
436 | * "lanes.core" is an acceptable library in the generator libs argument | ||
437 | * library "*" wildcard also opens lanes.core | ||
438 | * tweaked code for Xbox 360 build | ||
439 | |||
440 | CHANGE 58: BGe 30-Jan-13 | ||
441 | * version 3.5.0 | ||
442 | * new: API lanes.require(), use it instead of regular require() for modules that export C functions you need to send over. | ||
443 | * new: lanes no longer require 'lanes.core' by default in every created state. Use {required={"lanes.core"}} if you need to transfer lanes functions. | ||
444 | * internal: because of the above, reworked the timer implementation to remove upvalue-dependency on lanes.core | ||
445 | * new: API lanes.timer_lane, to be able to operate on timer lane if need be | ||
446 | * improved: if a module is a full userdata, scan its metatable for function database population | ||
447 | * improved: on_state_create can be a Lua function | ||
448 | * changed: on_state_create is called after the base libraries are loaded | ||
449 | * package[loaders|searchers] is no longer transferred as function naming depends on slot order | ||
450 | * internal: changed separator from '.' to '/' in lookup databases to be able to distinguish search levels and dot coming from module names | ||
451 | * added some more debug spew | ||
452 | * updated tests to reflect the above changes | ||
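A hedged sketch (not part of this diff) of the lanes.require() and required usage introduced in this entry; 'socket' is an illustrative module name:

    local lanes = require "lanes".configure()
    local socket = lanes.require "socket"    -- instead of a plain require(), so exported C functions can be sent over
    -- lanes that need to transfer lanes functions must now list lanes.core explicitly:
    local gen = lanes.gen("*", { required = { "lanes.core" } }, function() return true end)
    local h = gen()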
453 | |||
454 | CHANGE 57: BGe 28-Jan-13 | ||
455 | * More detailed DEBUG_SPEW logs | ||
456 | * A bit of code cosmetics | ||
457 | |||
458 | CHANGE 56: BGe 25-Jan-13 | ||
459 | * version 3.4.4 | ||
460 | * bugfix: take into account the fact that "coroutine" is no longer part of base library in Lua 5.2 | ||
461 | * bugfix: if "bit32" was listed in the libraries, it wouldn't open (library list parsing failing on digits) | ||
462 | * bugfix: Use luaL_requiref() to open standard libraries in Lua 5.2 as we should | ||
463 | * bugfix: any Lua state created by Lanes reuses the allocator function of the originating state | ||
464 | * bugfix: don't call on_state_create() while GC is suspended during lua state initialization | ||
465 | |||
466 | CHANGE 55: BGe 24-Jan-13 | ||
467 | * version 3.4.3 | ||
468 | * raise an error if lane generator libs specification contains a lib more than once | ||
469 | * bit32 is a valid lib name in the libs specification (silently ignored by the Lua 5.1 build) | ||
470 | * improved lanes.nameof to search inside table- and userdata- metatables for an object's name | ||
471 | * bugfix: fixed an unwarranted error when trying to discover a function name upon a failed transfer | ||
472 | * contents of package.[path,cpath,preload,loaders|searchers] are pulled *only once* inside keeper states at initialisation | ||
473 | * Lua function upvalues equal to the global environment aren't copied by value, but bound to the destination's global environment | ||
474 | especially useful for Lua 5.2 _ENV | ||
475 | * bugfix: fixed loading of base libraries that didn't create the global tables when built for Lua 5.2 | ||
476 | |||
477 | CHANGE 54: BGe 10-Jan-13 | ||
478 | * version 3.4.2 | ||
479 | * Don't pull "package" settings in the timer lane | ||
480 | * removed a limitation preventing Lua functions with indirect recursive upvalue references from being transferable | ||
481 | |||
482 | CHANGE 53: BGe 11-Dec-2012 | ||
483 | * version 3.4.1 | ||
484 | * new function lanes.timers(), returns a list of all active timers. | ||
485 | |||
486 | CHANGE 52: BGe 03-Dec-2012 | ||
487 | * linda:send() and linda:receive() no longer triggers string->number autocoercion when checking for the optional timeout argument: | ||
488 | a string is always a linda slot, even if coercible. | ||
489 | |||
490 | CHANGE 51: BGe 27-Nov-2012 | ||
491 | * linux flavors with older glibc use prctl instead of pthread_setname_np | ||
492 | * selfdestruct chain handling is now the same on all platforms | ||
493 | |||
494 | CHANGE 50: BGe 22-Nov-2012 | ||
495 | * bugfix: linda:set() no longer clears the storage limit | ||
496 | |||
497 | CHANGE 49: BGe 21-Nov-2012 | ||
498 | * fix application shutdown crash by not registering anything with atexit() | ||
499 | * rockspec for version v3.4.0 | ||
500 | |||
501 | CHANGE 48: BGe 25-Sep-2012 | ||
502 | * version 3.4.0 | ||
503 | * new method linda:dump() that outputs the full contents of a linda as a table, also linked to __towatch for Decoda support | ||
504 | * linda:receive() API change! | ||
505 | * instead of [val, key], linda:receive( timeout, key) returns [key, val] | ||
506 | * instead of [val, [...]], linda:receive( timeout, linda.batched key) returns [key, val[, ...]] | ||
507 | this is to unify the return values of regular and batched mode, and to be able to tell when batched mode is interrupted by a lane cancellation | ||
508 | * fixed Lua 5.2 build to take into account the "loaders"->"searchers" name change in 'package' module. | ||
509 | * a bit of HTML cleanup and added some info in the documentation regarding the Lanes internals | ||
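A sketch (not part of this diff) of the linda:receive() return-value change described in this entry; the timeout and slot name are illustrative:

    local lanes = require "lanes".configure()
    local linda = lanes.linda()
    linda:send("slot", "hello")
    local key, val = linda:receive(1, "slot")    -- now returns "slot", "hello" (key first)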
510 | |||
511 | CHANGE 47: BGe 13-Sep-2012 | ||
512 | * implemented set_debug_threadname() for pthread builds where possible | ||
513 | * refactored linda __tostring and __concat | ||
514 | |||
515 | CHANGE 46: BGe 10-Sep-2012 | ||
516 | * version 3.3.0 | ||
517 | * lane.status can return "killed" if lane was forcefully killed with lanes:cancel() | ||
518 | * lane:join(): return nil, "killed" if called on a killed lane. | ||
519 | * lane[<n>]: produces [1] = nil, [2] = "killed" if the lane was killed | ||
520 | * lane:join(): fixed an assertion in debug builds when joining a lane forcefully cancelled with lane:cancel( <x>, true). | ||
521 | * indexing a lane with a string other than "join", "cancel" or "status" raises an error. | ||
522 | * fixed configure() to correctly apply defaults when they are missing from the provided settings | ||
523 | * added a shutdown_timeout to control the duration Lanes will wait for graceful termination of running lanes at application shutdown. Default is 0.25. | ||
524 | |||
525 | CHANGE 45: BGe 21-Aug-2012 | ||
526 | * keeper internals implemented in C instead of Lua for better performances | ||
527 | * fixed arguments checks in linda:limit() and linda:set() | ||
528 | |||
529 | CHANGE 44: BGe 13-Aug-2012 | ||
530 | * lanes code updated to build against Lua 5.1 and Lua 5.2 | ||
531 | * removed the search for MSVCR80.DLL when building for MinGW32 since it no longer seems to be necessary | ||
532 | |||
533 | CHANGE 43: BGe 09-Aug-2012 | ||
534 | * fix possible crash at application shutdown when a race condition causes linda objects to be collected after the keeper states are cleaned up. | ||
535 | |||
536 | CHANGE 42: BGe 06-Aug-2012 | ||
537 | * lanes.linda() accepts an optional name for debug purposes | ||
538 | |||
539 | CHANGE 41: BGe 07-Jul-2012 | ||
540 | * lua51-lanes renamed lanes/core | ||
541 | * keeper state microcode is no longer embedded inside lanes.core, but located and loaded with package.loaders[2] | ||
542 | * changed rockspec build type from "make" to "builtin" | ||
543 | |||
544 | CHANGE 40: BGe 26-Jun-2012 | ||
545 | * when a transferred function is not found in source, guess its name to help the user find out what's wrong | ||
546 | * new function lanes.nameof() | ||
547 | |||
548 | CHANGE 39: BGe 23-Jun-2012 | ||
549 | * lanes.timer() accepts a first_secs=nil to stop a timer | ||
550 | * timer lane catches errors and prints them | ||
551 | * fixed some typos in manual | ||
552 | |||
553 | CHANGE 38: BGe 11-Jun-2012 | ||
554 | * linda:receive() batched mode now accepts a max_count optional argument | ||
555 | |||
556 | CHANGE 37: BGe 4-Jun-2012 (fix and idea courtesy of sonoro1234) | ||
557 | * fixed thread_cancel() not working when called without argument | ||
558 | * new lane-global function set_error_reporting() to enable more detailed data provided by lane_error() | ||
559 | |||
560 | CHANGE 36 BGe 26-Apr-2012 | ||
561 | * improved LuaJIT2 compatibility by handling "*" library set through luaL_openlibs() | ||
562 | |||
563 | CHANGE 35 BGe 17-Feb-2012 | ||
564 | * changed lanes.configure signature to receive a table instead of individual parameters | ||
565 | * added support for an on_state_create callback called to load custom functions in a state in addition to the base libraries | ||
566 | |||
567 | CHANGE 34 BGe 14-Nov-2011 | ||
568 | * removed packagepath and packagecpath options, replaced by a package table, whose fields path, cpath, loaders, preload are transferred | ||
569 | * code cleanup to facilitate transition between WIN32 and PTHREAD implementations | ||
570 | * tentative fix for deinit crashes when free-running lanes are killed at process shutdown | ||
571 | |||
572 | CHANGE 33 BGe 5-Nov-2011: Lanes version 3.0-beta | ||
573 | * process exit change: close everything at GC when main state closes, not when atexit() handlers are processed | ||
574 | * Lua 5.2-style module: | ||
575 | * module() is no longer used to implement lanes.lua | ||
576 | * a global "lanes" variable is no longer created when the module is required | ||
577 | * the Lanes module table is returned instead | ||
578 | * Lanes must be initialized before used: | ||
579 | * the first occurrence of 'require "lanes"' produces a minimal interface that only contains a configure() function | ||
580 | * the remainder of the interface is made available once this function is called | ||
581 | * subsequent calls to configure() do nothing | ||
582 | * configure() controls the number of keeper states and the startup of timers | ||
583 | * LuaJIT 2 compatibility | ||
584 | * non-Lua functions are no longer copied by creating a C closure from a C pointer, but through 2-way lookup tables | ||
585 | * this means that if a lane function body pulls non-Lua functions, the lane generator description must contain the list of libraries and modules that exports them | ||
586 | * introduces a change in configuration .globals management: contents are copied *after* std libs are loaded | ||
587 | * new .required configuration entry to list modules that must be require()'ed before lane body is transferred | ||
588 | * lane:cancel() wakes up waiting lindas like what is done at lane shutdown | ||
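A minimal sketch (not part of this diff) of the initialization sequence described in this entry; the settings shown are illustrative, and the sketch assumes configure() returns the module table:

    local lanes = require "lanes"                     -- first require: only configure() is available
    lanes = lanes.configure{ with_timers = false }    -- the rest of the interface is available from here on
    local h = lanes.gen("*", function() return 42 end)()
    print(h[1])                                       -- 42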
589 | |||
590 | CHANGE 32 BGe 14-May-2011 | ||
591 | * raise an error when linda:send() has nothing to send | ||
592 | |||
593 | CHANGE 31 BGe 17-Apr-2011 | ||
594 | * linda uses a fast FIFO implementation to speed up data exchanges | ||
595 | * new linda:count() method | ||
596 | * new linda batched data read mode | ||
597 | * proper key type check in all linda methods | ||
598 | * fix setup-vc.cmd to support Visual Studio 2010 and Windows 7 64 bits | ||
599 | * bugfix: release keeper state mutex at deinit | ||
600 | |||
601 | CHANGE 30 BGe 30-Mar-2011 | ||
602 | * linda honors __tostring and __concat | ||
603 | * new accessor linda:keys(), to retrieve the list of keys with pending data inside a linda | ||
604 | * new lanes options packagepath and packagecpath, in case one needs to set them differently than the default | ||
605 | |||
606 | CHANGE 29 BGe 1-Mar-2011 | ||
607 | fixed potential crash at application shutdown when calling lua_close() on a killed thread's VM. | ||
608 | exposed cancel_test() in the lanes to enable manual testing for cancellation requests. | ||
609 | removed kludgy {globals={threadName}} support, replaced with a new function set_debug_threadname(). | ||
610 | |||
611 | CHANGE 28 BGe 18-Feb-2011 | ||
612 | - moved keeper-related code in a separate source file | ||
613 | - keeper.lua is now embedded in text form instead of bytecode to improve LuaJIT2-compatibility | ||
614 | |||
615 | CHANGE 27 BGe 17-Feb-2011 | ||
616 | - we know Lanes is loaded in the master state, so we don't force it | ||
617 | to be required in every lane too when a linda deep userdata is copied | ||
618 | - Refactor lane proxy implementation: it is now a full userdata instead | ||
619 | of a table, and its methods are implemented in C instead of Lua | ||
620 | * its metatable is no longer accessible | ||
621 | * writing to the proxy raises an error | ||
622 | * it is no longer possible to overwrite its join() and cancel() methods | ||
623 | - when a deep userdata idfunc requests a module to be required, manually | ||
624 | check that it is not loaded before requiring it instead of relying on | ||
625 | the require function's loop detection feature | ||
626 | - when a module must be required, raise an error if the 'require' function | ||
627 | is not found in the target state | ||
628 | |||
629 | CHANGE 26 BGe 14-Feb-2011: | ||
630 | Fixed application hang-up because keeper state was not released in case of errors thrown by | ||
631 | inter-state data copy for unsupported types | ||
632 | |||
633 | CHANGE 25 BGe 12-Feb-2011: | ||
634 | Changed idfunc signature and contract to clarify that fact it is not lua-callable | ||
635 | and to be able to require the module it was exported from in the target lanes | ||
636 | |||
637 | CHANGE 24 DPtr 25-Jan-2011: | ||
638 | Changed lanes.c to export functions as a module rather than writing them directly to the globals table. | ||
639 | |||
640 | CHANGE 23 DPtr 23-Jan-2011: | ||
641 | Fixed bug where reference to Linda object was dropped for a short time ( crashing if GC was run during that time ). | ||
642 | Changed the atexit code to trip the timer thread's write signal. | ||
643 | |||
644 | CHANGE 22 DPtr 19-Jan-2011: | ||
645 | Changed luaG_push_proxy to cache deep userdata proxies. | ||
646 | |||
647 | CHANGE 21 (bugfixes) BGe 3-Jan-2011: | ||
648 | Several fixes by Martin Krpan: | ||
649 | - linda_send was waiting on the wrong signal | ||
650 | - buildfix when using i586-mingw32msvc-gcc cross compiler | ||
651 | - lanes_h:cancel() returns a boolean as it should | ||
652 | - timers could get blocked sometimes because they were initialized with negative values | ||
653 | - prepare_timeout could generate an illegal setting | ||
654 | |||
655 | CHANGE 20 BGe 3-Dec-2010: | ||
656 | Allow specifying a string as lane code instead of a function so that we don't use lua_dump, which | ||
657 | isn't supported by LuaJIT. | ||
658 | |||
659 | CHANGE 19 BGe 2-Dec-2010: | ||
660 | No longer rely on global function 'tostring' to generate unique identifiers when caching data being transferred through a linda. Should fix a compatibility issue with LuaJIT2. | ||
661 | |||
662 | CHANGE 18 BGe 6-Oct-2010: | ||
663 | Fixed 'memory leak' in some situations where a free running lane is collected before application shutdown | ||
664 | A bit of code cleanup | ||
665 | |||
666 | CHANGE 17 BGe 21-Sept-2010: | ||
667 | Fixed stupid compilation errors. | ||
668 | |||
669 | CHANGE 16 PLM 24-Aug-2010: | ||
670 | Releasing memory at gc / atexit. | ||
671 | Finalizers actually get error strings. | ||
672 | Fixed missing argument propagation in lane:cancel | ||
673 | Added variable threadName sent through the globals table. Set in s_lane, and in debuggers on Windows. | ||
674 | Added argument checking for linda objects, where missing arguments caused crashes. | ||
675 | |||
676 | CHANGE 15 (minor) BGe 27-Jul-2010: | ||
677 | Version bump for a true upgrade release (2.0.4 package was only a renamed 2.0.3) | ||
678 | |||
679 | CHANGE 14 (bug fix) BGe 09-Jul-2010: | ||
680 | Fixed lane status to be correctly returned as "waiting" when it should. | ||
681 | |||
682 | CHANGE 13 (fix for multithreaded host apps) AKa 24-Jun-2009: | ||
683 | <borisusun-at-gmail> mentioned Lanes expects the host application to be singlethreaded, | ||
684 | and there are troubles if Lanes is used from multiple threads, opened by the host | ||
685 | (before requiring Lanes). This is true, and fix should now be in place. | ||
686 | |||
687 | CHANGE 12 (bug fix on Windows, 2.0.3) AKa 25-Jan-2009: | ||
688 | Did CHANGE 9 the way it should be done. | ||
689 | |||
690 | CHANGE 11 (new feature, 2.0.3) AKa 23-Jan-2009: | ||
691 | Finalizers ('set_finalizer()') for being able to do cleanup of a lane's | ||
692 | resources, whether it returned successfully or via an error. | ||
693 | |||
694 | CHANGE 10 (new feature, 2.0.3) AKa 23-Jan-2009: | ||
695 | Call stack showing where an error occurred is not merged with the error | ||
696 | message, but delivered as a separate stack table ({ "filename:line" [, ...] }). | ||
697 | Getting call stacks of errored lanes is now possible. | ||
698 | |||
699 | CHANGE 9 (bug fix on Windows) AKa 10-Dec-2008 (> 2.0.2): | ||
700 | Applied patch from Kriss Daniels to avoid issues on 'now_time()' in Win32 | ||
701 | (http://luaforge.net/forum/forum.php?thread_id=22704&forum_id=1781). | ||
702 | |||
703 | CHANGE 8 (bug fix) AKa 26-Oct-2008: | ||
704 | Avoids occasional segfault at process exit (on multicore CPUs). Does this | ||
705 | by keeping track of "free running" threads (s.a. the time thread) and | ||
706 | cancelling them at process exit. | ||
707 | |||
708 | Tested (2.0.2) on Linux 64,x86, OS X, WinXP. | ||
709 | |||
710 | CHANGE 7 (bug fix) AKa 15-Oct-2008: | ||
711 | Recursive functions that use themselves as direct upvalue can now be | ||
712 | passed to other lanes, and used as a lane function. | ||
713 | |||
714 | CHANGE 6 (bug fix) AKa 15-Oct-2008: | ||
715 | Added local caches of the following to src/lanes.lua (was otherwise getting | ||
716 | errors at least in 'tests/irayo_recursive.lua'). | ||
717 | |||
718 | local assert= assert | ||
719 | local string_gmatch= assert( string.gmatch ) | ||
720 | local select= assert( select ) | ||
721 | local type= assert( type ) | ||
722 | local pairs= assert( pairs ) | ||
723 | local tostring= assert( tostring ) | ||
724 | local error= assert( error ) | ||
725 | local setmetatable= assert( setmetatable ) | ||
726 | local rawget= assert( rawget ) | ||
727 | |||
728 | Thanks to Irayo for detecting and reporting this. | ||
729 | |||
730 | CHANGE 5 (new feature): | ||
731 | Modifying Makefile so it's better suited to LuaRocks. | ||
732 | |||
733 | CHANGE 4 (new feature): | ||
734 | Metatable copying, allowing Lua objects to be copied across lanes. | ||
735 | |||
736 | CHANGE 3 (bug fix) AKa 5-Aug-2008: | ||
737 | The '__gc' method was not tied to thread userdata, at all. Caused memory | ||
738 | lifespan problems at least on OS X when threads were cancelled (EINVAL). | ||
739 | |||
740 | CHANGE 2 (bug fix) AKa 5-Aug-2008: | ||
741 | Better calculation of timeouts, always making them absolute (even in Win32) | ||
742 | to allow for events that wake the lane up but don't read/write the Linda | ||
743 | key that it was observing. | ||
744 | |||
745 | CHANGE 1 (bug fix) AKa 4-Aug-2008: | ||
746 | Signalling woke up only one waiting thread, not all. This caused i.e. | ||
747 | receive to not wake up if there was another thread waiting on the same | ||
748 | Linda object. | ||
749 | |||
750 | PThread fix: using 'pthread_cond_broadcast()' instead of 'pthread_cond_signal()' | ||
751 | Win32 fix: using manual events and 'PulseEvent()' | ||
752 | 5 | ||
753 | (end) | 6 | (end) |
@@ -42,7 +42,7 @@ _PREFIX=LUA_CPATH="./src/?.$(_SO)" LUA_PATH="./src/?.lua;./tests/?.lua" | |||
42 | #--- | 42 | #--- |
43 | all: $(_TARGET_SO) | 43 | all: $(_TARGET_SO) |
44 | 44 | ||
45 | $(_TARGET_SO): src/*.lua src/*.c src/*.h | 45 | $(_TARGET_SO): src/*.lua src/*.cpp src/*.h |
46 | cd src && $(MAKE) LUA=$(LUA) | 46 | cd src && $(MAKE) LUA=$(LUA) |
47 | 47 | ||
48 | clean: | 48 | clean: |
diff --git a/deep_test/deep_test.args.json b/deep_test/deep_test.args.json
new file mode 100644
index 0000000..a540fc2
--- /dev/null
+++ b/deep_test/deep_test.args.json
@@ -0,0 +1,16 @@ | |||
1 | { | ||
2 | "FileVersion": 2, | ||
3 | "Id": "4c40bd18-3bab-46d7-8f14-602a6fbe5910", | ||
4 | "Items": [ | ||
5 | { | ||
6 | "Id": "d9d87866-8c63-44a8-b88a-1d42097985d4", | ||
7 | "Command": "DeepTest", | ||
8 | "Items": [ | ||
9 | { | ||
10 | "Id": "10e30bb2-dc23-4882-b918-b5939c14e588", | ||
11 | "Command": "-i deeptest.lua" | ||
12 | } | ||
13 | ] | ||
14 | } | ||
15 | ] | ||
16 | } \ No newline at end of file | ||
diff --git a/deep_test/deep_test.c b/deep_test/deep_test.c
deleted file mode 100644
index cb89741..0000000
--- a/deep_test/deep_test.c
+++ /dev/null
@@ -1,273 +0,0 @@ | |||
1 | #include <malloc.h> | ||
2 | #include <memory.h> | ||
3 | #include <assert.h> | ||
4 | |||
5 | #include "lua.h" | ||
6 | #include "lualib.h" | ||
7 | #include "lauxlib.h" | ||
8 | |||
9 | #include "lanes/src/deep.h" | ||
10 | #include "lanes/src/compat.h" | ||
11 | |||
12 | #if (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) | ||
13 | #define LANES_API __declspec(dllexport) | ||
14 | #else | ||
15 | #define LANES_API | ||
16 | #endif // (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) | ||
17 | |||
18 | // ################################################################################################ | ||
19 | |||
20 | // a lanes-deep userdata. needs DeepPrelude and luaG_newdeepuserdata from Lanes code. | ||
21 | struct s_MyDeepUserdata | ||
22 | { | ||
23 | DeepPrelude prelude; // Deep userdata MUST start with this header | ||
24 | lua_Integer val; | ||
25 | }; | ||
26 | static void* deep_test_id( lua_State* L, enum eDeepOp op_); | ||
27 | |||
28 | // ################################################################################################ | ||
29 | |||
30 | static int deep_set( lua_State* L) | ||
31 | { | ||
32 | struct s_MyDeepUserdata* self = luaG_todeep( L, deep_test_id, 1); | ||
33 | lua_Integer i = lua_tointeger( L, 2); | ||
34 | self->val = i; | ||
35 | return 0; | ||
36 | } | ||
37 | |||
38 | // ################################################################################################ | ||
39 | |||
40 | // won't actually do anything as deep userdata don't have uservalue slots | ||
41 | static int deep_setuv( lua_State* L) | ||
42 | { | ||
43 | struct s_MyDeepUserdata* self = luaG_todeep( L, deep_test_id, 1); | ||
44 | int uv = (int) luaL_optinteger( L, 2, 1); | ||
45 | lua_settop( L, 3); | ||
46 | lua_pushboolean( L, lua_setiuservalue( L, 1, uv) != 0); | ||
47 | return 1; | ||
48 | } | ||
49 | |||
50 | // ################################################################################################ | ||
51 | |||
52 | // won't actually do anything as deep userdata don't have uservalue slots | ||
53 | static int deep_getuv( lua_State* L) | ||
54 | { | ||
55 | struct s_MyDeepUserdata* self = luaG_todeep( L, deep_test_id, 1); | ||
56 | int uv = (int) luaL_optinteger( L, 2, 1); | ||
57 | lua_getiuservalue( L, 1, uv); | ||
58 | return 1; | ||
59 | } | ||
60 | |||
61 | // ################################################################################################ | ||
62 | |||
63 | static int deep_tostring( lua_State* L) | ||
64 | { | ||
65 | struct s_MyDeepUserdata* self = luaG_todeep( L, deep_test_id, 1); | ||
66 | lua_pushfstring( L, "%p:deep(%d)", lua_topointer( L, 1), self->val); | ||
67 | return 1; | ||
68 | } | ||
69 | |||
70 | // ################################################################################################ | ||
71 | |||
72 | static int deep_gc( lua_State* L) | ||
73 | { | ||
74 | struct s_MyDeepUserdata* self = luaG_todeep( L, deep_test_id, 1); | ||
75 | return 0; | ||
76 | } | ||
77 | |||
78 | // ################################################################################################ | ||
79 | |||
80 | static luaL_Reg const deep_mt[] = | ||
81 | { | ||
82 | { "__tostring", deep_tostring}, | ||
83 | { "__gc", deep_gc}, | ||
84 | { "set", deep_set}, | ||
85 | { "setuv", deep_setuv}, | ||
86 | { "getuv", deep_getuv}, | ||
87 | { NULL, NULL } | ||
88 | }; | ||
89 | |||
90 | // ################################################################################################ | ||
91 | |||
92 | static void* deep_test_id( lua_State* L, enum eDeepOp op_) | ||
93 | { | ||
94 | switch( op_) | ||
95 | { | ||
96 | case eDO_new: | ||
97 | { | ||
98 | struct s_MyDeepUserdata* deep_test = (struct s_MyDeepUserdata*) malloc( sizeof(struct s_MyDeepUserdata)); | ||
99 | deep_test->prelude.magic.value = DEEP_VERSION.value; | ||
100 | deep_test->val = 0; | ||
101 | return deep_test; | ||
102 | } | ||
103 | |||
104 | case eDO_delete: | ||
105 | { | ||
106 | struct s_MyDeepUserdata* deep_test = (struct s_MyDeepUserdata*) lua_touserdata( L, 1); | ||
107 | free( deep_test); | ||
108 | return NULL; | ||
109 | } | ||
110 | |||
111 | case eDO_metatable: | ||
112 | { | ||
113 | luaL_getmetatable( L, "deep"); // mt | ||
114 | return NULL; | ||
115 | } | ||
116 | |||
117 | case eDO_module: | ||
118 | return "deep_test"; | ||
119 | |||
120 | default: | ||
121 | { | ||
122 | return NULL; | ||
123 | } | ||
124 | } | ||
125 | } | ||
126 | |||
127 | // ################################################################################################ | ||
128 | |||
129 | int luaD_new_deep( lua_State* L) | ||
130 | { | ||
131 | int nuv = (int) luaL_optinteger( L, 1, 0); | ||
132 | // no additional parameter to luaG_newdeepuserdata! | ||
133 | lua_settop( L, 0); | ||
134 | return luaG_newdeepuserdata( L, deep_test_id, nuv); | ||
135 | } | ||
136 | |||
137 | // ################################################################################################ | ||
138 | // ################################################################################################ | ||
139 | |||
140 | struct s_MyClonableUserdata | ||
141 | { | ||
142 | lua_Integer val; | ||
143 | }; | ||
144 | |||
145 | // ################################################################################################ | ||
146 | |||
147 | static int clonable_set( lua_State* L) | ||
148 | { | ||
149 | struct s_MyClonableUserdata* self = (struct s_MyClonableUserdata*) lua_touserdata( L, 1); | ||
150 | lua_Integer i = lua_tointeger( L, 2); | ||
151 | self->val = i; | ||
152 | return 0; | ||
153 | } | ||
154 | |||
155 | // ################################################################################################ | ||
156 | |||
157 | static int clonable_setuv( lua_State* L) | ||
158 | { | ||
159 | struct s_MyClonableUserdata* self = (struct s_MyClonableUserdata*) lua_touserdata( L, 1); | ||
160 | int uv = (int) luaL_optinteger( L, 2, 1); | ||
161 | lua_settop( L, 3); | ||
162 | lua_pushboolean( L, lua_setiuservalue( L, 1, uv) != 0); | ||
163 | return 1; | ||
164 | } | ||
165 | |||
166 | // ################################################################################################ | ||
167 | |||
168 | static int clonable_getuv( lua_State* L) | ||
169 | { | ||
170 | struct s_MyClonableUserdata* self = (struct s_MyClonableUserdata*) lua_touserdata( L, 1); | ||
171 | int uv = (int) luaL_optinteger( L, 2, 1); | ||
172 | lua_getiuservalue( L, 1, uv); | ||
173 | return 1; | ||
174 | } | ||
175 | |||
176 | // ################################################################################################ | ||
177 | |||
178 | static int clonable_tostring(lua_State* L) | ||
179 | { | ||
180 | struct s_MyClonableUserdata* self = (struct s_MyClonableUserdata*) lua_touserdata( L, 1); | ||
181 | lua_pushfstring( L, "%p:clonable(%d)", lua_topointer( L, 1), self->val); | ||
182 | return 1; | ||
183 | } | ||
184 | |||
185 | // ################################################################################################ | ||
186 | |||
187 | static int clonable_gc( lua_State* L) | ||
188 | { | ||
189 | struct s_MyClonableUserdata* self = (struct s_MyClonableUserdata*) lua_touserdata( L, 1); | ||
190 | return 0; | ||
191 | } | ||
192 | |||
193 | // ################################################################################################ | ||
194 | |||
195 | // this is all we need to make a userdata lanes-clonable. no dependency on Lanes code. | ||
196 | static int clonable_lanesclone( lua_State* L) | ||
197 | { | ||
198 | switch( lua_gettop( L)) | ||
199 | { | ||
200 | case 3: | ||
201 | { | ||
202 | struct s_MyClonableUserdata* self = lua_touserdata( L, 1); | ||
203 | struct s_MyClonableUserdata* from = lua_touserdata( L, 2); | ||
204 | size_t len = lua_tointeger( L, 3); | ||
205 | assert( len == sizeof(struct s_MyClonableUserdata)); | ||
206 | *self = *from; | ||
207 | } | ||
208 | return 0; | ||
209 | |||
210 | default: | ||
211 | (void) luaL_error( L, "Lanes called clonable_lanesclone with unexpected parameters"); | ||
212 | } | ||
213 | return 0; | ||
214 | } | ||
215 | |||
216 | // ################################################################################################ | ||
217 | |||
218 | static luaL_Reg const clonable_mt[] = | ||
219 | { | ||
220 | { "__tostring", clonable_tostring}, | ||
221 | { "__gc", clonable_gc}, | ||
222 | { "__lanesclone", clonable_lanesclone}, | ||
223 | { "set", clonable_set}, | ||
224 | { "setuv", clonable_setuv}, | ||
225 | { "getuv", clonable_getuv}, | ||
226 | { NULL, NULL } | ||
227 | }; | ||
228 | |||
229 | // ################################################################################################ | ||
230 | |||
231 | int luaD_new_clonable( lua_State* L) | ||
232 | { | ||
233 | int nuv = (int) luaL_optinteger( L, 1, 1); | ||
234 | lua_newuserdatauv( L, sizeof( struct s_MyClonableUserdata), nuv); | ||
235 | luaL_setmetatable( L, "clonable"); | ||
236 | return 1; | ||
237 | } | ||
238 | |||
239 | // ################################################################################################ | ||
240 | // ################################################################################################ | ||
241 | |||
242 | static luaL_Reg const deep_module[] = | ||
243 | { | ||
244 | { "new_deep", luaD_new_deep}, | ||
245 | { "new_clonable", luaD_new_clonable}, | ||
246 | { NULL, NULL} | ||
247 | }; | ||
248 | |||
249 | // ################################################################################################ | ||
250 | |||
251 | extern int __declspec(dllexport) luaopen_deep_test(lua_State* L) | ||
252 | { | ||
253 | luaL_newlib( L, deep_module); // M | ||
254 | |||
255 | // preregister the metatables for the types we can instantiate so that Lanes can know about them | ||
256 | if( luaL_newmetatable( L, "clonable")) // M mt | ||
257 | { | ||
258 | luaL_setfuncs( L, clonable_mt, 0); | ||
259 | lua_pushvalue(L, -1); // M mt mt | ||
260 | lua_setfield(L, -2, "__index"); // M mt | ||
261 | } | ||
262 | lua_setfield(L, -2, "__clonableMT"); // M | ||
263 | |||
264 | if( luaL_newmetatable( L, "deep")) // mt | ||
265 | { | ||
266 | luaL_setfuncs( L, deep_mt, 0); | ||
267 | lua_pushvalue(L, -1); // mt mt | ||
268 | lua_setfield(L, -2, "__index"); // mt | ||
269 | } | ||
270 | lua_setfield(L, -2, "__deepMT"); // M | ||
271 | |||
272 | return 1; | ||
273 | } | ||
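A hypothetical Lua-side usage sketch (not part of this diff) for the clonable userdata shown above; it assumes the module is loadable as deep_test and that the lane lists it in required so the clone's metatable is known on the receiving side:

    local lanes = require "lanes".configure()
    local dt = require "deep_test"
    local c = dt.new_clonable()
    c:set(42)
    local gen = lanes.gen("*", { required = { "deep_test" } }, function(obj)
        return tostring(obj)    -- obj is a per-lane copy made via __lanesclone
    end)
    local h = gen(c)
    print(h[1])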
diff --git a/deep_test/deep_test.cpp b/deep_test/deep_test.cpp
new file mode 100644
index 0000000..3467939
--- /dev/null
+++ b/deep_test/deep_test.cpp
@@ -0,0 +1,259 @@ | |||
1 | #include "lanes/src/deep.h" | ||
2 | #include "lanes/src/compat.h" | ||
3 | |||
4 | #include <malloc.h> | ||
5 | #include <memory.h> | ||
6 | #include <assert.h> | ||
7 | |||
8 | // ################################################################################################ | ||
9 | |||
10 | // a lanes-deep userdata. needs DeepPrelude and luaG_newdeepuserdata from Lanes code. | ||
11 | struct MyDeepUserdata : public DeepPrelude // Deep userdata MUST start with a DeepPrelude | ||
12 | { | ||
13 | lua_Integer val{ 0 }; | ||
14 | }; | ||
15 | |||
16 | // ################################################################################################ | ||
17 | |||
18 | [[nodiscard]] static void* deep_test_id(lua_State* L, DeepOp op_) | ||
19 | { | ||
20 | switch( op_) | ||
21 | { | ||
22 | case DeepOp::New: | ||
23 | { | ||
24 | MyDeepUserdata* deep_test = new MyDeepUserdata; | ||
25 | return deep_test; | ||
26 | } | ||
27 | |||
28 | case DeepOp::Delete: | ||
29 | { | ||
30 | MyDeepUserdata* deep_test = static_cast<MyDeepUserdata*>(lua_touserdata( L, 1)); | ||
31 | delete deep_test; | ||
32 | return nullptr; | ||
33 | } | ||
34 | |||
35 | case DeepOp::Metatable: | ||
36 | { | ||
37 | luaL_getmetatable( L, "deep"); // mt | ||
38 | return nullptr; | ||
39 | } | ||
40 | |||
41 | case DeepOp::Module: | ||
42 | return (void*)"deep_test"; | ||
43 | |||
44 | default: | ||
45 | { | ||
46 | return nullptr; | ||
47 | } | ||
48 | } | ||
49 | } | ||
50 | |||
51 | // ################################################################################################ | ||
52 | |||
53 | [[nodiscard]] static int deep_set(lua_State* L) | ||
54 | { | ||
55 | MyDeepUserdata* self = static_cast<MyDeepUserdata*>(luaG_todeep(L, deep_test_id, 1)); | ||
56 | lua_Integer i = lua_tointeger( L, 2); | ||
57 | self->val = i; | ||
58 | return 0; | ||
59 | } | ||
60 | |||
61 | // ################################################################################################ | ||
62 | |||
63 | // won't actually do anything as deep userdata don't have uservalue slots | ||
64 | [[nodiscard]] static int deep_setuv(lua_State* L) | ||
65 | { | ||
66 | MyDeepUserdata* self = static_cast<MyDeepUserdata*>(luaG_todeep(L, deep_test_id, 1)); | ||
67 | int uv = (int) luaL_optinteger(L, 2, 1); | ||
68 | lua_settop( L, 3); | ||
69 | lua_pushboolean( L, lua_setiuservalue( L, 1, uv) != 0); | ||
70 | return 1; | ||
71 | } | ||
72 | |||
73 | // ################################################################################################ | ||
74 | |||
75 | // won't actually do anything as deep userdata don't have uservalue slots | ||
76 | [[nodiscard]] static int deep_getuv(lua_State* L) | ||
77 | { | ||
78 | MyDeepUserdata* self = static_cast<MyDeepUserdata*>(luaG_todeep(L, deep_test_id, 1)); | ||
79 | int uv = (int) luaL_optinteger(L, 2, 1); | ||
80 | lua_getiuservalue( L, 1, uv); | ||
81 | return 1; | ||
82 | } | ||
83 | |||
84 | // ################################################################################################ | ||
85 | |||
86 | [[nodiscard]] static int deep_tostring(lua_State* L) | ||
87 | { | ||
88 | MyDeepUserdata* self = static_cast<MyDeepUserdata*>(luaG_todeep(L, deep_test_id, 1)); | ||
89 | lua_pushfstring(L, "%p:deep(%d)", lua_topointer(L, 1), self->val); | ||
90 | return 1; | ||
91 | } | ||
92 | |||
93 | // ################################################################################################ | ||
94 | |||
95 | [[nodiscard]] static int deep_gc(lua_State* L) | ||
96 | { | ||
97 | MyDeepUserdata* self = static_cast<MyDeepUserdata*>(luaG_todeep(L, deep_test_id, 1)); | ||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | // ################################################################################################ | ||
102 | |||
103 | static luaL_Reg const deep_mt[] = | ||
104 | { | ||
105 | { "__tostring", deep_tostring}, | ||
106 | { "__gc", deep_gc}, | ||
107 | { "set", deep_set}, | ||
108 | { "setuv", deep_setuv}, | ||
109 | { "getuv", deep_getuv}, | ||
110 | { nullptr, nullptr } | ||
111 | }; | ||
112 | |||
113 | // ################################################################################################ | ||
114 | |||
115 | int luaD_new_deep( lua_State* L) | ||
116 | { | ||
117 | int const nuv{ static_cast<int>(luaL_optinteger(L, 1, 0)) }; | ||
118 | // no additional parameter to luaG_newdeepuserdata! | ||
119 | lua_settop(L, 0); | ||
120 | return luaG_newdeepuserdata(Dest{ L }, deep_test_id, nuv); | ||
121 | } | ||
122 | |||
123 | // ################################################################################################ | ||
124 | // ################################################################################################ | ||
125 | |||
126 | struct MyClonableUserdata | ||
127 | { | ||
128 | lua_Integer val; | ||
129 | }; | ||
130 | |||
131 | // ################################################################################################ | ||
132 | |||
133 | [[nodiscard]] static int clonable_set(lua_State* L) | ||
134 | { | ||
135 | MyClonableUserdata* self = static_cast<MyClonableUserdata*>(lua_touserdata(L, 1)); | ||
136 | lua_Integer i = lua_tointeger(L, 2); | ||
137 | self->val = i; | ||
138 | return 0; | ||
139 | } | ||
140 | |||
141 | // ################################################################################################ | ||
142 | |||
143 | [[nodiscard]] static int clonable_setuv(lua_State* L) | ||
144 | { | ||
145 | MyClonableUserdata* self = static_cast<MyClonableUserdata*>(lua_touserdata(L, 1)); | ||
146 | int uv = (int) luaL_optinteger(L, 2, 1); | ||
147 | lua_settop( L, 3); | ||
148 | lua_pushboolean( L, lua_setiuservalue( L, 1, uv) != 0); | ||
149 | return 1; | ||
150 | } | ||
151 | |||
152 | // ################################################################################################ | ||
153 | |||
154 | [[nodiscard]] static int clonable_getuv(lua_State* L) | ||
155 | { | ||
156 | MyClonableUserdata* self = static_cast<MyClonableUserdata*>(lua_touserdata(L, 1)); | ||
157 | int uv = (int) luaL_optinteger(L, 2, 1); | ||
158 | lua_getiuservalue( L, 1, uv); | ||
159 | return 1; | ||
160 | } | ||
161 | |||
162 | // ################################################################################################ | ||
163 | |||
164 | [[nodiscard]] static int clonable_tostring(lua_State* L) | ||
165 | { | ||
166 | MyClonableUserdata* self = static_cast<MyClonableUserdata*>(lua_touserdata(L, 1)); | ||
167 | lua_pushfstring(L, "%p:clonable(%d)", lua_topointer(L, 1), self->val); | ||
168 | return 1; | ||
169 | } | ||
170 | |||
171 | // ################################################################################################ | ||
172 | |||
173 | [[nodiscard]] static int clonable_gc(lua_State* L) | ||
174 | { | ||
175 | MyClonableUserdata* self = static_cast<MyClonableUserdata*>(lua_touserdata(L, 1)); | ||
176 | return 0; | ||
177 | } | ||
178 | |||
179 | // ################################################################################################ | ||
180 | |||
181 | // this is all we need to make a userdata lanes-clonable. no dependency on Lanes code. | ||
182 | [[nodiscard]] static int clonable_lanesclone(lua_State* L) | ||
183 | { | ||
184 | switch( lua_gettop( L)) | ||
185 | { | ||
186 | case 3: | ||
187 | { | ||
188 | MyClonableUserdata* self = static_cast<MyClonableUserdata*>(lua_touserdata(L, 1)); | ||
189 | MyClonableUserdata* from = static_cast<MyClonableUserdata*>(lua_touserdata(L, 2)); | ||
190 | size_t len = lua_tointeger( L, 3); | ||
191 | assert( len == sizeof(MyClonableUserdata)); | ||
192 | *self = *from; | ||
193 | } | ||
194 | return 0; | ||
195 | |||
196 | default: | ||
197 | (void) luaL_error( L, "Lanes called clonable_lanesclone with unexpected parameters"); | ||
198 | } | ||
199 | return 0; | ||
200 | } | ||
201 | |||
202 | // ################################################################################################ | ||
203 | |||
204 | static luaL_Reg const clonable_mt[] = | ||
205 | { | ||
206 | { "__tostring", clonable_tostring}, | ||
207 | { "__gc", clonable_gc}, | ||
208 | { "__lanesclone", clonable_lanesclone}, | ||
209 | { "set", clonable_set}, | ||
210 | { "setuv", clonable_setuv}, | ||
211 | { "getuv", clonable_getuv}, | ||
212 | { nullptr, nullptr } | ||
213 | }; | ||
214 | |||
215 | // ################################################################################################ | ||
216 | |||
217 | int luaD_new_clonable( lua_State* L) | ||
218 | { | ||
219 | int const nuv{ static_cast<int>(luaL_optinteger(L, 1, 1)) }; | ||
220 | lua_newuserdatauv( L, sizeof(MyClonableUserdata), nuv); | ||
221 | luaL_setmetatable( L, "clonable"); | ||
222 | return 1; | ||
223 | } | ||
224 | |||
225 | // ################################################################################################ | ||
226 | // ################################################################################################ | ||
227 | |||
228 | static luaL_Reg const deep_module[] = | ||
229 | { | ||
230 | { "new_deep", luaD_new_deep}, | ||
231 | { "new_clonable", luaD_new_clonable}, | ||
232 | { nullptr, nullptr } | ||
233 | }; | ||
234 | |||
235 | // ################################################################################################ | ||
236 | |||
237 | LANES_API int luaopen_deep_test(lua_State* L) | ||
238 | { | ||
239 | luaL_newlib( L, deep_module); // M | ||
240 | |||
241 | // preregister the metatables for the types we can instantiate so that Lanes can know about them | ||
242 | if( luaL_newmetatable( L, "clonable")) // M mt | ||
243 | { | ||
244 | luaL_setfuncs( L, clonable_mt, 0); | ||
245 | lua_pushvalue(L, -1); // M mt mt | ||
246 | lua_setfield(L, -2, "__index"); // M mt | ||
247 | } | ||
248 | lua_setfield(L, -2, "__clonableMT"); // M | ||
249 | |||
250 | if( luaL_newmetatable( L, "deep")) // mt | ||
251 | { | ||
252 | luaL_setfuncs( L, deep_mt, 0); | ||
253 | lua_pushvalue(L, -1); // mt mt | ||
254 | lua_setfield(L, -2, "__index"); // mt | ||
255 | } | ||
256 | lua_setfield(L, -2, "__deepMT"); // M | ||
257 | |||
258 | return 1; | ||
259 | } | ||
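By contrast, the deep userdata produced by new_deep above is a proxy to a single shared MyDeepUserdata object: every state that receives it points at the same instance, which is what makes it useful for exchanging state through lindas. A hedged Lua sketch, under the same assumptions as the clonable example earlier:

    -- hedged sketch: a deep userdata designates one object shared by all lanes
    local lanes = require "lanes".configure{ with_timers = false }
    local dt = lanes.require "deep_test"
    local d = dt.new_deep()
    d:set(42)
    local linda = lanes.linda()
    local h = lanes.gen("*", { required = { "deep_test" } }, function()
        local _, v = linda:receive("slot")   -- a proxy to the same object, not a copy
        return tostring(v)                   -- reflects the value set by the caller
    end)()
    linda:send("slot", d)
    print(h:join())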
diff --git a/deep_test/deep_test.vcxproj b/deep_test/deep_test.vcxproj index 6ff7685..5cd3c55 100644 --- a/deep_test/deep_test.vcxproj +++ b/deep_test/deep_test.vcxproj | |||
@@ -342,6 +342,7 @@ | |||
342 | <AdditionalIncludeDirectories>$(SolutionDir)..\Lua53\include;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> | 342 | <AdditionalIncludeDirectories>$(SolutionDir)..\Lua53\include;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> |
343 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> | 343 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> |
344 | <PreprocessorDefinitions>_WINDLL;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions> | 344 | <PreprocessorDefinitions>_WINDLL;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions> |
345 | <LanguageStandard>stdcpp20</LanguageStandard> | ||
345 | </ClCompile> | 346 | </ClCompile> |
346 | <Link> | 347 | <Link> |
347 | <EnableCOMDATFolding>true</EnableCOMDATFolding> | 348 | <EnableCOMDATFolding>true</EnableCOMDATFolding> |
@@ -350,8 +351,8 @@ | |||
350 | <AdditionalLibraryDirectories>$(SolutionDir)..\Lua53\bin\$(Platform)\Release</AdditionalLibraryDirectories> | 351 | <AdditionalLibraryDirectories>$(SolutionDir)..\Lua53\bin\$(Platform)\Release</AdditionalLibraryDirectories> |
351 | </Link> | 352 | </Link> |
352 | <PostBuildEvent> | 353 | <PostBuildEvent> |
353 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\framework\</Command> | 354 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\Lua53\bin\$(Platform)\Release\</Command> |
354 | <Message>Copy to framework</Message> | 355 | <Message>Copy to Lua 5.3</Message> |
355 | </PostBuildEvent> | 356 | </PostBuildEvent> |
356 | </ItemDefinitionGroup> | 357 | </ItemDefinitionGroup> |
357 | <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release 5.4|x64'"> | 358 | <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release 5.4|x64'"> |
@@ -365,6 +366,7 @@ | |||
365 | <AdditionalIncludeDirectories>$(SolutionDir)..\Lua54\include;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> | 366 | <AdditionalIncludeDirectories>$(SolutionDir)..\Lua54\include;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> |
366 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> | 367 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> |
367 | <PreprocessorDefinitions>_WINDLL;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions> | 368 | <PreprocessorDefinitions>_WINDLL;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions> |
369 | <LanguageStandard>stdcpp20</LanguageStandard> | ||
368 | </ClCompile> | 370 | </ClCompile> |
369 | <Link> | 371 | <Link> |
370 | <EnableCOMDATFolding>true</EnableCOMDATFolding> | 372 | <EnableCOMDATFolding>true</EnableCOMDATFolding> |
@@ -385,6 +387,7 @@ | |||
385 | <ConformanceMode>true</ConformanceMode> | 387 | <ConformanceMode>true</ConformanceMode> |
386 | <AdditionalIncludeDirectories>$(SolutionDir)..\Lua53\include;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> | 388 | <AdditionalIncludeDirectories>$(SolutionDir)..\Lua53\include;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> |
387 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> | 389 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> |
390 | <LanguageStandard>stdcpp20</LanguageStandard> | ||
388 | </ClCompile> | 391 | </ClCompile> |
389 | <PostBuildEvent> | 392 | <PostBuildEvent> |
390 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\Lua53\bin\$(Platform)\Debug\</Command> | 393 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\Lua53\bin\$(Platform)\Debug\</Command> |
@@ -403,6 +406,7 @@ | |||
403 | <ConformanceMode>true</ConformanceMode> | 406 | <ConformanceMode>true</ConformanceMode> |
404 | <AdditionalIncludeDirectories>$(SolutionDir)..\Lua51\include;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> | 407 | <AdditionalIncludeDirectories>$(SolutionDir)..\Lua51\include;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> |
405 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> | 408 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> |
409 | <LanguageStandard>stdcpp20</LanguageStandard> | ||
406 | </ClCompile> | 410 | </ClCompile> |
407 | <PostBuildEvent> | 411 | <PostBuildEvent> |
408 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\Lua51\bin\$(Platform)\Debug\</Command> | 412 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\Lua51\bin\$(Platform)\Debug\</Command> |
@@ -421,6 +425,7 @@ | |||
421 | <ConformanceMode>true</ConformanceMode> | 425 | <ConformanceMode>true</ConformanceMode> |
422 | <AdditionalIncludeDirectories>$(SolutionDir)..\Lua51\include;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> | 426 | <AdditionalIncludeDirectories>$(SolutionDir)..\Lua51\include;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> |
423 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> | 427 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> |
428 | <LanguageStandard>stdcpp20</LanguageStandard> | ||
424 | </ClCompile> | 429 | </ClCompile> |
425 | <PostBuildEvent> | 430 | <PostBuildEvent> |
426 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\Lua52\bin\$(Platform)\Debug\</Command> | 431 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\Lua52\bin\$(Platform)\Debug\</Command> |
@@ -440,6 +445,7 @@ | |||
440 | <AdditionalIncludeDirectories>$(SolutionDir)..\LuaJIT-2.1.0-beta3\src;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> | 445 | <AdditionalIncludeDirectories>$(SolutionDir)..\LuaJIT-2.1.0-beta3\src;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> |
441 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> | 446 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> |
442 | <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions> | 447 | <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions> |
448 | <LanguageStandard>stdcpp20</LanguageStandard> | ||
443 | </ClCompile> | 449 | </ClCompile> |
444 | <PostBuildEvent> | 450 | <PostBuildEvent> |
445 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\LuaJIT-2.1.0-beta3\bin\$(Platform)\</Command> | 451 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\LuaJIT-2.1.0-beta3\bin\$(Platform)\</Command> |
@@ -459,6 +465,7 @@ | |||
459 | <AdditionalIncludeDirectories>$(SolutionDir)..\LuaJIT-2.0.5\src;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> | 465 | <AdditionalIncludeDirectories>$(SolutionDir)..\LuaJIT-2.0.5\src;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> |
460 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> | 466 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> |
461 | <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions> | 467 | <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions> |
468 | <LanguageStandard>stdcpp20</LanguageStandard> | ||
462 | </ClCompile> | 469 | </ClCompile> |
463 | <PostBuildEvent> | 470 | <PostBuildEvent> |
464 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\LuaJIT-2.0.5\bin\$(Platform)\</Command> | 471 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\LuaJIT-2.0.5\bin\$(Platform)\</Command> |
@@ -477,6 +484,7 @@ | |||
477 | <ConformanceMode>true</ConformanceMode> | 484 | <ConformanceMode>true</ConformanceMode> |
478 | <AdditionalIncludeDirectories>$(SolutionDir)..\Lua54\include;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> | 485 | <AdditionalIncludeDirectories>$(SolutionDir)..\Lua54\include;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> |
479 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> | 486 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> |
487 | <LanguageStandard>stdcpp20</LanguageStandard> | ||
480 | </ClCompile> | 488 | </ClCompile> |
481 | <PostBuildEvent> | 489 | <PostBuildEvent> |
482 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\framework\</Command> | 490 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\framework\</Command> |
@@ -495,6 +503,7 @@ | |||
495 | <ConformanceMode>true</ConformanceMode> | 503 | <ConformanceMode>true</ConformanceMode> |
496 | <AdditionalIncludeDirectories>$(SolutionDir)..\MoonJIT\src;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> | 504 | <AdditionalIncludeDirectories>$(SolutionDir)..\MoonJIT\src;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> |
497 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> | 505 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> |
506 | <LanguageStandard>stdcpp20</LanguageStandard> | ||
498 | </ClCompile> | 507 | </ClCompile> |
499 | <PostBuildEvent> | 508 | <PostBuildEvent> |
500 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\MoonJIT\bin\$(Platform)\</Command> | 509 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\MoonJIT\bin\$(Platform)\</Command> |
@@ -514,10 +523,11 @@ | |||
514 | <AdditionalIncludeDirectories>$(SolutionDir)..\Lua53\include;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> | 523 | <AdditionalIncludeDirectories>$(SolutionDir)..\Lua53\include;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> |
515 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> | 524 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> |
516 | <PreprocessorDefinitions>_WINDLL;_DEBUG;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions> | 525 | <PreprocessorDefinitions>_WINDLL;_DEBUG;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions> |
526 | <LanguageStandard>stdcpp20</LanguageStandard> | ||
517 | </ClCompile> | 527 | </ClCompile> |
518 | <PostBuildEvent> | 528 | <PostBuildEvent> |
519 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\Lua53\bin\$(Platform)\Debug\</Command> | 529 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\Lua53\bin\$(Platform)\Debug\</Command> |
520 | <Message>Lua 5.3</Message> | 530 | <Message>Copy to Lua 5.3</Message> |
521 | </PostBuildEvent> | 531 | </PostBuildEvent> |
522 | <Link> | 532 | <Link> |
523 | <AdditionalDependencies>lua53.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies> | 533 | <AdditionalDependencies>lua53.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies> |
@@ -533,14 +543,15 @@ | |||
533 | <AdditionalIncludeDirectories>$(SolutionDir)..\Lua51\include;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> | 543 | <AdditionalIncludeDirectories>$(SolutionDir)..\Lua51\include;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> |
534 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> | 544 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> |
535 | <PreprocessorDefinitions>_WINDLL;_DEBUG;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions> | 545 | <PreprocessorDefinitions>_WINDLL;_DEBUG;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions> |
546 | <LanguageStandard>stdcpp20</LanguageStandard> | ||
536 | </ClCompile> | 547 | </ClCompile> |
537 | <PostBuildEvent> | 548 | <PostBuildEvent> |
538 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\Lua51\bin\$(Platform)\Debug\</Command> | 549 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\Lua51\bin\$(Platform)\Debug\</Command> |
539 | <Message>Copy to Lua 5.2</Message> | 550 | <Message>Copy to Lua 5.2</Message> |
540 | </PostBuildEvent> | 551 | </PostBuildEvent> |
541 | <Link> | 552 | <Link> |
542 | <AdditionalDependencies>lua52.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies> | 553 | <AdditionalDependencies>lua51.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies> |
543 | <AdditionalLibraryDirectories>$(SolutionDir)..\Lua52\bin\$(Platform)\Debug</AdditionalLibraryDirectories> | 554 | <AdditionalLibraryDirectories>$(SolutionDir)..\Lua51\bin\$(Platform)\Debug</AdditionalLibraryDirectories> |
544 | </Link> | 555 | </Link> |
545 | </ItemDefinitionGroup> | 556 | </ItemDefinitionGroup> |
546 | <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug 5.2|x64'"> | 557 | <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug 5.2|x64'"> |
@@ -549,17 +560,18 @@ | |||
549 | <Optimization>Disabled</Optimization> | 560 | <Optimization>Disabled</Optimization> |
550 | <SDLCheck>true</SDLCheck> | 561 | <SDLCheck>true</SDLCheck> |
551 | <ConformanceMode>true</ConformanceMode> | 562 | <ConformanceMode>true</ConformanceMode> |
552 | <AdditionalIncludeDirectories>$(SolutionDir)..\Lua51\include;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> | 563 | <AdditionalIncludeDirectories>$(SolutionDir)..\Lua52\include;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> |
553 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> | 564 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> |
554 | <PreprocessorDefinitions>_WINDLL;_DEBUG;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions> | 565 | <PreprocessorDefinitions>_WINDLL;_DEBUG;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions> |
566 | <LanguageStandard>stdcpp20</LanguageStandard> | ||
555 | </ClCompile> | 567 | </ClCompile> |
556 | <PostBuildEvent> | 568 | <PostBuildEvent> |
557 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\Lua52\bin\$(Platform)\Debug\</Command> | 569 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\Lua52\bin\$(Platform)\Debug\</Command> |
558 | <Message>Copy to Lua 5.2</Message> | 570 | <Message>Copy to Lua 5.2</Message> |
559 | </PostBuildEvent> | 571 | </PostBuildEvent> |
560 | <Link> | 572 | <Link> |
561 | <AdditionalDependencies>lua51.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies> | 573 | <AdditionalDependencies>lua52.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies> |
562 | <AdditionalLibraryDirectories>$(SolutionDir)..\Lua51\bin\$(Platform)\Debug</AdditionalLibraryDirectories> | 574 | <AdditionalLibraryDirectories>$(SolutionDir)..\Lua52\bin\$(Platform)\Debug</AdditionalLibraryDirectories> |
563 | </Link> | 575 | </Link> |
564 | </ItemDefinitionGroup> | 576 | </ItemDefinitionGroup> |
565 | <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug LuaJIT 2.1.0-beta3|x64'"> | 577 | <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug LuaJIT 2.1.0-beta3|x64'"> |
@@ -571,6 +583,7 @@ | |||
571 | <AdditionalIncludeDirectories>$(SolutionDir)..\LuaJIT-2.1.0-beta3\src;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> | 583 | <AdditionalIncludeDirectories>$(SolutionDir)..\LuaJIT-2.1.0-beta3\src;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> |
572 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> | 584 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> |
573 | <PreprocessorDefinitions>_WINDLL;_DEBUG;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions> | 585 | <PreprocessorDefinitions>_WINDLL;_DEBUG;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions> |
586 | <LanguageStandard>stdcpp20</LanguageStandard> | ||
574 | </ClCompile> | 587 | </ClCompile> |
575 | <PostBuildEvent> | 588 | <PostBuildEvent> |
576 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\LuaJIT-2.1.0-beta3\bin\$(Platform)\</Command> | 589 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\LuaJIT-2.1.0-beta3\bin\$(Platform)\</Command> |
@@ -590,6 +603,7 @@ | |||
590 | <AdditionalIncludeDirectories>$(SolutionDir)..\LuaJIT-2.0.5\src;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> | 603 | <AdditionalIncludeDirectories>$(SolutionDir)..\LuaJIT-2.0.5\src;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> |
591 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> | 604 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> |
592 | <PreprocessorDefinitions>_WINDLL;_DEBUG;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions> | 605 | <PreprocessorDefinitions>_WINDLL;_DEBUG;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions> |
606 | <LanguageStandard>stdcpp20</LanguageStandard> | ||
593 | </ClCompile> | 607 | </ClCompile> |
594 | <PostBuildEvent> | 608 | <PostBuildEvent> |
595 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\LuaJIT-2.0.5\bin\$(Platform)\</Command> | 609 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\LuaJIT-2.0.5\bin\$(Platform)\</Command> |
@@ -609,6 +623,7 @@ | |||
609 | <AdditionalIncludeDirectories>$(SolutionDir)..\Lua54\include;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> | 623 | <AdditionalIncludeDirectories>$(SolutionDir)..\Lua54\include;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> |
610 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> | 624 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> |
611 | <PreprocessorDefinitions>_WINDLL;_DEBUG;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions> | 625 | <PreprocessorDefinitions>_WINDLL;_DEBUG;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions> |
626 | <LanguageStandard>stdcpp20</LanguageStandard> | ||
612 | </ClCompile> | 627 | </ClCompile> |
613 | <PostBuildEvent> | 628 | <PostBuildEvent> |
614 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\framework\</Command> | 629 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\framework\</Command> |
@@ -628,6 +643,7 @@ | |||
628 | <AdditionalIncludeDirectories>$(SolutionDir)..\MoonJIT\src;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> | 643 | <AdditionalIncludeDirectories>$(SolutionDir)..\MoonJIT\src;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> |
629 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> | 644 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> |
630 | <PreprocessorDefinitions>_WINDLL;_DEBUG;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions> | 645 | <PreprocessorDefinitions>_WINDLL;_DEBUG;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions> |
646 | <LanguageStandard>stdcpp20</LanguageStandard> | ||
631 | </ClCompile> | 647 | </ClCompile> |
632 | <PostBuildEvent> | 648 | <PostBuildEvent> |
633 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\MoonJIT\bin\$(Platform)\</Command> | 649 | <Command>xcopy /R /F /Y /I "$(TargetPath)" $(SolutionDir)..\MoonJIT\bin\$(Platform)\</Command> |
@@ -648,6 +664,7 @@ | |||
648 | <ConformanceMode>true</ConformanceMode> | 664 | <ConformanceMode>true</ConformanceMode> |
649 | <AdditionalIncludeDirectories>$(SolutionDir)..\Lua53\include;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> | 665 | <AdditionalIncludeDirectories>$(SolutionDir)..\Lua53\include;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> |
650 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> | 666 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> |
667 | <LanguageStandard>stdcpp20</LanguageStandard> | ||
651 | </ClCompile> | 668 | </ClCompile> |
652 | <Link> | 669 | <Link> |
653 | <EnableCOMDATFolding>true</EnableCOMDATFolding> | 670 | <EnableCOMDATFolding>true</EnableCOMDATFolding> |
@@ -666,6 +683,7 @@ | |||
666 | <ConformanceMode>true</ConformanceMode> | 683 | <ConformanceMode>true</ConformanceMode> |
667 | <AdditionalIncludeDirectories>$(SolutionDir)..\Lua54\include;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> | 684 | <AdditionalIncludeDirectories>$(SolutionDir)..\Lua54\include;$(SolutionDir)Lanes;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> |
668 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> | 685 | <ProgramDataBaseFileName>$(IntDir)$(TargetName).pdb</ProgramDataBaseFileName> |
686 | <LanguageStandard>stdcpp20</LanguageStandard> | ||
669 | </ClCompile> | 687 | </ClCompile> |
670 | <Link> | 688 | <Link> |
671 | <EnableCOMDATFolding>true</EnableCOMDATFolding> | 689 | <EnableCOMDATFolding>true</EnableCOMDATFolding> |
@@ -675,11 +693,11 @@ | |||
675 | </Link> | 693 | </Link> |
676 | </ItemDefinitionGroup> | 694 | </ItemDefinitionGroup> |
677 | <ItemGroup> | 695 | <ItemGroup> |
678 | <ClCompile Include="..\src\compat.c" /> | 696 | <ClCompile Include="..\src\compat.cpp" /> |
679 | <ClCompile Include="..\src\deep.c" /> | 697 | <ClCompile Include="..\src\deep.cpp" /> |
680 | <ClCompile Include="..\src\tools.c" /> | 698 | <ClCompile Include="..\src\tools.cpp" /> |
681 | <ClCompile Include="..\src\universe.c" /> | 699 | <ClCompile Include="..\src\universe.cpp" /> |
682 | <ClCompile Include="deep_test.c" /> | 700 | <ClCompile Include="deep_test.cpp" /> |
683 | </ItemGroup> | 701 | </ItemGroup> |
684 | <ItemGroup> | 702 | <ItemGroup> |
685 | <ClInclude Include="..\src\compat.h" /> | 703 | <ClInclude Include="..\src\compat.h" /> |
diff --git a/deep_test/deep_test.vcxproj.filters b/deep_test/deep_test.vcxproj.filters index be47da9..814301f 100644 --- a/deep_test/deep_test.vcxproj.filters +++ b/deep_test/deep_test.vcxproj.filters | |||
@@ -15,21 +15,21 @@ | |||
15 | </Filter> | 15 | </Filter> |
16 | </ItemGroup> | 16 | </ItemGroup> |
17 | <ItemGroup> | 17 | <ItemGroup> |
18 | <ClCompile Include="deep_test.c"> | 18 | <ClCompile Include="..\src\compat.cpp"> |
19 | <Filter>Source Files</Filter> | ||
20 | </ClCompile> | ||
21 | <ClCompile Include="..\src\deep.c"> | ||
22 | <Filter>Lanes</Filter> | 19 | <Filter>Lanes</Filter> |
23 | </ClCompile> | 20 | </ClCompile> |
24 | <ClCompile Include="..\src\universe.c"> | 21 | <ClCompile Include="..\src\deep.cpp"> |
25 | <Filter>Lanes</Filter> | 22 | <Filter>Lanes</Filter> |
26 | </ClCompile> | 23 | </ClCompile> |
27 | <ClCompile Include="..\src\compat.c"> | 24 | <ClCompile Include="..\src\tools.cpp"> |
28 | <Filter>Lanes</Filter> | 25 | <Filter>Lanes</Filter> |
29 | </ClCompile> | 26 | </ClCompile> |
30 | <ClCompile Include="..\src\tools.c"> | 27 | <ClCompile Include="..\src\universe.cpp"> |
31 | <Filter>Lanes</Filter> | 28 | <Filter>Lanes</Filter> |
32 | </ClCompile> | 29 | </ClCompile> |
30 | <ClCompile Include="deep_test.cpp"> | ||
31 | <Filter>Source Files</Filter> | ||
32 | </ClCompile> | ||
33 | </ItemGroup> | 33 | </ItemGroup> |
34 | <ItemGroup> | 34 | <ItemGroup> |
35 | <ClInclude Include="..\src\deep.h"> | 35 | <ClInclude Include="..\src\deep.h"> |
diff --git a/deep_test/deep_test.vcxproj.user b/deep_test/deep_test.vcxproj.user index 96d9b1d..24e3d31 100644 --- a/deep_test/deep_test.vcxproj.user +++ b/deep_test/deep_test.vcxproj.user | |||
@@ -33,8 +33,9 @@ | |||
33 | <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug 5.4|x64'"> | 33 | <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug 5.4|x64'"> |
34 | <LocalDebuggerCommand>$(SolutionDir)..\framework\lua54.exe</LocalDebuggerCommand> | 34 | <LocalDebuggerCommand>$(SolutionDir)..\framework\lua54.exe</LocalDebuggerCommand> |
35 | <DebuggerFlavor>WindowsLocalDebugger</DebuggerFlavor> | 35 | <DebuggerFlavor>WindowsLocalDebugger</DebuggerFlavor> |
36 | <LocalDebuggerCommandArguments>-i deeptest.lua</LocalDebuggerCommandArguments> | 36 | <LocalDebuggerCommandArguments>deeptest.lua</LocalDebuggerCommandArguments> |
37 | <LocalDebuggerWorkingDirectory>$(SolutionDir)Lanes\lanes\deep_test\</LocalDebuggerWorkingDirectory> | 37 | <LocalDebuggerWorkingDirectory>$(SolutionDir)Lanes\lanes\deep_test\</LocalDebuggerWorkingDirectory> |
38 | <RemoteDebuggerCommandArguments>deeptest.lua</RemoteDebuggerCommandArguments> | ||
38 | </PropertyGroup> | 39 | </PropertyGroup> |
39 | <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug MoonJIT|x64'"> | 40 | <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug MoonJIT|x64'"> |
40 | <LocalDebuggerCommand>$(SolutionDir)..\MoonJIT\bin\$(Platform)\moonjit.exe</LocalDebuggerCommand> | 41 | <LocalDebuggerCommand>$(SolutionDir)..\MoonJIT\bin\$(Platform)\moonjit.exe</LocalDebuggerCommand> |
diff --git a/docs/index.html b/docs/index.html index da94898..3e535a6 100644 --- a/docs/index.html +++ b/docs/index.html | |||
@@ -64,13 +64,13 @@ | |||
64 | <font size="-1"> | 64 | <font size="-1"> |
65 | <p> | 65 | <p> |
66 | <br/> | 66 | <br/> |
67 | <i>Copyright © 2007-23 Asko Kauppi, Benoit Germain. All rights reserved.</i> | 67 | <i>Copyright © 2007-24 Asko Kauppi, Benoit Germain. All rights reserved.</i> |
68 | <br/> | 68 | <br/> |
69 | Lua Lanes is published under the same <a href="http://en.wikipedia.org/wiki/MIT_License">MIT license</a> as Lua 5.1, 5.2, 5.3 and 5.4. | 69 | Lua Lanes is published under the same <a href="http://en.wikipedia.org/wiki/MIT_License">MIT license</a> as Lua 5.1, 5.2, 5.3 and 5.4. |
70 | </p> | 70 | </p> |
71 | 71 | ||
72 | <p> | 72 | <p> |
73 | This document was revised on 11-Apr-24, and applies to version <tt>3.17.0</tt>. | 73 | This document was revised on 9-Apr-24, and applies to version <tt>4.0.0</tt>. |
74 | </p> | 74 | </p> |
75 | </font> | 75 | </font> |
76 | </center> | 76 | </center> |
@@ -88,7 +88,7 @@ | |||
88 | Lanes is included into your software by the regular <tt>require "lanes"</tt> method. No C side programming is needed; all APIs are Lua side, and most existing extension modules should work seamlessly together with the multiple lanes. | 88 | Lanes is included into your software by the regular <tt>require "lanes"</tt> method. No C side programming is needed; all APIs are Lua side, and most existing extension modules should work seamlessly together with the multiple lanes. |
89 | </p> | 89 | </p> |
90 | <p> | 90 | <p> |
91 | Starting with version 3.1.6, Lanes should build and run identically with either Lua 5.1 or Lua 5.2. Version 3.10.0 supports Lua 5.3. | 91 | Lanes should build and run identically with any version from Lua 5.1 to Lua 5.4, as well as LuaJIT. |
92 | </p> | 92 | </p> |
93 | <p> | 93 | <p> |
94 | See <A HREF="comparison.html">comparison</A> of Lua Lanes with other Lua multithreading solutions. | 94 | See <A HREF="comparison.html">comparison</A> of Lua Lanes with other Lua multithreading solutions. |
@@ -152,7 +152,7 @@ | |||
152 | <h2 id="installing">Building and Installing</h2> | 152 | <h2 id="installing">Building and Installing</h2> |
153 | 153 | ||
154 | <p> | 154 | <p> |
155 | Lua Lanes is built simply by <tt>make</tt> on the supported platforms (<tt>make-vc</tt> for Visual C++). See <tt>README</tt> for system specific details and limitations. | 155 | Lua Lanes is implemented in C++20. It is built simply by <tt>make</tt> on the supported platforms (<tt>make-vc</tt> for Visual C++). See <tt>README</tt> for system specific details and limitations. |
156 | </p> | 156 | </p> |
157 | 157 | ||
158 | <p> | 158 | <p> |
@@ -178,14 +178,14 @@ | |||
178 | <table border=1 bgcolor="#E0E0FF" cellpadding="10" style="width:50%"> | 178 | <table border=1 bgcolor="#E0E0FF" cellpadding="10" style="width:50%"> |
179 | <tr> | 179 | <tr> |
180 | <td> | 180 | <td> |
181 | <pre> extern void LANES_API luaopen_lanes_embedded( lua_State* L, lua_CFunction _luaopen_lanes);</pre> | 181 | <pre> extern void LANES_API luaopen_lanes_embedded(lua_State* L, lua_CFunction _luaopen_lanes);</pre> |
182 | </td> | 182 | </td> |
183 | </tr> | 183 | </tr> |
184 | </table> | 184 | </table> |
185 | <p> | 185 | <p> |
186 | <tt>luaopen_lanes_embedded</tt> leaves the module table on the stack. <tt>lanes.configure()</tt> must still be called in order to use Lanes. | 186 | <tt>luaopen_lanes_embedded</tt> leaves the module table on the stack. <tt>lanes.configure()</tt> must still be called in order to use Lanes. |
187 | <br/> | 187 | <br/> |
188 | If <tt>_luaopen_lanes</tt> is <tt>NULL</tt>, a default loader will simply attempt the equivalent of <tt>luaL_dofile( L, "lanes.lua")</tt>. | 188 | If <tt>_luaopen_lanes</tt> is <tt>NULL</tt>, a default loader will simply attempt the equivalent of <tt>luaL_dofile(L, "lanes.lua")</tt>. |
189 | </p> | 189 | </p> |
190 | 190 | ||
191 | <p> | 191 | <p> |
@@ -198,30 +198,30 @@ | |||
198 | <td> | 198 | <td> |
199 | <pre> #include "lanes.h"</pre> | 199 | <pre> #include "lanes.h"</pre> |
200 | <br/> | 200 | <br/> |
201 | <pre> int load_lanes_lua( lua_State* L)</pre> | 201 | <pre> int load_lanes_lua(lua_State* L)</pre> |
202 | <pre> {</pre> | 202 | <pre> {</pre> |
203 | <pre> // retrieve lanes.lua from wherever it is stored and return the result of its execution</pre> | 203 | <pre> // retrieve lanes.lua from wherever it is stored and return the result of its execution</pre> |
204 | <pre> // trivial example 1:</pre> | 204 | <pre> // trivial example 1:</pre> |
205 | <pre> luaL_dofile( L, "lanes.lua");</pre> | 205 | <pre> luaL_dofile(L, "lanes.lua");</pre> |
206 | <br/> | 206 | <br/> |
207 | <pre> // trivial example 2:</pre> | 207 | <pre> // trivial example 2:</pre> |
208 | <pre> luaL_dostring( L, bin2c_lanes_lua);</pre> | 208 | <pre> luaL_dostring(L, bin2c_lanes_lua);</pre> |
209 | <pre> }</pre> | 209 | <pre> }</pre> |
210 | <br/> | 210 | <br/> |
211 | <pre> void embed_lanes( lua_State* L)</pre> | 211 | <pre> void embed_lanes(lua_State* L)</pre> |
212 | <pre> {</pre> | 212 | <pre> {</pre> |
213 | <pre> // we need base libraries for Lanes to work</pre> | 213 | <pre> // we need base libraries for Lanes to work</pre> |
214 | <pre> luaL_openlibs( L);</pre> | 214 | <pre> luaL_openlibs(L);</pre> |
215 | <pre> ...</pre> | 215 | <pre> ...</pre> |
216 | <pre> // will attempt luaL_dofile( L, "lanes.lua");</pre> | 216 | <pre> // will attempt luaL_dofile(L, "lanes.lua");</pre> |
217 | <pre> luaopen_lanes_embedded( L, NULL);</pre> | 217 | <pre> luaopen_lanes_embedded(L, nullptr);</pre> |
218 | <pre> lua_pop( L, 1);</pre> | 218 | <pre> lua_pop(L, 1);</pre> |
219 | <pre> // another example with a custom loader</pre> | 219 | <pre> // another example with a custom loader</pre> |
220 | <pre> luaopen_lanes_embedded( L, load_lanes_lua);</pre> | 220 | <pre> luaopen_lanes_embedded(L, load_lanes_lua);</pre> |
221 | <pre> lua_pop( L, 1);</pre> | 221 | <pre> lua_pop(L, 1);</pre> |
222 | <br/> | 222 | <br/> |
223 | <pre> // a little test to make sure things work as expected</pre> | 223 | <pre> // a little test to make sure things work as expected</pre> |
224 | <pre> luaL_dostring( L, "local lanes = require 'lanes'.configure{with_timers = false}; local l = lanes.linda()");</pre> | 224 | <pre> luaL_dostring(L, "local lanes = require 'lanes'.configure{with_timers = false}; local l = lanes.linda()");</pre> |
225 | <pre> }</pre> | 225 | <pre> }</pre> |
226 | </td> | 226 | </td> |
227 | </tr> | 227 | </tr> |
@@ -246,7 +246,7 @@ | |||
246 | </table> | 246 | </table> |
247 | 247 | ||
248 | <p> | 248 | <p> |
249 | Starting with version 3.0-beta, requiring the module follows Lua 5.2 rules: the module is not available under the global name "lanes", but has to be accessed through <tt>require</tt>'s return value. | 249 | Requiring the module follows Lua 5.2+ rules: the module is not available under the global name "lanes", but has to be accessed through <tt>require</tt>'s return value. |
250 | </p> | 250 | </p> |
251 | <p> | 251 | <p> |
252 | After lanes is required, it is necessary to call <tt>lanes.configure()</tt>, which is the only function exposed by the module at this point. Calling <tt>configure()</tt> will perform one-time initializations and make the rest of the API available. | 252 | After lanes is required, it is necessary to call <tt>lanes.configure()</tt>, which is the only function exposed by the module at this point. Calling <tt>configure()</tt> will perform one-time initializations and make the rest of the API available. |
@@ -259,14 +259,14 @@ | |||
259 | It remains to be seen whether this is actually useful or not: If a module is already threadsafe, protecting its initialization isn't useful. And if it is not, any parallel operation may crash without Lanes being able to do anything about it. | 259 | It remains to be seen whether this is actually useful or not: If a module is already threadsafe, protecting its initialization isn't useful. And if it is not, any parallel operation may crash without Lanes being able to do anything about it. |
260 | </p> | 260 | </p> |
261 | <p> | 261 | <p> |
262 | <b>IMPORTANT NOTE:</b> Starting with version 3.7.0, only the first occurence of <tt>require "lanes"</tt> must be followed by a call to <tt>.configure()</tt>. From this point, a simple <tt>require "lanes"</tt> will do wherever you need to require lanes again. | 262 | <b>IMPORTANT NOTE:</b> Only the first occurrence of <tt>require "lanes"</tt> must be followed by a call to <tt>.configure()</tt>. From this point, a simple <tt>require "lanes"</tt> will do wherever you need to require lanes again. |
263 | </p> | 263 | </p> |
264 | 264 | ||
265 | <p> | 265 | <p> |
266 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"> | 266 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"> |
267 | <tr> | 267 | <tr> |
268 | <td> | 268 | <td> |
269 | <pre> lanes.configure( [opt_tbl])</pre> | 269 | <pre> lanes.configure([opt_tbl])</pre> |
270 | </td> | 270 | </td> |
271 | </tr> | 271 | </tr> |
272 | </table> | 272 | </table> |
@@ -296,9 +296,8 @@ | |||
296 | </td> | 296 | </td> |
297 | <td>integer</td> | 297 | <td>integer</td> |
298 | <td> | 298 | <td> |
299 | (Since v3.17.0)<br/> | 299 | If <0, GC runs automatically. This is the default.<br/> |
300 | If <0, GC runs automatically. This is the default.<br /> | 300 | If 0, GC runs after *every* keeper operation.<br/> |
301 | If 0, GC runs after *every* keeper operation.<br /> | ||
302 | If >0, Keepers run GC manually with <tt>lua_gc(LUA_GCCOLLECT)</tt> whenever memory usage reported by <tt>lua_gc(LUA_GCCOUNT)</tt> reaches this threshold. Check is made after every keeper operation (see <a href="#lindas">below</a>). If memory usage remains above threshold after the GC cycle, an error is raised. | 301 | If >0, Keepers run GC manually with <tt>lua_gc(LUA_GCCOLLECT)</tt> whenever memory usage reported by <tt>lua_gc(LUA_GCCOUNT)</tt> reaches this threshold. Check is made after every keeper operation (see <a href="#lindas">below</a>). If memory usage remains above threshold after the GC cycle, an error is raised. |
303 | </td> | 302 | </td> |
304 | </tr> | 303 | </tr> |
@@ -324,26 +323,12 @@ | |||
324 | <tt>nil</tt>/<tt>false</tt>/<tt>true</tt> | 323 | <tt>nil</tt>/<tt>false</tt>/<tt>true</tt> |
325 | </td> | 324 | </td> |
326 | <td> | 325 | <td> |
327 | (Since v3.6.3) If equal to <tt>true</tt>, Lanes will collect more information when transfering stuff across Lua states to help identify errors (with a cost). | 326 | If equal to <tt>true</tt>, Lanes will collect more information when transferring stuff across Lua states to help identify errors (with a cost). |
328 | Default is <tt>false</tt>. | 327 | Default is <tt>false</tt>. |
329 | </td> | 328 | </td> |
330 | </tr> | 329 | </tr> |
331 | 330 | ||
332 | <tr valign=top> | 331 | <tr valign=top> |
333 | <td id="protect_allocator"> | ||
334 | <code>.protect_allocator</code> | ||
335 | </td> | ||
336 | <td> | ||
337 | <tt>nil</tt>/<tt>false</tt>/<tt>true</tt> | ||
338 | </td> | ||
339 | <td> | ||
340 | REPLACED BY <tt>allocator="protected"</tt> AS OF VERSION v3.13.0. | ||
341 | (Since v3.5.2) If equal to <tt>true</tt>, Lanes wraps all calls to the state's allocator function inside a mutex. Since v3.6.3, when left unset, Lanes attempts to autodetect this value for LuaJIT (the guess might be wrong if <tt>"ffi"</tt> isn't loaded though). | ||
342 | Default is <tt>true</tt> when Lanes detects it is run by LuaJIT, else <tt>nil</tt>. | ||
343 | </td> | ||
344 | </tr> | ||
345 | |||
346 | <tr valign=top> | ||
347 | <td id="allocator"> | 332 | <td id="allocator"> |
348 | <code>.allocator</code> | 333 | <code>.allocator</code> |
349 | </td> | 334 | </td> |
@@ -351,18 +336,17 @@ | |||
351 | <tt>nil</tt>/<tt>"protected"</tt>/function | 336 | <tt>nil</tt>/<tt>"protected"</tt>/function |
352 | </td> | 337 | </td> |
353 | <td> | 338 | <td> |
354 | (Since v3.13.0)<br /> | ||
355 | If <tt>nil</tt>, Lua states are created with <tt>lua_newstate()</tt> and reuse the allocator from the master state.<br /> | 339 | If <tt>nil</tt>, Lua states are created with <tt>lua_newstate()</tt> and reuse the allocator from the master state.<br /> |
356 | If <tt>"protected"</tt>, The default allocator obtained from <tt>lua_getallocf()</tt> in the master state is wrapped inside a critical section and used in all newly created states.<br /> | 340 | If <tt>"protected"</tt>, The default allocator obtained from <tt>lua_getallocf()</tt> in the master state is wrapped inside a critical section and used in all newly created states.<br /> |
357 | If a <tt>function</tt>, this function is called prior to creating the state. It should return a full userdata containing the following structure: | 341 | If a <tt>function</tt>, this function is called prior to creating the state. It should return a full userdata containing the following structure: |
358 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"> | 342 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"> |
359 | <tr> | 343 | <tr> |
360 | <td> | 344 | <td> |
361 | <pre> struct { lua_Alloc allocF; void* allocUD;}</pre> | 345 | <pre> struct { lua_Alloc allocF; void* allocUD; }</pre> |
362 | </td> | 346 | </td> |
363 | </tr> | 347 | </tr> |
364 | </table> | 348 | </table> |
365 | The contents will be used to create the state with <tt>lua_newstate( allocF, allocUD)</tt>. | 349 | The contents will be used to create the state with <tt>lua_newstate(allocF, allocUD)</tt>. |
366 | This option is mostly useful for embedders that want to provide different allocators to each lane, for example to have each one work in a different memory pool thus preventing the need for the allocator itself to be threadsafe. | 350 | This option is mostly useful for embedders that want to provide different allocators to each lane, for example to have each one work in a different memory pool thus preventing the need for the allocator itself to be threadsafe. |
367 | </td> | 351 | </td> |
368 | </tr> | 352 | </tr> |
@@ -375,7 +359,6 @@ | |||
375 | <tt>"libc"</tt>/<tt>"allocator"</tt> | 359 | <tt>"libc"</tt>/<tt>"allocator"</tt> |
376 | </td> | 360 | </td> |
377 | <td> | 361 | <td> |
378 | (Since v3.16.1)<br /> | ||
379 | Controls which allocator is used for Lanes internal allocations (for keeper, linda and lane management). | 362 | Controls which allocator is used for Lanes internal allocations (for keeper, linda and lane management). |
380 | If <tt>"libc"</tt>, Lanes uses <tt>realloc</tt> and <tt>free</tt>.<br /> | 363 | If <tt>"libc"</tt>, Lanes uses <tt>realloc</tt> and <tt>free</tt>.<br /> |
381 | If <tt>"allocator"</tt>, Lanes uses whatever was obtained from the <tt>"allocator"</tt> setting.<br /> | 364 | If <tt>"allocator"</tt>, Lanes uses whatever was obtained from the <tt>"allocator"</tt> setting.<br /> |
@@ -391,7 +374,7 @@ | |||
391 | <tt>nil</tt>/<tt>false</tt>/<tt>true</tt> | 374 | <tt>nil</tt>/<tt>false</tt>/<tt>true</tt> |
392 | </td> | 375 | </td> |
393 | <td> | 376 | <td> |
394 | (Since v3.7.5) If equal to <tt>false</tt> or <tt>nil</tt>, Lanes raises an error when attempting to transfer a non-deep full userdata, else it will be demoted to a light userdata in the destination. | 377 | If equal to <tt>false</tt> or <tt>nil</tt>, Lanes raises an error when attempting to transfer a non-deep full userdata, else it will be demoted to a light userdata in the destination. |
395 | Default is <tt>false</tt> (set to <tt>true</tt> to get the legacy behaviour). | 378 | Default is <tt>false</tt> (set to <tt>true</tt> to get the legacy behaviour). |
396 | </td> | 379 | </td> |
397 | </tr> | 380 | </tr> |
@@ -428,7 +411,7 @@ | |||
428 | </ul> | 411 | </ul> |
429 | That way, all changes in the state can be properly taken into account when building the function lookup database. Default is <tt>nil</tt>. | 412 | That way, all changes in the state can be properly taken into account when building the function lookup database. Default is <tt>nil</tt>. |
430 | <br /> | 413 | <br /> |
431 | (Since version 3.7.6) If <tt>on_state_create()</tt> is a Lua function, it will be transfered normally before the call. | 414 | If <tt>on_state_create()</tt> is a Lua function, it will be transferred normally before the call. |
432 | <br /> | 415 | <br /> |
433 | If it is a C function, a C closure will be reconstructed in the created state from the C pointer. Lanes will raise an error if the function has upvalues. | 416 | If it is a C function, a C closure will be reconstructed in the created state from the C pointer. Lanes will raise an error if the function has upvalues. |
434 | </td> | 417 | </td> |
@@ -442,25 +425,36 @@ | |||
442 | number >= 0 | 425 | number >= 0 |
443 | </td> | 426 | </td> |
444 | <td> | 427 | <td> |
445 | (Since v3.3.0) Sets the duration in seconds Lanes will wait for graceful termination of running lanes at application shutdown. Irrelevant for builds using pthreads. Default is <tt>0.25</tt>. | 428 | Sets the duration in seconds Lanes will wait for graceful termination of running lanes at application shutdown. Default is <tt>0.25</tt>. |
429 | </td> | ||
430 | </tr> | ||
431 | <tr valign=top> | ||
432 | <td id="shutdown_mode"> | ||
433 | <code>.shutdown_mode</code> | ||
434 | </td> | ||
435 | <td> | ||
436 | <tt>"hard"</tt>/<tt>"soft"</tt>/<tt>"call"</tt>/<tt>"ret"</tt>/<tt>"line"</tt>/<tt>"count"</tt> | ||
437 | </td> | ||
438 | <td> | ||
439 | Selects the cancellation mode used at Lanes shutdown to request free running lane termination. See <a href="#cancelling">lane cancellation</a>. Default is <tt>"hard"</tt>. | ||
446 | </td> | 440 | </td> |
447 | </tr> | 441 | </tr> |
448 | </table> | 442 | </table> |
449 | </p> | 443 | </p> |
450 | 444 | ||
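As an illustration, a hedged configuration sketch combining several of the settings described in the table above (values are arbitrary; option names follow the Lanes settings documentation):

    local lanes = require "lanes".configure{
        allocator = "protected",       -- serialize the master state's allocator
        internal_allocator = "libc",   -- realloc/free for keeper, linda and lane bookkeeping
        keepers_gc_threshold = 1024,   -- run a keeper GC cycle past ~1 MB of keeper memory
        demote_full_userdata = false,  -- raise an error instead of demoting to light userdata
        shutdown_timeout = 1,          -- wait up to one second for lanes at shutdown
        shutdown_mode = "soft",        -- cooperative cancellation request at shutdown
    }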
451 | <p> | 445 | <p> |
452 | (Since v3.5.0) Once Lanes is configured, one should register with Lanes the modules exporting functions that will be transferred either during lane generation or through <a href="#lindas">lindas</a>. | 446 | Once Lanes is configured, one should register with Lanes the modules exporting functions that will be transferred either during lane generation or through <a href="#lindas">lindas</a>. |
453 | <br/> | 447 | <br/> |
454 | Use <tt>lanes.require()</tt> for this purpose. This will call the original <tt>require()</tt>, then add the result to the lookup databases. | 448 | Use <tt>lanes.require()</tt> for this purpose. This will call the original <tt>require()</tt>, then add the result to the lookup databases. |
455 | <br/> | 449 | <br/> |
456 | (Since version 3.11) It is also possible to register a given module with <tt>lanes.register()</tt>. This function will raise an error if the registered module is not a function or table. | 450 | It is also possible to register a given module with <tt>lanes.register()</tt>. This function will raise an error if the registered module is not a function or table. |
457 | </p> | 451 | </p> |
458 | 452 | ||
459 | <table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"> | 453 | <table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"> |
460 | <tr> | 454 | <tr> |
461 | <td> | 455 | <td> |
462 | <pre> local m = lanes.require "modname"</pre> | 456 | <pre> local m = lanes.require "modname"</pre> |
463 | <pre> lanes.register( "modname", module)</pre> | 457 | <pre> lanes.register("modname", module)</pre> |
464 | </td> | 458 | </td> |
465 | </tr> | 459 | </tr> |
466 | </table> | 460 | </table> |
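For example, with an already-installed binary module (LuaSocket is used here purely for illustration), registering it on both sides lets its C functions travel to a lane as upvalues:

    local lanes = require "lanes".configure()
    local socket = lanes.require "socket"            -- require it and feed the lookup databases
    local gen = lanes.gen("*", { required = { "socket" } }, function()
        return socket.gettime()                      -- the upvalue's C function is resolved by name in the lane
    end)
    print(gen():join())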
@@ -477,11 +471,11 @@ | |||
477 | <td> | 471 | <td> |
478 | <pre> local lanes = require "lanes".configure()</pre> | 472 | <pre> local lanes = require "lanes".configure()</pre> |
479 | <br/> | 473 | <br/> |
480 | <pre> f = lanes.gen( function( n) return 2 * n end)</pre> | 474 | <pre> f = lanes.gen(function(n) return 2 * n end)</pre> |
481 | <pre> a = f( 1)</pre> | 475 | <pre> a = f(1)</pre> |
482 | <pre> b = f( 2)</pre> | 476 | <pre> b = f(2)</pre> |
483 | <br/> | 477 | <br/> |
484 | <pre> print( a[1], b[1] ) -- 2 4</pre> | 478 | <pre> print(a[1], b[1]) -- 2 4</pre> |
485 | </td> | 479 | </td> |
486 | </tr> | 480 | </tr> |
487 | </table> | 481 | </table> |
@@ -489,8 +483,8 @@ | |||
489 | <table border=1 bgcolor="#E0E0FF" cellpadding="10" style="width:50%"> | 483 | <table border=1 bgcolor="#E0E0FF" cellpadding="10" style="width:50%"> |
490 | <tr> | 484 | <tr> |
491 | <td> | 485 | <td> |
492 | <pre> func = lanes.gen( [libs_str | opt_tbl [, ...],] lane_func)</pre> | 486 | <pre> func = lanes.gen([libs_str | opt_tbl [, ...],] lane_func)</pre> |
493 | <pre> lane_h = func( ...)</pre> | 487 | <pre> lane_h = func(...)</pre> |
494 | </td> | 488 | </td> |
495 | </tr> | 489 | </tr> |
496 | </table> | 490 | </table> |
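A slightly fuller sketch of the generator pattern than the example above, with an explicit library list and results read by indexing the handle (illustrative values only):

    local lanes = require "lanes".configure()
    local gen = lanes.gen("math,string", function(lo, hi)
        return math.random(lo, hi), ("range %d-%d"):format(lo, hi)
    end)
    local h = gen(1, 100)      -- the lane starts running immediately
    print(h[1], h[2])          -- indexing waits for completion and returns the results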
@@ -714,7 +708,7 @@ | |||
714 | </td> | 708 | </td> |
715 | <td>function</td> | 709 | <td>function</td> |
716 | <td> | 710 | <td> |
717 | (Since version 3.8.2) Callback that gets invoked when the lane is garbage collected. The function receives two arguments (the lane name and a string, either <tt>"closed"</tt> or <tt>"selfdestruct"</tt>). | 711 | Callback that gets invoked when the lane is garbage collected. The function receives two arguments (the lane name and a string, either <tt>"closed"</tt> or <tt>"selfdestruct"</tt>). |
718 | </td> | 712 | </td> |
719 | </tr> | 713 | </tr> |
720 | <tr valign=top> | 714 | <tr valign=top> |
@@ -737,8 +731,6 @@ | |||
737 | </td> | 731 | </td> |
738 | <td> table</td> | 732 | <td> table</td> |
739 | <td> | 733 | <td> |
740 | Introduced at version 3.0. | ||
741 | <br/> | ||
742 | Specifying it when <code>libs_str</code> doesn't cause the <code>package</code> library to be loaded will generate an error. | 734 | Specifying it when <code>libs_str</code> doesn't cause the <code>package</code> library to be loaded will generate an error. |
743 | <br/> | 735 | <br/> |
744 | If not specified, the created lane will receive the current values of <tt>package</tt>. Only <tt>path</tt>, <tt>cpath</tt>, <tt>preload</tt> and <tt>loaders</tt> (Lua 5.1)/<tt>searchers</tt> (Lua 5.2) are transferred. | 736 | If not specified, the created lane will receive the current values of <tt>package</tt>. Only <tt>path</tt>, <tt>cpath</tt>, <tt>preload</tt> and <tt>loaders</tt> (Lua 5.1)/<tt>searchers</tt> (Lua 5.2) are transferred. |
@@ -749,26 +741,22 @@ | |||
749 | <p> | 741 | <p> |
750 | Each lane also gets a global function <tt>set_debug_threadname()</tt> that it can call at any time to set the name under which the lane's thread appears. Supported debuggers are Microsoft Visual Studio (for the C side) and Decoda (for the Lua side). | 742 | Each lane also gets a global function <tt>set_debug_threadname()</tt> that it can call at any time to set the name under which the lane's thread appears. Supported debuggers are Microsoft Visual Studio (for the C side) and Decoda (for the Lua side). |
751 | <br/> | 743 | <br/> |
752 | Starting with version 3.8.1, the lane has a new method <tt>lane:get_debug_threadname()</tt> that gives access to that name from the caller side (returns <tt>"<unnamed>"</tt> if unset, <tt>"<closed>"</tt> if the internal Lua state is closed). | 744 | The lane also has a method <tt>lane:get_debug_threadname()</tt> that gives access to that name from the caller side (returns <tt>"<unnamed>"</tt> if unset, <tt>"<closed>"</tt> if the internal Lua state is closed). |
753 | </p> | 745 | </p> |
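<p>
For illustration, a minimal sketch (the short sleep merely gives the lane time to set its name):
</p>
<table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"><tr><td><pre>
 local lanes = require "lanes".configure()
 local linda = lanes.linda()
 local h = lanes.gen("*", function()
     set_debug_threadname("worker")   -- visible in the supported debuggers
     linda:receive(10, "quit")
 end)()
 lanes.sleep(0.1)                     -- leave the lane time to set its name
 print(h:get_debug_threadname())      -- "worker"
 linda:send("quit", true)
</pre></td></tr></table>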
754 | 746 | ||
755 | <p> | 747 | <p> |
756 | If a lane body pulls a C function imported by a module required before Lanes itself (thus not through a hooked <tt>require</tt>), creating the lane generator will raise an error. | 748 | If a lane body pulls a C function imported by a module required before Lanes itself (thus not through a hooked <tt>require</tt>), creating the lane generator will raise an error. |
757 | The function name it shows is a path where it was found by scanning <tt>_G</tt>. As a utility, the name guessing functionality is exposed as such: | 749 | The function name it reports is the path under which it was found by scanning <tt>_G</tt> and the registry. As a utility, the name-guessing functionality is exposed as follows: |
758 | 750 | ||
759 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"> | 751 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"> |
760 | <tr> | 752 | <tr> |
761 | <td> | 753 | <td> |
762 | <pre> "type", "name" = lanes.nameof( o)</pre> | 754 | <pre> "type", "name" = lanes.nameof(o)</pre> |
763 | </td> | 755 | </td> |
764 | </tr> | 756 | </tr> |
765 | </table> | 757 | </table> |
766 | </p> | 758 | </p> |
767 | 759 | ||
768 | <p> | ||
769 | Starting with version 3.8.3, <tt>lanes.nameof()</tt> searches the registry as well. | ||
770 | </p> | ||
771 | |||
772 | <h3>Free running lanes</h3> | 760 | <h3>Free running lanes</h3> |
773 | 761 | ||
774 | <p> | 762 | <p> |
@@ -777,7 +765,7 @@ | |||
777 | <table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"> | 765 | <table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"> |
778 | <tr> | 766 | <tr> |
779 | <td> | 767 | <td> |
780 | <pre> lanes.gen( function( params) ... end ) ( ...)</pre> | 768 | <pre> lanes.gen(function(params) ... end ) (...)</pre> |
781 | </td> | 769 | </td> |
782 | </tr> | 770 | </tr> |
783 | </table> | 771 | </table> |
@@ -793,7 +781,7 @@ | |||
793 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"> | 781 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"> |
794 | <tr> | 782 | <tr> |
795 | <td> | 783 | <td> |
796 | <pre> lanes.set_thread_priority( prio)</pre> | 784 | <pre> lanes.set_thread_priority(prio)</pre> |
797 | </td> | 785 | </td> |
798 | </tr> | 786 | </tr> |
799 | </table> | 787 | </table> |
@@ -811,7 +799,7 @@ | |||
811 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"> | 799 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"> |
812 | <tr> | 800 | <tr> |
813 | <td> | 801 | <td> |
814 | <pre> lanes.set_thread_affinity( affinity)</pre> | 802 | <pre> lanes.set_thread_affinity(affinity)</pre> |
815 | </td> | 803 | </td> |
816 | </tr> | 804 | </tr> |
817 | </table> | 805 | </table> |
@@ -898,16 +886,6 @@ | |||
898 | received <a href="#cancelling">cancellation</a> and finished itself. | 886 | received <a href="#cancelling">cancellation</a> and finished itself. |
899 | </td> | 887 | </td> |
900 | </tr> | 888 | </tr> |
901 | <tr> | ||
902 | <td/> | ||
903 | <td> | ||
904 | <tt>"killed"</tt> | ||
905 | </td> | ||
906 | <td/> | ||
907 | <td> | ||
908 | was forcefully killed by <tt>lane_h:cancel()</tt> (since v3.3.0) | ||
909 | </td> | ||
910 | </tr> | ||
911 | </table> | 889 | </table> |
912 | </p> | 890 | </p> |
913 | 891 | ||
@@ -929,7 +907,7 @@ | |||
929 | </table> | 907 | </table> |
930 | 908 | ||
931 | <p> | 909 | <p> |
932 | Only available if lane tracking feature is compiled (see <tt>HAVE_LANE_TRACKING</tt> in <tt>lanes.c</tt>) and <a href="#track_lanes"><tt>track_lanes</tt></a> is set. | 910 | Only available if the lane tracking feature is compiled in (see <tt>HAVE_LANE_TRACKING</tt> in <tt>lanes.cpp</tt>) and <a href="#track_lanes"><tt>track_lanes</tt></a> is set. |
933 | <br/> | 911 | <br/> |
934 | Returns an array table where each entry is a table containing a lane's name and status. Returns <tt>nil</tt> if no lane is running. | 912 | Returns an array table where each entry is a table containing a lane's name and status. Returns <tt>nil</tt> if no lane is running. |
935 | </p> | 913 | </p> |
@@ -940,7 +918,7 @@ | |||
940 | <h2 id="results">Results and errors</h2> | 918 | <h2 id="results">Results and errors</h2> |
941 | 919 | ||
942 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"><tr><td><pre> | 920 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"><tr><td><pre> |
943 | set_error_reporting( "basic"|"extended") | 921 | set_error_reporting("basic"|"extended") |
944 | </pre></td></tr></table> | 922 | </pre></td></tr></table> |
945 | 923 | ||
946 | <p> | 924 | <p> |
@@ -962,11 +940,11 @@ | |||
962 | </p> | 940 | </p> |
963 | 941 | ||
964 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"><tr><td><pre> | 942 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"><tr><td><pre> |
965 | [...]|[nil,err,stack_tbl]= lane_h:join( [timeout_secs] ) | 943 | [...]|[nil,err,stack_tbl]= lane_h:join([timeout_secs]) |
966 | </pre></td></tr></table> | 944 | </pre></td></tr></table> |
967 | 945 | ||
968 | <p> | 946 | <p> |
969 | Waits until the lane finishes, or <tt>timeout</tt> seconds have passed. Returns <tt>nil, "timeout"</tt> on timeout (since v3.13), <tt>nil,err,stack_tbl</tt> if the lane hit an error, <tt>nil, "killed"</tt> if forcefully killed (starting with v3.3.0), or the return values of the lane. | 947 | Waits until the lane finishes, or <tt>timeout</tt> seconds have passed. Returns <tt>nil, "timeout"</tt> on timeout, <tt>nil,err,stack_tbl</tt> if the lane hit an error, <tt>nil, "killed"</tt> if forcefully killed, or the return values of the lane. |
970 | Unlike in reading the results in table fashion, errors are not propagated. | 948 | Unlike in reading the results in table fashion, errors are not propagated. |
971 | </p> | 949 | </p> |
972 | 950 | ||
@@ -987,14 +965,14 @@ | |||
987 | <table border=1 bgcolor="#FFFFE0" cellpadding="10" style="width:50%"><tr><td><pre> | 965 | <table border=1 bgcolor="#FFFFE0" cellpadding="10" style="width:50%"><tr><td><pre> |
988 | require "lanes".configure() | 966 | require "lanes".configure() |
989 | 967 | ||
990 | f = lanes.gen( function() error "!!!" end) | 968 | f = lanes.gen(function() error "!!!" end) |
991 | a = f( 1) | 969 | a = f(1) |
992 | 970 | ||
993 | --print( a[1]) -- propagates error | 971 | --print(a[1]) -- propagates error |
994 | 972 | ||
995 | v, err = a:join() -- no propagation | 973 | v, err = a:join() -- no propagation |
996 | if v == nil then | 974 | if v == nil then |
997 | error( "'a' faced error"..tostring(err)) -- manual propagation | 975 | error("'a' faced error"..tostring(err)) -- manual propagation |
998 | end | 976 | end |
999 | </pre></td></tr></table> | 977 | </pre></td></tr></table> |
1000 | 978 | ||
@@ -1006,12 +984,12 @@ | |||
1006 | require "lanes".configure() | 984 | require "lanes".configure() |
1007 | 985 | ||
1008 | local sync_linda = lanes.linda() | 986 | local sync_linda = lanes.linda() |
1009 | f = lanes.gen( function() dostuff() sync_linda:send( "done", true) end) | 987 | f = lanes.gen(function() dostuff() sync_linda:send("done", true) end) |
1010 | a = f() | 988 | a = f() |
1011 | b = f() | 989 | b = f() |
1012 | c = f() | 990 | c = f() |
1013 | 991 | ||
1014 | sync_linda:receive( nil, sync_linda.batched, "done", 3) -- wait for 3 lanes to write something in "done" slot of sync_linda | 992 | sync_linda:receive(nil, sync_linda.batched, "done", 3) -- wait for 3 lanes to write something in "done" slot of sync_linda |
1015 | </pre></td></tr></table> | 993 | </pre></td></tr></table> |
1016 | 994 | ||
1017 | <!-- cancelling +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ --> | 995 | <!-- cancelling +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ --> |
@@ -1019,39 +997,36 @@ | |||
1019 | <h2 id="cancelling">Cancelling</h2> | 997 | <h2 id="cancelling">Cancelling</h2> |
1020 | 998 | ||
1021 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"><tr><td><pre> | 999 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"><tr><td><pre> |
1022 | bool[,reason] = lane_h:cancel( "soft" [, timeout] [, wake_bool]) | 1000 | bool[,reason] = lane_h:cancel("soft" [, timeout] [, wake_lane]) |
1023 | bool[,reason] = lane_h:cancel( "hard" [, timeout] [, force [, forcekill_timeout]]) | 1001 | bool[,reason] = lane_h:cancel("hard" [, timeout] [, wake_lane]) |
1024 | bool[,reason] = lane_h:cancel( [mode, hookcount] [, timeout] [, force [, forcekill_timeout]]) | 1002 | bool[,reason] = lane_h:cancel([mode, hookcount] [, timeout] [, wake_lane]) |
1025 | </pre></td></tr></table> | 1003 | </pre></td></tr></table> |
1026 | 1004 | ||
1027 | <p> | 1005 | <p> |
1028 | <tt>cancel()</tt> sends a cancellation request to the lane.<br/> | 1006 | <tt>cancel()</tt> sends a cancellation request to the lane.<br/> |
1029 | First argument is a <tt>mode</tt> can be one of <tt>"hard"</tt>, <tt>"soft"</tt>, <tt>"count"</tt>, <tt>"line"</tt>, <tt>"call"</tt>, <tt>"ret"</tt>. | 1007 | The first argument is a <tt>mode</tt>, which can be one of <tt>"hard"</tt>, <tt>"soft"</tt>, <tt>"call"</tt>, <tt>"ret"</tt>, <tt>"line"</tt>, <tt>"count"</tt>. |
1030 | If <tt>mode</tt> is not specified, it defaults to <tt>"hard"</tt>. | 1008 | If <tt>mode</tt> is not specified, it defaults to <tt>"hard"</tt>. |
1009 | If <tt>wake_lane</tt> is <tt>true</tt>, the lane is also signalled so that execution returns from any pending linda operation. Linda operations detecting the cancellation request return <tt>lanes.cancel_error</tt>. | ||
1031 | </p> | 1010 | </p> |
1032 | <p> | 1011 | <p> |
1033 | If <tt>mode</tt> is <tt>"soft"</tt>, cancellation will only cause <tt>cancel_test()</tt> to return <tt>true</tt>, so that the lane can clean up manually.<br/> | 1012 | If <tt>mode</tt> is <tt>"soft"</tt>, cancellation will only cause <tt>cancel_test()</tt> to return <tt>true</tt>, so that the lane can clean up manually.<br/> |
1034 | If <tt>wake_bool</tt> is <tt>true</tt>, the lane is also signalled so that execution returns from any pending linda operation. Linda operations detecting the cancellation request return <tt>lanes.cancel_error</tt>. | ||
1035 | </p> | 1013 | </p> |
1036 | <p> | 1014 | <p> |
1037 | If <tt>mode</tt> is <tt>"hard"</tt>, the call waits for the request to be processed, or for a timeout to occur. Linda operations detecting the cancellation request will raise a special cancellation error (meaning they won't return in that case).<br/> | 1015 | If <tt>mode</tt> is <tt>"hard"</tt>, the call waits for the request to be processed, or for a timeout to occur. Linda operations detecting the cancellation request will raise a special cancellation error (meaning they won't return in that case).<br/> |
1038 | <tt>timeout</tt> defaults to 0 if not specified. | 1016 | <tt>wake_lane</tt> defaults to <tt>true</tt>, and <tt>timeout</tt> defaults to 0 if not specified. |
1039 | </p> | 1017 | </p> |
1040 | <p> | 1018 | <p> |
1041 | Other values of <tt>mode</tt> will asynchronously install the corresponding hook, then behave as <tt>"hard"</tt>. | 1019 | Other values of <tt>mode</tt> will asynchronously install the corresponding hook, then behave as <tt>"hard"</tt>. |
1042 | </p> | 1020 | </p> |
1043 | <p> | 1021 | <p> |
1044 | If <tt>force_kill_bool</tt> is <tt>true</tt>, <tt>forcekill_timeout</tt> can be set to tell how long lanes will wait for the OS thread to terminate before raising an error. Windows threads always terminate immediately, but it might not always be the case with some pthread implementations. | ||
1045 | </p> | ||
1046 | <p> | ||
1047 | Returns <tt>true, lane_h.status</tt> if lane was already done (in <tt>"done"</tt>, <tt>"error"</tt> or <tt>"cancelled"</tt> status), or the cancellation was fruitful within <tt>timeout_secs</tt> timeout period.<br/> | 1022 | Returns <tt>true, lane_h.status</tt> if lane was already done (in <tt>"done"</tt>, <tt>"error"</tt> or <tt>"cancelled"</tt> status), or the cancellation was fruitful within <tt>timeout_secs</tt> timeout period.<br/> |
1048 | Returns <tt>false, "timeout"</tt> otherwise. | 1023 | Returns <tt>false, "timeout"</tt> otherwise. |
1049 | </p> | 1024 | </p> |
1050 | <p> | 1025 | <p> |
1051 | If the lane is still running after the timeout expired and <tt>force_kill</tt> is <tt>true</tt>, the OS thread running the lane is forcefully killed. This means no GC, probable OS resource leaks (thread stack, locks, DLL notifications), and should generally be the last resort. | 1026 | If the lane is still running after the timeout expired, Lanes may raise an error at shutdown if it fails to terminate all free-running lanes within the shutdown timeout. |
1052 | </p> | 1027 | </p> |
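<p>
For illustration, a minimal soft-cancellation sketch:
</p>
<table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"><tr><td><pre>
 local lanes = require "lanes".configure()
 local linda = lanes.linda()
 local h = lanes.gen("*", function()
     repeat
         linda:receive(0.1, "job")    -- returns lanes.cancel_error once the lane is soft-cancelled
     until cancel_test()              -- true after the soft cancel request
     return "cleaned up"
 end)()

 h:cancel("soft", 1, true)            -- wake_lane = true: a pending receive() returns immediately
 print(h:join())                      -- "cleaned up"
</pre></td></tr></table>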
1053 | <p> | 1028 | <p> |
1054 | Cancellation is tested <u>before</u> going to sleep in <tt>receive()</tt> or <tt>send()</tt> calls and after executing <tt>cancelstep</tt> Lua statements. Starting with version 3.0-beta, a pending <tt>receive()</tt>or <tt>send()</tt> call is awakened. | 1029 | Cancellation is tested <u>before</u> going to sleep in <tt>receive()</tt> or <tt>send()</tt> calls and after executing <tt>cancelstep</tt> Lua statements. A pending <tt>receive()</tt> or <tt>send()</tt> call is awakened. |
1055 | <br/> | 1030 | <br/> |
1056 | This means the execution of the lane will resume although the operation has not completed, to give the lane a chance to detect cancellation (even in the case the code waits on a <a href="#lindas">linda</a> with infinite timeout). | 1031 | This means the execution of the lane will resume although the operation has not completed, to give the lane a chance to detect cancellation (even in the case the code waits on a <a href="#lindas">linda</a> with infinite timeout). |
1057 | <br/> | 1032 | <br/> |
@@ -1066,9 +1041,9 @@ | |||
1066 | <h2 id="finalizers">Finalizers</h2> | 1041 | <h2 id="finalizers">Finalizers</h2> |
1067 | 1042 | ||
1068 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"><tr><td><pre> | 1043 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"><tr><td><pre> |
1069 | set_finalizer( finalizer_func) | 1044 | set_finalizer(finalizer_func) |
1070 | 1045 | ||
1071 | void = finalizer_func( [err, stack_tbl]) | 1046 | void = finalizer_func([err, stack_tbl]) |
1072 | </pre></td></tr></table> | 1047 | </pre></td></tr></table> |
1073 | 1048 | ||
1074 | <p> | 1049 | <p> |
@@ -1088,16 +1063,16 @@ | |||
1088 | 1063 | ||
1089 | <table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"><tr><td><pre> | 1064 | <table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"><tr><td><pre> |
1090 | local lane_body = function() | 1065 | local lane_body = function() |
1091 | set_finalizer( function( err, stk) | 1066 | set_finalizer(function(err, stk) |
1092 | if err and type( err) ~= "userdata" then | 1067 | if err and type(err) ~= "userdata" then |
1093 | -- no special error: true error | 1068 | -- no special error: true error |
1094 | print( " error: "..tostring(err)) | 1069 | print(" error: "..tostring(err)) |
1095 | elseif type( err) == "userdata" then | 1070 | elseif type(err) == "userdata" then |
1096 | -- lane <a href="#cancelling">cancellation</a> is performed by throwing a special userdata as error | 1071 | -- lane <a href="#cancelling">cancellation</a> is performed by throwing a special userdata as error |
1097 | print( "after cancel") | 1072 | print("after cancel") |
1098 | else | 1073 | else |
1099 | -- no error: we just got finalized | 1074 | -- no error: we just got finalized |
1100 | print( "finalized") | 1075 | print("finalized") |
1101 | end | 1076 | end |
1102 | end) | 1077 | end) |
1103 | end | 1078 | end |
@@ -1123,22 +1098,22 @@ | |||
1123 | 1098 | ||
1124 | local linda = lanes.linda() | 1099 | local linda = lanes.linda() |
1125 | 1100 | ||
1126 | local function loop( max) | 1101 | local function loop(max) |
1127 | for i = 1, max do | 1102 | for i = 1, max do |
1128 | print( "sending: " .. i) | 1103 | print("sending: " .. i) |
1129 | linda:send( "x", i) -- linda as upvalue | 1104 | linda:send("x", i) -- linda as upvalue |
1130 | end | 1105 | end |
1131 | end | 1106 | end |
1132 | 1107 | ||
1133 | a = lanes.gen( "", loop)( 10000) | 1108 | a = lanes.gen("", loop)(10000) |
1134 | 1109 | ||
1135 | while true do | 1110 | while true do |
1136 | local key, val = linda:receive( 3.0, "x") -- timeout in seconds | 1111 | local key, val = linda:receive(3.0, "x") -- timeout in seconds |
1137 | if val == nil then | 1112 | if val == nil then |
1138 | print( "timed out") | 1113 | print("timed out") |
1139 | break | 1114 | break |
1140 | end | 1115 | end |
1141 | print( tostring( linda) .. " received: " .. val) | 1116 | print(tostring(linda) .. " received: " .. val) |
1142 | end | 1117 | end |
1143 | </pre></td></tr></table> | 1118 | </pre></td></tr></table> |
1144 | 1119 | ||
@@ -1153,23 +1128,23 @@ | |||
1153 | <li>two producer-side methods: <tt>:send</tt> and <tt>:set</tt> (not out).</li> | 1128 | <li>two producer-side methods: <tt>:send</tt> and <tt>:set</tt> (not out).</li> |
1154 | <li><tt>send</tt> allows for sending multiple values -atomically- to a given key.</li> | 1129 | <li><tt>send</tt> allows for sending multiple values -atomically- to a given key.</li> |
1155 | <li><tt>receive</tt> can wait for multiple keys at once.</li> | 1130 | <li><tt>receive</tt> can wait for multiple keys at once.</li> |
1156 | <li><tt>receive</tt> has a batched mode to consume more than one value from a single key, as in <tt>linda:receive( 1.0, linda.batched, "key", 3, 6).</tt></li> | 1131 | <li><tt>receive</tt> has a batched mode to consume more than one value from a single key, as in <tt>linda:receive(1.0, linda.batched, "key", 3, 6).</tt></li> |
1157 | <li>individual keys' queue length can be limited, balancing speed differences in a producer/consumer scenario (making <tt>:send</tt> wait).</li> | 1132 | <li>individual keys' queue length can be limited, balancing speed differences in a producer/consumer scenario (making <tt>:send</tt> wait).</li> |
1158 | <li><tt>tostring( linda)</tt> returns a string of the form <tt>"Linda: <opt_name>"</tt></li> | 1133 | <li><tt>tostring(linda)</tt> returns a string of the form <tt>"Linda: <opt_name>"</tt></li> |
1159 | <li>several lindas may share the same keeper state. Since version 3.9.1, state assignation can be controlled with the linda's group (an integer). All lindas belonging to the same group will share the same keeper state. One keeper state may be shared by several groups.</li> | 1134 | <li>several lindas may share the same keeper state. State assignment can be controlled with the linda's group (an integer). All lindas belonging to the same group will share the same keeper state. One keeper state may be shared by several groups.</li> |
1160 | </ul> | 1135 | </ul> |
1161 | </p> | 1136 | </p> |
1162 | 1137 | ||
1163 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"><tr><td><pre> | 1138 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"><tr><td><pre> |
1164 | h = lanes.linda( [opt_name, [opt_group]]) | 1139 | h = lanes.linda([opt_name, [opt_group]]) |
1165 | 1140 | ||
1166 | [true|lanes.cancel_error] = h:send( [timeout_secs,] [h.null,] key, ...) | 1141 | [true|lanes.cancel_error] = h:send([timeout_secs,] [h.null,] key, ...) |
1167 | 1142 | ||
1168 | [key, val]|[lanes.cancel_error] = h:receive( [timeout_secs,] key [, ...]) | 1143 | [key, val]|[lanes.cancel_error] = h:receive([timeout_secs,] key [, ...]) |
1169 | 1144 | ||
1170 | [key, val [, ...]]|[lanes.cancel_error] = h:receive( timeout, h.batched, key, n_uint_min[, n_uint_max]) | 1145 | [key, val [, ...]]|[lanes.cancel_error] = h:receive(timeout, h.batched, key, n_uint_min[, n_uint_max]) |
1171 | 1146 | ||
1172 | [true|lanes.cancel_error] = h:limit( key, n_uint) | 1147 | [true|lanes.cancel_error] = h:limit(key, n_uint) |
1173 | </pre></td></tr></table> | 1148 | </pre></td></tr></table> |
1174 | 1149 | ||
1175 | <p> | 1150 | <p> |
@@ -1181,7 +1156,7 @@ | |||
1181 | <br/> | 1156 | <br/> |
1182 | A limit of 0 is allowed to block everything. | 1157 | A limit of 0 is allowed to block everything. |
1183 | <br/> | 1158 | <br/> |
1184 | (Since version 3.7.7) if the key was full but the limit change added some room, <tt>limit()</tt> returns <tt>true</tt> and the linda is signalled so that <tt>send()</tt>-blocked threads are awakened. | 1159 | If the key was full but the limit change added some room, <tt>limit()</tt> returns <tt>true</tt> and the linda is signalled so that <tt>send()</tt>-blocked threads are awakened. |
1185 | </p> | 1160 | </p> |
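<p>
For illustration, a small sketch combining a key limit with a batched read:
</p>
<table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"><tr><td><pre>
 local lanes = require "lanes".configure()
 local linda = lanes.linda()

 linda:limit("x", 2)                  -- at most 2 values may be queued under "x"
 linda:send("x", 1)                   -- true
 linda:send("x", 2)                   -- true
 print(linda:send(0, "x", 3))         -- false: the key is full and did not drain within the timeout
 print(linda:receive(0.1, linda.batched, "x", 2))   -- "x"  1  2
</pre></td></tr></table>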
1186 | 1161 | ||
1187 | <p> | 1162 | <p> |
@@ -1195,9 +1170,9 @@ | |||
1195 | <p> | 1170 | <p> |
1196 | <tt>send()</tt> returns <tt>true</tt> if the sending succeeded, and <tt>false</tt> if the queue limit was met and the queue did not empty enough during the given timeout. | 1171 | <tt>send()</tt> returns <tt>true</tt> if the sending succeeded, and <tt>false</tt> if the queue limit was met and the queue did not empty enough during the given timeout. |
1197 | <br/> | 1172 | <br/> |
1198 | (Since version 3.7.8) <tt>send()</tt> returns <tt>lanes.cancel_error</tt> if interrupted by a soft cancel request. | 1173 | <tt>send()</tt> returns <tt>lanes.cancel_error</tt> if interrupted by a soft cancel request. |
1199 | <br/> | 1174 | <br/> |
1200 | If no data is provided after the key, <tt>send()</tt> raises an error. Since version 3.9.3, if provided with <tt>linda.null</tt> before the actual key and there is no data to send, <tt>send()</tt> sends a single <tt>nil</tt>. | 1175 | If no data is provided after the key, <tt>send()</tt> raises an error. If provided with <tt>linda.null</tt> before the actual key and there is no data to send, <tt>send()</tt> sends a single <tt>nil</tt>. |
1201 | <br/> | 1176 | <br/> |
1202 | Also, if <tt>linda.null</tt> is sent as data in a linda, it will be read as a <tt>nil</tt>. | 1177 | Also, if <tt>linda.null</tt> is sent as data in a linda, it will be read as a <tt>nil</tt>. |
1203 | </p> | 1178 | </p> |
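<p>
For illustration:
</p>
<table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"><tr><td><pre>
 local lanes = require "lanes".configure()
 local linda = lanes.linda()

 linda:send(linda.null, "k")          -- no data after the key: a single nil is sent
 linda:send("k", linda.null)          -- linda.null sent as data is read back as nil
 print(linda:receive(0.1, "k"))       -- "k"  nil
 print(linda:receive(0.1, "k"))       -- "k"  nil
</pre></td></tr></table>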
@@ -1205,9 +1180,7 @@ | |||
1205 | <p> | 1180 | <p> |
1206 | Similarly, <tt>receive()</tt> returns a key and the value extracted from it, or nothing on timeout. Note that <tt>nil</tt>s can be sent and received; the returned <tt>key</tt> value tells a stored <tt>nil</tt> apart from a timeout. | 1181 | Similarly, <tt>receive()</tt> returns a key and the value extracted from it, or nothing on timeout. Note that <tt>nil</tt>s can be sent and received; the returned <tt>key</tt> value tells a stored <tt>nil</tt> apart from a timeout. |
1207 | <br/> | 1182 | <br/> |
1208 | Version 3.4.0 introduces an API change in the returned values: <tt>receive()</tt> returns the key followed by the value(s), in that order, and not the other way around. | 1183 | <tt>receive()</tt> returns <tt>lanes.cancel_error</tt> if interrupted by a soft cancel request. |
1209 | <br/> | ||
1210 | (Since version 3.7.8) <tt>receive()</tt> returns <tt>lanes.cancel_error</tt> if interrupted by a soft cancel request. | ||
1211 | </p> | 1184 | </p> |
1212 | 1185 | ||
1213 | <p> | 1186 | <p> |
@@ -1220,9 +1193,9 @@ | |||
1220 | </p> | 1193 | </p> |
1221 | 1194 | ||
1222 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"><tr><td><pre> | 1195 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"><tr><td><pre> |
1223 | bool|lanes.cancel_error = linda_h:set( key [, val [, ...]]) | 1196 | bool|lanes.cancel_error = linda_h:set(key [, val [, ...]]) |
1224 | 1197 | ||
1225 | [[val [, ...]]|lanes.cancel_error] = linda_h:get( key [, count = 1]) | 1198 | [[val [, ...]]|lanes.cancel_error] = linda_h:get(key [, count = 1]) |
1226 | </pre></td></tr></table> | 1199 | </pre></td></tr></table> |
1227 | 1200 | ||
1228 | <p> | 1201 | <p> |
@@ -1238,21 +1211,21 @@ | |||
1238 | <p> | 1211 | <p> |
1239 | <tt>set()</tt> signals the linda for write if a value is stored. If nothing special happens, <tt>set()</tt> returns nothing. | 1212 | <tt>set()</tt> signals the linda for write if a value is stored. If nothing special happens, <tt>set()</tt> returns nothing. |
1240 | <br/> | 1213 | <br/> |
1241 | Since version 3.7.7, if the key was full but the new data count of the key after <tt>set()</tt> is below its limit, <tt>set()</tt> returns <tt>true</tt> and the linda is also signaled for read so that <tt>send()</tt>-blocked threads are awakened. | 1214 | If the key was full but the new data count of the key after <tt>set()</tt> is below its limit, <tt>set()</tt> returns <tt>true</tt> and the linda is also signaled for read so that <tt>send()</tt>-blocked threads are awakened. |
1242 | </p> | 1215 | </p> |
1243 | 1216 | ||
1244 | <p> | 1217 | <p> |
1245 | Since version 3.8.0, <tt>set()</tt> can write several values at the specified key, writing <tt>nil</tt> values is now possible, and clearing the contents at the specified key is done by not providing any value. | 1218 | <tt>set()</tt> can write several values at the specified key, including <tt>nil</tt> values; clearing the contents at the specified key is done by not providing any value. |
1246 | <br/> | 1219 | <br/> |
1247 | Also, <tt>get()</tt> can read several values at once. If the key contains no data, <tt>get()</tt> returns no value. This makes it possible to distinguish an empty key from one storing <tt>nil</tt> values. | 1220 | Also, <tt>get()</tt> can read several values at once. If the key contains no data, <tt>get()</tt> returns no value. This makes it possible to distinguish an empty key from one storing <tt>nil</tt> values. |
1248 | </p> | 1221 | </p> |
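<p>
For illustration:
</p>
<table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"><tr><td><pre>
 local lanes = require "lanes".configure()
 local linda = lanes.linda()

 linda:set("cfg", "a", nil, "c")      -- stores three values (one of them nil) under "cfg"
 print(linda:get("cfg", 3))           -- "a"  nil  "c"
 linda:set("cfg")                     -- no value: clears the contents of "cfg"
 print(linda:get("cfg"))              -- prints nothing: the key holds no data
</pre></td></tr></table>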
1249 | 1222 | ||
1250 | <p> | 1223 | <p> |
1251 | Since version 3.8.4, trying to send or receive data through a cancelled linda does nothing and returns <tt>lanes.cancel_error</tt>. | 1224 | Trying to send or receive data through a cancelled linda does nothing and returns <tt>lanes.cancel_error</tt>. |
1252 | </p> | 1225 | </p> |
1253 | 1226 | ||
1254 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"><tr><td><pre> | 1227 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"><tr><td><pre> |
1255 | [val] = linda_h:count( [key[,...]]) | 1228 | [val] = linda_h:count([key[,...]]) |
1256 | </pre></td></tr></table> | 1229 | </pre></td></tr></table> |
1257 | 1230 | ||
1258 | <p> | 1231 | <p> |
@@ -1280,7 +1253,7 @@ | |||
1280 | </pre></td></tr></table> | 1253 | </pre></td></tr></table> |
1281 | 1254 | ||
1282 | <p> | 1255 | <p> |
1283 | (Starting with version 3.8.4) Signals the linda so that lanes waiting for read, write, or both, wake up. | 1256 | Signals the linda so that lanes waiting for read, write, or both, wake up. |
1284 | As long as the linda is marked as cancelled, all linda operations (including <tt>get()</tt> and <tt>set()</tt>) return <tt>lanes.cancel_error</tt>, just as when the calling lane is <a href="#cancelling">soft-cancelled</a>. | 1257 | As long as the linda is marked as cancelled, all linda operations (including <tt>get()</tt> and <tt>set()</tt>) return <tt>lanes.cancel_error</tt>, just as when the calling lane is <a href="#cancelling">soft-cancelled</a>. |
1285 | <br/> | 1258 | <br/> |
1286 | <tt>"none"</tt> reset the linda's cancel status, but doesn't signal it. | 1259 | <tt>"none"</tt> reset the linda's cancel status, but doesn't signal it. |
@@ -1342,7 +1315,7 @@ events to a common Linda, but... :).</font> | |||
1342 | <h2 id="timers">Timers</h2> | 1315 | <h2 id="timers">Timers</h2> |
1343 | 1316 | ||
1344 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"><tr><td><pre> | 1317 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"><tr><td><pre> |
1345 | void = lanes.timer( linda_h, key, date_tbl|first_secs [,period_secs]) | 1318 | void = lanes.timer(linda_h, key, date_tbl|first_secs [,period_secs]) |
1346 | </pre></td></tr></table> | 1319 | </pre></td></tr></table> |
1347 | 1320 | ||
1348 | <p> | 1321 | <p> |
@@ -1369,18 +1342,18 @@ events to a common Linda, but... :).</font> | |||
1369 | 1342 | ||
1370 | -- First timer once a second, not synchronized to wall clock | 1343 | -- First timer once a second, not synchronized to wall clock |
1371 | -- | 1344 | -- |
1372 | lanes.timer( linda, "sec", 1, 1) | 1345 | lanes.timer(linda, "sec", 1, 1) |
1373 | 1346 | ||
1374 | -- Timer to a future event (next even minute); wall clock synchronized | 1347 | -- Timer to a future event (next even minute); wall clock synchronized |
1375 | -- | 1348 | -- |
1376 | local t = os.date( "*t", os.time() + 60) -- now + 1min | 1349 | local t = os.date("*t", os.time() + 60) -- now + 1min |
1377 | t.sec = 0 | 1350 | t.sec = 0 |
1378 | 1351 | ||
1379 | lanes.timer( linda, "min", t, 60) -- reoccur every minute (sharp) | 1352 | lanes.timer(linda, "min", t, 60) -- reoccur every minute (sharp) |
1380 | 1353 | ||
1381 | while true do | 1354 | while true do |
1382 | local key, v = linda:receive( "sec", "min") | 1355 | local key, v = linda:receive("sec", "min") |
1383 | print( "Timer "..key..": "..v) | 1356 | print("Timer "..key..": "..v) |
1384 | end | 1357 | end |
1385 | </pre></td></tr></table> | 1358 | </pre></td></tr></table> |
1386 | 1359 | ||
@@ -1394,7 +1367,7 @@ events to a common Linda, but... :).</font> | |||
1394 | <td> | 1367 | <td> |
1395 | <font size="-1"> | 1368 | <font size="-1"> |
1396 | Having the API as <tt>lanes.timer()</tt> is intentional. Another alternative would be <tt>linda_h:timer()</tt> but timers are not traditionally seen to be part of Lindas. Also, it would mean any lane getting a Linda handle would be able to modify timers on it. | 1369 | Having the API as <tt>lanes.timer()</tt> is intentional. Another alternative would be <tt>linda_h:timer()</tt> but timers are not traditionally seen to be part of Lindas. Also, it would mean any lane getting a Linda handle would be able to modify timers on it. |
1397 | A third choice could be abstracting the timers out of Linda realm altogether (<tt>timer_h= lanes.timer( date|first_secs, period_secs )</tt>) but that would mean separate waiting functions for timers, and lindas. | 1370 | A third choice could be abstracting the timers out of Linda realm altogether (<tt>timer_h= lanes.timer(date|first_secs, period_secs )</tt>) but that would mean separate waiting functions for timers, and lindas. |
1398 | Even if a linda object and key was returned, that key couldn't be waited upon simultaneously with one's general linda events. | 1371 | Even if a linda object and key was returned, that key couldn't be waited upon simultaneously with one's general linda events. |
1399 | The current system gives maximum capabilities with minimum API, and any smoothenings can easily be crafted in Lua at the application level. | 1372 | The current system gives maximum capabilities with minimum API, and any smoothenings can easily be crafted in Lua at the application level. |
1400 | </font> | 1373 | </font> |
@@ -1413,14 +1386,22 @@ events to a common Linda, but... :).</font> | |||
1413 | </p> | 1386 | </p> |
1414 | 1387 | ||
1415 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"><tr><td><pre> | 1388 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"><tr><td><pre> |
1416 | void = lanes.sleep( [seconds|false]) | 1389 | void = lanes.sleep([seconds|false]) |
1417 | </pre></td></tr></table> | 1390 | </pre></td></tr></table> |
1418 | 1391 | ||
1419 | <p> | 1392 | <p> |
1420 | (Since version 3.9.7) A very simple way of sleeping when nothing else is available. Is implemented by attempting to read some data in an unused channel of the internal linda used for timers (this linda exists even when timers aren't enabled). | 1393 | A very simple way of sleeping when nothing else is available. It is implemented by attempting to read some data from an unused channel of the internal linda used for timers (this linda exists even when timers aren't enabled). |
1421 | Default duration is null, which should only cause a thread context switch. | 1394 | The default duration is 0, which should only cause a thread context switch. |
1422 | </p> | 1395 | </p> |
1423 | 1396 | ||
1397 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"><tr><td><pre> | ||
1398 | number = lanes.now_secs() | ||
1399 | </pre></td></tr></table> | ||
1400 | |||
1401 | <p> | ||
1402 | Returns the current value of the clock used by timers and lindas. | ||
1403 | </p> | ||
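<p>
For illustration:
</p>
<table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"><tr><td><pre>
 local lanes = require "lanes".configure()
 local t0 = lanes.now_secs()
 lanes.sleep(0.5)
 print(lanes.now_secs() - t0)         -- roughly 0.5
</pre></td></tr></table>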
1404 | |||
1424 | <!-- locks +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ --> | 1405 | <!-- locks +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ --> |
1425 | <hr/> | 1406 | <hr/> |
1426 | <h2 id="locks">Locks etc.</h2> | 1407 | <h2 id="locks">Locks etc.</h2> |
@@ -1430,11 +1411,11 @@ events to a common Linda, but... :).</font> | |||
1430 | </p> | 1411 | </p> |
1431 | 1412 | ||
1432 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"><tr><td><pre> | 1413 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"><tr><td><pre> |
1433 | lock_func|lanes.cancel_error = lanes.genlock( linda_h, key [,N_uint=1]) | 1414 | lock_func|lanes.cancel_error = lanes.genlock(linda_h, key [,N_uint=1]) |
1434 | 1415 | ||
1435 | bool|lanes.cancel_error = lock_func( M_uint [, "try"] ) -- acquire | 1416 | bool|lanes.cancel_error = lock_func(M_uint [, "try"] ) -- acquire |
1436 | .. | 1417 | .. |
1437 | bool|lanes.cancel_error = lock_func( -M_uint) -- release | 1418 | bool|lanes.cancel_error = lock_func(-M_uint) -- release |
1438 | </pre></td></tr></table> | 1419 | </pre></td></tr></table> |
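<p>
For illustration, a minimal critical-section sketch:
</p>
<table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"><tr><td><pre>
 local lanes = require "lanes".configure()
 local linda = lanes.linda()
 local lock = lanes.genlock(linda, "serialize", 1)   -- at most 1 concurrent holder

 lock(1)      -- acquire one unit (blocks until available)
 -- ...exclusive work...
 lock(-1)     -- release it
</pre></td></tr></table>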
1439 | 1420 | ||
1440 | <p> | 1421 | <p> |
@@ -1456,9 +1437,9 @@ events to a common Linda, but... :).</font> | |||
1456 | <p> | 1437 | <p> |
1457 | 1438 | ||
1458 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"><tr><td><pre> | 1439 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"><tr><td><pre> |
1459 | atomic_func|lanes.cancel_error = lanes.genatomic( linda_h, key [,initial_num=0.0]) | 1440 | atomic_func|lanes.cancel_error = lanes.genatomic(linda_h, key [,initial_num=0.0]) |
1460 | 1441 | ||
1461 | new_num|lanes.cancel_error = atomic_func( [diff_num=+1.0]) | 1442 | new_num|lanes.cancel_error = atomic_func([diff_num=+1.0]) |
1462 | </pre></td></tr></table> | 1443 | </pre></td></tr></table> |
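<p>
For illustration, a shared counter sketch:
</p>
<table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"><tr><td><pre>
 local lanes = require "lanes".configure()
 local linda = lanes.linda()
 local counter = lanes.genatomic(linda, "hits")   -- initial value 0.0

 print(counter())      -- 1: the default increment is +1.0
 print(counter(10))    -- 11
 print(counter(0))     -- 11: a zero increment just reads the current value
</pre></td></tr></table>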
1463 | 1444 | ||
1464 | <p> | 1445 | <p> |
@@ -1483,9 +1464,6 @@ events to a common Linda, but... :).</font> | |||
1483 | <p> | 1464 | <p> |
1484 | <ul> | 1465 | <ul> |
1485 | <li>Booleans, numbers, strings, light userdata, Lua functions and tables of such can always be passed.</li> | 1466 | <li>Booleans, numbers, strings, light userdata, Lua functions and tables of such can always be passed.</li> |
1486 | <ul> | ||
1487 | <li>Versions 3.4.1 and earlier had an undocumented limitation: Lua functions with an indirect recursive Lua function upvalue raised an error when transfered. This limitation disappeared with version 3.4.2.</li> | ||
1488 | </ul> | ||
1489 | <li> | 1467 | <li> |
1490 | Cyclic tables and/or duplicate references are allowed and reproduced appropriately, but only <u>within the same transmission</u>. | 1468 | Cyclic tables and/or duplicate references are allowed and reproduced appropriately, but only <u>within the same transmission</u>. |
1491 | <ul> | 1469 | <ul> |
@@ -1514,7 +1492,7 @@ events to a common Linda, but... :).</font> | |||
1514 | </ul> | 1492 | </ul> |
1515 | </li> | 1493 | </li> |
1516 | <li>Coroutines cannot be passed. A coroutine's Lua state is tied to the Lua state that created it, and there is no way the mixed C/Lua stack of a coroutine can be transferred from one Lua state to another.</li> | 1494 | <li>Coroutines cannot be passed. A coroutine's Lua state is tied to the Lua state that created it, and there is no way the mixed C/Lua stack of a coroutine can be transferred from one Lua state to another.</li> |
1517 | <li>Starting with version 3.10.1, if the metatable contains <tt>__lanesignore</tt>, the object is skipped and <tt>nil</tt> is transfered instead.</li> | 1495 | <li>If the metatable contains <tt>__lanesignore</tt>, the object is skipped and <tt>nil</tt> is transferred instead.</li> |
1518 | </ul> | 1496 | </ul> |
1519 | </p> | 1497 | </p> |
1520 | 1498 | ||
@@ -1527,14 +1505,14 @@ events to a common Linda, but... :).</font> | |||
1527 | 1505 | ||
1528 | <table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"><tr><td><pre> | 1506 | <table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"><tr><td><pre> |
1529 | // expects a C function on top of the source Lua stack | 1507 | // expects a C function on top of the source Lua stack |
1530 | copy_func( lua_State *dest, lua_State* source) | 1508 | copy_func(lua_State *dest, lua_State* source) |
1531 | { | 1509 | { |
1532 | // extract C function pointer from source | 1510 | // extract C function pointer from source |
1533 | lua_CFunction func = lua_tocfunction( source, -1); | 1511 | lua_CFunction func = lua_tocfunction(source, -1); |
1534 | // transfer upvalues | 1512 | // transfer upvalues |
1535 | int nup = transfer_upvalues( dest, source); | 1513 | int nup = transfer_upvalues(dest, source); |
1536 | // dest Lua stack contains a copy of all upvalues | 1514 | // dest Lua stack contains a copy of all upvalues |
1537 | lua_pushcfunction( dest, func, nup); | 1515 | lua_pushcfunction(dest, func, nup); |
1538 | } | 1516 | } |
1539 | </pre></td></tr></table> | 1517 | </pre></td></tr></table> |
1540 | 1518 | ||
@@ -1546,12 +1524,12 @@ events to a common Linda, but... :).</font> | |||
1546 | 1524 | ||
1547 | <table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"><tr><td><pre> | 1525 | <table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"><tr><td><pre> |
1548 | // expects a C function on top of the source Lua stack | 1526 | // expects a C function on top of the source Lua stack |
1549 | copy_func( lua_State *dest, lua_State* source) | 1527 | copy_func(lua_State *dest, lua_State* source) |
1550 | { | 1528 | { |
1551 | // fetch function 'name' from source lookup database | 1529 | // fetch function 'name' from source lookup database |
1552 | char const* funcname = lookup_func_name( source, -1); | 1530 | char const* funcname = lookup_func_name(source, -1); |
1553 | // lookup a function bound to this name in the destination state, and push it on the stack | 1531 | // lookup a function bound to this name in the destination state, and push it on the stack |
1554 | push_resolved_func( dest, funcname); | 1532 | push_resolved_func(dest, funcname); |
1555 | } | 1533 | } |
1556 | </pre></td></tr></table> | 1534 | </pre></td></tr></table> |
1557 | 1535 | ||
@@ -1612,7 +1590,7 @@ events to a common Linda, but... :).</font> | |||
1612 | </p> | 1590 | </p> |
1613 | 1591 | ||
1614 | <table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"><tr><td><pre> | 1592 | <table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"><tr><td><pre> |
1615 | int luaopen_module( lua_State *L ) | 1593 | int luaopen_module(lua_State *L ) |
1616 | { | 1594 | { |
1617 | static char been_here; /* 0 by ANSI C */ | 1595 | static char been_here; /* 0 by ANSI C */ |
1618 | 1596 | ||
@@ -1628,27 +1606,27 @@ events to a common Linda, but... :).</font> | |||
1628 | 1606 | ||
1629 | <h3 id="clonable_userdata">Clonable full userdata in your own apps</h3> | 1607 | <h3 id="clonable_userdata">Clonable full userdata in your own apps</h3> |
1630 | <p> | 1608 | <p> |
1631 | Starting with version 3.13.0, a new way of passing full userdata across lanes uses a new <tt>__lanesclone</tt> metamethod. | 1609 | An alternative way of passing full userdata across lanes relies on the <tt>__lanesclone</tt> metamethod. |
1632 | When a deep userdata is cloned, Lanes calls <tt>__lanesclone</tt> once, in the context of the source lane.<br/> | 1610 | When a deep userdata is cloned, Lanes calls <tt>__lanesclone</tt> once, in the context of the source lane.<br/> |
1633 | The call receives the clone and original as light userdata, plus the actual userdata size, as in <tt>clone:__lanesclone(original,size)</tt>, and should perform the actual cloning.<br/> | 1611 | The call receives the clone and original as light userdata, plus the actual userdata size, as in <tt>clone:__lanesclone(original,size)</tt>, and should perform the actual cloning.<br/> |
1634 | A typical implementation would look like (BEWARE, THIS CHANGED WITH VERSION 3.16.0): | 1612 | A typical implementation would look like: |
1635 | <table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"><tr><td><pre> | 1613 | <table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"><tr><td><pre> |
1636 | static int clonable_lanesclone( lua_State* L) | 1614 | static int clonable_lanesclone(lua_State* L) |
1637 | { | 1615 | { |
1638 | switch( lua_gettop( L)) | 1616 | switch(lua_gettop(L)) |
1639 | { | 1617 | { |
1640 | case 3: | 1618 | case 3: |
1641 | { | 1619 | { |
1642 | struct s_MyClonableUserdata* self = lua_touserdata( L, 1); | 1620 | struct s_MyClonableUserdata* self = lua_touserdata(L, 1); |
1643 | struct s_MyClonableUserdata* from = lua_touserdata( L, 2); | 1621 | struct s_MyClonableUserdata* from = lua_touserdata(L, 2); |
1644 | size_t len = lua_tointeger( L, 3); | 1622 | size_t len = lua_tointeger(L, 3); |
1645 | assert( len == sizeof(struct s_MyClonableUserdata)); | 1623 | assert(len == sizeof(struct s_MyClonableUserdata)); |
1646 | *self = *from; | 1624 | *self = *from; |
1647 | } | 1625 | } |
1648 | return 0; | 1626 | return 0; |
1649 | 1627 | ||
1650 | default: | 1628 | default: |
1651 | (void) luaL_error( L, "Lanes called clonable_lanesclone with unexpected parameters"); | 1629 | std::ignore = luaL_error(L, "Lanes called clonable_lanesclone with unexpected parameters"); |
1652 | } | 1630 | } |
1653 | return 0; | 1631 | return 0; |
1654 | } | 1632 | } |
@@ -1661,20 +1639,20 @@ static int clonable_lanesclone( lua_State* L) | |||
1661 | <table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"><tr><td><pre> | 1639 | <table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"><tr><td><pre> |
1662 | int luaopen_deep_test(lua_State* L) | 1640 | int luaopen_deep_test(lua_State* L) |
1663 | { | 1641 | { |
1664 | luaL_newlib( L, deep_module); | 1642 | luaL_newlib(L, deep_module); |
1665 | 1643 | ||
1666 | // preregister the metatables for the types we can instantiate so that Lanes knows about them | 1644 | // preregister the metatables for the types we can instantiate so that Lanes knows about them |
1667 | if( luaL_newmetatable( L, "clonable")) | 1645 | if (luaL_newmetatable(L, "clonable")) |
1668 | { | 1646 | { |
1669 | luaL_setfuncs( L, clonable_mt, 0); | 1647 | luaL_setfuncs(L, clonable_mt, 0); |
1670 | lua_pushvalue(L, -1); | 1648 | lua_pushvalue(L, -1); |
1671 | lua_setfield(L, -2, "__index"); | 1649 | lua_setfield(L, -2, "__index"); |
1672 | } | 1650 | } |
1673 | lua_setfield(L, -2, "__clonableMT"); // actual name is not important | 1651 | lua_setfield(L, -2, "__clonableMT"); // actual name is not important |
1674 | 1652 | ||
1675 | if( luaL_newmetatable( L, "deep")) | 1653 | if (luaL_newmetatable(L, "deep")) |
1676 | { | 1654 | { |
1677 | luaL_setfuncs( L, deep_mt, 0); | 1655 | luaL_setfuncs(L, deep_mt, 0); |
1678 | lua_pushvalue(L, -1); | 1656 | lua_pushvalue(L, -1); |
1679 | lua_setfield(L, -2, "__index"); | 1657 | lua_setfield(L, -2, "__index"); |
1680 | } | 1658 | } |
@@ -1688,10 +1666,10 @@ int luaopen_deep_test(lua_State* L) | |||
1688 | <p> | 1666 | <p> |
1689 | Then a new clonable userdata instance can be created just like any non-Lanes-aware userdata, as long as its metatable contains the aforementioned <tt>__lanesclone</tt> method. | 1667 | Then a new clonable userdata instance can be created just like any non-Lanes-aware userdata, as long as its metatable contains the aforementioned <tt>__lanesclone</tt> method. |
1690 | <table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"><tr><td><pre> | 1668 | <table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"><tr><td><pre> |
1691 | int luaD_new_clonable( lua_State* L) | 1669 | int luaD_new_clonable(lua_State* L) |
1692 | { | 1670 | { |
1693 | lua_newuserdata( L, sizeof( struct s_MyClonableUserdata)); | 1671 | lua_newuserdata(L, sizeof(struct s_MyClonableUserdata)); |
1694 | luaL_setmetatable( L, "clonable"); | 1672 | luaL_setmetatable(L, "clonable"); |
1695 | return 1; | 1673 | return 1; |
1696 | } | 1674 | } |
1697 | </pre></td></tr></table> | 1675 | </pre></td></tr></table> |
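<p>
On the Lua side, usage could then look like this (assuming the module above is built as <tt>deep_test</tt> and exports the constructor as <tt>new_clonable()</tt>):
</p>
<table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"><tr><td><pre>
 local lanes = require "lanes".configure()
 local dt = lanes.require "deep_test"
 local linda = lanes.linda()

 linda:send("u", dt.new_clonable())        -- the userdata is cloned on its way through the linda
 local k, clone = linda:receive(0.1, "u")  -- 'clone' is a copy living in this state
</pre></td></tr></table>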
@@ -1706,17 +1684,17 @@ int luaD_new_clonable( lua_State* L) | |||
1706 | <ol> | 1684 | <ol> |
1707 | <li> | 1685 | <li> |
1708 | Provide an <i>identity function</i> for your userdata, in C. This function is used for creation and deletion of your deep userdata (the shared resource), and for making metatables for the state-specific proxies for accessing it. The prototype is | 1686 | Provide an <i>identity function</i> for your userdata, in C. This function is used for creation and deletion of your deep userdata (the shared resource), and for making metatables for the state-specific proxies for accessing it. The prototype is |
1709 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"><tr><td><pre> void* idfunc( lua_State* L, DeepOp op_);</pre></td></tr></table> | 1687 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"><tr><td><pre> void* idfunc(lua_State* L, DeepOp op_);</pre></td></tr></table> |
1710 | <tt>op_</tt> can be one of: | 1688 | <tt>op_</tt> can be one of: |
1711 | <ul> | 1689 | <ul> |
1712 | <li><tt>eDO_new</tt>: requests the creation of a new object, whose pointer is returned. Starting with version 3.13.0, object should embed <tt>DeepPrelude</tt> structure as header and initialize its <tt>magic</tt> member with the current <tt>DEEP_VERSION</tt>.</li> | 1690 | <li><tt>DeepOp::New</tt>: requests the creation of a new object, whose pointer is returned. Said object must derive from <tt>DeepPrelude</tt>.</li> |
1713 | <li><tt>eDO_delete</tt>: receives this same pointer on the stack as a light userdata, and should cleanup the object.</li> | 1691 | <li><tt>DeepOp::Delete</tt>: receives this same pointer on the stack as a light userdata, and should cleanup the object.</li> |
1714 | <li><tt>eDO_metatable</tt>: should build a metatable for the object. Don't cache the metatable yourself, Lanes takes care of it (<tt>eDO_metatable</tt> should only be invoked once per state). Just push the metatable on the stack.</li> | 1692 | <li><tt>DeepOp::Metatable</tt>: should build a metatable for the object. Don't cache the metatable yourself, Lanes takes care of it (<tt>DeepOp::Metatable</tt> should only be invoked once per state). Just push the metatable on the stack.</li> |
1715 | <li><tt>eDO_module</tt>: requests the name of the module that exports the idfunc, to be returned. It is necessary so that Lanes can require it in any lane state that receives a userdata. This is to prevent crashes in situations where the module could be unloaded while the idfunc pointer is still held.</li> | 1693 | <li><tt>DeepOp::Module</tt>: requests the name of the module that exports the idfunc, to be returned. It is necessary so that Lanes can require it in any lane state that receives a userdata. This is to prevent crashes in situations where the module could be unloaded while the idfunc pointer is still held.</li> |
1716 | </ul> | 1694 | </ul> |
1717 | Take a look at <tt>linda_id</tt> in <tt>lanes.c</tt> or <tt>deep_test_id</tt> in <tt>deep_test.c</tt>. | 1695 | Take a look at <tt>linda_id</tt> in <tt>lanes.cpp</tt> or <tt>deep_test_id</tt> in <tt>deep_test.cpp</tt>. |
1718 | </li> | 1696 | </li> |
1719 | <li>Include <tt>"deep.h"</tt> and either link against Lanes or statically compile <tt>compat.c deep.c tools.c universe.c</tt> into your module if you want to avoid a runtime dependency for users that will use your module without Lanes. | 1697 | <li>Include <tt>"deep.h"</tt> and either link against Lanes or statically compile <tt>compat.cpp deep.cpp tools.cpp universe.cpp</tt> into your module if you want to avoid a runtime dependency for users that will use your module without Lanes. |
1720 | <li>Instantiate your userdata using <tt>luaG_newdeepuserdata()</tt>, instead of the regular <tt>lua_newuserdata()</tt>. Given an <tt>idfunc</tt>, it sets up the support structures and returns a state-specific proxy userdata for accessing your data. This proxy can also be copied over to other lanes.</li> | 1698 | <li>Instantiate your userdata using <tt>luaG_newdeepuserdata()</tt>, instead of the regular <tt>lua_newuserdata()</tt>. Given an <tt>idfunc</tt>, it sets up the support structures and returns a state-specific proxy userdata for accessing your data. This proxy can also be copied over to other lanes.</li> |
1721 | <li>To access the deep userdata from your C code, use <tt>luaG_todeep()</tt> instead of the regular <tt>lua_touserdata()</tt>.</li> | 1699 | <li>To access the deep userdata from your C code, use <tt>luaG_todeep()</tt> instead of the regular <tt>lua_touserdata()</tt>.</li> |
1722 | </ol> | 1700 | </ol> |
@@ -1726,17 +1704,17 @@ int luaD_new_clonable( lua_State* L) | |||
1726 | </p> | 1704 | </p> |
1727 | 1705 | ||
1728 | <p> | 1706 | <p> |
1729 | Deep userdata in transit inside keeper states (sent in a linda but not yet consumed) don't call <tt>idfunc(eDO_delete)</tt> and aren't considered by reference counting. The rationale is the following: | 1707 | Deep userdata in transit inside keeper states (sent in a linda but not yet consumed) don't call <tt>idfunc(DeepOp::Delete)</tt> and aren't considered by reference counting. The rationale is the following: |
1730 | <br /> | 1708 | <br /> |
1731 | If some non-keeper state holds a deep userdata for some deep object, then even if the keeper collects its own deep userdata, it shouldn't be cleaned up since the refcount is not 0. | 1709 | If some non-keeper state holds a deep userdata for some deep object, then even if the keeper collects its own deep userdata, it shouldn't be cleaned up since the refcount is not 0. |
1732 | <br /> | 1710 | <br /> |
1733 | OTOH, if a keeper state holds the last deep userdata for some deep object, then no lane can do actual work with it. Deep userdata's <tt>idfunc()</tt> is never called from a keeper state. | 1711 | OTOH, if a keeper state holds the last deep userdata for some deep object, then no lane can do actual work with it. Deep userdata's <tt>idfunc()</tt> is never called from a keeper state. |
1734 | <br /> | 1712 | <br /> |
1735 | Therefore, Lanes can just call <tt>idfunc(eDO_delete)</tt> when the last non-keeper-held deep userdata is collected, as long as it doesn't do the same in a keeper state after that, since any remaining deep userdata in keeper states now hold stale pointers. | 1713 | Therefore, Lanes can just call <tt>idfunc(DeepOp::Delete)</tt> when the last non-keeper-held deep userdata is collected, as long as it doesn't do the same in a keeper state after that, since any remaining deep userdata in keeper states now hold stale pointers. |
1736 | </p> | 1714 | </p> |
1737 | 1715 | ||
1738 | <p> | 1716 | <p> |
1739 | <b>NOTE</b>: The lifespan of deep userdata may exceed that of the Lua state that created it. The allocation of the data storage should not be tied to the Lua state used. In other words, use <tt>malloc()</tt>/<tt>free()</tt> or similar memory handling mechanism. | 1717 | <b>NOTE</b>: The lifespan of deep userdata may exceed that of the Lua state that created it. The allocation of the data storage should not be tied to the Lua state used. In other words, use <tt>new</tt>/<tt>delete</tt>, <tt>malloc()</tt>/<tt>free()</tt> or a similar memory handling mechanism. |
1740 | </p> | 1718 | </p> |
1741 | 1719 | ||
1742 | 1720 | ||
@@ -1757,8 +1735,8 @@ int luaD_new_clonable( lua_State* L) | |||
1757 | In multithreaded scenarios, giving multiple parameters to <tt>print()</tt> or <tt>file:write()</tt> may cause their output to overlap, something like this: | 1735 | In multithreaded scenarios, giving multiple parameters to <tt>print()</tt> or <tt>file:write()</tt> may cause their output to overlap, something like this: |
1758 | 1736 | ||
1759 | <table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"><tr><td><pre> | 1737 | <table border="1" bgcolor="#FFFFE0" cellpadding="10" style="width:50%"><tr><td><pre> |
1760 | A: print( 1, 2, 3, 4 ) | 1738 | A: print(1, 2, 3, 4) |
1761 | B: print( 'a', 'b', 'c', 'd' ) | 1739 | B: print('a', 'b', 'c', 'd') |
1762 | 1740 | ||
1763 | 1 a b 2 3 c d 4 | 1741 | 1 a b 2 3 c d 4 |
1764 | </pre></td></tr></table> | 1742 | </pre></td></tr></table> |
@@ -1824,4 +1802,4 @@ int luaD_new_clonable( lua_State* L) | |||
1824 | </p> | 1802 | </p> |
1825 | 1803 | ||
1826 | </body> | 1804 | </body> |
1827 | </html> \ No newline at end of file | 1805 | </html> |
diff --git a/lanes-3.17.0-0.rockspec b/lanes-4.0.0-0.rockspec index 69ee515..4e1b370 100644 --- a/lanes-3.17.0-0.rockspec +++ b/lanes-4.0.0-0.rockspec | |||
@@ -7,11 +7,11 @@ | |||
7 | 7 | ||
8 | package = "Lanes" | 8 | package = "Lanes" |
9 | 9 | ||
10 | version = "3.17.0-0" | 10 | version = "4.0.0-0" |
11 | 11 | ||
12 | source= { | 12 | source= { |
13 | url= "git+https://github.com/LuaLanes/lanes.git", | 13 | url= "git+https://github.com/LuaLanes/lanes.git", |
14 | branch= "v3.17.0" | 14 | branch= "v4.0.0" |
15 | } | 15 | } |
16 | 16 | ||
17 | description = { | 17 | description = { |
@@ -60,16 +60,16 @@ build = { | |||
60 | { | 60 | { |
61 | sources = | 61 | sources = |
62 | { | 62 | { |
63 | "src/cancel.c", | 63 | "src/cancel.cpp", |
64 | "src/compat.c", | 64 | "src/compat.cpp", |
65 | "src/deep.c", | 65 | "src/deep.cpp", |
66 | "src/keeper.c", | 66 | "src/keeper.cpp", |
67 | "src/lanes.c", | 67 | "src/lanes.cpp", |
68 | "src/linda.c", | 68 | "src/linda.cpp", |
69 | "src/tools.c", | 69 | "src/tools.cpp", |
70 | "src/state.c", | 70 | "src/state.cpp", |
71 | "src/threading.c", | 71 | "src/threading.cpp", |
72 | "src/universe.c" | 72 | "src/universe.cpp" |
73 | }, | 73 | }, |
74 | incdirs = { "src"}, | 74 | incdirs = { "src"}, |
75 | }, | 75 | }, |
diff --git a/make-vc.cmd b/make-vc.cmd index 6b63b21..f4cd412 100644 --- a/make-vc.cmd +++ b/make-vc.cmd | |||
@@ -130,8 +130,8 @@ goto ERR_NOLUA | |||
130 | @REM | 130 | @REM |
131 | @set FLAGS=/O2 /LD | 131 | @set FLAGS=/O2 /LD |
132 | 132 | ||
133 | cl %WARN% %FLAGS% /I "%LUA51%\include" /Felanes\core.dll src\*.c "%LUA_LIB%\lua5.1.lib" | 133 | cl %WARN% %FLAGS% /I "%LUA51%\include" /Felanes\core.dll src\*.cpp "%LUA_LIB%\lua5.1.lib" |
134 | @REM cl %WARN% %FLAGS% /I "%LUA51%\include" /Felanes\core.dll src\*.c "%LUA_LIB%\lua5.1.lib" /link /NODEFAULTLIB:libcmt | 134 | @REM cl %WARN% %FLAGS% /I "%LUA51%\include" /Felanes\core.dll src\*.cpp "%LUA_LIB%\lua5.1.lib" /link /NODEFAULTLIB:libcmt |
135 | 135 | ||
136 | @del lanes\core.lib | 136 | @del lanes\core.lib |
137 | @del lanes\core.exp | 137 | @del lanes\core.exp |
diff --git a/src/Makefile b/src/Makefile index c4d4c30..cef4174 100644 --- a/src/Makefile +++ b/src/Makefile | |||
@@ -119,7 +119,7 @@ MODULE_DIR=$(MODULE) | |||
119 | #--- | 119 | #--- |
120 | all: $(MODULE)/core.$(_SO) | 120 | all: $(MODULE)/core.$(_SO) |
121 | 121 | ||
122 | %.o: %.c *.h Makefile | 122 | %.o: %.cpp *.h Makefile |
123 | 123 | ||
124 | # Note: Don't put $(LUA_LIBS) ahead of $^; MSYS will not like that (I think) | 124 | # Note: Don't put $(LUA_LIBS) ahead of $^; MSYS will not like that (I think) |
125 | # | 125 | # |
diff --git a/src/cancel.c b/src/cancel.c deleted file mode 100644 index d739ff9..0000000 --- a/src/cancel.c +++ /dev/null | |||
@@ -1,302 +0,0 @@ | |||
1 | /* | ||
2 | -- | ||
3 | -- CANCEL.C | ||
4 | -- | ||
5 | -- Lane cancellation support | ||
6 | -- | ||
7 | -- Author: Benoit Germain <bnt.germain@gmail.com> | ||
8 | -- | ||
9 | --[[ | ||
10 | =============================================================================== | ||
11 | |||
12 | Copyright (C) 2011-2019 Benoit Germain <bnt.germain@gmail.com> | ||
13 | |||
14 | Permission is hereby granted, free of charge, to any person obtaining a copy | ||
15 | of this software and associated documentation files (the "Software"), to deal | ||
16 | in the Software without restriction, including without limitation the rights | ||
17 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||
18 | copies of the Software, and to permit persons to whom the Software is | ||
19 | furnished to do so, subject to the following conditions: | ||
20 | |||
21 | The above copyright notice and this permission notice shall be included in | ||
22 | all copies or substantial portions of the Software. | ||
23 | |||
24 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
25 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
26 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
27 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
28 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
29 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||
30 | THE SOFTWARE. | ||
31 | |||
32 | =============================================================================== | ||
33 | ]]-- | ||
34 | */ | ||
35 | |||
36 | #include <assert.h> | ||
37 | #include <string.h> | ||
38 | |||
39 | #include "threading.h" | ||
40 | #include "cancel.h" | ||
41 | #include "tools.h" | ||
42 | #include "lanes_private.h" | ||
43 | |||
44 | // ################################################################################################ | ||
45 | // ################################################################################################ | ||
46 | |||
47 | /* | ||
48 | * Check if the thread in question ('L') has been signalled for cancel. | ||
49 | * | ||
50 | * Called by cancellation hooks and/or pending Linda operations (because then | ||
51 | * the check won't affect performance). | ||
52 | * | ||
53 | * Returns TRUE if any locks are to be exited, and 'cancel_error()' called, | ||
54 | * to make execution of the lane end. | ||
55 | */ | ||
56 | static inline enum e_cancel_request cancel_test( lua_State* L) | ||
57 | { | ||
58 | Lane* const s = get_lane_from_registry( L); | ||
59 | // 's' is NULL for the original main state (and no-one can cancel that) | ||
60 | return s ? s->cancel_request : CANCEL_NONE; | ||
61 | } | ||
62 | |||
63 | // ################################################################################################ | ||
64 | |||
65 | //--- | ||
66 | // bool = cancel_test() | ||
67 | // | ||
68 | // Available inside the global namespace of lanes | ||
69 | // returns a boolean saying if a cancel request is pending | ||
70 | // | ||
71 | LUAG_FUNC( cancel_test) | ||
72 | { | ||
73 | enum e_cancel_request test = cancel_test( L); | ||
74 | lua_pushboolean( L, test != CANCEL_NONE); | ||
75 | return 1; | ||
76 | } | ||
77 | |||
78 | // ################################################################################################ | ||
79 | // ################################################################################################ | ||
80 | |||
81 | static void cancel_hook( lua_State* L, lua_Debug* ar) | ||
82 | { | ||
83 | (void)ar; | ||
84 | DEBUGSPEW_CODE( fprintf( stderr, "cancel_hook\n")); | ||
85 | if( cancel_test( L) != CANCEL_NONE) | ||
86 | { | ||
87 | lua_sethook( L, NULL, 0, 0); | ||
88 | cancel_error( L); | ||
89 | } | ||
90 | } | ||
91 | |||
92 | // ################################################################################################ | ||
93 | // ################################################################################################ | ||
94 | |||
95 | //--- | ||
96 | // = thread_cancel( lane_ud [,timeout_secs=0.0] [,force_kill_bool=false] ) | ||
97 | // | ||
98 | // The originator thread asking us specifically to cancel the other thread. | ||
99 | // | ||
100 | // 'timeout': <0: wait forever, until the lane is finished | ||
101 | // 0.0: just signal it to cancel, no time waited | ||
102 | // >0: time to wait for the lane to detect cancellation | ||
103 | // | ||
104 | // 'force_kill': if true, and lane does not detect cancellation within timeout, | ||
105 | // it is forcefully killed. Using this with 0.0 timeout means just kill | ||
106 | // (unless the lane is already finished). | ||
107 | // | ||
108 | // Returns: true if the lane was already finished (DONE/ERROR_ST/CANCELLED) or if we | ||
109 | // managed to cancel it. | ||
110 | // false if the cancellation timed out, or a kill was needed. | ||
111 | // | ||
112 | |||
113 | // ################################################################################################ | ||
114 | |||
115 | static cancel_result thread_cancel_soft( Lane* s, double secs_, bool_t wake_lindas_) | ||
116 | { | ||
117 | s->cancel_request = CANCEL_SOFT; // it's now signaled to stop | ||
118 | // negative timeout: we don't want to truly abort the lane, we just want it to react to cancel_test() on its own | ||
119 | if( wake_lindas_) // wake the thread so that execution returns from any pending linda operation if desired | ||
120 | { | ||
121 | SIGNAL_T *waiting_on = s->waiting_on; | ||
122 | if( s->status == WAITING && waiting_on != NULL) | ||
123 | { | ||
124 | SIGNAL_ALL( waiting_on); | ||
125 | } | ||
126 | } | ||
127 | |||
128 | return THREAD_WAIT( &s->thread, secs_, &s->done_signal, &s->done_lock, &s->status) ? CR_Cancelled : CR_Timeout; | ||
129 | } | ||
130 | |||
131 | // ################################################################################################ | ||
132 | |||
133 | static cancel_result thread_cancel_hard( lua_State* L, Lane* s, double secs_, bool_t force_, double waitkill_timeout_) | ||
134 | { | ||
135 | cancel_result result; | ||
136 | |||
137 | s->cancel_request = CANCEL_HARD; // it's now signaled to stop | ||
138 | { | ||
139 | SIGNAL_T *waiting_on = s->waiting_on; | ||
140 | if( s->status == WAITING && waiting_on != NULL) | ||
141 | { | ||
142 | SIGNAL_ALL( waiting_on); | ||
143 | } | ||
144 | } | ||
145 | |||
146 | result = THREAD_WAIT( &s->thread, secs_, &s->done_signal, &s->done_lock, &s->status) ? CR_Cancelled : CR_Timeout; | ||
147 | |||
148 | if( (result == CR_Timeout) && force_) | ||
149 | { | ||
150 | // Killing is asynchronous; we _will_ wait for it to be done at | ||
151 | // GC, to make sure the data structure can be released (alternative | ||
152 | // would be use of "cancellation cleanup handlers" that at least | ||
153 | // PThread seems to have). | ||
154 | // | ||
155 | THREAD_KILL( &s->thread); | ||
156 | #if THREADAPI == THREADAPI_PTHREAD | ||
157 | // pthread: make sure the thread is really stopped! | ||
158 | // note that this may block forever if the lane doesn't call a cancellation point and pthread doesn't honor PTHREAD_CANCEL_ASYNCHRONOUS | ||
159 | result = THREAD_WAIT( &s->thread, waitkill_timeout_, &s->done_signal, &s->done_lock, &s->status) ? CR_Killed : CR_Timeout; | ||
160 | if( result == CR_Timeout) | ||
161 | { | ||
162 | (void) luaL_error( L, "force-killed lane failed to terminate within %f second%s", waitkill_timeout_, waitkill_timeout_ > 1 ? "s" : ""); | ||
163 | } | ||
164 | #else | ||
165 | (void) waitkill_timeout_; // unused | ||
166 | (void) L; // unused | ||
167 | #endif // THREADAPI == THREADAPI_PTHREAD | ||
168 | s->mstatus = KILLED; // mark 'gc' to wait for it | ||
169 | // note that s->status value must remain whatever it was at the time of the kill | ||
170 | // because we need to know if we can lua_close() the Lua State or not. | ||
171 | result = CR_Killed; | ||
172 | } | ||
173 | return result; | ||
174 | } | ||
175 | |||
176 | // ################################################################################################ | ||
177 | |||
178 | cancel_result thread_cancel( lua_State* L, Lane* s, CancelOp op_, double secs_, bool_t force_, double waitkill_timeout_) | ||
179 | { | ||
180 | // remember that lanes are not transferable: only one thread can cancel a lane, so no multithreading issue here | ||
181 | // We can read 's->status' without locks, but not wait for it (if Posix no PTHREAD_TIMEDJOIN) | ||
182 | if( s->mstatus == KILLED) | ||
183 | { | ||
184 | return CR_Killed; | ||
185 | } | ||
186 | |||
187 | if( s->status >= DONE) | ||
188 | { | ||
189 | // say "ok" by default, including when lane is already done | ||
190 | return CR_Cancelled; | ||
191 | } | ||
192 | |||
193 | // signal the linda to wake up the thread so that it can react to the cancel query | ||
194 | // let us hope we never land here with a pointer on a linda that has been destroyed... | ||
195 | if( op_ == CO_Soft) | ||
196 | { | ||
197 | return thread_cancel_soft( s, secs_, force_); | ||
198 | } | ||
199 | |||
200 | return thread_cancel_hard( L, s, secs_, force_, waitkill_timeout_); | ||
201 | } | ||
202 | |||
203 | // ################################################################################################ | ||
204 | // ################################################################################################ | ||
205 | |||
206 | // > 0: the mask | ||
207 | // = 0: soft | ||
208 | // < 0: hard | ||
209 | static CancelOp which_op( lua_State* L, int idx_) | ||
210 | { | ||
211 | if( lua_type( L, idx_) == LUA_TSTRING) | ||
212 | { | ||
213 | CancelOp op = CO_Invalid; | ||
214 | char const* str = lua_tostring( L, idx_); | ||
215 | if( strcmp( str, "soft") == 0) | ||
216 | { | ||
217 | op = CO_Soft; | ||
218 | } | ||
219 | else if( strcmp( str, "count") == 0) | ||
220 | { | ||
221 | op = CO_Count; | ||
222 | } | ||
223 | else if( strcmp( str, "line") == 0) | ||
224 | { | ||
225 | op = CO_Line; | ||
226 | } | ||
227 | else if( strcmp( str, "call") == 0) | ||
228 | { | ||
229 | op = CO_Call; | ||
230 | } | ||
231 | else if( strcmp( str, "ret") == 0) | ||
232 | { | ||
233 | op = CO_Ret; | ||
234 | } | ||
235 | else if( strcmp( str, "hard") == 0) | ||
236 | { | ||
237 | op = CO_Hard; | ||
238 | } | ||
239 | lua_remove( L, idx_); // argument is processed, remove it | ||
240 | if( op == CO_Invalid) | ||
241 | { | ||
242 | luaL_error( L, "invalid hook option %s", str); | ||
243 | } | ||
244 | return op; | ||
245 | } | ||
246 | return CO_Hard; | ||
247 | } | ||
248 | // ################################################################################################ | ||
249 | |||
250 | // bool[,reason] = lane_h:cancel( [mode, hookcount] [, timeout] [, force [, forcekill_timeout]]) | ||
251 | LUAG_FUNC( thread_cancel) | ||
252 | { | ||
253 | Lane* s = lua_toLane( L, 1); | ||
254 | double secs = 0.0; | ||
255 | CancelOp op = which_op( L, 2); // this removes the op string from the stack | ||
256 | |||
257 | if( op > 0) // hook is requested | ||
258 | { | ||
259 | int hook_count = (int) lua_tointeger( L, 2); | ||
260 | lua_remove( L, 2); // argument is processed, remove it | ||
261 | if( hook_count < 1) | ||
262 | { | ||
263 | return luaL_error( L, "hook count cannot be < 1"); | ||
264 | } | ||
265 | lua_sethook( s->L, cancel_hook, op, hook_count); | ||
266 | } | ||
267 | |||
268 | if( lua_type( L, 2) == LUA_TNUMBER) | ||
269 | { | ||
270 | secs = lua_tonumber( L, 2); | ||
271 | lua_remove( L, 2); // argument is processed, remove it | ||
272 | if( secs < 0.0) | ||
273 | { | ||
274 | return luaL_error( L, "cancel timeout cannot be < 0"); | ||
275 | } | ||
276 | } | ||
277 | |||
278 | { | ||
279 | bool_t force = lua_toboolean( L, 2); // FALSE if nothing there | ||
280 | double forcekill_timeout = luaL_optnumber( L, 3, 0.0); | ||
281 | |||
282 | switch( thread_cancel( L, s, op, secs, force, forcekill_timeout)) | ||
283 | { | ||
284 | case CR_Timeout: | ||
285 | lua_pushboolean( L, 0); | ||
286 | lua_pushstring( L, "timeout"); | ||
287 | return 2; | ||
288 | |||
289 | case CR_Cancelled: | ||
290 | lua_pushboolean( L, 1); | ||
291 | push_thread_status( L, s); | ||
292 | return 2; | ||
293 | |||
294 | case CR_Killed: | ||
295 | lua_pushboolean( L, 1); | ||
296 | push_thread_status( L, s); | ||
297 | return 2; | ||
298 | } | ||
299 | } | ||
300 | // should never happen, only here to prevent the compiler from complaining of "not all control paths returning a value" | ||
301 | return 0; | ||
302 | } | ||
diff --git a/src/cancel.cpp b/src/cancel.cpp new file mode 100644 index 0000000..b3e52b6 --- /dev/null +++ b/src/cancel.cpp | |||
@@ -0,0 +1,282 @@ | |||
1 | /* | ||
2 | -- | ||
3 | -- CANCEL.CPP | ||
4 | -- | ||
5 | -- Lane cancellation support | ||
6 | -- | ||
7 | -- Author: Benoit Germain <bnt.germain@gmail.com> | ||
8 | -- | ||
9 | --[[ | ||
10 | =============================================================================== | ||
11 | |||
12 | Copyright (C) 2011-2024 Benoit Germain <bnt.germain@gmail.com> | ||
13 | |||
14 | Permission is hereby granted, free of charge, to any person obtaining a copy | ||
15 | of this software and associated documentation files (the "Software"), to deal | ||
16 | in the Software without restriction, including without limitation the rights | ||
17 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||
18 | copies of the Software, and to permit persons to whom the Software is | ||
19 | furnished to do so, subject to the following conditions: | ||
20 | |||
21 | The above copyright notice and this permission notice shall be included in | ||
22 | all copies or substantial portions of the Software. | ||
23 | |||
24 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
25 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
26 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
27 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
28 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
29 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||
30 | THE SOFTWARE. | ||
31 | |||
32 | =============================================================================== | ||
33 | ]]-- | ||
34 | */ | ||
35 | |||
36 | #include "cancel.h" | ||
37 | |||
38 | #include "lanes_private.h" | ||
39 | #include "threading.h" | ||
40 | #include "tools.h" | ||
41 | |||
42 | // ################################################################################################ | ||
43 | // ################################################################################################ | ||
44 | |||
45 | /* | ||
46 | * Check if the thread in question ('L') has been signalled for cancel. | ||
47 | * | ||
48 | * Called by cancellation hooks and/or pending Linda operations (because then | ||
49 | * the check won't affect performance). | ||
50 | * | ||
51 | * Returns CANCEL_SOFT/HARD if any locks are to be exited, and 'raise_cancel_error()' called, | ||
52 | * to make execution of the lane end. | ||
53 | */ | ||
54 | [[nodiscard]] static inline CancelRequest cancel_test(lua_State* L) | ||
55 | { | ||
56 | Lane* const lane{ LANE_POINTER_REGKEY.readLightUserDataValue<Lane>(L) }; | ||
57 | // 'lane' is nullptr for the original main state (and no-one can cancel that) | ||
58 | return lane ? lane->cancel_request : CancelRequest::None; | ||
59 | } | ||
60 | |||
61 | // ################################################################################################ | ||
62 | |||
63 | //--- | ||
64 | // bool = cancel_test() | ||
65 | // | ||
66 | // Available inside the global namespace of lanes | ||
67 | // returns a boolean saying if a cancel request is pending | ||
68 | // | ||
69 | LUAG_FUNC( cancel_test) | ||
70 | { | ||
71 | CancelRequest test{ cancel_test(L) }; | ||
72 | lua_pushboolean(L, test != CancelRequest::None); | ||
73 | return 1; | ||
74 | } | ||
75 | |||
76 | // ################################################################################################ | ||
77 | // ################################################################################################ | ||
78 | |||
79 | [[nodiscard]] static void cancel_hook(lua_State* L, [[maybe_unused]] lua_Debug* ar) | ||
80 | { | ||
81 | DEBUGSPEW_CODE(fprintf(stderr, "cancel_hook\n")); | ||
82 | if (cancel_test(L) != CancelRequest::None) | ||
83 | { | ||
84 | lua_sethook(L, nullptr, 0, 0); | ||
85 | raise_cancel_error(L); | ||
86 | } | ||
87 | } | ||
88 | |||
89 | // ################################################################################################ | ||
90 | // ################################################################################################ | ||
91 | |||
92 | //--- | ||
93 | // = thread_cancel( lane_ud [,timeout_secs=0.0] [,wake_lindas_bool=false] ) | ||
94 | // | ||
95 | // The originator thread asking us specifically to cancel the other thread. | ||
96 | // | ||
97 | // 'timeout': <0: wait forever, until the lane is finished | ||
98 | // 0.0: just signal it to cancel, no time waited | ||
99 | // >0: time to wait for the lane to detect cancellation | ||
100 | // | ||
101 | // 'wake_lindas_bool': if true, signal any linda the thread is waiting on | ||
102 | // instead of waiting for its timeout (if any) | ||
103 | // | ||
104 | // Returns: true if the lane was already finished (Done/Error/Cancelled) or if we | ||
105 | // managed to cancel it. | ||
106 | // false if the cancellation timed out, or a kill was needed. | ||
107 | // | ||
108 | |||
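A caller's-eye sketch of those timeout conventions against the C++ thread_cancel() defined further down (the surrounding function and the lane pointer are illustrative assumptions; lua_Duration is the chrono-style duration alias used throughout this changeset):

// hedged illustration only: 'lane_' comes from elsewhere in the codebase
static void cancel_usage_sketch(Lane* lane_)
{
    // soft cancel with a zero duration: just raise the flag, and wake any pending linda operation
    CancelResult const soft{ thread_cancel(lane_, CancelOp::Soft, 0, lua_Duration{ 0.0 }, true) };

    // hard cancel, giving the lane half a second to notice before reporting a timeout
    CancelResult const hard{ thread_cancel(lane_, CancelOp::Hard, 0, lua_Duration{ 0.5 }, true) };

    (void) soft; (void) hard; // a real caller would inspect the results
}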
109 | // ################################################################################################ | ||
110 | |||
111 | [[nodiscard]] static CancelResult thread_cancel_soft(Lane* lane_, lua_Duration duration_, bool wake_lane_) | ||
112 | { | ||
113 | lane_->cancel_request = CancelRequest::Soft; // it's now signaled to stop | ||
114 | // negative timeout: we don't want to truly abort the lane, we just want it to react to cancel_test() on its own | ||
115 | if (wake_lane_) // wake the thread so that execution returns from any pending linda operation if desired | ||
116 | { | ||
117 | std::condition_variable* const waiting_on{ lane_->m_waiting_on }; | ||
118 | if (lane_->m_status == Lane::Waiting && waiting_on != nullptr) | ||
119 | { | ||
120 | waiting_on->notify_all(); | ||
121 | } | ||
122 | } | ||
123 | |||
124 | return lane_->waitForCompletion(duration_) ? CancelResult::Cancelled : CancelResult::Timeout; | ||
125 | } | ||
126 | |||
127 | // ################################################################################################ | ||
128 | |||
129 | [[nodiscard]] static CancelResult thread_cancel_hard(Lane* lane_, lua_Duration duration_, bool wake_lane_) | ||
130 | { | ||
131 | lane_->cancel_request = CancelRequest::Hard; // it's now signaled to stop | ||
132 | //lane_->m_thread.get_stop_source().request_stop(); | ||
133 | if (wake_lane_) // wake the thread so that execution returns from any pending linda operation if desired | ||
134 | { | ||
135 | std::condition_variable* waiting_on = lane_->m_waiting_on; | ||
136 | if (lane_->m_status == Lane::Waiting && waiting_on != nullptr) | ||
137 | { | ||
138 | waiting_on->notify_all(); | ||
139 | } | ||
140 | } | ||
141 | |||
142 | CancelResult result{ lane_->waitForCompletion(duration_) ? CancelResult::Cancelled : CancelResult::Timeout }; | ||
143 | return result; | ||
144 | } | ||
145 | |||
146 | // ################################################################################################ | ||
147 | |||
148 | CancelResult thread_cancel(Lane* lane_, CancelOp op_, int hook_count_, lua_Duration duration_, bool wake_lane_) | ||
149 | { | ||
150 | // remember that lanes are not transferable: only one thread can cancel a lane, so no multithreading issue here | ||
151 | // We can read 'lane_->status' without locks, but not wait for it (if Posix no PTHREAD_TIMEDJOIN) | ||
152 | if (lane_->m_status >= Lane::Done) | ||
153 | { | ||
154 | // say "ok" by default, including when lane is already done | ||
155 | return CancelResult::Cancelled; | ||
156 | } | ||
157 | |||
158 | // signal the linda to wake up the thread so that it can react to the cancel query | ||
159 | // let us hope we never land here with a pointer on a linda that has been destroyed... | ||
160 | if (op_ == CancelOp::Soft) | ||
161 | { | ||
162 | return thread_cancel_soft(lane_, duration_, wake_lane_); | ||
163 | } | ||
164 | else if (static_cast<int>(op_) > static_cast<int>(CancelOp::Soft)) | ||
165 | { | ||
166 | lua_sethook(lane_->L, cancel_hook, static_cast<int>(op_), hook_count_); | ||
167 | } | ||
168 | |||
169 | return thread_cancel_hard(lane_, duration_, wake_lane_); | ||
170 | } | ||
171 | |||
172 | // ################################################################################################ | ||
173 | // ################################################################################################ | ||
174 | |||
175 | CancelOp which_cancel_op(char const* op_string_) | ||
176 | { | ||
177 | CancelOp op{ CancelOp::Invalid }; | ||
178 | if (strcmp(op_string_, "hard") == 0) | ||
179 | { | ||
180 | op = CancelOp::Hard; | ||
181 | } | ||
182 | else if (strcmp(op_string_, "soft") == 0) | ||
183 | { | ||
184 | op = CancelOp::Soft; | ||
185 | } | ||
186 | else if (strcmp(op_string_, "call") == 0) | ||
187 | { | ||
188 | op = CancelOp::MaskCall; | ||
189 | } | ||
190 | else if (strcmp(op_string_, "ret") == 0) | ||
191 | { | ||
192 | op = CancelOp::MaskRet; | ||
193 | } | ||
194 | else if (strcmp(op_string_, "line") == 0) | ||
195 | { | ||
196 | op = CancelOp::MaskLine; | ||
197 | } | ||
198 | else if (strcmp(op_string_, "count") == 0) | ||
199 | { | ||
200 | op = CancelOp::MaskCount; | ||
201 | } | ||
202 | return op; | ||
203 | } | ||
204 | |||
205 | // ################################################################################################ | ||
206 | |||
207 | [[nodiscard]] static CancelOp which_cancel_op(lua_State* L, int idx_) | ||
208 | { | ||
209 | if (lua_type(L, idx_) == LUA_TSTRING) | ||
210 | { | ||
211 | char const* const str{ lua_tostring(L, idx_) }; | ||
212 | CancelOp op{ which_cancel_op(str) }; | ||
213 | lua_remove(L, idx_); // argument is processed, remove it | ||
214 | if (op == CancelOp::Invalid) | ||
215 | { | ||
216 | luaL_error(L, "invalid hook option %s", str); // doesn't return | ||
217 | } | ||
218 | return op; | ||
219 | } | ||
220 | return CancelOp::Hard; | ||
221 | } | ||
222 | |||
223 | // ################################################################################################ | ||
224 | |||
225 | // bool[,reason] = lane_h:cancel( [mode, hookcount] [, timeout] [, wake_lindas]) | ||
226 | LUAG_FUNC(thread_cancel) | ||
227 | { | ||
228 | Lane* const lane{ lua_toLane(L, 1) }; | ||
229 | CancelOp const op{ which_cancel_op(L, 2) }; // this removes the op string from the stack | ||
230 | |||
231 | int hook_count{ 0 }; | ||
232 | if (static_cast<int>(op) > static_cast<int>(CancelOp::Soft)) // hook is requested | ||
233 | { | ||
234 | hook_count = static_cast<int>(luaL_checkinteger(L, 2)); | ||
235 | lua_remove(L, 2); // argument is processed, remove it | ||
236 | if (hook_count < 1) | ||
237 | { | ||
238 | return luaL_error(L, "hook count cannot be < 1"); | ||
239 | } | ||
240 | } | ||
241 | |||
242 | lua_Duration wait_timeout{ 0.0 }; | ||
243 | if (lua_type(L, 2) == LUA_TNUMBER) | ||
244 | { | ||
245 | wait_timeout = lua_Duration{ lua_tonumber(L, 2) }; | ||
246 | lua_remove(L, 2); // argument is processed, remove it | ||
247 | if (wait_timeout.count() < 0.0) | ||
248 | { | ||
249 | return luaL_error(L, "cancel timeout cannot be < 0"); | ||
250 | } | ||
251 | } | ||
252 | // we wake by default in "hard" mode (remember that hook is hard too), but this can be turned off if desired | ||
253 | bool wake_lane{ op != CancelOp::Soft }; | ||
254 | if (lua_gettop(L) >= 2) | ||
255 | { | ||
256 | if (!lua_isboolean(L, 2)) | ||
257 | { | ||
258 | return luaL_error(L, "wake_lindas parameter is not a boolean"); | ||
259 | } | ||
260 | wake_lane = lua_toboolean(L, 2); | ||
261 | lua_remove(L, 2); // argument is processed, remove it | ||
262 | } | ||
263 | STACK_CHECK_START_REL(L, 0); | ||
264 | switch (thread_cancel(lane, op, hook_count, wait_timeout, wake_lane)) | ||
265 | { | ||
266 | default: // should never happen unless we added a case and forgot to handle it | ||
267 | ASSERT_L(false); | ||
268 | break; | ||
269 | |||
270 | case CancelResult::Timeout: | ||
271 | lua_pushboolean(L, 0); // false | ||
272 | lua_pushstring(L, "timeout"); // false "timeout" | ||
273 | break; | ||
274 | |||
275 | case CancelResult::Cancelled: | ||
276 | lua_pushboolean(L, 1); // true | ||
277 | push_thread_status(L, lane); // true status | ||
278 | break; | ||
279 | } | ||
280 | STACK_CHECK(L, 2); | ||
281 | return 2; | ||
282 | } | ||
diff --git a/src/cancel.h b/src/cancel.h index b25d9f9..060edb3 100644 --- a/src/cancel.h +++ b/src/cancel.h | |||
@@ -1,63 +1,68 @@ | |||
1 | #if !defined( __LANES_CANCEL_H__) | 1 | #pragma once |
2 | #define __LANES_CANCEL_H__ 1 | ||
3 | 2 | ||
3 | #ifdef __cplusplus | ||
4 | extern "C" { | ||
5 | #endif // __cplusplus | ||
4 | #include "lua.h" | 6 | #include "lua.h" |
5 | #include "lualib.h" | 7 | #include "lualib.h" |
6 | #include "lauxlib.h" | 8 | #include "lauxlib.h" |
9 | #ifdef __cplusplus | ||
10 | } | ||
11 | #endif // __cplusplus | ||
7 | 12 | ||
8 | #include "uniquekey.h" | 13 | #include "uniquekey.h" |
9 | #include "macros_and_utils.h" | 14 | #include "macros_and_utils.h" |
10 | 15 | ||
16 | #include <chrono> | ||
17 | |||
11 | // ################################################################################################ | 18 | // ################################################################################################ |
12 | 19 | ||
13 | typedef struct s_Lane Lane; // forward | 20 | class Lane; // forward |
14 | 21 | ||
15 | /* | 22 | /* |
16 | * Lane cancellation request modes | 23 | * Lane cancellation request modes |
17 | */ | 24 | */ |
18 | enum e_cancel_request | 25 | enum class CancelRequest |
19 | { | 26 | { |
20 | CANCEL_NONE, // no pending cancel request | 27 | None, // no pending cancel request |
21 | CANCEL_SOFT, // user wants the lane to cancel itself manually on cancel_test() | 28 | Soft, // user wants the lane to cancel itself manually on cancel_test() |
22 | CANCEL_HARD // user wants the lane to be interrupted (meaning code won't return from those functions) from inside linda:send/receive calls | 29 | Hard // user wants the lane to be interrupted (meaning code won't return from those functions) from inside linda:send/receive calls |
23 | }; | 30 | }; |
24 | 31 | ||
25 | typedef enum | 32 | enum class CancelResult |
26 | { | 33 | { |
27 | CR_Timeout, | 34 | Timeout, |
28 | CR_Cancelled, | 35 | Cancelled |
29 | CR_Killed | 36 | }; |
30 | } cancel_result; | ||
31 | 37 | ||
32 | typedef enum | 38 | enum class CancelOp |
33 | { | 39 | { |
34 | CO_Invalid = -2, | 40 | Invalid = -2, |
35 | CO_Hard = -1, | 41 | Hard = -1, |
36 | CO_Soft = 0, | 42 | Soft = 0, |
37 | CO_Count = LUA_MASKCOUNT, | 43 | MaskCall = LUA_MASKCALL, |
38 | CO_Line = LUA_MASKLINE, | 44 | MaskRet = LUA_MASKRET, |
39 | CO_Call = LUA_MASKCALL, | 45 | MaskLine = LUA_MASKLINE, |
40 | CO_Ret = LUA_MASKRET, | 46 | MaskCount = LUA_MASKCOUNT, |
41 | } CancelOp; | 47 | }; |
42 | 48 | ||
43 | // crc64/we of string "CANCEL_ERROR" generated at http://www.nitrxgen.net/hashgen/ | 49 | // crc64/we of string "CANCEL_ERROR" generated at http://www.nitrxgen.net/hashgen/ |
44 | static DECLARE_CONST_UNIQUE_KEY(CANCEL_ERROR, 0xe97d41626cc97577); // 'cancel_error' sentinel | 50 | static constexpr UniqueKey CANCEL_ERROR{ 0xe97d41626cc97577ull }; // 'raise_cancel_error' sentinel |
45 | 51 | ||
46 | cancel_result thread_cancel( lua_State* L, Lane* s, CancelOp op_, double secs_, bool_t force_, double waitkill_timeout_); | 52 | [[nodiscard]] CancelOp which_cancel_op(char const* op_string_); |
53 | [[nodiscard]] CancelResult thread_cancel(Lane* lane_, CancelOp op_, int hook_count_, lua_Duration secs_, bool wake_lindas_); | ||
47 | 54 | ||
48 | static inline int cancel_error( lua_State* L) | 55 | [[noreturn]] static inline void raise_cancel_error(lua_State* L) |
49 | { | 56 | { |
50 | STACK_GROW( L, 1); | 57 | STACK_GROW(L, 1); |
51 | push_unique_key( L, CANCEL_ERROR); // special error value | 58 | CANCEL_ERROR.pushKey(L); // special error value |
52 | return lua_error( L); // doesn't return | 59 | raise_lua_error(L); // doesn't return |
53 | } | 60 | } |
54 | 61 | ||
55 | // ################################################################################################ | 62 | // ################################################################################################ |
56 | // ################################################################################################ | 63 | // ################################################################################################ |
57 | 64 | ||
58 | LUAG_FUNC( cancel_test); | 65 | LUAG_FUNC(cancel_test); |
59 | LUAG_FUNC( thread_cancel); | 66 | LUAG_FUNC(thread_cancel); |
60 | 67 | ||
61 | // ################################################################################################ | 68 | // ################################################################################################ |
62 | |||
63 | #endif // __LANES_CANCEL_H__ | ||
diff --git a/src/compat.c b/src/compat.cpp index bc39d4c..73d0f6b 100644 --- a/src/compat.c +++ b/src/compat.cpp | |||
@@ -14,7 +14,8 @@ | |||
14 | #if LUA_VERSION_NUM == 501 | 14 | #if LUA_VERSION_NUM == 501 |
15 | // ################################################################################################ | 15 | // ################################################################################################ |
16 | // ################################################################################################ | 16 | // ################################################################################################ |
17 | static int luaL_getsubtable (lua_State *L, int idx, const char *fname) | 17 | |
18 | static int luaL_getsubtable(lua_State* L, int idx, const char* fname) | ||
18 | { | 19 | { |
19 | lua_getfield(L, idx, fname); | 20 | lua_getfield(L, idx, fname); |
20 | if (lua_istable(L, -1)) | 21 | if (lua_istable(L, -1)) |
@@ -32,7 +33,7 @@ static int luaL_getsubtable (lua_State *L, int idx, const char *fname) | |||
32 | 33 | ||
33 | // ################################################################################################ | 34 | // ################################################################################################ |
34 | 35 | ||
35 | void luaL_requiref (lua_State *L, const char *modname, lua_CFunction openf, int glb) | 36 | void luaL_requiref(lua_State *L, const char *modname, lua_CFunction openf, int glb) |
36 | { | 37 | { |
37 | lua_pushcfunction(L, openf); | 38 | lua_pushcfunction(L, openf); |
38 | lua_pushstring(L, modname); /* argument to open function */ | 39 | lua_pushstring(L, modname); /* argument to open function */ |
@@ -58,21 +59,21 @@ void luaL_requiref (lua_State *L, const char *modname, lua_CFunction openf, int | |||
58 | void* lua_newuserdatauv( lua_State* L, size_t sz, int nuvalue) | 59 | void* lua_newuserdatauv( lua_State* L, size_t sz, int nuvalue) |
59 | { | 60 | { |
60 | ASSERT_L( nuvalue <= 1); | 61 | ASSERT_L( nuvalue <= 1); |
61 | return lua_newuserdata( L, sz); | 62 | return lua_newuserdata(L, sz); |
62 | } | 63 | } |
63 | 64 | ||
64 | // ################################################################################################ | 65 | // ################################################################################################ |
65 | 66 | ||
66 | // push on stack uservalue #n of full userdata at idx | 67 | // push on stack uservalue #n of full userdata at idx |
67 | int lua_getiuservalue( lua_State* L, int idx, int n) | 68 | int lua_getiuservalue(lua_State* L, int idx, int n) |
68 | { | 69 | { |
69 | // full userdata can have only 1 uservalue before 5.4 | 70 | // full userdata can have only 1 uservalue before 5.4 |
70 | if( n > 1) | 71 | if( n > 1) |
71 | { | 72 | { |
72 | lua_pushnil( L); | 73 | lua_pushnil(L); |
73 | return LUA_TNONE; | 74 | return LUA_TNONE; |
74 | } | 75 | } |
75 | lua_getuservalue( L, idx); | 76 | lua_getuservalue(L, idx); |
76 | 77 | ||
77 | #if LUA_VERSION_NUM == 501 | 78 | #if LUA_VERSION_NUM == 501 |
78 | /* default environment is not a nil (see lua_getfenv) */ | 79 | /* default environment is not a nil (see lua_getfenv) */ |
@@ -80,33 +81,33 @@ int lua_getiuservalue( lua_State* L, int idx, int n) | |||
80 | if (lua_rawequal(L, -2, -1) || lua_rawequal(L, -2, LUA_GLOBALSINDEX)) | 81 | if (lua_rawequal(L, -2, -1) || lua_rawequal(L, -2, LUA_GLOBALSINDEX)) |
81 | { | 82 | { |
82 | lua_pop(L, 2); | 83 | lua_pop(L, 2); |
83 | lua_pushnil( L); | 84 | lua_pushnil(L); |
84 | 85 | ||
85 | return LUA_TNONE; | 86 | return LUA_TNONE; |
86 | } | 87 | } |
87 | lua_pop(L, 1); /* remove package */ | 88 | lua_pop(L, 1); /* remove package */ |
88 | #endif | 89 | #endif |
89 | 90 | ||
90 | return lua_type( L, -1); | 91 | return lua_type(L, -1); |
91 | } | 92 | } |
92 | 93 | ||
93 | // ################################################################################################ | 94 | // ################################################################################################ |
94 | 95 | ||
95 | // Pops a value from the stack and sets it as the new n-th user value associated to the full userdata at the given index. | 96 | // Pops a value from the stack and sets it as the new n-th user value associated to the full userdata at the given index. |
96 | // Returns 0 if the userdata does not have that value. | 97 | // Returns 0 if the userdata does not have that value. |
97 | int lua_setiuservalue( lua_State* L, int idx, int n) | 98 | int lua_setiuservalue(lua_State* L, int idx, int n) |
98 | { | 99 | { |
99 | if( n > 1 | 100 | if( n > 1 |
100 | #if LUA_VERSION_NUM == 501 | 101 | #if LUA_VERSION_NUM == 501 |
101 | || lua_type( L, -1) != LUA_TTABLE | 102 | || lua_type(L, -1) != LUA_TTABLE |
102 | #endif | 103 | #endif |
103 | ) | 104 | ) |
104 | { | 105 | { |
105 | lua_pop( L, 1); | 106 | lua_pop(L, 1); |
106 | return 0; | 107 | return 0; |
107 | } | 108 | } |
108 | 109 | ||
109 | (void) lua_setuservalue( L, idx); | 110 | lua_setuservalue(L, idx); |
110 | return 1; // I guess anything non-0 is ok | 111 | return 1; // I guess anything non-0 is ok |
111 | } | 112 | } |
112 | 113 | ||
diff --git a/src/compat.h b/src/compat.h index fbcbee1..8d10e78 100644 --- a/src/compat.h +++ b/src/compat.h | |||
@@ -1,9 +1,14 @@ | |||
1 | #if !defined( __COMPAT_H__) | 1 | #pragma once |
2 | #define __COMPAT_H__ 1 | ||
3 | 2 | ||
3 | #ifdef __cplusplus | ||
4 | extern "C" { | ||
5 | #endif // __cplusplus | ||
4 | #include "lua.h" | 6 | #include "lua.h" |
5 | #include "lualib.h" | 7 | #include "lualib.h" |
6 | #include "lauxlib.h" | 8 | #include "lauxlib.h" |
9 | #ifdef __cplusplus | ||
10 | } | ||
11 | #endif // __cplusplus | ||
7 | 12 | ||
8 | // try to detect if we are building against LuaJIT or MoonJIT | 13 | // try to detect if we are building against LuaJIT or MoonJIT |
9 | #if defined(LUA_JITLIBNAME) | 14 | #if defined(LUA_JITLIBNAME) |
@@ -31,7 +36,7 @@ | |||
31 | #define lua_setuservalue lua_setfenv | 36 | #define lua_setuservalue lua_setfenv |
32 | #define lua_getuservalue lua_getfenv | 37 | #define lua_getuservalue lua_getfenv |
33 | #define lua_rawlen lua_objlen | 38 | #define lua_rawlen lua_objlen |
34 | #define luaG_registerlibfuncs( L, _funcs) luaL_register( L, NULL, _funcs) | 39 | #define luaG_registerlibfuncs(L, _funcs) luaL_register(L, nullptr, _funcs) |
35 | #define LUA_OK 0 | 40 | #define LUA_OK 0 |
36 | #define LUA_ERRGCMM 666 // doesn't exist in Lua 5.1, we don't care about the actual value | 41 | #define LUA_ERRGCMM 666 // doesn't exist in Lua 5.1, we don't care about the actual value |
37 | void luaL_requiref (lua_State* L, const char* modname, lua_CFunction openf, int glb); // implementation copied from Lua 5.2 sources | 42 | void luaL_requiref (lua_State* L, const char* modname, lua_CFunction openf, int glb); // implementation copied from Lua 5.2 sources |
@@ -94,4 +99,29 @@ int lua_setiuservalue( lua_State* L, int idx, int n); | |||
94 | 99 | ||
95 | #endif // LUA_VERSION_NUM == 504 | 100 | #endif // LUA_VERSION_NUM == 504 |
96 | 101 | ||
97 | #endif // __COMPAT_H__ | 102 | // ################################################################################################# |
103 | |||
104 | // a wrapper over lua types to see them easier in a debugger | ||
105 | enum class LuaType | ||
106 | { | ||
107 | NONE = LUA_TNONE, | ||
108 | NIL = LUA_TNIL, | ||
109 | BOOLEAN = LUA_TBOOLEAN, | ||
110 | LIGHTUSERDATA = LUA_TLIGHTUSERDATA, | ||
111 | NUMBER = LUA_TNUMBER, | ||
112 | STRING = LUA_TSTRING, | ||
113 | TABLE = LUA_TTABLE, | ||
114 | FUNCTION = LUA_TFUNCTION, | ||
115 | USERDATA = LUA_TUSERDATA, | ||
116 | THREAD = LUA_TTHREAD, | ||
117 | CDATA = 10 // LuaJIT CDATA | ||
118 | }; | ||
119 | |||
120 | inline LuaType lua_type_as_enum(lua_State* L, int idx_) | ||
121 | { | ||
122 | return static_cast<LuaType>(lua_type(L, idx_)); | ||
123 | } | ||
124 | inline char const* lua_typename(lua_State* L, LuaType t_) | ||
125 | { | ||
126 | return lua_typename(L, static_cast<int>(t_)); | ||
127 | } | ||
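A small usage sketch of the wrapper added above (the helper name is hypothetical; it relies only on the LuaType enum and the inline conversions just introduced):

// hypothetical helper: the enum makes the intent explicit and reads cleanly in a debugger watch window
inline bool is_scalar_type(lua_State* L, int idx_)
{
    LuaType const t{ lua_type_as_enum(L, idx_) };
    return t == LuaType::NIL || t == LuaType::BOOLEAN || t == LuaType::NUMBER || t == LuaType::STRING;
}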
diff --git a/src/deep.c b/src/deep.cpp index a1f078a..d0b8123 100644 --- a/src/deep.c +++ b/src/deep.cpp | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * DEEP.C Copyright (c) 2017, Benoit Germain | 2 | * DEEP.CPP Copyright (c) 2024, Benoit Germain |
3 | * | 3 | * |
4 | * Deep userdata support, separate in its own source file to help integration | 4 | * Deep userdata support, separate in its own source file to help integration |
5 | * without enforcing a Lanes dependency | 5 | * without enforcing a Lanes dependency |
@@ -9,7 +9,7 @@ | |||
9 | =============================================================================== | 9 | =============================================================================== |
10 | 10 | ||
11 | Copyright (C) 2002-10 Asko Kauppi <akauppi@gmail.com> | 11 | Copyright (C) 2002-10 Asko Kauppi <akauppi@gmail.com> |
12 | 2011-17 Benoit Germain <bnt.germain@gmail.com> | 12 | 2011-24 Benoit Germain <bnt.germain@gmail.com> |
13 | 13 | ||
14 | Permission is hereby granted, free of charge, to any person obtaining a copy | 14 | Permission is hereby granted, free of charge, to any person obtaining a copy |
15 | of this software and associated documentation files (the "Software"), to deal | 15 | of this software and associated documentation files (the "Software"), to deal |
@@ -32,20 +32,15 @@ THE SOFTWARE. | |||
32 | =============================================================================== | 32 | =============================================================================== |
33 | */ | 33 | */ |
34 | 34 | ||
35 | #include <stdio.h> | 35 | #include "deep.h" |
36 | #include <assert.h> | ||
37 | #include <string.h> | ||
38 | #include <ctype.h> | ||
39 | #include <stdlib.h> | ||
40 | #if !defined(__APPLE__) | ||
41 | #include <malloc.h> | ||
42 | #endif | ||
43 | 36 | ||
44 | #include "compat.h" | 37 | #include "compat.h" |
45 | #include "deep.h" | ||
46 | #include "tools.h" | 38 | #include "tools.h" |
47 | #include "universe.h" | ||
48 | #include "uniquekey.h" | 39 | #include "uniquekey.h" |
40 | #include "universe.h" | ||
41 | |||
42 | #include <bit> | ||
43 | #include <cassert> | ||
49 | 44 | ||
50 | /*-- Metatable copying --*/ | 45 | /*-- Metatable copying --*/ |
51 | 46 | ||
@@ -59,61 +54,65 @@ THE SOFTWARE. | |||
59 | * idfunc -> metatable | 54 | * idfunc -> metatable |
60 | */ | 55 | */ |
61 | // crc64/we of string "DEEP_LOOKUP_KEY" generated at http://www.nitrxgen.net/hashgen/ | 56 | // crc64/we of string "DEEP_LOOKUP_KEY" generated at http://www.nitrxgen.net/hashgen/ |
62 | static DECLARE_CONST_UNIQUE_KEY( DEEP_LOOKUP_KEY, 0x9fb9b4f3f633d83d); | 57 | static constexpr UniqueKey DEEP_LOOKUP_KEY{ 0x9fb9b4f3f633d83dull }; |
63 | 58 | ||
64 | /* | 59 | /* |
65 | * The deep proxy cache is a weak valued table listing all deep UD proxies indexed by the deep UD that they are proxying | 60 | * The deep proxy cache is a weak valued table listing all deep UD proxies indexed by the deep UD that they are proxying |
66 | * crc64/we of string "DEEP_PROXY_CACHE_KEY" generated at http://www.nitrxgen.net/hashgen/ | 61 | * crc64/we of string "DEEP_PROXY_CACHE_KEY" generated at http://www.nitrxgen.net/hashgen/ |
67 | */ | 62 | */ |
68 | static DECLARE_CONST_UNIQUE_KEY( DEEP_PROXY_CACHE_KEY, 0x05773d6fc26be106); | 63 | static constexpr UniqueKey DEEP_PROXY_CACHE_KEY{ 0x05773d6fc26be106ull }; |
69 | 64 | ||
70 | /* | 65 | /* |
71 | * Sets up [-1]<->[-2] two-way lookups, and ensures the lookup table exists. | 66 | * Sets up [-1]<->[-2] two-way lookups, and ensures the lookup table exists. |
72 | * Pops both values off the stack. | 67 | * Pops both values off the stack. |
73 | */ | 68 | */ |
74 | static void set_deep_lookup( lua_State* L) | 69 | static void set_deep_lookup(lua_State* L) |
75 | { | 70 | { |
76 | STACK_GROW( L, 3); | 71 | STACK_GROW( L, 3); |
77 | STACK_CHECK( L, 2); // a b | 72 | STACK_CHECK_START_REL(L, 2); // a b |
78 | push_registry_subtable( L, DEEP_LOOKUP_KEY); // a b {} | 73 | push_registry_subtable( L, DEEP_LOOKUP_KEY); // a b {} |
79 | STACK_MID( L, 3); | 74 | STACK_CHECK( L, 3); |
80 | lua_insert( L, -3); // {} a b | 75 | lua_insert( L, -3); // {} a b |
81 | lua_pushvalue( L, -1); // {} a b b | 76 | lua_pushvalue( L, -1); // {} a b b |
82 | lua_pushvalue( L,-3); // {} a b b a | 77 | lua_pushvalue( L,-3); // {} a b b a |
83 | lua_rawset( L, -5); // {} a b | 78 | lua_rawset( L, -5); // {} a b |
84 | lua_rawset( L, -3); // {} | 79 | lua_rawset( L, -3); // {} |
85 | lua_pop( L, 1); // | 80 | lua_pop( L, 1); // |
86 | STACK_END( L, 0); | 81 | STACK_CHECK( L, 0); |
87 | } | 82 | } |
88 | 83 | ||
84 | // ################################################################################################ | ||
85 | |||
89 | /* | 86 | /* |
90 | * Pops the key (metatable or idfunc) off the stack, and replaces with the | 87 | * Pops the key (metatable or idfunc) off the stack, and replaces with the |
91 | * deep lookup value (idfunc/metatable/nil). | 88 | * deep lookup value (idfunc/metatable/nil). |
92 | */ | 89 | */ |
93 | static void get_deep_lookup( lua_State* L) | 90 | static void get_deep_lookup(lua_State* L) |
94 | { | 91 | { |
95 | STACK_GROW( L, 1); | 92 | STACK_GROW( L, 1); |
96 | STACK_CHECK( L, 1); // a | 93 | STACK_CHECK_START_REL(L, 1); // a |
97 | REGISTRY_GET( L, DEEP_LOOKUP_KEY); // a {} | 94 | DEEP_LOOKUP_KEY.pushValue(L); // a {} |
98 | if( !lua_isnil( L, -1)) | 95 | if( !lua_isnil( L, -1)) |
99 | { | 96 | { |
100 | lua_insert( L, -2); // {} a | 97 | lua_insert( L, -2); // {} a |
101 | lua_rawget( L, -2); // {} b | 98 | lua_rawget( L, -2); // {} b |
102 | } | 99 | } |
103 | lua_remove( L, -2); // a|b | 100 | lua_remove( L, -2); // a|b |
104 | STACK_END( L, 1); | 101 | STACK_CHECK( L, 1); |
105 | } | 102 | } |
106 | 103 | ||
104 | // ################################################################################################ | ||
105 | |||
107 | /* | 106 | /* |
108 | * Return the registered ID function for 'index' (deep userdata proxy), | 107 | * Return the registered ID function for 'index' (deep userdata proxy), |
109 | * or NULL if 'index' is not a deep userdata proxy. | 108 | * or nullptr if 'index' is not a deep userdata proxy. |
110 | */ | 109 | */ |
111 | static inline luaG_IdFunction get_idfunc( lua_State* L, int index, LookupMode mode_) | 110 | [[nodiscard]] static inline luaG_IdFunction get_idfunc(lua_State* L, int index, LookupMode mode_) |
112 | { | 111 | { |
113 | // when looking inside a keeper, we are 100% sure the object is a deep userdata | 112 | // when looking inside a keeper, we are 100% sure the object is a deep userdata |
114 | if( mode_ == eLM_FromKeeper) | 113 | if (mode_ == LookupMode::FromKeeper) |
115 | { | 114 | { |
116 | DeepPrelude** proxy = (DeepPrelude**) lua_touserdata( L, index); | 115 | DeepPrelude** const proxy{ lua_tofulluserdata<DeepPrelude*>(L, index) }; |
117 | // we can (and must) cast and fetch the internally stored idfunc | 116 | // we can (and must) cast and fetch the internally stored idfunc |
118 | return (*proxy)->idfunc; | 117 | return (*proxy)->idfunc; |
119 | } | 118 | } |
@@ -122,34 +121,38 @@ static inline luaG_IdFunction get_idfunc( lua_State* L, int index, LookupMode mo | |||
122 | // essentially we are making sure that the metatable of the object we want to copy is stored in our metatable/idfunc database | 121 | // essentially we are making sure that the metatable of the object we want to copy is stored in our metatable/idfunc database |
123 | // it is the only way to ensure that the userdata is indeed a deep userdata! | 122 | // it is the only way to ensure that the userdata is indeed a deep userdata! |
124 | // of course, we could just trust the caller, but we won't | 123 | // of course, we could just trust the caller, but we won't |
125 | luaG_IdFunction ret; | ||
126 | STACK_GROW( L, 1); | 124 | STACK_GROW( L, 1); |
127 | STACK_CHECK( L, 0); | 125 | STACK_CHECK_START_REL(L, 0); |
128 | 126 | ||
129 | if( !lua_getmetatable( L, index)) // deep ... metatable? | 127 | if( !lua_getmetatable( L, index)) // deep ... metatable? |
130 | { | 128 | { |
131 | return NULL; // no metatable: can't be a deep userdata object! | 129 | return nullptr; // no metatable: can't be a deep userdata object! |
132 | } | 130 | } |
133 | 131 | ||
134 | // replace metatable with the idfunc pointer, if it is actually a deep userdata | 132 | // replace metatable with the idfunc pointer, if it is actually a deep userdata |
135 | get_deep_lookup( L); // deep ... idfunc|nil | 133 | get_deep_lookup( L); // deep ... idfunc|nil |
136 | 134 | ||
137 | ret = (luaG_IdFunction) lua_touserdata( L, -1); // NULL if not a userdata | 135 | luaG_IdFunction const ret{ *lua_tolightuserdata<luaG_IdFunction>(L, -1) }; // nullptr if not a userdata |
138 | lua_pop( L, 1); | 136 | lua_pop( L, 1); |
139 | STACK_END( L, 0); | 137 | STACK_CHECK( L, 0); |
140 | return ret; | 138 | return ret; |
141 | } | 139 | } |
142 | } | 140 | } |
143 | 141 | ||
142 | // ################################################################################################ | ||
144 | 143 | ||
145 | void free_deep_prelude( lua_State* L, DeepPrelude* prelude_) | 144 | void free_deep_prelude(lua_State* L, DeepPrelude* prelude_) |
146 | { | 145 | { |
146 | ASSERT_L(prelude_->idfunc); | ||
147 | STACK_CHECK_START_REL(L, 0); | ||
147 | // Call 'idfunc( "delete", deep_ptr )' to make deep cleanup | 148 | // Call 'idfunc( "delete", deep_ptr )' to make deep cleanup |
148 | lua_pushlightuserdata( L, prelude_); | 149 | lua_pushlightuserdata( L, prelude_); |
149 | ASSERT_L( prelude_->idfunc); | 150 | prelude_->idfunc( L, DeepOp::Delete); |
150 | prelude_->idfunc( L, eDO_delete); | 151 | lua_pop(L, 1); |
152 | STACK_CHECK(L, 0); | ||
151 | } | 153 | } |
152 | 154 | ||
155 | // ################################################################################################ | ||
153 | 156 | ||
154 | /* | 157 | /* |
155 | * void= mt.__gc( proxy_ud ) | 158 | * void= mt.__gc( proxy_ud ) |
@@ -157,23 +160,19 @@ void free_deep_prelude( lua_State* L, DeepPrelude* prelude_) | |||
157 | * End of life for a proxy object; reduce the deep reference count and clean it up if reaches 0. | 160 | * End of life for a proxy object; reduce the deep reference count and clean it up if reaches 0. |
158 | * | 161 | * |
159 | */ | 162 | */ |
160 | static int deep_userdata_gc( lua_State* L) | 163 | [[nodiscard]] static int deep_userdata_gc(lua_State* L) |
161 | { | 164 | { |
162 | DeepPrelude** proxy = (DeepPrelude**) lua_touserdata( L, 1); | 165 | DeepPrelude** const proxy{ lua_tofulluserdata<DeepPrelude*>(L, 1) }; |
163 | DeepPrelude* p = *proxy; | 166 | DeepPrelude* p = *proxy; |
164 | Universe* U = universe_get( L); | ||
165 | int v; | ||
166 | 167 | ||
167 | // can work without a universe if creating a deep userdata from some external C module when Lanes isn't loaded | 168 | // can work without a universe if creating a deep userdata from some external C module when Lanes isn't loaded |
168 | // in that case, we are not multithreaded and locking isn't necessary anyway | 169 | // in that case, we are not multithreaded and locking isn't necessary anyway |
169 | if( U) MUTEX_LOCK( &U->deep_lock); | 170 | bool const isLastRef{ p->m_refcount.fetch_sub(1, std::memory_order_relaxed) == 1 }; |
170 | v = -- (p->refcount); | ||
171 | if (U) MUTEX_UNLOCK( &U->deep_lock); | ||
172 | 171 | ||
173 | if( v == 0) | 172 | if (isLastRef) |
174 | { | 173 | { |
175 | // retrieve wrapped __gc | 174 | // retrieve wrapped __gc |
176 | lua_pushvalue( L, lua_upvalueindex( 1)); // self __gc? | 175 | lua_pushvalue( L, lua_upvalueindex( 1)); // self __gc? |
177 | if( !lua_isnil( L, -1)) | 176 | if( !lua_isnil( L, -1)) |
178 | { | 177 | { |
179 | lua_insert( L, -2); // __gc self | 178 | lua_insert( L, -2); // __gc self |
@@ -186,164 +185,157 @@ static int deep_userdata_gc( lua_State* L) | |||
186 | // top was set to 0, then userdata was pushed. "delete" might want to pop the userdata (we don't care), but should not push anything! | 185 | // top was set to 0, then userdata was pushed. "delete" might want to pop the userdata (we don't care), but should not push anything! |
187 | if ( lua_gettop( L) > 1) | 186 | if ( lua_gettop( L) > 1) |
188 | { | 187 | { |
189 | luaL_error( L, "Bad idfunc(eDO_delete): should not push anything"); | 188 | return luaL_error( L, "Bad idfunc(DeepOp::Delete): should not push anything"); |
190 | } | 189 | } |
191 | } | 190 | } |
192 | *proxy = NULL; // make sure we don't use it any more, just in case | 191 | *proxy = nullptr; // make sure we don't use it any more, just in case |
193 | return 0; | 192 | return 0; |
194 | } | 193 | } |
195 | 194 | ||
195 | // ################################################################################################ | ||
196 | 196 | ||
197 | /* | 197 | /* |
198 | * Push a proxy userdata on the stack. | 198 | * Push a proxy userdata on the stack. |
199 | * returns NULL if ok, else some error string related to bad idfunc behavior or module require problem | 199 | * returns nullptr if ok, else some error string related to bad idfunc behavior or module require problem |
200 | * (error cannot happen with mode_ == eLM_ToKeeper) | 200 | * (error cannot happen with mode_ == LookupMode::ToKeeper) |
201 | * | 201 | * |
202 | * Initializes necessary structures if it's the first time 'idfunc' is being | 202 | * Initializes necessary structures if it's the first time 'idfunc' is being |
203 | * used in this Lua state (metatable, registering it). Otherwise, increments the | 203 | * used in this Lua state (metatable, registering it). Otherwise, increments the |
204 | * reference count. | 204 | * reference count. |
205 | */ | 205 | */ |
206 | char const* push_deep_proxy( Universe* U, lua_State* L, DeepPrelude* prelude, int nuv_, LookupMode mode_) | 206 | char const* push_deep_proxy(Dest L, DeepPrelude* prelude, int nuv_, LookupMode mode_) |
207 | { | 207 | { |
208 | DeepPrelude** proxy; | ||
209 | |||
210 | // Check if a proxy already exists | 208 | // Check if a proxy already exists |
211 | push_registry_subtable_mode( L, DEEP_PROXY_CACHE_KEY, "v"); // DPC | 209 | push_registry_subtable_mode( L, DEEP_PROXY_CACHE_KEY, "v"); // DPC |
212 | lua_pushlightuserdata( L, prelude); // DPC deep | 210 | lua_pushlightuserdata( L, prelude); // DPC deep |
213 | lua_rawget( L, -2); // DPC proxy | 211 | lua_rawget( L, -2); // DPC proxy |
214 | if ( !lua_isnil( L, -1)) | 212 | if ( !lua_isnil( L, -1)) |
215 | { | 213 | { |
216 | lua_remove( L, -2); // proxy | 214 | lua_remove( L, -2); // proxy |
217 | return NULL; | 215 | return nullptr; |
218 | } | 216 | } |
219 | else | 217 | else |
220 | { | 218 | { |
221 | lua_pop( L, 1); // DPC | 219 | lua_pop( L, 1); // DPC |
222 | } | 220 | } |
223 | 221 | ||
224 | // can work without a universe if creating a deep userdata from some external C module when Lanes isn't loaded | ||
225 | // in that case, we are not multithreaded and locking isn't necessary anyway | ||
226 | if( U) MUTEX_LOCK( &U->deep_lock); | ||
227 | ++ (prelude->refcount); // one more proxy pointing to this deep data | ||
228 | if( U) MUTEX_UNLOCK( &U->deep_lock); | ||
229 | |||
230 | STACK_GROW( L, 7); | 222 | STACK_GROW( L, 7); |
231 | STACK_CHECK( L, 0); | 223 | STACK_CHECK_START_REL(L, 0); |
232 | 224 | ||
233 | // a new full userdata, fitted with the specified number of uservalue slots (always 1 for Lua < 5.4) | 225 | // a new full userdata, fitted with the specified number of uservalue slots (always 1 for Lua < 5.4) |
234 | proxy = lua_newuserdatauv( L, sizeof(DeepPrelude*), nuv_); // DPC proxy | 226 | DeepPrelude** proxy = (DeepPrelude**) lua_newuserdatauv(L, sizeof(DeepPrelude*), nuv_); // DPC proxy |
235 | ASSERT_L( proxy); | 227 | ASSERT_L( proxy); |
236 | *proxy = prelude; | 228 | *proxy = prelude; |
229 | prelude->m_refcount.fetch_add(1, std::memory_order_relaxed); // one more proxy pointing to this deep data | ||
237 | 230 | ||
238 | // Get/create metatable for 'idfunc' (in this state) | 231 | // Get/create metatable for 'idfunc' (in this state) |
239 | lua_pushlightuserdata( L, (void*)(uintptr_t)(prelude->idfunc)); // DPC proxy idfunc | 232 | lua_pushlightuserdata( L, std::bit_cast<void*>(prelude->idfunc)); // DPC proxy idfunc |
240 | get_deep_lookup( L); // DPC proxy metatable? | 233 | get_deep_lookup( L); // DPC proxy metatable? |
241 | 234 | ||
242 | if( lua_isnil( L, -1)) // // No metatable yet. | 235 | if( lua_isnil( L, -1)) // // No metatable yet. |
243 | { | 236 | { |
244 | char const* modname; | 237 | char const* modname; |
245 | int oldtop = lua_gettop( L); // DPC proxy nil | 238 | int oldtop = lua_gettop( L); // DPC proxy nil |
246 | lua_pop( L, 1); // DPC proxy | 239 | lua_pop( L, 1); // DPC proxy |
247 | // 1 - make one and register it | 240 | // 1 - make one and register it |
248 | if( mode_ != eLM_ToKeeper) | 241 | if (mode_ != LookupMode::ToKeeper) |
249 | { | 242 | { |
250 | (void) prelude->idfunc( L, eDO_metatable); // DPC proxy metatable | 243 | (void) prelude->idfunc( L, DeepOp::Metatable); // DPC proxy metatable |
251 | if( lua_gettop( L) - oldtop != 0 || !lua_istable( L, -1)) | 244 | if( lua_gettop( L) - oldtop != 0 || !lua_istable( L, -1)) |
252 | { | 245 | { |
253 | lua_settop( L, oldtop); // DPC proxy X | 246 | lua_settop( L, oldtop); // DPC proxy X |
254 | lua_pop( L, 3); // | 247 | lua_pop( L, 3); // |
255 | return "Bad idfunc(eOP_metatable): unexpected pushed value"; | 248 | return "Bad idfunc(eOP_metatable): unexpected pushed value"; |
256 | } | 249 | } |
257 | // if the metatable contains a __gc, we will call it from our own | 250 | // if the metatable contains a __gc, we will call it from our own |
258 | lua_getfield( L, -1, "__gc"); // DPC proxy metatable __gc | 251 | lua_getfield( L, -1, "__gc"); // DPC proxy metatable __gc |
259 | } | 252 | } |
260 | else | 253 | else |
261 | { | 254 | { |
262 | // keepers need a minimal metatable that only contains our own __gc | 255 | // keepers need a minimal metatable that only contains our own __gc |
263 | lua_newtable( L); // DPC proxy metatable | 256 | lua_newtable( L); // DPC proxy metatable |
264 | lua_pushnil( L); // DPC proxy metatable nil | 257 | lua_pushnil( L); // DPC proxy metatable nil |
265 | } | 258 | } |
266 | if( lua_isnil( L, -1)) | 259 | if( lua_isnil( L, -1)) |
267 | { | 260 | { |
268 | // Add our own '__gc' method | 261 | // Add our own '__gc' method |
269 | lua_pop( L, 1); // DPC proxy metatable | 262 | lua_pop( L, 1); // DPC proxy metatable |
270 | lua_pushcfunction( L, deep_userdata_gc); // DPC proxy metatable deep_userdata_gc | 263 | lua_pushcfunction( L, deep_userdata_gc); // DPC proxy metatable deep_userdata_gc |
271 | } | 264 | } |
272 | else | 265 | else |
273 | { | 266 | { |
274 | // Add our own '__gc' method wrapping the original | 267 | // Add our own '__gc' method wrapping the original |
275 | lua_pushcclosure( L, deep_userdata_gc, 1); // DPC proxy metatable deep_userdata_gc | 268 | lua_pushcclosure( L, deep_userdata_gc, 1); // DPC proxy metatable deep_userdata_gc |
276 | } | 269 | } |
277 | lua_setfield( L, -2, "__gc"); // DPC proxy metatable | 270 | lua_setfield( L, -2, "__gc"); // DPC proxy metatable |
278 | 271 | ||
279 | // Memorize for later rounds | 272 | // Memorize for later rounds |
280 | lua_pushvalue( L, -1); // DPC proxy metatable metatable | 273 | lua_pushvalue( L, -1); // DPC proxy metatable metatable |
281 | lua_pushlightuserdata( L, (void*)(uintptr_t)(prelude->idfunc)); // DPC proxy metatable metatable idfunc | 274 | lua_pushlightuserdata( L, std::bit_cast<void*>(prelude->idfunc)); // DPC proxy metatable metatable idfunc |
282 | set_deep_lookup( L); // DPC proxy metatable | 275 | set_deep_lookup( L); // DPC proxy metatable |
283 | 276 | ||
284 | // 2 - cause the target state to require the module that exported the idfunc | 277 | // 2 - cause the target state to require the module that exported the idfunc |
285 | // this is needed because we must make sure the shared library is still loaded as long as we hold a pointer on the idfunc | 278 | // this is needed because we must make sure the shared library is still loaded as long as we hold a pointer on the idfunc |
286 | { | 279 | { |
287 | int oldtop_module = lua_gettop( L); | 280 | int oldtop_module = lua_gettop( L); |
288 | modname = (char const*) prelude->idfunc( L, eDO_module); // DPC proxy metatable | 281 | modname = (char const*) prelude->idfunc( L, DeepOp::Module); // DPC proxy metatable |
289 | // make sure the function pushed nothing on the stack! | 282 | // make sure the function pushed nothing on the stack! |
290 | if( lua_gettop( L) - oldtop_module != 0) | 283 | if( lua_gettop( L) - oldtop_module != 0) |
291 | { | 284 | { |
292 | lua_pop( L, 3); // | 285 | lua_pop( L, 3); // |
293 | return "Bad idfunc(eOP_module): should not push anything"; | 286 | return "Bad idfunc(eOP_module): should not push anything"; |
294 | } | 287 | } |
295 | } | 288 | } |
296 | if( NULL != modname) // we actually got a module name | 289 | if (nullptr != modname) // we actually got a module name |
297 | { | 290 | { |
298 | // L.registry._LOADED exists without having registered the 'package' library. | 291 | // L.registry._LOADED exists without having registered the 'package' library. |
299 | lua_getglobal( L, "require"); // DPC proxy metatable require() | 292 | lua_getglobal( L, "require"); // DPC proxy metatable require() |
300 | // check that the module is already loaded (or being loaded, we are happy either way) | 293 | // check that the module is already loaded (or being loaded, we are happy either way) |
301 | if( lua_isfunction( L, -1)) | 294 | if( lua_isfunction( L, -1)) |
302 | { | 295 | { |
303 | lua_pushstring( L, modname); // DPC proxy metatable require() "module" | 296 | lua_pushstring( L, modname); // DPC proxy metatable require() "module" |
304 | lua_getfield( L, LUA_REGISTRYINDEX, LUA_LOADED_TABLE); // DPC proxy metatable require() "module" _R._LOADED | 297 | lua_getfield( L, LUA_REGISTRYINDEX, LUA_LOADED_TABLE); // DPC proxy metatable require() "module" _R._LOADED |
305 | if( lua_istable( L, -1)) | 298 | if( lua_istable( L, -1)) |
306 | { | 299 | { |
307 | bool_t alreadyloaded; | 300 | lua_pushvalue( L, -2); // DPC proxy metatable require() "module" _R._LOADED "module" |
308 | lua_pushvalue( L, -2); // DPC proxy metatable require() "module" _R._LOADED "module" | 301 | lua_rawget( L, -2); // DPC proxy metatable require() "module" _R._LOADED module |
309 | lua_rawget( L, -2); // DPC proxy metatable require() "module" _R._LOADED module | 302 | int const alreadyloaded = lua_toboolean( L, -1); |
310 | alreadyloaded = lua_toboolean( L, -1); | ||
311 | if( !alreadyloaded) // not loaded | 303 | if( !alreadyloaded) // not loaded |
312 | { | 304 | { |
313 | int require_result; | 305 | int require_result; |
314 | lua_pop( L, 2); // DPC proxy metatable require() "module" | 306 | lua_pop( L, 2); // DPC proxy metatable require() "module" |
315 | // require "modname" | 307 | // require "modname" |
316 | require_result = lua_pcall( L, 1, 0, 0); // DPC proxy metatable error? | 308 | require_result = lua_pcall( L, 1, 0, 0); // DPC proxy metatable error? |
317 | if( require_result != LUA_OK) | 309 | if( require_result != LUA_OK) |
318 | { | 310 | { |
319 | // failed, return the error message | 311 | // failed, return the error message |
320 | lua_pushfstring( L, "error while requiring '%s' identified by idfunc(eOP_module): ", modname); | 312 | lua_pushfstring( L, "error while requiring '%s' identified by idfunc(eOP_module): ", modname); |
321 | lua_insert( L, -2); // DPC proxy metatable prefix error | 313 | lua_insert( L, -2); // DPC proxy metatable prefix error |
322 | lua_concat( L, 2); // DPC proxy metatable error | 314 | lua_concat( L, 2); // DPC proxy metatable error |
323 | return lua_tostring( L, -1); | 315 | return lua_tostring( L, -1); |
324 | } | 316 | } |
325 | } | 317 | } |
326 | else // already loaded, we are happy | 318 | else // already loaded, we are happy |
327 | { | 319 | { |
328 | lua_pop( L, 4); // DPC proxy metatable | 320 | lua_pop( L, 4); // DPC proxy metatable |
329 | } | 321 | } |
330 | } | 322 | } |
331 | else // no L.registry._LOADED; can this ever happen? | 323 | else // no L.registry._LOADED; can this ever happen? |
332 | { | 324 | { |
333 | lua_pop( L, 6); // | 325 | lua_pop( L, 6); // |
334 | return "unexpected error while requiring a module identified by idfunc(eOP_module)"; | 326 | return "unexpected error while requiring a module identified by idfunc(eOP_module)"; |
335 | } | 327 | } |
336 | } | 328 | } |
337 | else // a module name, but no require() function :-( | 329 | else // a module name, but no require() function :-( |
338 | { | 330 | { |
339 | lua_pop( L, 4); // | 331 | lua_pop( L, 4); // |
340 | return "lanes receiving deep userdata should register the 'package' library"; | 332 | return "lanes receiving deep userdata should register the 'package' library"; |
341 | } | 333 | } |
342 | } | 334 | } |
343 | } | 335 | } |
344 | STACK_MID( L, 2); // DPC proxy metatable | 336 | STACK_CHECK(L, 2); // DPC proxy metatable |
345 | ASSERT_L( lua_isuserdata( L, -2)); | 337 | ASSERT_L(lua_type(L, -2) == LUA_TUSERDATA); |
346 | ASSERT_L( lua_istable( L, -1)); | 338 | ASSERT_L(lua_istable( L, -1)); |
347 | lua_setmetatable( L, -2); // DPC proxy | 339 | lua_setmetatable( L, -2); // DPC proxy |
348 | 340 | ||
349 | // If we're here, we obviously had to create a new proxy, so cache it. | 341 | // If we're here, we obviously had to create a new proxy, so cache it. |
@@ -351,11 +343,13 @@ char const* push_deep_proxy( Universe* U, lua_State* L, DeepPrelude* prelude, in | |||
351 | lua_pushvalue( L, -2); // DPC proxy deep proxy | 343 | lua_pushvalue( L, -2); // DPC proxy deep proxy |
352 | lua_rawset( L, -4); // DPC proxy | 344 | lua_rawset( L, -4); // DPC proxy |
353 | lua_remove( L, -2); // proxy | 345 | lua_remove( L, -2); // proxy |
354 | ASSERT_L( lua_isuserdata( L, -1)); | 346 | ASSERT_L(lua_type(L, -1) == LUA_TUSERDATA); |
355 | STACK_END( L, 0); | 347 | STACK_CHECK(L, 0); |
356 | return NULL; | 348 | return nullptr; |
357 | } | 349 | } |
358 | 350 | ||
351 | // ################################################################################################ | ||
352 | |||
359 | /* | 353 | /* |
360 | * Create a deep userdata | 354 | * Create a deep userdata |
361 | * | 355 | * |
@@ -366,9 +360,9 @@ char const* push_deep_proxy( Universe* U, lua_State* L, DeepPrelude* prelude, in | |||
366 | * | 360 | * |
367 | * 'idfunc' must fulfill the following features: | 361 | * 'idfunc' must fulfill the following features: |
368 | * | 362 | * |
369 | * lightuserdata = idfunc( eDO_new [, ...] ) -- creates a new deep data instance | 363 | * lightuserdata = idfunc( DeepOp::New [, ...] ) -- creates a new deep data instance |
370 | * void = idfunc( eDO_delete, lightuserdata ) -- releases a deep data instance | 364 | * void = idfunc( DeepOp::Delete, lightuserdata ) -- releases a deep data instance |
371 | * tbl = idfunc( eDO_metatable ) -- gives metatable for userdata proxies | 365 | * tbl = idfunc( DeepOp::Metatable ) -- gives metatable for userdata proxies |
372 | * | 366 | * |
373 | * Reference counting and true userdata proxying are taken care of for the | 367 | * Reference counting and true userdata proxying are taken care of for the |
374 | * actual data type. | 368 | * actual data type. |
@@ -378,46 +372,46 @@ char const* push_deep_proxy( Universe* U, lua_State* L, DeepPrelude* prelude, in | |||
378 | * | 372 | * |
379 | * Returns: 'proxy' userdata for accessing the deep data via 'luaG_todeep()' | 373 | * Returns: 'proxy' userdata for accessing the deep data via 'luaG_todeep()' |
380 | */ | 374 | */ |
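The contract listed above is easiest to see with a concrete sketch. The following is illustration only and is not part of this commit: MyDeepObject, my_idfunc and the module name "mydeepmodule" are made-up names, while the DeepOp dispatch and the stack handling follow what push_deep_proxy() and luaG_newdeepuserdata() expect in this file.

    // Sketch only: a hypothetical Lanes-aware deep type.
    #include "deep.h"
    #include <new>

    struct MyDeepObject : DeepPrelude   // DeepPrelude supplies magic, idfunc and m_refcount
    {
        int payload{ 0 };               // whatever shared state the module wants to expose
    };

    static void* my_idfunc(lua_State* L, DeepOp op_)
    {
        switch (op_)
        {
            case DeepOp::New:
                // allocate the shared object; Lanes stores this pointer inside each proxy
                return new (std::nothrow) MyDeepObject{};

            case DeepOp::Delete:
                // the pointer to free is pushed as a light userdata before this call (see the error paths above)
                delete static_cast<MyDeepObject*>(lua_touserdata(L, -1));
                return nullptr;

            case DeepOp::Metatable:
                // push exactly one table; push_deep_proxy() installs or wraps __gc itself
                lua_newtable(L);        // a real module would fill __index with its methods here
                return nullptr;

            case DeepOp::Module:
                // push nothing; return the module name so receiving states can require() it
                // and keep the shared library loaded, or return nullptr to skip that step
                return const_cast<char*>("mydeepmodule");
        }
        return nullptr;
    }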
381 | int luaG_newdeepuserdata( lua_State* L, luaG_IdFunction idfunc, int nuv_) | 375 | int luaG_newdeepuserdata(Dest L, luaG_IdFunction idfunc, int nuv_) |
382 | { | 376 | { |
383 | char const* errmsg; | ||
384 | |||
385 | STACK_GROW( L, 1); | 377 | STACK_GROW( L, 1); |
386 | STACK_CHECK( L, 0); | 378 | STACK_CHECK_START_REL(L, 0); |
379 | int const oldtop{ lua_gettop(L) }; | ||
380 | DeepPrelude* const prelude{ static_cast<DeepPrelude*>(idfunc(L, DeepOp::New)) }; | ||
381 | if (prelude == nullptr) | ||
387 | { | 382 | { |
388 | int const oldtop = lua_gettop( L); | 383 | return luaL_error( L, "idfunc(DeepOp::New) failed to create deep userdata (out of memory)"); |
389 | DeepPrelude* prelude = idfunc( L, eDO_new); | 384 | } |
390 | if( prelude == NULL) | ||
391 | { | ||
392 | return luaL_error( L, "idfunc(eDO_new) failed to create deep userdata (out of memory)"); | ||
393 | } | ||
394 | if( prelude->magic.value != DEEP_VERSION.value) | ||
395 | { | ||
396 | // just in case, don't leak the newly allocated deep userdata object | ||
397 | lua_pushlightuserdata( L, prelude); | ||
398 | idfunc( L, eDO_delete); | ||
399 | return luaL_error( L, "Bad idfunc(eDO_new): DEEP_VERSION is incorrect, rebuild your implementation with the latest deep implementation"); | ||
400 | } | ||
401 | prelude->refcount = 0; // 'push_deep_proxy' will lift it to 1 | ||
402 | prelude->idfunc = idfunc; | ||
403 | 385 | ||
404 | if( lua_gettop( L) - oldtop != 0) | 386 | if( prelude->magic != DEEP_VERSION) |
405 | { | 387 | { |
406 | // just in case, don't leak the newly allocated deep userdata object | 388 | // just in case, don't leak the newly allocated deep userdata object |
407 | lua_pushlightuserdata( L, prelude); | 389 | lua_pushlightuserdata( L, prelude); |
408 | idfunc( L, eDO_delete); | 390 | idfunc( L, DeepOp::Delete); |
409 | return luaL_error( L, "Bad idfunc(eDO_new): should not push anything on the stack"); | 391 | return luaL_error( L, "Bad idfunc(DeepOp::New): DEEP_VERSION is incorrect, rebuild your implementation with the latest deep implementation"); |
410 | } | 392 | } |
411 | errmsg = push_deep_proxy( universe_get( L), L, prelude, nuv_, eLM_LaneBody); // proxy | 393 | |
412 | if( errmsg != NULL) | 394 | ASSERT_L(prelude->m_refcount.load(std::memory_order_relaxed) == 0); // 'push_deep_proxy' will lift it to 1 |
413 | { | 395 | prelude->idfunc = idfunc; |
414 | return luaL_error( L, errmsg); | 396 | |
415 | } | 397 | if( lua_gettop( L) - oldtop != 0) |
398 | { | ||
399 | // just in case, don't leak the newly allocated deep userdata object | ||
400 | lua_pushlightuserdata( L, prelude); | ||
401 | idfunc( L, DeepOp::Delete); | ||
402 | return luaL_error( L, "Bad idfunc(DeepOp::New): should not push anything on the stack"); | ||
416 | } | 403 | } |
417 | STACK_END( L, 1); | 404 | |
405 | char const* const errmsg{ push_deep_proxy(L, prelude, nuv_, LookupMode::LaneBody) }; // proxy | ||
406 | if (errmsg != nullptr) | ||
407 | { | ||
408 | return luaL_error( L, errmsg); | ||
409 | } | ||
410 | STACK_CHECK( L, 1); | ||
418 | return 1; | 411 | return 1; |
419 | } | 412 | } |
420 | 413 | ||
414 | // ################################################################################################ | ||
421 | 415 | ||
422 | /* | 416 | /* |
423 | * Access deep userdata through a proxy. | 417 | * Access deep userdata through a proxy. |
@@ -425,80 +419,76 @@ int luaG_newdeepuserdata( lua_State* L, luaG_IdFunction idfunc, int nuv_) | |||
425 | * Reference count is not changed, and access to the deep userdata is not | 419 | * Reference count is not changed, and access to the deep userdata is not |
426 | * serialized. It is the module's responsibility to prevent conflicting usage. | 420 | * serialized. It is the module's responsibility to prevent conflicting usage. |
427 | */ | 421 | */ |
428 | void* luaG_todeep( lua_State* L, luaG_IdFunction idfunc, int index) | 422 | DeepPrelude* luaG_todeep(lua_State* L, luaG_IdFunction idfunc, int index) |
429 | { | 423 | { |
430 | DeepPrelude** proxy; | 424 | STACK_CHECK_START_REL(L, 0); |
431 | |||
432 | STACK_CHECK( L, 0); | ||
433 | // ensure it is actually a deep userdata | 425 | // ensure it is actually a deep userdata |
434 | if( get_idfunc( L, index, eLM_LaneBody) != idfunc) | 426 | if (get_idfunc(L, index, LookupMode::LaneBody) != idfunc) |
435 | { | 427 | { |
436 | return NULL; // no metatable, or wrong kind | 428 | return nullptr; // no metatable, or wrong kind |
437 | } | 429 | } |
430 | STACK_CHECK(L, 0); | ||
438 | 431 | ||
439 | proxy = (DeepPrelude**) lua_touserdata( L, index); | 432 | DeepPrelude** const proxy{ lua_tofulluserdata<DeepPrelude*>(L, index) }; |
440 | STACK_END( L, 0); | ||
441 | |||
442 | return *proxy; | 433 | return *proxy; |
443 | } | 434 | } |
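As a usage sketch of luaG_todeep(), again not part of this commit and reusing the hypothetical MyDeepObject/my_idfunc names from the previous sketch, a module method could retrieve its shared object from the proxy passed as argument 1 like this:

    // Sketch only: a proxy method reading back the payload.
    static int my_get_payload(lua_State* L)
    {
        // luaG_todeep() checks that the proxy's metatable belongs to my_idfunc
        MyDeepObject* const obj{ static_cast<MyDeepObject*>(luaG_todeep(L, my_idfunc, 1)) };
        if (obj == nullptr)
        {
            lua_pushliteral(L, "argument #1 is not a mydeepmodule object");
            return lua_error(L);
        }
        lua_pushinteger(L, obj->payload);
        return 1;
    }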
444 | 435 | ||
436 | // ################################################################################################ | ||
445 | 437 | ||
446 | /* | 438 | /* |
447 | * Copy deep userdata between two separate Lua states (from L to L2) | 439 | * Copy deep userdata between two separate Lua states (from L to L2) |
448 | * | 440 | * |
449 | * Returns: | 441 | * Returns: |
450 | * the id function of the copied value, or NULL for non-deep userdata | 442 | * the id function of the copied value, or nullptr for non-deep userdata |
451 | * (not copied) | 443 | * (not copied) |
452 | */ | 444 | */ |
453 | bool_t copydeep( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, LookupMode mode_, char const* upName_) | 445 | bool copydeep(Universe* U, Dest L2, int L2_cache_i, Source L, int i, LookupMode mode_, char const* upName_) |
454 | { | 446 | { |
455 | char const* errmsg; | 447 | luaG_IdFunction const idfunc { get_idfunc(L, i, mode_) }; |
456 | luaG_IdFunction idfunc = get_idfunc( L, i, mode_); | 448 | if (idfunc == nullptr) |
457 | int nuv = 0; | ||
458 | |||
459 | if( idfunc == NULL) | ||
460 | { | 449 | { |
461 | return FALSE; // not a deep userdata | 450 | return false; // not a deep userdata |
462 | } | 451 | } |
463 | 452 | ||
464 | STACK_CHECK( L, 0); | 453 | STACK_CHECK_START_REL(L, 0); |
465 | STACK_CHECK( L2, 0); | 454 | STACK_CHECK_START_REL(L2, 0); |
466 | 455 | ||
467 | // extract all uservalues of the source | 456 | // extract all uservalues of the source |
468 | while( lua_getiuservalue( L, i, nuv + 1) != LUA_TNONE) // ... u [uv]* nil | 457 | int nuv = 0; |
458 | while (lua_getiuservalue(L, i, nuv + 1) != LUA_TNONE) // ... u [uv]* nil | ||
469 | { | 459 | { |
470 | ++ nuv; | 460 | ++ nuv; |
471 | } | 461 | } |
472 | // last call returned TNONE and pushed nil, that we don't need | 462 | // last call returned TNONE and pushed nil, that we don't need |
473 | lua_pop( L, 1); // ... u [uv]* | 463 | lua_pop( L, 1); // ... u [uv]* |
474 | STACK_MID( L, nuv); | 464 | STACK_CHECK( L, nuv); |
475 | 465 | ||
476 | errmsg = push_deep_proxy( U, L2, *(DeepPrelude**) lua_touserdata( L, i), nuv, mode_); // u | 466 | char const* errmsg{ push_deep_proxy(L2, *lua_tofulluserdata<DeepPrelude*>(L, i), nuv, mode_) }; // u |
477 | 467 | ||
478 | // transfer all uservalues of the source in the destination | 468 | // transfer all uservalues of the source in the destination |
479 | { | 469 | { |
480 | int const clone_i = lua_gettop( L2); | 470 | int const clone_i = lua_gettop( L2); |
481 | while( nuv) | 471 | while( nuv) |
482 | { | 472 | { |
483 | if(!inter_copy_one( U, L2, L2_cache_i, L, lua_absindex( L, -1), VT_NORMAL, mode_, upName_)) // u uv | 473 | if (!inter_copy_one(U, L2, L2_cache_i, L, lua_absindex(L, -1), VT::NORMAL, mode_, upName_)) // u uv |
484 | { | 474 | { |
485 | return luaL_error(L, "Cannot copy upvalue type '%s'", luaL_typename(L, -1)); | 475 | return luaL_error(L, "Cannot copy upvalue type '%s'", luaL_typename(L, -1)); |
486 | } | 476 | } |
487 | lua_pop( L, 1); // ... u [uv]* | 477 | lua_pop( L, 1); // ... u [uv]* |
488 | // this pops the value from the stack | 478 | // this pops the value from the stack |
489 | lua_setiuservalue( L2, clone_i, nuv); // u | 479 | lua_setiuservalue(L2, clone_i, nuv); // u |
490 | -- nuv; | 480 | -- nuv; |
491 | } | 481 | } |
492 | } | 482 | } |
493 | 483 | ||
494 | STACK_END( L2, 1); | 484 | STACK_CHECK(L2, 1); |
495 | STACK_END( L, 0); | 485 | STACK_CHECK(L, 0); |
496 | 486 | ||
497 | if( errmsg != NULL) | 487 | if (errmsg != nullptr) |
498 | { | 488 | { |
499 | // raise the error in the proper state (not the keeper) | 489 | // raise the error in the proper state (not the keeper) |
500 | lua_State* errL = (mode_ == eLM_FromKeeper) ? L2 : L; | 490 | lua_State* const errL{ (mode_ == LookupMode::FromKeeper) ? L2 : L }; |
501 | luaL_error( errL, errmsg); | 491 | luaL_error(errL, errmsg); // doesn't return |
502 | } | 492 | } |
503 | return TRUE; | 493 | return true; |
504 | } \ No newline at end of file | 494 | } \ No newline at end of file |
@@ -1,66 +1,61 @@ | |||
diff --git a/src/deep.h b/src/deep.h
1 | #ifndef __LANES_DEEP_H__ | 1 | #pragma once |
2 | #define __LANES_DEEP_H__ 1 | ||
3 | 2 | ||
4 | /* | 3 | /* |
5 | * public 'deep' API to be used by external modules if they want to implement Lanes-aware userdata | 4 | * public 'deep' API to be used by external modules if they want to implement Lanes-aware userdata |
6 | * said modules will have to link against lanes (it is not really possible to separate the 'deep userdata' implementation from the rest of Lanes) | 5 | * said modules will have to link against lanes (it is not really possible to separate the 'deep userdata' implementation from the rest of Lanes) |
7 | */ | 6 | */ |
8 | 7 | ||
8 | #ifdef __cplusplus | ||
9 | extern "C" { | ||
10 | #endif // __cplusplus | ||
9 | #include "lua.h" | 11 | #include "lua.h" |
10 | #include "platform.h" | 12 | #ifdef __cplusplus |
13 | } | ||
14 | #endif // __cplusplus | ||
15 | |||
16 | #include "lanesconf.h" | ||
11 | #include "uniquekey.h" | 17 | #include "uniquekey.h" |
12 | 18 | ||
13 | // forwards | 19 | #include <atomic> |
14 | struct s_Universe; | ||
15 | typedef struct s_Universe Universe; | ||
16 | 20 | ||
17 | #if !defined LANES_API // when deep is compiled standalone outside Lanes | 21 | // forwards |
18 | #if (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) | 22 | class Universe; |
19 | #define LANES_API __declspec(dllexport) | ||
20 | #else | ||
21 | #define LANES_API | ||
22 | #endif // (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) | ||
23 | #endif // LANES_API | ||
24 | 23 | ||
25 | enum eLookupMode | 24 | enum class LookupMode |
26 | { | 25 | { |
27 | eLM_LaneBody, // send the lane body directly from the source to the destination lane | 26 | LaneBody, // send the lane body directly from the source to the destination lane |
28 | eLM_ToKeeper, // send a function from a lane to a keeper state | 27 | ToKeeper, // send a function from a lane to a keeper state |
29 | eLM_FromKeeper // send a function from a keeper state to a lane | 28 | FromKeeper // send a function from a keeper state to a lane |
30 | }; | 29 | }; |
31 | typedef enum eLookupMode LookupMode; | ||
32 | 30 | ||
33 | enum eDeepOp | 31 | enum class DeepOp |
34 | { | 32 | { |
35 | eDO_new, | 33 | New, |
36 | eDO_delete, | 34 | Delete, |
37 | eDO_metatable, | 35 | Metatable, |
38 | eDO_module, | 36 | Module, |
39 | }; | 37 | }; |
40 | typedef enum eDeepOp DeepOp; | ||
41 | 38 | ||
42 | typedef void* (*luaG_IdFunction)( lua_State* L, DeepOp op_); | 39 | using luaG_IdFunction = void*(*)(lua_State* L, DeepOp op_); |
43 | 40 | ||
44 | // ################################################################################################ | 41 | // ################################################################################################ |
45 | 42 | ||
46 | // fnv164 of string "DEEP_VERSION_2" generated at https://www.pelock.com/products/hash-calculator | 43 | // xxh64 of string "DEEP_VERSION_3" generated at https://www.pelock.com/products/hash-calculator |
47 | static DECLARE_CONST_UNIQUE_KEY( DEEP_VERSION, 0xB4B0119C10642B29); | 44 | static constexpr UniqueKey DEEP_VERSION{ 0xB2CC0FD9C0AE9674ull }; |
48 | 45 | ||
49 | // should be used as header for full userdata | 46 | // should be used as header for deep userdata |
50 | struct s_DeepPrelude | 47 | // a deep userdata is a full userdata that stores a single pointer to the actual DeepPrelude-derived object |
48 | struct DeepPrelude | ||
51 | { | 49 | { |
52 | DECLARE_UNIQUE_KEY( magic); // must be filled by the Deep userdata idfunc that allocates it on eDO_new operation | 50 | UniqueKey const magic{ DEEP_VERSION }; |
53 | // when stored in a keeper state, the full userdata doesn't have a metatable, so we need direct access to the idfunc | 51 | // when stored in a keeper state, the full userdata doesn't have a metatable, so we need direct access to the idfunc |
54 | luaG_IdFunction idfunc; | 52 | luaG_IdFunction idfunc { nullptr }; |
55 | // data is destroyed when refcount is 0 | 53 | // data is destroyed when refcount is 0 |
56 | volatile int refcount; | 54 | std::atomic<int> m_refcount{ 0 }; |
57 | }; | 55 | }; |
58 | typedef struct s_DeepPrelude DeepPrelude; | ||
59 | |||
60 | char const* push_deep_proxy( Universe* U, lua_State* L, DeepPrelude* prelude, int nuv_, LookupMode mode_); | ||
61 | void free_deep_prelude( lua_State* L, DeepPrelude* prelude_); | ||
62 | 56 | ||
63 | extern LANES_API int luaG_newdeepuserdata( lua_State* L, luaG_IdFunction idfunc, int nuv_); | 57 | [[nodiscard]] char const* push_deep_proxy(Dest L, DeepPrelude* prelude, int nuv_, LookupMode mode_); |
64 | extern LANES_API void* luaG_todeep( lua_State* L, luaG_IdFunction idfunc, int index); | 58 | void free_deep_prelude(lua_State* L, DeepPrelude* prelude_); |
65 | 59 | ||
66 | #endif // __LANES_DEEP_H__ | 60 | LANES_API [[nodiscard]] int luaG_newdeepuserdata(Dest L, luaG_IdFunction idfunc, int nuv_); |
61 | LANES_API [[nodiscard]] DeepPrelude* luaG_todeep(lua_State* L, luaG_IdFunction idfunc, int index); | ||
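To complete the hypothetical module, the Lua-facing constructor would simply forward to luaG_newdeepuserdata(), which invokes the idfunc with DeepOp::New, verifies DEEP_VERSION, then builds and pushes the proxy. This sketch assumes a Dest can be constructed from a plain lua_State* (its definition is outside this excerpt), and the function names remain made up:

    // Sketch only: module entry points for the hypothetical deep type.
    static int my_new(lua_State* L)
    {
        // 0 extra uservalue slots on the proxy; the refcount is raised to 1 by push_deep_proxy()
        return luaG_newdeepuserdata(Dest{ L }, my_idfunc, 0);
    }

    extern "C" int luaopen_mydeepmodule(lua_State* L)
    {
        lua_newtable(L);                 // module table
        lua_pushcfunction(L, my_new);
        lua_setfield(L, -2, "new");
        return 1;
    }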
diff --git a/src/keeper.c b/src/keeper.c
deleted file mode 100644
index a1505b7..0000000
--- a/src/keeper.c
+++ /dev/null
@@ -1,862 +0,0 @@ | |||
1 | /* | ||
2 | -- | ||
3 | -- KEEPER.C | ||
4 | -- | ||
5 | -- Keeper state logic | ||
6 | -- | ||
7 | -- This code is read in for each "keeper state", which are the hidden, inter- | ||
8 | -- mediate data stores used by Lanes inter-state communication objects. | ||
9 | -- | ||
10 | -- Author: Benoit Germain <bnt.germain@gmail.com> | ||
11 | -- | ||
12 | -- C implementation replacement of the original keeper.lua | ||
13 | -- | ||
14 | --[[ | ||
15 | =============================================================================== | ||
16 | |||
17 | Copyright (C) 2011-2013 Benoit Germain <bnt.germain@gmail.com> | ||
18 | |||
19 | Permission is hereby granted, free of charge, to any person obtaining a copy | ||
20 | of this software and associated documentation files (the "Software"), to deal | ||
21 | in the Software without restriction, including without limitation the rights | ||
22 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||
23 | copies of the Software, and to permit persons to whom the Software is | ||
24 | furnished to do so, subject to the following conditions: | ||
25 | |||
26 | The above copyright notice and this permission notice shall be included in | ||
27 | all copies or substantial portions of the Software. | ||
28 | |||
29 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
30 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
31 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
32 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
33 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
34 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||
35 | THE SOFTWARE. | ||
36 | |||
37 | =============================================================================== | ||
38 | ]]-- | ||
39 | */ | ||
40 | |||
41 | #include <string.h> | ||
42 | #include <stdio.h> | ||
43 | #include <stdlib.h> | ||
44 | #include <ctype.h> | ||
45 | #include <assert.h> | ||
46 | |||
47 | #include "keeper.h" | ||
48 | #include "compat.h" | ||
49 | #include "tools.h" | ||
50 | #include "state.h" | ||
51 | #include "universe.h" | ||
52 | #include "uniquekey.h" | ||
53 | |||
54 | //################################################################################### | ||
55 | // Keeper implementation | ||
56 | //################################################################################### | ||
57 | |||
58 | #ifndef __min | ||
59 | #define __min( a, b) (((a) < (b)) ? (a) : (b)) | ||
60 | #endif // __min | ||
61 | |||
62 | typedef struct | ||
63 | { | ||
64 | lua_Integer first; | ||
65 | lua_Integer count; | ||
66 | lua_Integer limit; | ||
67 | } keeper_fifo; | ||
68 | |||
69 | static int const CONTENTS_TABLE = 1; | ||
70 | |||
71 | // replaces the fifo ud by its uservalue on the stack | ||
72 | static keeper_fifo* prepare_fifo_access( lua_State* L, int idx_) | ||
73 | { | ||
74 | keeper_fifo* fifo = (keeper_fifo*) lua_touserdata( L, idx_); | ||
75 | if( fifo != NULL) | ||
76 | { | ||
77 | idx_ = lua_absindex( L, idx_); | ||
78 | STACK_GROW( L, 1); | ||
79 | // we can replace the fifo userdata in the stack without fear of it being GCed, there are other references around | ||
80 | lua_getiuservalue( L, idx_, CONTENTS_TABLE); | ||
81 | lua_replace( L, idx_); | ||
82 | } | ||
83 | return fifo; | ||
84 | } | ||
85 | |||
86 | // in: nothing | ||
87 | // out: { first = 1, count = 0, limit = -1} | ||
88 | static void fifo_new( lua_State* L) | ||
89 | { | ||
90 | keeper_fifo* fifo; | ||
91 | STACK_GROW( L, 2); | ||
92 | // a fifo full userdata has one uservalue, the table that holds the actual fifo contents | ||
93 | fifo = (keeper_fifo*)lua_newuserdatauv( L, sizeof( keeper_fifo), 1); | ||
94 | fifo->first = 1; | ||
95 | fifo->count = 0; | ||
96 | fifo->limit = -1; | ||
97 | lua_newtable( L); | ||
98 | lua_setiuservalue( L, -2, CONTENTS_TABLE); | ||
99 | } | ||
100 | |||
101 | // in: expect fifo ... on top of the stack | ||
102 | // out: nothing, removes all pushed values from the stack | ||
103 | static void fifo_push( lua_State* L, keeper_fifo* fifo_, lua_Integer count_) | ||
104 | { | ||
105 | int const idx = lua_gettop( L) - (int) count_; | ||
106 | lua_Integer start = fifo_->first + fifo_->count - 1; | ||
107 | lua_Integer i; | ||
108 | // pop all additional arguments, storing them in the fifo | ||
109 | for( i = count_; i >= 1; -- i) | ||
110 | { | ||
111 | // store in the fifo the value at the top of the stack at the specified index, popping it from the stack | ||
112 | lua_rawseti( L, idx, (int)(start + i)); | ||
113 | } | ||
114 | fifo_->count += count_; | ||
115 | } | ||
116 | |||
117 | // in: fifo | ||
118 | // out: ...|nothing | ||
119 | // expects exactly 1 value on the stack! | ||
120 | // currently only called with a count of 1, but this may change in the future | ||
121 | // function assumes that there is enough data in the fifo to satisfy the request | ||
122 | static void fifo_peek( lua_State* L, keeper_fifo* fifo_, lua_Integer count_) | ||
123 | { | ||
124 | lua_Integer i; | ||
125 | STACK_GROW( L, (int) count_); | ||
126 | for( i = 0; i < count_; ++ i) | ||
127 | { | ||
128 | lua_rawgeti( L, 1, (int)( fifo_->first + i)); | ||
129 | } | ||
130 | } | ||
131 | |||
132 | // in: fifo | ||
133 | // out: remove the fifo from the stack, push as many items as required on the stack (function assumes they exist in sufficient number) | ||
134 | static void fifo_pop( lua_State* L, keeper_fifo* fifo_, lua_Integer count_) | ||
135 | { | ||
136 | int const fifo_idx = lua_gettop( L); // ... fifo | ||
137 | int i; | ||
138 | // each iteration pushes a value on the stack! | ||
139 | STACK_GROW( L, (int) count_ + 2); | ||
140 | // skip first item, we will push it last | ||
141 | for( i = 1; i < count_; ++ i) | ||
142 | { | ||
143 | int const at = (int)( fifo_->first + i); | ||
144 | // push item on the stack | ||
145 | lua_rawgeti( L, fifo_idx, at); // ... fifo val | ||
146 | // remove item from the fifo | ||
147 | lua_pushnil( L); // ... fifo val nil | ||
148 | lua_rawseti( L, fifo_idx, at); // ... fifo val | ||
149 | } | ||
150 | // now process first item | ||
151 | { | ||
152 | int const at = (int)( fifo_->first); | ||
153 | lua_rawgeti( L, fifo_idx, at); // ... fifo vals val | ||
154 | lua_pushnil( L); // ... fifo vals val nil | ||
155 | lua_rawseti( L, fifo_idx, at); // ... fifo vals val | ||
156 | lua_replace( L, fifo_idx); // ... vals | ||
157 | } | ||
158 | { | ||
159 | // avoid ever-growing indexes by resetting each time we detect the fifo is empty | ||
160 | lua_Integer const new_count = fifo_->count - count_; | ||
161 | fifo_->first = (new_count == 0) ? 1 : (fifo_->first + count_); | ||
162 | fifo_->count = new_count; | ||
163 | } | ||
164 | } | ||
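A small worked example of the bookkeeping above (illustration only, not part of the commit): suppose the contents table holds values at indices 3, 4 and 5, so first = 3 and count = 3. fifo_pop with count_ = 2 pushes the values stored at indices 3 and 4, clearing those slots as it goes, and leaves first = 5, count = 1. If a pop drains the fifo completely, new_count is 0 and first is reset to 1, which is what keeps the integer keys from growing without bound across many send/receive cycles.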
165 | |||
166 | // in: linda_ud expected at *absolute* stack slot idx | ||
167 | // out: fifos[ud] | ||
168 | // crc64/we of string "FIFOS_KEY" generated at http://www.nitrxgen.net/hashgen/ | ||
169 | static DECLARE_CONST_UNIQUE_KEY( FIFOS_KEY, 0xdce50bbc351cd465); | ||
170 | static void push_table( lua_State* L, int idx_) | ||
171 | { | ||
172 | STACK_GROW( L, 5); | ||
173 | STACK_CHECK( L, 0); | ||
174 | idx_ = lua_absindex( L, idx_); | ||
175 | REGISTRY_GET( L, FIFOS_KEY); // ud fifos | ||
176 | lua_pushvalue( L, idx_); // ud fifos ud | ||
177 | lua_rawget( L, -2); // ud fifos fifos[ud] | ||
178 | STACK_MID( L, 2); | ||
179 | if( lua_isnil( L, -1)) | ||
180 | { | ||
181 | lua_pop( L, 1); // ud fifos | ||
182 | // add a new fifos table for this linda | ||
183 | lua_newtable( L); // ud fifos fifos[ud] | ||
184 | lua_pushvalue( L, idx_); // ud fifos fifos[ud] ud | ||
185 | lua_pushvalue( L, -2); // ud fifos fifos[ud] ud fifos[ud] | ||
186 | lua_rawset( L, -4); // ud fifos fifos[ud] | ||
187 | } | ||
188 | lua_remove( L, -2); // ud fifos[ud] | ||
189 | STACK_END( L, 1); | ||
190 | } | ||
191 | |||
192 | int keeper_push_linda_storage( Universe* U, lua_State* L, void* ptr_, uintptr_t magic_) | ||
193 | { | ||
194 | Keeper* const K = which_keeper( U->keepers, magic_); | ||
195 | lua_State* const KL = K ? K->L : NULL; | ||
196 | if( KL == NULL) return 0; | ||
197 | STACK_GROW( KL, 4); | ||
198 | STACK_CHECK( KL, 0); | ||
199 | REGISTRY_GET( KL, FIFOS_KEY); // fifos | ||
200 | lua_pushlightuserdata( KL, ptr_); // fifos ud | ||
201 | lua_rawget( KL, -2); // fifos storage | ||
202 | lua_remove( KL, -2); // storage | ||
203 | if( !lua_istable( KL, -1)) | ||
204 | { | ||
205 | lua_pop( KL, 1); // | ||
206 | STACK_MID( KL, 0); | ||
207 | return 0; | ||
208 | } | ||
209 | // move data from keeper to destination state KEEPER MAIN | ||
210 | lua_pushnil( KL); // storage nil | ||
211 | STACK_GROW( L, 5); | ||
212 | STACK_CHECK( L, 0); | ||
213 | lua_newtable( L); // out | ||
214 | while( lua_next( KL, -2)) // storage key fifo | ||
215 | { | ||
216 | keeper_fifo* fifo = prepare_fifo_access( KL, -1); // storage key fifo | ||
217 | lua_pushvalue( KL, -2); // storage key fifo key | ||
218 | luaG_inter_move( U, KL, L, 1, eLM_FromKeeper); // storage key fifo // out key | ||
219 | STACK_MID( L, 2); | ||
220 | lua_newtable( L); // out key keyout | ||
221 | luaG_inter_move( U, KL, L, 1, eLM_FromKeeper); // storage key // out key keyout fifo | ||
222 | lua_pushinteger( L, fifo->first); // out key keyout fifo first | ||
223 | STACK_MID( L, 5); | ||
224 | lua_setfield( L, -3, "first"); // out key keyout fifo | ||
225 | lua_pushinteger( L, fifo->count); // out key keyout fifo count | ||
226 | STACK_MID( L, 5); | ||
227 | lua_setfield( L, -3, "count"); // out key keyout fifo | ||
228 | lua_pushinteger( L, fifo->limit); // out key keyout fifo limit | ||
229 | STACK_MID( L, 5); | ||
230 | lua_setfield( L, -3, "limit"); // out key keyout fifo | ||
231 | lua_setfield( L, -2, "fifo"); // out key keyout | ||
232 | lua_rawset( L, -3); // out | ||
233 | STACK_MID( L, 1); | ||
234 | } | ||
235 | STACK_END( L, 1); | ||
236 | lua_pop( KL, 1); // | ||
237 | STACK_END( KL, 0); | ||
238 | return 1; | ||
239 | } | ||
240 | |||
241 | // in: linda_ud | ||
242 | int keepercall_clear( lua_State* L) | ||
243 | { | ||
244 | STACK_GROW( L, 3); | ||
245 | STACK_CHECK( L, 0); | ||
246 | REGISTRY_GET( L, FIFOS_KEY); // ud fifos | ||
247 | lua_pushvalue( L, 1); // ud fifos ud | ||
248 | lua_pushnil( L); // ud fifos ud nil | ||
249 | lua_rawset( L, -3); // ud fifos | ||
250 | lua_pop( L, 1); // ud | ||
251 | STACK_END( L, 0); | ||
252 | return 0; | ||
253 | } | ||
254 | |||
255 | |||
256 | // in: linda_ud, key, ... | ||
257 | // out: true|false | ||
258 | int keepercall_send( lua_State* L) | ||
259 | { | ||
260 | keeper_fifo* fifo; | ||
261 | int n = lua_gettop( L) - 2; | ||
262 | push_table( L, 1); // ud key ... fifos | ||
263 | // get the fifo associated to this key in this linda, create it if it doesn't exist | ||
264 | lua_pushvalue( L, 2); // ud key ... fifos key | ||
265 | lua_rawget( L, -2); // ud key ... fifos fifo | ||
266 | if( lua_isnil( L, -1)) | ||
267 | { | ||
268 | lua_pop( L, 1); // ud key ... fifos | ||
269 | fifo_new( L); // ud key ... fifos fifo | ||
270 | lua_pushvalue( L, 2); // ud key ... fifos fifo key | ||
271 | lua_pushvalue( L, -2); // ud key ... fifos fifo key fifo | ||
272 | lua_rawset( L, -4); // ud key ... fifos fifo | ||
273 | } | ||
274 | lua_remove( L, -2); // ud key ... fifo | ||
275 | fifo = (keeper_fifo*) lua_touserdata( L, -1); | ||
276 | if( fifo->limit >= 0 && fifo->count + n > fifo->limit) | ||
277 | { | ||
278 | lua_settop( L, 0); // | ||
279 | lua_pushboolean( L, 0); // false | ||
280 | } | ||
281 | else | ||
282 | { | ||
283 | fifo = prepare_fifo_access( L, -1); | ||
284 | lua_replace( L, 2); // ud fifo ... | ||
285 | fifo_push( L, fifo, n); // ud fifo | ||
286 | lua_settop( L, 0); // | ||
287 | lua_pushboolean( L, 1); // true | ||
288 | } | ||
289 | return 1; | ||
290 | } | ||
291 | |||
292 | // in: linda_ud, key [, key]? | ||
293 | // out: (key, val) or nothing | ||
294 | int keepercall_receive( lua_State* L) | ||
295 | { | ||
296 | int top = lua_gettop( L); | ||
297 | int i; | ||
298 | push_table( L, 1); // ud keys fifos | ||
299 | lua_replace( L, 1); // fifos keys | ||
300 | for( i = 2; i <= top; ++ i) | ||
301 | { | ||
302 | keeper_fifo* fifo; | ||
303 | lua_pushvalue( L, i); // fifos keys key[i] | ||
304 | lua_rawget( L, 1); // fifos keys fifo | ||
305 | fifo = prepare_fifo_access( L, -1); // fifos keys fifo | ||
306 | if( fifo != NULL && fifo->count > 0) | ||
307 | { | ||
308 | fifo_pop( L, fifo, 1); // fifos keys val | ||
309 | if( !lua_isnil( L, -1)) | ||
310 | { | ||
311 | lua_replace( L, 1); // val keys | ||
312 | lua_settop( L, i); // val keys key[i] | ||
313 | if( i != 2) | ||
314 | { | ||
315 | lua_replace( L, 2); // val key keys | ||
316 | lua_settop( L, 2); // val key | ||
317 | } | ||
318 | lua_insert( L, 1); // key, val | ||
319 | return 2; | ||
320 | } | ||
321 | } | ||
322 | lua_settop( L, top); // data keys | ||
323 | } | ||
324 | // nothing to receive | ||
325 | return 0; | ||
326 | } | ||
327 | |||
328 | //in: linda_ud key mincount [maxcount] | ||
329 | int keepercall_receive_batched( lua_State* L) | ||
330 | { | ||
331 | lua_Integer const min_count = lua_tointeger( L, 3); | ||
332 | if( min_count > 0) | ||
333 | { | ||
334 | keeper_fifo* fifo; | ||
335 | lua_Integer const max_count = luaL_optinteger( L, 4, min_count); | ||
336 | lua_settop( L, 2); // ud key | ||
337 | lua_insert( L, 1); // key ud | ||
338 | push_table( L, 2); // key ud fifos | ||
339 | lua_remove( L, 2); // key fifos | ||
340 | lua_pushvalue( L, 1); // key fifos key | ||
341 | lua_rawget( L, 2); // key fifos fifo | ||
342 | lua_remove( L, 2); // key fifo | ||
343 | fifo = prepare_fifo_access( L, 2); // key fifo | ||
344 | if( fifo != NULL && fifo->count >= min_count) | ||
345 | { | ||
346 | fifo_pop( L, fifo, __min( max_count, fifo->count)); // key ... | ||
347 | } | ||
348 | else | ||
349 | { | ||
350 | lua_settop( L, 0); | ||
351 | } | ||
352 | return lua_gettop( L); | ||
353 | } | ||
354 | else | ||
355 | { | ||
356 | return 0; | ||
357 | } | ||
358 | } | ||
359 | |||
360 | // in: linda_ud key n | ||
361 | // out: true or nil | ||
362 | int keepercall_limit( lua_State* L) | ||
363 | { | ||
364 | keeper_fifo* fifo; | ||
365 | lua_Integer limit = lua_tointeger( L, 3); | ||
366 | push_table( L, 1); // ud key n fifos | ||
367 | lua_replace( L, 1); // fifos key n | ||
368 | lua_pop( L, 1); // fifos key | ||
369 | lua_pushvalue( L, -1); // fifos key key | ||
370 | lua_rawget( L, -3); // fifos key fifo|nil | ||
371 | fifo = (keeper_fifo*) lua_touserdata( L, -1); | ||
372 | if( fifo == NULL) | ||
373 | { // fifos key nil | ||
374 | lua_pop( L, 1); // fifos key | ||
375 | fifo_new( L); // fifos key fifo | ||
376 | fifo = (keeper_fifo*) lua_touserdata( L, -1); | ||
377 | lua_rawset( L, -3); // fifos | ||
378 | } | ||
379 | // remove any clutter on the stack | ||
380 | lua_settop( L, 0); | ||
381 | // return true if we decide that blocked threads waiting to write on that key should be awakened | ||
382 | // this is the case if we detect the key was full but it is no longer the case | ||
383 | if( | ||
384 | ((fifo->limit >= 0) && (fifo->count >= fifo->limit)) // the key was full if limited and count exceeded the previous limit | ||
385 | && ((limit < 0) || (fifo->count < limit)) // the key is not full if unlimited or count is lower than the new limit | ||
386 | ) | ||
387 | { | ||
388 | lua_pushboolean( L, 1); | ||
389 | } | ||
390 | // set the new limit | ||
391 | fifo->limit = limit; | ||
392 | // return 0 or 1 value | ||
393 | return lua_gettop( L); | ||
394 | } | ||
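A worked example of that wake-up test (illustration only): with the previous limit = 5 and count = 5, writers on this key are blocked. A call that changes the limit to -1 (unlimited) or to 8 satisfies both halves of the condition, so true is pushed and the caller knows blocked writers should be awakened. Lowering the limit to 3 while count is still 5 leaves the key full, so nothing is returned.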
395 | |||
396 | //in: linda_ud key [[val] ...] | ||
397 | //out: true or nil | ||
398 | int keepercall_set( lua_State* L) | ||
399 | { | ||
400 | bool_t should_wake_writers = FALSE; | ||
401 | STACK_GROW( L, 6); | ||
402 | |||
403 | // retrieve fifos associated with the linda | ||
404 | push_table( L, 1); // ud key [val [, ...]] fifos | ||
405 | lua_replace( L, 1); // fifos key [val [, ...]] | ||
406 | |||
407 | // make sure we have a value on the stack | ||
408 | if( lua_gettop( L) == 2) // fifos key | ||
409 | { | ||
410 | keeper_fifo* fifo; | ||
411 | lua_pushvalue( L, -1); // fifos key key | ||
412 | lua_rawget( L, 1); // fifos key fifo|nil | ||
413 | // empty the fifo for the specified key: replace uservalue with a virgin table, reset counters, but leave limit unchanged! | ||
414 | fifo = (keeper_fifo*) lua_touserdata( L, -1); | ||
415 | if( fifo != NULL) // might be NULL if we set a nonexistent key to nil | ||
416 | { // fifos key fifo | ||
417 | if( fifo->limit < 0) // fifo limit value is the default (unlimited): we can totally remove it | ||
418 | { | ||
419 | lua_pop( L, 1); // fifos key | ||
420 | lua_pushnil( L); // fifos key nil | ||
421 | lua_rawset( L, -3); // fifos | ||
422 | } | ||
423 | else | ||
424 | { | ||
425 | // we create room if the fifo was full but it is no longer the case | ||
426 | should_wake_writers = (fifo->limit > 0) && (fifo->count >= fifo->limit); | ||
427 | lua_remove( L, -2); // fifos fifo | ||
428 | lua_newtable( L); // fifos fifo {} | ||
429 | lua_setiuservalue( L, -2, CONTENTS_TABLE); // fifos fifo | ||
430 | fifo->first = 1; | ||
431 | fifo->count = 0; | ||
432 | } | ||
433 | } | ||
434 | } | ||
435 | else // set/replace contents stored at the specified key? | ||
436 | { | ||
437 | lua_Integer count = lua_gettop( L) - 2; // number of items we want to store | ||
438 | keeper_fifo* fifo; // fifos key [val [, ...]] | ||
439 | lua_pushvalue( L, 2); // fifos key [val [, ...]] key | ||
440 | lua_rawget( L, 1); // fifos key [val [, ...]] fifo|nil | ||
441 | fifo = (keeper_fifo*) lua_touserdata( L, -1); | ||
442 | if( fifo == NULL) // can be NULL if we store a value at a new key | ||
443 | { // fifos key [val [, ...]] nil | ||
444 | // no need to wake writers in that case, because a writer can't wait on an inexistent key | ||
445 | lua_pop( L, 1); // fifos key [val [, ...]] | ||
446 | fifo_new( L); // fifos key [val [, ...]] fifo | ||
447 | lua_pushvalue( L, 2); // fifos key [val [, ...]] fifo key | ||
448 | lua_pushvalue( L, -2); // fifos key [val [, ...]] fifo key fifo | ||
449 | lua_rawset( L, 1); // fifos key [val [, ...]] fifo | ||
450 | } | ||
451 | else // the fifo exists, we just want to update its contents | ||
452 | { // fifos key [val [, ...]] fifo | ||
453 | // we create room if the fifo was full but it is no longer the case | ||
454 | should_wake_writers = (fifo->limit > 0) && (fifo->count >= fifo->limit) && (count < fifo->limit); | ||
455 | // empty the fifo for the specified key: replace uservalue with a virgin table, reset counters, but leave limit unchanged! | ||
456 | lua_newtable( L); // fifos key [val [, ...]] fifo {} | ||
457 | lua_setiuservalue( L, -2, CONTENTS_TABLE); // fifos key [val [, ...]] fifo | ||
458 | fifo->first = 1; | ||
459 | fifo->count = 0; | ||
460 | } | ||
461 | fifo = prepare_fifo_access( L, -1); | ||
462 | // move the fifo below the values we want to store | ||
463 | lua_insert( L, 3); // fifos key fifo [val [, ...]] | ||
464 | fifo_push( L, fifo, count); // fifos key fifo | ||
465 | } | ||
466 | return should_wake_writers ? (lua_pushboolean( L, 1), 1) : 0; | ||
467 | } | ||
468 | |||
469 | // in: linda_ud key [count] | ||
470 | // out: at most <count> values | ||
471 | int keepercall_get( lua_State* L) | ||
472 | { | ||
473 | keeper_fifo* fifo; | ||
474 | lua_Integer count = 1; | ||
475 | if( lua_gettop( L) == 3) // ud key count | ||
476 | { | ||
477 | count = lua_tointeger( L, 3); | ||
478 | lua_pop( L, 1); // ud key | ||
479 | } | ||
480 | push_table( L, 1); // ud key fifos | ||
481 | lua_replace( L, 1); // fifos key | ||
482 | lua_rawget( L, 1); // fifos fifo | ||
483 | fifo = prepare_fifo_access( L, -1); // fifos fifo | ||
484 | if( fifo != NULL && fifo->count > 0) | ||
485 | { | ||
486 | lua_remove( L, 1); // fifo | ||
487 | count = __min( count, fifo->count); | ||
488 | // read <count> value off the fifo | ||
489 | fifo_peek( L, fifo, count); // fifo ... | ||
490 | return (int) count; | ||
491 | } | ||
492 | // no fifo was ever registered for this key, or it is empty | ||
493 | return 0; | ||
494 | } | ||
495 | |||
496 | // in: linda_ud [, key [, ...]] | ||
497 | int keepercall_count( lua_State* L) | ||
498 | { | ||
499 | push_table( L, 1); // ud keys fifos | ||
500 | switch( lua_gettop( L)) | ||
501 | { | ||
502 | // no key is specified: return a table giving the count of all known keys | ||
503 | case 2: // ud fifos | ||
504 | lua_newtable( L); // ud fifos out | ||
505 | lua_replace( L, 1); // out fifos | ||
506 | lua_pushnil( L); // out fifos nil | ||
507 | while( lua_next( L, 2)) // out fifos key fifo | ||
508 | { | ||
509 | keeper_fifo* fifo = prepare_fifo_access( L, -1); // out fifos key fifo | ||
510 | lua_pop( L, 1); // out fifos key | ||
511 | lua_pushvalue( L, -1); // out fifos key key | ||
512 | lua_pushinteger( L, fifo->count); // out fifos key key count | ||
513 | lua_rawset( L, -5); // out fifos key | ||
514 | } | ||
515 | lua_pop( L, 1); // out | ||
516 | break; | ||
517 | |||
518 | // 1 key is specified: return its count | ||
519 | case 3: // ud key fifos | ||
520 | { | ||
521 | keeper_fifo* fifo; | ||
522 | lua_replace( L, 1); // fifos key | ||
523 | lua_rawget( L, -2); // fifos fifo|nil | ||
524 | if( lua_isnil( L, -1)) // the key is unknown | ||
525 | { // fifos nil | ||
526 | lua_remove( L, -2); // nil | ||
527 | } | ||
528 | else // the key is known | ||
529 | { // fifos fifo | ||
530 | fifo = prepare_fifo_access( L, -1); // fifos fifo | ||
531 | lua_pushinteger( L, fifo->count); // fifos fifo count | ||
532 | lua_replace( L, -3); // count fifo | ||
533 | lua_pop( L, 1); // count | ||
534 | } | ||
535 | } | ||
536 | break; | ||
537 | |||
538 | // a variable number of keys is specified: return a table of their counts | ||
539 | default: // ud keys fifos | ||
540 | lua_newtable( L); // ud keys fifos out | ||
541 | lua_replace( L, 1); // out keys fifos | ||
542 | // shifts all keys up in the stack. potentially slow if there are a lot of them, but then it should be bearable | ||
543 | lua_insert( L, 2); // out fifos keys | ||
544 | while( lua_gettop( L) > 2) | ||
545 | { | ||
546 | keeper_fifo* fifo; | ||
547 | lua_pushvalue( L, -1); // out fifos keys key | ||
548 | lua_rawget( L, 2); // out fifos keys fifo|nil | ||
549 | fifo = prepare_fifo_access( L, -1); // out fifos keys fifo|nil | ||
550 | lua_pop( L, 1); // out fifos keys | ||
551 | if( fifo != NULL) // the key is known | ||
552 | { | ||
553 | lua_pushinteger( L, fifo->count); // out fifos keys count | ||
554 | lua_rawset( L, 1); // out fifos keys | ||
555 | } | ||
556 | else // the key is unknown | ||
557 | { | ||
558 | lua_pop( L, 1); // out fifos keys | ||
559 | } | ||
560 | } // all keys are exhausted // out fifos | ||
561 | lua_pop( L, 1); // out | ||
562 | } | ||
563 | ASSERT_L( lua_gettop( L) == 1); | ||
564 | return 1; | ||
565 | } | ||
566 | |||
567 | //################################################################################### | ||
568 | // Keeper API, accessed from linda methods | ||
569 | //################################################################################### | ||
570 | |||
571 | /*---=== Keeper states ===--- | ||
572 | */ | ||
573 | |||
574 | /* | ||
575 | * Pool of keeper states | ||
576 | * | ||
577 | * Access to keeper states is locked (only one OS thread at a time) so the | ||
578 | * bigger the pool, the less chances of unnecessary waits. Lindas map to the | ||
579 | * keepers randomly, by a hash. | ||
580 | */ | ||
581 | |||
582 | // called as __gc for the keepers array userdata | ||
583 | void close_keepers( Universe* U) | ||
584 | { | ||
585 | if( U->keepers != NULL) | ||
586 | { | ||
587 | int i; | ||
588 | int nbKeepers = U->keepers->nb_keepers; | ||
589 | // NOTE: imagine some keeper state N+1 currently holds a linda that uses another keeper N, and a _gc that will make use of it | ||
590 | // when keeper N+1 is closed, object is GCed, linda operation is called, which attempts to acquire keeper N, whose Lua state no longer exists | ||
591 | // in that case, the linda operation should do nothing. which means that these operations must check for keeper acquisition success | ||
592 | // which is early-outed with a U->keepers->nbKeepers null-check | ||
593 | U->keepers->nb_keepers = 0; | ||
594 | for( i = 0; i < nbKeepers; ++ i) | ||
595 | { | ||
596 | lua_State* K = U->keepers->keeper_array[i].L; | ||
597 | U->keepers->keeper_array[i].L = NULL; | ||
598 | if( K != NULL) | ||
599 | { | ||
600 | lua_close( K); | ||
601 | } | ||
602 | else | ||
603 | { | ||
604 | // detected partial init: destroy only the mutexes that got initialized properly | ||
605 | nbKeepers = i; | ||
606 | } | ||
607 | } | ||
608 | for( i = 0; i < nbKeepers; ++ i) | ||
609 | { | ||
610 | MUTEX_FREE( &U->keepers->keeper_array[i].keeper_cs); | ||
611 | } | ||
612 | // free the keeper bookkeeping structure | ||
613 | { | ||
614 | AllocatorDefinition* const allocD = &U->internal_allocator; | ||
615 | (void) allocD->allocF( allocD->allocUD, U->keepers, sizeof( Keepers) + (nbKeepers - 1) * sizeof( Keeper), 0); | ||
616 | U->keepers = NULL; | ||
617 | } | ||
618 | } | ||
619 | } | ||
620 | |||
621 | /* | ||
622 | * Initialize keeper states | ||
623 | * | ||
624 | * If there is a problem, returns NULL and pushes the error message on the stack | ||
625 | * else returns the keepers bookkeeping structure. | ||
626 | * | ||
627 | * Note: Any problems would be design flaws; the created Lua state is left | ||
628 | * unclosed, because it does not really matter. In production code, this | ||
629 | * function never fails. | ||
630 | * settings table is at position 1 on the stack | ||
631 | */ | ||
632 | void init_keepers( Universe* U, lua_State* L) | ||
633 | { | ||
634 | int i; | ||
635 | int nb_keepers; | ||
636 | int keepers_gc_threshold; | ||
637 | |||
638 | STACK_CHECK( L, 0); // L K | ||
639 | lua_getfield( L, 1, "nb_keepers"); // nb_keepers | ||
640 | nb_keepers = (int) lua_tointeger( L, -1); | ||
641 | lua_pop( L, 1); // | ||
642 | if( nb_keepers < 1) | ||
643 | { | ||
644 | (void) luaL_error( L, "Bad number of keepers (%d)", nb_keepers); | ||
645 | } | ||
646 | STACK_MID(L, 0); | ||
647 | |||
648 | lua_getfield(L, 1, "keepers_gc_threshold"); // keepers_gc_threshold | ||
649 | keepers_gc_threshold = (int) lua_tointeger(L, -1); | ||
650 | lua_pop(L, 1); // | ||
651 | STACK_MID(L, 0); | ||
652 | |||
653 | // Keepers contains an array of 1 s_Keeper, adjust for the actual number of keeper states | ||
654 | { | ||
655 | size_t const bytes = sizeof( Keepers) + (nb_keepers - 1) * sizeof( Keeper); | ||
656 | { | ||
657 | AllocatorDefinition* const allocD = &U->internal_allocator; | ||
658 | U->keepers = (Keepers*) allocD->allocF( allocD->allocUD, NULL, 0, bytes); | ||
659 | } | ||
660 | if( U->keepers == NULL) | ||
661 | { | ||
662 | (void) luaL_error( L, "init_keepers() failed while creating keeper array; out of memory"); | ||
663 | return; | ||
664 | } | ||
665 | memset( U->keepers, 0, bytes); | ||
666 | U->keepers->gc_threshold = keepers_gc_threshold; | ||
667 | U->keepers->nb_keepers = nb_keepers; | ||
668 | } | ||
669 | for( i = 0; i < nb_keepers; ++ i) // keepersUD | ||
670 | { | ||
671 | // note that we will leak K if we raise an error later | ||
672 | lua_State* K = create_state( U, L); | ||
673 | if( K == NULL) | ||
674 | { | ||
675 | (void) luaL_error( L, "init_keepers() failed while creating keeper states; out of memory"); | ||
676 | return; | ||
677 | } | ||
678 | |||
679 | U->keepers->keeper_array[i].L = K; | ||
680 | MUTEX_INIT( &U->keepers->keeper_array[i].keeper_cs); | ||
681 | |||
682 | if (U->keepers->gc_threshold >= 0) | ||
683 | { | ||
684 | lua_gc(K, LUA_GCSTOP, 0); | ||
685 | } | ||
686 | |||
687 | STACK_CHECK( K, 0); | ||
688 | |||
689 | // copy the universe pointer in the keeper itself | ||
690 | universe_store( K, U); | ||
691 | STACK_MID( K, 0); | ||
692 | |||
693 | // make sure 'package' is initialized in keeper states, so that we have require() | ||
694 | // this because this is needed when transferring deep userdata object | ||
695 | luaL_requiref( K, "package", luaopen_package, 1); // package | ||
696 | lua_pop( K, 1); // | ||
697 | STACK_MID( K, 0); | ||
698 | serialize_require( DEBUGSPEW_PARAM_COMMA( U) K); | ||
699 | STACK_MID( K, 0); | ||
700 | |||
701 | // copy package.path and package.cpath from the source state | ||
702 | lua_getglobal( L, "package"); // "..." keepersUD package | ||
703 | if( !lua_isnil( L, -1)) | ||
704 | { | ||
705 | // when copying with mode eLM_ToKeeper, error message is pushed at the top of the stack, not raised immediately | ||
706 | if( luaG_inter_copy_package( U, L, K, -1, eLM_ToKeeper) != eICR_Success) | ||
707 | { | ||
708 | // if something went wrong, the error message is at the top of the stack | ||
709 | lua_remove( L, -2); // error_msg | ||
710 | (void) lua_error( L); | ||
711 | return; | ||
712 | } | ||
713 | } | ||
714 | lua_pop( L, 1); // | ||
715 | STACK_MID( L, 0); | ||
716 | |||
717 | // attempt to call on_state_create(), if we have one and it is a C function | ||
718 | // (only support a C function because we can't transfer executable Lua code in keepers) | ||
719 | // will raise an error in L in case of problem | ||
720 | call_on_state_create( U, K, L, eLM_ToKeeper); | ||
721 | |||
722 | // to see VM name in Decoda debugger | ||
723 | lua_pushfstring( K, "Keeper #%d", i + 1); // "Keeper #n" | ||
724 | lua_setglobal( K, "decoda_name"); // | ||
725 | |||
726 | // create the fifos table in the keeper state | ||
727 | REGISTRY_SET( K, FIFOS_KEY, lua_newtable( K)); | ||
728 | STACK_END( K, 0); | ||
729 | } | ||
730 | STACK_END( L, 0); | ||
731 | } | ||
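For illustration (not code from this commit), a host driving init_keepers() directly would have to provide the two fields it reads. example_init_keepers and the literal values below are hypothetical placeholders, not the defaults from lanes.lua, and the sketch assumes the stack is empty on entry so the settings table lands at index 1:

    // Sketch only: minimal settings table consumed by init_keepers().
    static void example_init_keepers(Universe* U, lua_State* L)
    {
        lua_newtable(L);                             // settings (must end up at stack index 1)
        lua_pushinteger(L, 1);
        lua_setfield(L, -2, "nb_keepers");           // init_keepers() raises an error if < 1
        lua_pushinteger(L, -1);
        lua_setfield(L, -2, "keepers_gc_threshold"); // < 0 leaves automatic GC running in the keeper states
        init_keepers(U, L);                          // reads both fields from the table at index 1
        lua_pop(L, 1);                               // drop the settings table
    }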
732 | |||
733 | // should be called only when inside a keeper_acquire/keeper_release pair (see linda_protected_call) | ||
734 | Keeper* which_keeper(Keepers* keepers_, uintptr_t magic_) | ||
735 | { | ||
736 | int const nbKeepers = keepers_->nb_keepers; | ||
737 | if (nbKeepers) | ||
738 | { | ||
739 | unsigned int i = (unsigned int)((magic_ >> KEEPER_MAGIC_SHIFT) % nbKeepers); | ||
740 | return &keepers_->keeper_array[i]; | ||
741 | } | ||
742 | return NULL; | ||
743 | } | ||
744 | |||
745 | Keeper* keeper_acquire( Keepers* keepers_, uintptr_t magic_) | ||
746 | { | ||
747 | int const nbKeepers = keepers_->nb_keepers; | ||
748 | // can be 0 if this happens during main state shutdown (lanes is being GC'ed -> no keepers) | ||
749 | if( nbKeepers) | ||
750 | { | ||
751 | /* | ||
752 | * Any hashing will do that maps pointers to 0..GNbKeepers-1 | ||
753 | * consistently. | ||
754 | * | ||
755 | * Pointers are often aligned by 8 or so - ignore the low order bits | ||
756 | * have to cast to unsigned long to avoid compilation warnings about loss of data when converting pointer-to-integer | ||
757 | */ | ||
758 | unsigned int i = (unsigned int)((magic_ >> KEEPER_MAGIC_SHIFT) % nbKeepers); | ||
759 | Keeper* K = &keepers_->keeper_array[i]; | ||
760 | |||
761 | MUTEX_LOCK( &K->keeper_cs); | ||
762 | //++ K->count; | ||
763 | return K; | ||
764 | } | ||
765 | return NULL; | ||
766 | } | ||
767 | |||
768 | void keeper_release( Keeper* K_) | ||
769 | { | ||
770 | //-- K->count; | ||
771 | if( K_) MUTEX_UNLOCK( &K_->keeper_cs); | ||
772 | } | ||
773 | |||
774 | void keeper_toggle_nil_sentinels( lua_State* L, int val_i_, LookupMode const mode_) | ||
775 | { | ||
776 | int i, n = lua_gettop( L); | ||
777 | for( i = val_i_; i <= n; ++ i) | ||
778 | { | ||
779 | if( mode_ == eLM_ToKeeper) | ||
780 | { | ||
781 | if( lua_isnil( L, i)) | ||
782 | { | ||
783 | push_unique_key( L, NIL_SENTINEL); | ||
784 | lua_replace( L, i); | ||
785 | } | ||
786 | } | ||
787 | else | ||
788 | { | ||
789 | if( equal_unique_key( L, i, NIL_SENTINEL)) | ||
790 | { | ||
791 | lua_pushnil( L); | ||
792 | lua_replace( L, i); | ||
793 | } | ||
794 | } | ||
795 | } | ||
796 | } | ||
797 | |||
798 | /* | ||
799 | * Call a function ('func_name') in the keeper state, and pass on the returned | ||
800 | * values to 'L'. | ||
801 | * | ||
802 | * 'linda': deep Linda pointer (used only as a unique table key, first parameter) | ||
803 | * 'starting_index': first of the rest of parameters (none if 0) | ||
804 | * | ||
805 | * Returns: number of return values (pushed to 'L') or -1 in case of error | ||
806 | */ | ||
807 | int keeper_call( Universe* U, lua_State* K, keeper_api_t func_, lua_State* L, void* linda, uint_t starting_index) | ||
808 | { | ||
809 | int const args = starting_index ? (lua_gettop( L) - starting_index + 1) : 0; | ||
810 | int const Ktos = lua_gettop( K); | ||
811 | int retvals = -1; | ||
812 | |||
813 | STACK_GROW( K, 2); | ||
814 | |||
815 | PUSH_KEEPER_FUNC( K, func_); | ||
816 | |||
817 | lua_pushlightuserdata( K, linda); | ||
818 | |||
819 | if( (args == 0) || luaG_inter_copy( U, L, K, args, eLM_ToKeeper) == eICR_Success) // L->K | ||
820 | { | ||
821 | lua_call( K, 1 + args, LUA_MULTRET); | ||
822 | |||
823 | retvals = lua_gettop( K) - Ktos; | ||
824 | // note that this can raise a luaL_error while the keeper state (and its mutex) is acquired | ||
825 | // this may interrupt a lane, causing the destruction of the underlying OS thread | ||
826 | // after this, another lane making use of this keeper can get an error code from the mutex-locking function | ||
827 | // when attempting to grab the mutex again (WINVER <= 0x400 does this, but locks just fine, I don't know about pthread) | ||
828 | if( (retvals > 0) && luaG_inter_move( U, K, L, retvals, eLM_FromKeeper) != eICR_Success) // K->L | ||
829 | { | ||
830 | retvals = -1; | ||
831 | } | ||
832 | } | ||
833 | // whatever happens, restore the stack to where it was at the origin | ||
834 | lua_settop( K, Ktos); | ||
835 | |||
836 | |||
837 | // don't do this for this particular function, as it is only called during Linda destruction, and we don't want to raise an error, ever | ||
838 | if (func_ != KEEPER_API(clear)) | ||
839 | { | ||
840 | // since keeper state GC is stopped, let's run a step once in a while if required | ||
841 | int const gc_threshold = U->keepers->gc_threshold; | ||
842 | if (gc_threshold == 0) | ||
843 | { | ||
844 | lua_gc(K, LUA_GCSTEP, 0); | ||
845 | } | ||
846 | else if (gc_threshold > 0) | ||
847 | { | ||
848 | int const gc_usage = lua_gc(K, LUA_GCCOUNT, 0); | ||
849 | if (gc_usage >= gc_threshold) | ||
850 | { | ||
851 | lua_gc(K, LUA_GCCOLLECT, 0); | ||
852 | int const gc_usage_after = lua_gc(K, LUA_GCCOUNT, 0); | ||
853 | if (gc_usage_after > gc_threshold) | ||
854 | { | ||
855 | luaL_error(L, "Keeper GC threshold is too low, need at least %d", gc_usage_after); | ||
856 | } | ||
857 | } | ||
858 | } | ||
859 | } | ||
860 | |||
861 | return retvals; | ||
862 | } | ||
diff --git a/src/keeper.cpp b/src/keeper.cpp new file mode 100644 index 0000000..f56c50c --- /dev/null +++ b/src/keeper.cpp | |||
@@ -0,0 +1,884 @@ | |||
1 | /* | ||
2 | -- | ||
3 | -- KEEPER.CPP | ||
4 | -- | ||
5 | -- Keeper state logic | ||
6 | -- | ||
7 | -- This code is loaded for each "keeper state": the hidden, intermediate data | ||
8 | -- stores used by Lanes inter-state communication objects. | ||
9 | -- | ||
10 | -- Author: Benoit Germain <bnt.germain@gmail.com> | ||
11 | -- | ||
12 | -- C++ implementation replacing the original keeper.lua | ||
13 | -- | ||
14 | --[[ | ||
15 | =============================================================================== | ||
16 | |||
17 | Copyright (C) 2011-2024 Benoit Germain <bnt.germain@gmail.com> | ||
18 | |||
19 | Permission is hereby granted, free of charge, to any person obtaining a copy | ||
20 | of this software and associated documentation files (the "Software"), to deal | ||
21 | in the Software without restriction, including without limitation the rights | ||
22 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||
23 | copies of the Software, and to permit persons to whom the Software is | ||
24 | furnished to do so, subject to the following conditions: | ||
25 | |||
26 | The above copyright notice and this permission notice shall be included in | ||
27 | all copies or substantial portions of the Software. | ||
28 | |||
29 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
30 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
31 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
32 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
33 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
34 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||
35 | THE SOFTWARE. | ||
36 | |||
37 | =============================================================================== | ||
38 | ]]-- | ||
39 | */ | ||
40 | #include "keeper.h" | ||
41 | |||
42 | #include "compat.h" | ||
43 | #include "state.h" | ||
44 | #include "tools.h" | ||
45 | #include "uniquekey.h" | ||
46 | #include "universe.h" | ||
47 | |||
48 | #include <algorithm> | ||
49 | #include <cassert> | ||
50 | |||
51 | // ################################################################################### | ||
52 | // Keeper implementation | ||
53 | // ################################################################################### | ||
54 | |||
55 | class keeper_fifo | ||
56 | { | ||
57 | public: | ||
58 | |||
59 | int first{ 1 }; | ||
60 | int count{ 0 }; | ||
61 | int limit{ -1 }; | ||
62 | |||
63 | // a fifo full userdata has one uservalue, the table that holds the actual fifo contents | ||
64 | [[nodiscard]] static void* operator new([[maybe_unused]] size_t size_, lua_State* L) noexcept { return lua_newuserdatauv<keeper_fifo>(L, 1); } | ||
65 | // always embedded somewhere else or "in-place constructed" as a full userdata | ||
66 | // can't actually delete the operator because the compiler generates stack unwinding code that could call it in case of exception | ||
67 | static void operator delete([[maybe_unused]] void* p_, lua_State* L) { ASSERT_L(!"should never be called") }; | ||
68 | |||
69 | [[nodiscard]] static keeper_fifo* getPtr(lua_State* L, int idx_) | ||
70 | { | ||
71 | return lua_tofulluserdata<keeper_fifo>(L, idx_); | ||
72 | } | ||
73 | }; | ||
74 | |||
75 | static constexpr int CONTENTS_TABLE{ 1 }; | ||
76 | |||
77 | // ################################################################################################## | ||
78 | |||
79 | // replaces the fifo ud by its uservalue on the stack | ||
80 | [[nodiscard]] static keeper_fifo* prepare_fifo_access(lua_State* L, int idx_) | ||
81 | { | ||
82 | keeper_fifo* const fifo{ keeper_fifo::getPtr(L, idx_) }; | ||
83 | if (fifo != nullptr) | ||
84 | { | ||
85 | idx_ = lua_absindex(L, idx_); | ||
86 | STACK_GROW(L, 1); | ||
87 | // we can replace the fifo userdata in the stack without fear of it being GCed, there are other references around | ||
88 | lua_getiuservalue(L, idx_, CONTENTS_TABLE); | ||
89 | lua_replace(L, idx_); | ||
90 | } | ||
91 | return fifo; | ||
92 | } | ||
93 | |||
94 | // ################################################################################################## | ||
95 | |||
96 | // in: nothing | ||
97 | // out: { first = 1, count = 0, limit = -1} | ||
98 | [[nodiscard]] static keeper_fifo* fifo_new(lua_State* L) | ||
99 | { | ||
100 | STACK_GROW(L, 2); | ||
101 | STACK_CHECK_START_REL(L, 0); | ||
102 | keeper_fifo* const fifo{ new (L) keeper_fifo{} }; | ||
103 | STACK_CHECK(L, 1); | ||
104 | lua_newtable(L); | ||
105 | lua_setiuservalue(L, -2, CONTENTS_TABLE); | ||
106 | STACK_CHECK(L, 1); | ||
107 | return fifo; | ||
108 | } | ||
109 | |||
110 | // ################################################################################################## | ||
111 | |||
112 | // in: expect fifo ... on top of the stack | ||
113 | // out: nothing, removes all pushed values from the stack | ||
114 | static void fifo_push(lua_State* L, keeper_fifo* fifo_, int count_) | ||
115 | { | ||
116 | int const idx{ lua_gettop(L) - count_ }; | ||
117 | int const start{ fifo_->first + fifo_->count - 1 }; | ||
118 | // pop all additional arguments, storing them in the fifo | ||
119 | for (int i = count_; i >= 1; --i) | ||
120 | { | ||
121 | // store in the fifo the value at the top of the stack at the specified index, popping it from the stack | ||
122 | lua_rawseti(L, idx, start + i); | ||
123 | } | ||
124 | fifo_->count += count_; | ||
125 | } | ||
126 | |||
127 | // ################################################################################################## | ||
128 | |||
129 | // in: fifo | ||
130 | // out: ...|nothing | ||
131 | // expects exactly 1 value on the stack! | ||
132 | // currently only called with a count of 1, but this may change in the future | ||
133 | // function assumes that there is enough data in the fifo to satisfy the request | ||
134 | static void fifo_peek(lua_State* L, keeper_fifo* fifo_, int count_) | ||
135 | { | ||
136 | STACK_GROW(L, count_); | ||
137 | for (int i = 0; i < count_; ++i) | ||
138 | { | ||
139 | lua_rawgeti(L, 1, (fifo_->first + i)); | ||
140 | } | ||
141 | } | ||
142 | |||
143 | // ################################################################################################## | ||
144 | |||
145 | // in: fifo | ||
146 | // out: remove the fifo from the stack, push as many items as required on the stack (function assumes they exist in sufficient number) | ||
147 | static void fifo_pop( lua_State* L, keeper_fifo* fifo_, int count_) | ||
148 | { | ||
149 | ASSERT_L(lua_istable(L, -1)); | ||
150 | int const fifo_idx{ lua_gettop(L) }; // ... fifotbl | ||
151 | // each iteration pushes a value on the stack! | ||
152 | STACK_GROW(L, count_ + 2); | ||
153 | // skip first item, we will push it last | ||
154 | for (int i = 1; i < count_; ++i) | ||
155 | { | ||
156 | int const at{ fifo_->first + i }; | ||
157 | // push item on the stack | ||
158 | lua_rawgeti(L, fifo_idx, at); // ... fifotbl val | ||
159 | // remove item from the fifo | ||
160 | lua_pushnil(L); // ... fifotbl val nil | ||
161 | lua_rawseti(L, fifo_idx, at); // ... fifotbl val | ||
162 | } | ||
163 | // now process first item | ||
164 | { | ||
165 | int const at{ fifo_->first }; | ||
166 | lua_rawgeti(L, fifo_idx, at); // ... fifotbl vals val | ||
167 | lua_pushnil(L); // ... fifotbl vals val nil | ||
168 | lua_rawseti(L, fifo_idx, at); // ... fifotbl vals val | ||
169 | lua_replace(L, fifo_idx); // ... vals | ||
170 | } | ||
171 | |||
172 | // avoid ever-growing indexes by resetting each time we detect the fifo is empty | ||
173 | { | ||
174 | int const new_count{ fifo_->count - count_ }; | ||
175 | fifo_->first = (new_count == 0) ? 1 : (fifo_->first + count_); | ||
176 | fifo_->count = new_count; | ||
177 | } | ||
178 | } | ||
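The first/count bookkeeping used by fifo_push()/fifo_pop() above is the part that is easiest to get wrong, so here is a minimal, Lua-free model of it. This is an illustrative sketch only: FifoModel and its std::map contents stand in for the keeper_fifo userdata and its uservalue table, and are not part of the sources.

#include <cassert>
#include <map>
#include <string>
#include <vector>

struct FifoModel
{
    int first{ 1 };
    int count{ 0 };
    std::map<int, std::string> contents; // mirrors the lua_rawseti/lua_rawgeti indices

    void push(std::vector<std::string> const& vals_) // mirrors fifo_push()
    {
        int const start{ first + count - 1 };
        for (int i = 1; i <= static_cast<int>(vals_.size()); ++i)
        {
            contents[start + i] = vals_[i - 1]; // new items follow the current tail
        }
        count += static_cast<int>(vals_.size());
    }

    std::vector<std::string> pop(int count_) // mirrors fifo_pop()
    {
        std::vector<std::string> out;
        for (int i = 0; i < count_; ++i)
        {
            int const at{ first + i };
            out.push_back(contents[at]);
            contents.erase(at); // same effect as storing nil in the slot
        }
        int const new_count{ count - count_ };
        // reset 'first' when the fifo drains so indices never grow without bound
        first = (new_count == 0) ? 1 : (first + count_);
        count = new_count;
        return out;
    }
};

int main()
{
    FifoModel f;
    f.push({ "a", "b", "c" });
    assert(f.pop(2) == (std::vector<std::string>{ "a", "b" }));
    assert(f.first == 3 && f.count == 1);
    f.pop(1);
    assert(f.first == 1 && f.count == 0); // indices reset once empty
    return 0;
}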
179 | |||
180 | // ################################################################################################## | ||
181 | |||
182 | // in: linda_ud expected at stack slot idx | ||
183 | // out: fifos[ud] | ||
184 | // crc64/we of string "FIFOS_KEY" generated at http://www.nitrxgen.net/hashgen/ | ||
185 | static constexpr UniqueKey FIFOS_KEY{ 0xdce50bbc351cd465ull }; | ||
186 | static void push_table(lua_State* L, int idx_) | ||
187 | { | ||
188 | STACK_GROW(L, 5); | ||
189 | STACK_CHECK_START_REL(L, 0); | ||
190 | idx_ = lua_absindex(L, idx_); | ||
191 | FIFOS_KEY.pushValue(L); // ud fifos | ||
192 | lua_pushvalue(L, idx_); // ud fifos ud | ||
193 | lua_rawget(L, -2); // ud fifos fifos[ud] | ||
194 | STACK_CHECK(L, 2); | ||
195 | if (lua_isnil(L, -1)) | ||
196 | { | ||
197 | lua_pop(L, 1); // ud fifos | ||
198 | // add a new fifos table for this linda | ||
199 | lua_newtable(L); // ud fifos fifos[ud] | ||
200 | lua_pushvalue(L, idx_); // ud fifos fifos[ud] ud | ||
201 | lua_pushvalue(L, -2); // ud fifos fifos[ud] ud fifos[ud] | ||
202 | lua_rawset(L, -4); // ud fifos fifos[ud] | ||
203 | } | ||
204 | lua_remove(L, -2); // ud fifos[ud] | ||
205 | STACK_CHECK(L, 1); | ||
206 | } | ||
207 | |||
208 | // ################################################################################################## | ||
209 | |||
210 | int keeper_push_linda_storage(Universe* U, Dest L, void* ptr_, uintptr_t magic_) | ||
211 | { | ||
212 | Keeper* const K{ which_keeper(U->keepers, magic_) }; | ||
213 | Source const KL{ K ? K->L : nullptr }; | ||
214 | if (KL == nullptr) | ||
215 | return 0; | ||
216 | STACK_GROW(KL, 4); // KEEPER MAIN | ||
217 | STACK_CHECK_START_REL(KL, 0); | ||
218 | FIFOS_KEY.pushValue(KL); // fifos | ||
219 | lua_pushlightuserdata(KL, ptr_); // fifos ud | ||
220 | lua_rawget(KL, -2); // fifos storage | ||
221 | lua_remove(KL, -2); // storage | ||
222 | if (!lua_istable(KL, -1)) | ||
223 | { | ||
224 | lua_pop(KL, 1); // | ||
225 | STACK_CHECK(KL, 0); | ||
226 | return 0; | ||
227 | } | ||
228 | // move data from keeper to destination state | ||
229 | lua_pushnil(KL); // storage nil | ||
230 | STACK_GROW(L, 5); | ||
231 | STACK_CHECK_START_REL(L, 0); | ||
232 | lua_newtable(L); // out | ||
233 | while (lua_next(KL, -2)) // storage key fifo | ||
234 | { | ||
235 | keeper_fifo* fifo = prepare_fifo_access(KL, -1); // storage key fifotbl | ||
236 | lua_pushvalue(KL, -2); // storage key fifotbl key | ||
237 | std::ignore = luaG_inter_move(U, KL, L, 1, LookupMode::FromKeeper); // storage key fifotbl // out key | ||
238 | STACK_CHECK(L, 2); | ||
239 | lua_newtable(L); // out key keyout | ||
240 | std::ignore = luaG_inter_move(U, KL, L, 1, LookupMode::FromKeeper); // storage key // out key keyout fifotbl | ||
241 | lua_pushinteger(L, fifo->first); // out key keyout fifotbl first | ||
242 | STACK_CHECK(L, 5); | ||
243 | lua_setfield(L, -3, "first"); // out key keyout fifotbl | ||
244 | lua_pushinteger(L, fifo->count); // out key keyout fifobtl count | ||
245 | STACK_CHECK(L, 5); | ||
246 | lua_setfield(L, -3, "count"); // out key keyout fifotbl | ||
247 | lua_pushinteger(L, fifo->limit); // out key keyout fifotbl limit | ||
248 | STACK_CHECK(L, 5); | ||
249 | lua_setfield(L, -3, "limit"); // out key keyout fifotbl | ||
250 | lua_setfield(L, -2, "fifo"); // out key keyout | ||
251 | lua_rawset(L, -3); // out | ||
252 | STACK_CHECK(L, 1); | ||
253 | } | ||
254 | STACK_CHECK(L, 1); | ||
255 | lua_pop(KL, 1); // | ||
256 | STACK_CHECK(KL, 0); | ||
257 | return 1; | ||
258 | } | ||
259 | |||
260 | // ################################################################################################## | ||
261 | |||
262 | // in: linda_ud | ||
263 | int keepercall_clear(lua_State* L) | ||
264 | { | ||
265 | STACK_GROW(L, 3); | ||
266 | STACK_CHECK_START_REL(L, 0); | ||
267 | FIFOS_KEY.pushValue(L); // ud fifos | ||
268 | lua_pushvalue(L, 1); // ud fifos ud | ||
269 | lua_pushnil(L); // ud fifos ud nil | ||
270 | lua_rawset(L, -3); // ud fifos | ||
271 | lua_pop(L, 1); // ud | ||
272 | STACK_CHECK(L, 0); | ||
273 | return 0; | ||
274 | } | ||
275 | |||
276 | // ################################################################################################## | ||
277 | |||
278 | // in: linda_ud, key, ... | ||
279 | // out: true|false | ||
280 | int keepercall_send(lua_State* L) | ||
281 | { | ||
282 | int const n{ lua_gettop(L) - 2 }; | ||
283 | push_table(L, 1); // ud key ... fifos | ||
284 | // get the fifo associated to this key in this linda, create it if it doesn't exist | ||
285 | lua_pushvalue(L, 2); // ud key ... fifos key | ||
286 | lua_rawget(L, -2); // ud key ... fifos fifo | ||
287 | if( lua_isnil(L, -1)) | ||
288 | { | ||
289 | lua_pop(L, 1); // ud key ... fifos | ||
290 | std::ignore = fifo_new(L); // ud key ... fifos fifo | ||
291 | lua_pushvalue(L, 2); // ud key ... fifos fifo key | ||
292 | lua_pushvalue(L, -2); // ud key ... fifos fifo key fifo | ||
293 | lua_rawset(L, -4); // ud key ... fifos fifo | ||
294 | } | ||
295 | lua_remove(L, -2); // ud key ... fifo | ||
296 | keeper_fifo* fifo{ keeper_fifo::getPtr(L, -1) }; | ||
297 | if (fifo->limit >= 0 && fifo->count + n > fifo->limit) | ||
298 | { | ||
299 | lua_settop(L, 0); // | ||
300 | lua_pushboolean(L, 0); // false | ||
301 | } | ||
302 | else | ||
303 | { | ||
304 | fifo = prepare_fifo_access(L, -1); // ud fifotbl | ||
305 | lua_replace(L, 2); // ud fifotbl ... | ||
306 | fifo_push(L, fifo, n); // ud fifotbl | ||
307 | lua_settop(L, 0); // | ||
308 | lua_pushboolean(L, 1); // true | ||
309 | } | ||
310 | return 1; | ||
311 | } | ||
312 | |||
313 | // ################################################################################################## | ||
314 | |||
315 | // in: linda_ud, key [, key]? | ||
316 | // out: (key, val) or nothing | ||
317 | int keepercall_receive(lua_State* L) | ||
318 | { | ||
319 | int const top{ lua_gettop(L) }; | ||
320 | push_table(L, 1); // ud keys fifos | ||
321 | lua_replace(L, 1); // fifos keys | ||
322 | for (int i = 2; i <= top; ++i) | ||
323 | { | ||
324 | lua_pushvalue(L, i); // fifos keys key[i] | ||
325 | lua_rawget(L, 1); // fifos keys fifo | ||
326 | keeper_fifo* const fifo{ prepare_fifo_access(L, -1) }; // fifos keys fifotbl | ||
327 | if (fifo != nullptr && fifo->count > 0) | ||
328 | { | ||
329 | fifo_pop(L, fifo, 1); // fifos keys val | ||
330 | if (!lua_isnil(L, -1)) | ||
331 | { | ||
332 | lua_replace(L, 1); // val keys | ||
333 | lua_settop(L, i); // val keys key[i] | ||
334 | if (i != 2) | ||
335 | { | ||
336 | lua_replace(L, 2); // val key keys | ||
337 | lua_settop(L, 2); // val key | ||
338 | } | ||
339 | lua_insert(L, 1); // key, val | ||
340 | return 2; | ||
341 | } | ||
342 | } | ||
343 | lua_settop(L, top); // data keys | ||
344 | } | ||
345 | // nothing to receive | ||
346 | return 0; | ||
347 | } | ||
348 | |||
349 | // ################################################################################################## | ||
350 | |||
351 | // in: linda_ud key mincount [maxcount] | ||
352 | int keepercall_receive_batched(lua_State* L) | ||
353 | { | ||
354 | int const min_count{ static_cast<int>(lua_tointeger(L, 3)) }; | ||
355 | if( min_count > 0) | ||
356 | { | ||
357 | int const max_count{ static_cast<int>(luaL_optinteger(L, 4, min_count)) }; | ||
358 | lua_settop(L, 2); // ud key | ||
359 | lua_insert(L, 1); // key ud | ||
360 | push_table(L, 2); // key ud fifos | ||
361 | lua_remove(L, 2); // key fifos | ||
362 | lua_pushvalue(L, 1); // key fifos key | ||
363 | lua_rawget(L, 2); // key fifos fifo | ||
364 | lua_remove(L, 2); // key fifo | ||
365 | keeper_fifo* const fifo{ prepare_fifo_access(L, 2) }; // key fifotbl | ||
366 | if( fifo != nullptr && fifo->count >= min_count) | ||
367 | { | ||
368 | fifo_pop(L, fifo, std::min( max_count, fifo->count)); // key ... | ||
369 | } | ||
370 | else | ||
371 | { | ||
372 | lua_settop(L, 0); // | ||
373 | } | ||
374 | return lua_gettop(L); | ||
375 | } | ||
376 | else | ||
377 | { | ||
378 | return 0; | ||
379 | } | ||
380 | } | ||
381 | |||
382 | // ################################################################################################## | ||
383 | |||
384 | // in: linda_ud key n | ||
385 | // out: true or nil | ||
386 | int keepercall_limit(lua_State* L) | ||
387 | { | ||
388 | int const limit{ static_cast<int>(lua_tointeger(L, 3)) }; | ||
389 | push_table(L, 1); // ud key n fifos | ||
390 | lua_replace(L, 1); // fifos key n | ||
391 | lua_pop(L, 1); // fifos key | ||
392 | lua_pushvalue(L, -1); // fifos key key | ||
393 | lua_rawget(L, -3); // fifos key fifo|nil | ||
394 | keeper_fifo* fifo{ keeper_fifo::getPtr(L, -1) }; | ||
395 | if (fifo == nullptr) | ||
396 | { // fifos key nil | ||
397 | lua_pop(L, 1); // fifos key | ||
398 | fifo = fifo_new(L); // fifos key fifo | ||
399 | lua_rawset(L, -3); // fifos | ||
400 | } | ||
401 | // remove any clutter on the stack | ||
402 | lua_settop(L, 0); | ||
403 | // return true if we decide that blocked threads waiting to write on that key should be awakened | ||
404 | // this is the case if we detect the key was full but it is no longer the case | ||
405 | if( | ||
406 | ((fifo->limit >= 0) && (fifo->count >= fifo->limit)) // the key was full: it is limited and count reached the previous limit | ||
407 | && ((limit < 0) || (fifo->count < limit)) // the key is no longer full: it is now unlimited or count is below the new limit | ||
408 | ) | ||
409 | { | ||
410 | lua_pushboolean(L, 1); // true | ||
411 | } | ||
412 | // set the new limit | ||
413 | fifo->limit = limit; | ||
414 | // return 0 or 1 value | ||
415 | return lua_gettop(L); | ||
416 | } | ||
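The wake-up rule buried in that compound condition reads more easily as a standalone predicate. should_wake_writers() below is a hypothetical helper written for this note; it is equivalent to the inline test in keepercall_limit(), as the static_asserts check.

// wake blocked writers only if the key was full under the old limit and is
// no longer full under the new one (new_limit_ < 0 means "unlimited")
[[nodiscard]] constexpr bool should_wake_writers(int count_, int old_limit_, int new_limit_)
{
    bool const was_full{ (old_limit_ >= 0) && (count_ >= old_limit_) };
    bool const still_full{ (new_limit_ >= 0) && (count_ >= new_limit_) };
    return was_full && !still_full;
}

static_assert(should_wake_writers(5, 5, -1));  // limit removed -> wake
static_assert(should_wake_writers(5, 5, 10));  // limit raised above count -> wake
static_assert(!should_wake_writers(5, 5, 5));  // still full -> don't wake
static_assert(!should_wake_writers(3, 5, 2));  // was never full -> don't wake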
417 | |||
418 | // ################################################################################################## | ||
419 | |||
420 | // in: linda_ud key [[val] ...] | ||
421 | // out: true or nil | ||
422 | int keepercall_set(lua_State* L) | ||
423 | { | ||
424 | bool should_wake_writers{ false }; | ||
425 | STACK_GROW(L, 6); | ||
426 | |||
427 | // retrieve fifos associated with the linda | ||
428 | push_table(L, 1); // ud key [val [, ...]] fifos | ||
429 | lua_replace(L, 1); // fifos key [val [, ...]] | ||
430 | |||
431 | // make sure we have a value on the stack | ||
432 | if (lua_gettop(L) == 2) // fifos key | ||
433 | { | ||
434 | lua_pushvalue(L, -1); // fifos key key | ||
435 | lua_rawget(L, 1); // fifos key fifo|nil | ||
436 | // empty the fifo for the specified key: replace uservalue with a virgin table, reset counters, but leave limit unchanged! | ||
437 | keeper_fifo* const fifo{ keeper_fifo::getPtr(L, -1) }; | ||
438 | if (fifo != nullptr) // might be nullptr if we set a nonexistent key to nil | ||
439 | { // fifos key fifo | ||
440 | if (fifo->limit < 0) // fifo limit value is the default (unlimited): we can totally remove it | ||
441 | { | ||
442 | lua_pop(L, 1); // fifos key | ||
443 | lua_pushnil(L); // fifos key nil | ||
444 | lua_rawset(L, -3); // fifos | ||
445 | } | ||
446 | else | ||
447 | { | ||
448 | // we create room if the fifo was full but it is no longer the case | ||
449 | should_wake_writers = (fifo->limit > 0) && (fifo->count >= fifo->limit); | ||
450 | lua_remove(L, -2); // fifos fifo | ||
451 | lua_newtable(L); // fifos fifo {} | ||
452 | lua_setiuservalue(L, -2, CONTENTS_TABLE); // fifos fifo | ||
453 | fifo->first = 1; | ||
454 | fifo->count = 0; | ||
455 | } | ||
456 | } | ||
457 | } | ||
458 | else // set/replace contents stored at the specified key? | ||
459 | { | ||
460 | int const count{ lua_gettop(L) - 2 }; // number of items we want to store | ||
461 | lua_pushvalue(L, 2); // fifos key [val [, ...]] key | ||
462 | lua_rawget(L, 1); // fifos key [val [, ...]] fifo|nil | ||
463 | keeper_fifo* fifo{ keeper_fifo::getPtr(L, -1) }; | ||
464 | if( fifo == nullptr) // can be nullptr if we store a value at a new key | ||
465 | { // fifos key [val [, ...]] nil | ||
466 | // no need to wake writers in that case, because a writer can't wait on a nonexistent key | ||
467 | lua_pop(L, 1); // fifos key [val [, ...]] | ||
468 | std::ignore = fifo_new(L); // fifos key [val [, ...]] fifo | ||
469 | lua_pushvalue(L, 2); // fifos key [val [, ...]] fifo key | ||
470 | lua_pushvalue(L, -2); // fifos key [val [, ...]] fifo key fifo | ||
471 | lua_rawset(L, 1); // fifos key [val [, ...]] fifo | ||
472 | } | ||
473 | else // the fifo exists, we just want to update its contents | ||
474 | { // fifos key [val [, ...]] fifo | ||
475 | // we create room if the fifo was full but it is no longer the case | ||
476 | should_wake_writers = (fifo->limit > 0) && (fifo->count >= fifo->limit) && (count < fifo->limit); | ||
477 | // empty the fifo for the specified key: replace uservalue with a virgin table, reset counters, but leave limit unchanged! | ||
478 | lua_newtable(L); // fifos key [val [, ...]] fifo {} | ||
479 | lua_setiuservalue(L, -2, CONTENTS_TABLE); // fifos key [val [, ...]] fifo | ||
480 | fifo->first = 1; | ||
481 | fifo->count = 0; | ||
482 | } | ||
483 | fifo = prepare_fifo_access(L, -1); // fifos key [val [, ...]] fifotbl | ||
484 | // move the fifo below the values we want to store | ||
485 | lua_insert(L, 3); // fifos key fifotbl [val [, ...]] | ||
486 | fifo_push(L, fifo, count); // fifos key fifotbl | ||
487 | } | ||
488 | return should_wake_writers ? (lua_pushboolean(L, 1), 1) : 0; | ||
489 | } | ||
490 | |||
491 | // ################################################################################################## | ||
492 | |||
493 | // in: linda_ud key [count] | ||
494 | // out: at most <count> values | ||
495 | int keepercall_get(lua_State* L) | ||
496 | { | ||
497 | int count{ 1 }; | ||
498 | if (lua_gettop(L) == 3) // ud key count | ||
499 | { | ||
500 | count = static_cast<int>(lua_tointeger(L, 3)); | ||
501 | lua_pop(L, 1); // ud key | ||
502 | } | ||
503 | push_table(L, 1); // ud key fifos | ||
504 | lua_replace(L, 1); // fifos key | ||
505 | lua_rawget(L, 1); // fifos fifo | ||
506 | keeper_fifo* const fifo{ prepare_fifo_access(L, -1) }; // fifos fifotbl | ||
507 | if (fifo != nullptr && fifo->count > 0) | ||
508 | { | ||
509 | lua_remove(L, 1); // fifotbl | ||
510 | count = std::min(count, fifo->count); | ||
511 | // read <count> value off the fifo | ||
512 | fifo_peek(L, fifo, count); // fifotbl ... | ||
513 | return count; | ||
514 | } | ||
515 | // no fifo was ever registered for this key, or it is empty | ||
516 | return 0; | ||
517 | } | ||
518 | |||
519 | // ################################################################################################## | ||
520 | |||
521 | // in: linda_ud [, key [, ...]] | ||
522 | int keepercall_count(lua_State* L) | ||
523 | { | ||
524 | push_table(L, 1); // ud keys fifos | ||
525 | switch (lua_gettop(L)) | ||
526 | { | ||
527 | // no key is specified: return a table giving the count of all known keys | ||
528 | case 2: // ud fifos | ||
529 | lua_newtable(L); // ud fifos out | ||
530 | lua_replace(L, 1); // out fifos | ||
531 | lua_pushnil(L); // out fifos nil | ||
532 | while (lua_next(L, 2)) // out fifos key fifo | ||
533 | { | ||
534 | keeper_fifo* const fifo{ keeper_fifo::getPtr(L, -1) }; | ||
535 | lua_pop(L, 1); // out fifos key | ||
536 | lua_pushvalue(L, -1); // out fifos key key | ||
537 | lua_pushinteger(L, fifo->count); // out fifos key key count | ||
538 | lua_rawset(L, -5); // out fifos key | ||
539 | } | ||
540 | lua_pop(L, 1); // out | ||
541 | break; | ||
542 | |||
543 | // 1 key is specified: return its count | ||
544 | case 3: // ud key fifos | ||
545 | lua_replace(L, 1); // fifos key | ||
546 | lua_rawget(L, -2); // fifos fifo|nil | ||
547 | if (lua_isnil(L, -1)) // the key is unknown | ||
548 | { // fifos nil | ||
549 | lua_remove(L, -2); // nil | ||
550 | } | ||
551 | else // the key is known | ||
552 | { // fifos fifo | ||
553 | keeper_fifo* const fifo{ keeper_fifo::getPtr(L, -1) }; | ||
554 | lua_pushinteger(L, fifo->count); // fifos fifo count | ||
555 | lua_replace(L, -3); // count fifo | ||
556 | lua_pop(L, 1); // count | ||
557 | } | ||
558 | break; | ||
559 | |||
560 | // a variable number of keys is specified: return a table of their counts | ||
561 | default: // ud keys fifos | ||
562 | lua_newtable(L); // ud keys... fifos out | ||
563 | lua_replace(L, 1); // out keys... fifos | ||
564 | // shifts all keys up in the stack. potentially slow if there are a lot of them, but then it should be bearable | ||
565 | lua_insert(L, 2); // out fifos keys... | ||
566 | while (lua_gettop(L) > 2) | ||
567 | { | ||
568 | lua_pushvalue(L, -1); // out fifos keys... key | ||
569 | lua_rawget(L, 2); // out fifos keys... fifo|nil | ||
570 | keeper_fifo* const fifo{ keeper_fifo::getPtr(L, -1) }; | ||
571 | lua_pop(L, 1); // out fifos keys... | ||
572 | if (fifo != nullptr) // the key is known | ||
573 | { | ||
574 | lua_pushinteger(L, fifo->count); // out fifos keys... count | ||
575 | lua_rawset(L, 1); // out fifos keys... | ||
576 | } | ||
577 | else // the key is unknown | ||
578 | { | ||
579 | lua_pop(L, 1); // out fifos keys... | ||
580 | } | ||
581 | } // all keys are exhausted // out fifos | ||
582 | lua_pop(L, 1); // out | ||
583 | } | ||
584 | ASSERT_L(lua_gettop(L) == 1); | ||
585 | return 1; | ||
586 | } | ||
587 | |||
588 | //################################################################################### | ||
589 | // Keeper API, accessed from linda methods | ||
590 | //################################################################################### | ||
591 | |||
592 | /*---=== Keeper states ===--- | ||
593 | */ | ||
594 | |||
595 | /* | ||
596 | * Pool of keeper states | ||
597 | * | ||
598 | * Access to each keeper state is serialized (only one OS thread at a time), so the | ||
599 | * bigger the pool, the lower the chance of unnecessary waits. Lindas map to | ||
600 | * keepers by a hash of their address. | ||
601 | */ | ||
602 | |||
603 | // called as __gc for the keepers array userdata | ||
604 | void close_keepers(Universe* U) | ||
605 | { | ||
606 | if (U->keepers != nullptr) | ||
607 | { | ||
608 | int nbKeepers = U->keepers->nb_keepers; | ||
609 | // NOTE: imagine that keeper state N+1 currently holds a linda that uses keeper N, plus a __gc that will make use of it. | ||
610 | // When keeper N+1 is closed, that object is GCed and its linda operation runs, attempting to acquire keeper N, whose Lua state no longer exists. | ||
611 | // In that case the linda operation should do nothing, which means these operations must check for keeper acquisition success; | ||
612 | // this is what the early-out on a zeroed U->keepers->nb_keepers achieves. | ||
613 | U->keepers->nb_keepers = 0; | ||
614 | for (int i = 0; i < nbKeepers; ++i) | ||
615 | { | ||
616 | lua_State* K = U->keepers->keeper_array[i].L; | ||
617 | U->keepers->keeper_array[i].L = nullptr; | ||
618 | if (K != nullptr) | ||
619 | { | ||
620 | lua_close(K); | ||
621 | } | ||
622 | else | ||
623 | { | ||
624 | // detected partial init: destroy only the mutexes that got initialized properly | ||
625 | nbKeepers = i; | ||
626 | } | ||
627 | } | ||
628 | for (int i = 0; i < nbKeepers; ++i) | ||
629 | { | ||
630 | U->keepers->keeper_array[i].~Keeper(); | ||
631 | } | ||
632 | // free the keeper bookkeeping structure | ||
633 | U->internal_allocator.free(U->keepers, sizeof(Keepers) + (nbKeepers - 1) * sizeof(Keeper)); | ||
634 | U->keepers = nullptr; | ||
635 | } | ||
636 | } | ||
637 | |||
638 | // ################################################################################################## | ||
639 | |||
640 | /* | ||
641 | * Initialize keeper states | ||
642 | * | ||
643 | * If there is a problem, returns nullptr and pushes the error message on the stack | ||
644 | * else returns the keepers bookkeeping structure. | ||
645 | * | ||
646 | * Note: Any problems would be design flaws; the created Lua state is left | ||
647 | * unclosed, because it does not really matter. In production code, this | ||
648 | * function never fails. | ||
649 | * settings table is at position 1 on the stack | ||
650 | */ | ||
651 | void init_keepers(Universe* U, lua_State* L) | ||
652 | { | ||
653 | STACK_CHECK_START_REL(L, 0); // L K | ||
654 | lua_getfield(L, 1, "nb_keepers"); // nb_keepers | ||
655 | int const nb_keepers{ static_cast<int>(lua_tointeger(L, -1)) }; | ||
656 | lua_pop(L, 1); // | ||
657 | if (nb_keepers < 1) | ||
658 | { | ||
659 | luaL_error(L, "Bad number of keepers (%d)", nb_keepers); // doesn't return | ||
660 | } | ||
661 | STACK_CHECK(L, 0); | ||
662 | |||
663 | lua_getfield(L, 1, "keepers_gc_threshold"); // keepers_gc_threshold | ||
664 | int const keepers_gc_threshold{ static_cast<int>(lua_tointeger(L, -1)) }; | ||
665 | lua_pop(L, 1); // | ||
666 | STACK_CHECK(L, 0); | ||
667 | |||
668 | // Keepers contains an array of 1 Keeper, adjust for the actual number of keeper states | ||
669 | { | ||
670 | size_t const bytes = sizeof(Keepers) + (nb_keepers - 1) * sizeof(Keeper); | ||
671 | U->keepers = static_cast<Keepers*>(U->internal_allocator.alloc(bytes)); | ||
672 | if (U->keepers == nullptr) | ||
673 | { | ||
674 | luaL_error(L, "init_keepers() failed while creating keeper array; out of memory"); // doesn't return | ||
675 | } | ||
676 | new (U->keepers) Keepers{}; // placement-new: an explicit constructor call on the raw allocation is not standard C++ | ||
677 | U->keepers->gc_threshold = keepers_gc_threshold; | ||
678 | U->keepers->nb_keepers = nb_keepers; | ||
679 | |||
680 | for (int i = 0; i < nb_keepers; ++i) | ||
681 | { | ||
682 | new (&U->keepers->keeper_array[i]) Keeper{}; // placement-new for each keeper slot | ||
683 | } | ||
684 | } | ||
685 | for (int i = 0; i < nb_keepers; ++i) // keepersUD | ||
686 | { | ||
687 | // note that we will leak K if we raise an error later | ||
688 | lua_State* const K{ create_state(U, L) }; | ||
689 | if (K == nullptr) | ||
690 | { | ||
691 | luaL_error(L, "init_keepers() failed while creating keeper states; out of memory"); // doesn't return | ||
692 | } | ||
693 | |||
694 | U->keepers->keeper_array[i].L = K; | ||
695 | |||
696 | if (U->keepers->gc_threshold >= 0) | ||
697 | { | ||
698 | lua_gc(K, LUA_GCSTOP, 0); | ||
699 | } | ||
700 | |||
701 | STACK_CHECK_START_ABS(K, 0); | ||
702 | |||
703 | // copy the universe pointer in the keeper itself | ||
704 | universe_store(K, U); | ||
705 | STACK_CHECK(K, 0); | ||
706 | |||
707 | // make sure 'package' is initialized in keeper states, so that we have require() | ||
708 | // this is needed when transferring deep userdata objects | ||
709 | luaL_requiref(K, "package", luaopen_package, 1); // package | ||
710 | lua_pop(K, 1); // | ||
711 | STACK_CHECK(K, 0); | ||
712 | serialize_require(DEBUGSPEW_PARAM_COMMA(U) K); | ||
713 | STACK_CHECK(K, 0); | ||
714 | |||
715 | // copy package.path and package.cpath from the source state | ||
716 | lua_getglobal(L, "package"); // "..." keepersUD package | ||
717 | if (!lua_isnil(L, -1)) | ||
718 | { | ||
719 | // when copying with mode LookupMode::ToKeeper, error message is pushed at the top of the stack, not raised immediately | ||
720 | if (luaG_inter_copy_package(U, Source{ L }, Dest{ K }, -1, LookupMode::ToKeeper) != InterCopyResult::Success) | ||
721 | { | ||
722 | // if something went wrong, the error message is at the top of the stack | ||
723 | lua_remove(L, -2); // error_msg | ||
724 | raise_lua_error(L); | ||
725 | } | ||
726 | } | ||
727 | lua_pop(L, 1); // | ||
728 | STACK_CHECK(L, 0); | ||
729 | |||
730 | // attempt to call on_state_create(), if we have one and it is a C function | ||
731 | // (only support a C function because we can't transfer executable Lua code in keepers) | ||
732 | // will raise an error in L in case of problem | ||
733 | call_on_state_create(U, K, L, LookupMode::ToKeeper); | ||
734 | |||
735 | // to see VM name in Decoda debugger | ||
736 | lua_pushfstring(K, "Keeper #%d", i + 1); // "Keeper #n" | ||
737 | lua_setglobal(K, "decoda_name"); // | ||
738 | // create the fifos table in the keeper state | ||
739 | FIFOS_KEY.setValue(K, [](lua_State* L) { lua_newtable(L); }); | ||
740 | STACK_CHECK(K, 0); | ||
741 | } | ||
742 | STACK_CHECK(L, 0); | ||
743 | } | ||
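The single allocation performed above relies on the trailing keeper_array[1] member acting as a variable-length tail, which is why the size is sizeof(Keepers) plus (nb_keepers - 1) extra Keeper slots. A small sketch of that arithmetic with stand-in types (DummyKeeper/DummyKeepers are illustrative, not the real structs):

#include <cstddef>

struct DummyKeeper { void* L; };
struct DummyKeepers { int gc_threshold; int nb_keepers; DummyKeeper keeper_array[1]; };

// one Keeper already lives inside the base struct, so only (nb - 1) more are appended
[[nodiscard]] constexpr std::size_t keepers_bytes(int nb_keepers_)
{
    return sizeof(DummyKeepers) + (nb_keepers_ - 1) * sizeof(DummyKeeper);
}

static_assert(keepers_bytes(1) == sizeof(DummyKeepers));
static_assert(keepers_bytes(3) == sizeof(DummyKeepers) + 2 * sizeof(DummyKeeper));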
744 | |||
745 | // ################################################################################################## | ||
746 | |||
747 | // should be called only when inside a keeper_acquire/keeper_release pair (see linda_protected_call) | ||
748 | Keeper* which_keeper(Keepers* keepers_, uintptr_t magic_) | ||
749 | { | ||
750 | int const nbKeepers{ keepers_->nb_keepers }; | ||
751 | if (nbKeepers) | ||
752 | { | ||
753 | unsigned int i = (unsigned int) ((magic_ >> KEEPER_MAGIC_SHIFT) % nbKeepers); | ||
754 | return &keepers_->keeper_array[i]; | ||
755 | } | ||
756 | return nullptr; | ||
757 | } | ||
758 | |||
759 | // ################################################################################################## | ||
760 | |||
761 | Keeper* keeper_acquire(Keepers* keepers_, uintptr_t magic_) | ||
762 | { | ||
763 | int const nbKeepers{ keepers_->nb_keepers }; | ||
764 | // can be 0 if this happens during main state shutdown (lanes is being GC'ed -> no keepers) | ||
765 | if (nbKeepers) | ||
766 | { | ||
767 | /* | ||
768 | * Any hashing will do that maps pointers to 0..GNbKeepers-1 | ||
769 | * consistently. | ||
770 | * | ||
771 | * Pointers are usually aligned to 8 bytes or so - ignore the low-order bits. | ||
772 | * Cast to an unsigned integer type to avoid compiler warnings about loss of data when converting a pointer to an integer. | ||
773 | */ | ||
774 | unsigned int i = (unsigned int)((magic_ >> KEEPER_MAGIC_SHIFT) % nbKeepers); | ||
775 | Keeper* K = &keepers_->keeper_array[i]; | ||
776 | K->m_mutex.lock(); | ||
777 | //++ K->count; | ||
778 | return K; | ||
779 | } | ||
780 | return nullptr; | ||
781 | } | ||
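A condensed restatement of the hashing performed above, assuming KEEPER_MAGIC_SHIFT == 3 (that is, userdata addresses are at least 8-byte aligned). keeper_index() is a hypothetical helper written for this note, not part of the sources; the caller is expected to have checked that the pool is not empty.

#include <cstdint>

// drop the alignment bits of the linda's address-derived 'magic', then spread
// the result across the pool so distinct lindas tend to use distinct keepers
[[nodiscard]] inline int keeper_index(std::uintptr_t magic_, int nb_keepers_)
{
    return static_cast<int>((magic_ >> 3) % static_cast<std::uintptr_t>(nb_keepers_));
}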
782 | |||
783 | // ################################################################################################## | ||
784 | |||
785 | void keeper_release(Keeper* K) | ||
786 | { | ||
787 | //-- K->count; | ||
788 | if (K) | ||
789 | { | ||
790 | K->m_mutex.unlock(); | ||
791 | } | ||
792 | } | ||
793 | |||
794 | // ################################################################################################## | ||
795 | |||
796 | void keeper_toggle_nil_sentinels(lua_State* L, int val_i_, LookupMode const mode_) | ||
797 | { | ||
798 | int const n{ lua_gettop(L) }; | ||
799 | for (int i = val_i_; i <= n; ++i) | ||
800 | { | ||
801 | if (mode_ == LookupMode::ToKeeper) | ||
802 | { | ||
803 | if (lua_isnil(L, i)) | ||
804 | { | ||
805 | NIL_SENTINEL.pushKey(L); | ||
806 | lua_replace(L, i); | ||
807 | } | ||
808 | } | ||
809 | else | ||
810 | { | ||
811 | if (NIL_SENTINEL.equals(L, i)) | ||
812 | { | ||
813 | lua_pushnil(L); | ||
814 | lua_replace(L, i); | ||
815 | } | ||
816 | } | ||
817 | } | ||
818 | } | ||
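keeper_toggle_nil_sentinels() exists because nil cannot survive a round trip through the keeper's fifo tables: storing nil in a table slot is the same as erasing it. The sketch below reproduces the idea with a plain lightuserdata address standing in for the NIL_SENTINEL UniqueKey; it is a simplified illustration, not the library's API.

#include <lua.hpp>
#include <cassert>

static char sentinel_tag; // its address is the stand-in sentinel

static void to_keeper(lua_State* L, int first_) // nil -> sentinel, before storage
{
    for (int i = first_, n = lua_gettop(L); i <= n; ++i)
    {
        if (lua_isnil(L, i))
        {
            lua_pushlightuserdata(L, &sentinel_tag);
            lua_replace(L, i);
        }
    }
}

static void from_keeper(lua_State* L, int first_) // sentinel -> nil, after retrieval
{
    for (int i = first_, n = lua_gettop(L); i <= n; ++i)
    {
        if (lua_touserdata(L, i) == static_cast<void*>(&sentinel_tag))
        {
            lua_pushnil(L);
            lua_replace(L, i);
        }
    }
}

int main()
{
    lua_State* const L{ luaL_newstate() };
    lua_pushinteger(L, 1);
    lua_pushnil(L);
    lua_pushinteger(L, 3);
    to_keeper(L, 1);   // stack holds 1, sentinel, 3: nothing would be lost in a table
    from_keeper(L, 1); // stack holds 1, nil, 3 again
    assert(lua_isnil(L, 2));
    lua_close(L);
    return 0;
}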
819 | |||
820 | // ################################################################################################## | ||
821 | |||
822 | /* | ||
823 | * Call a function ('func_name') in the keeper state, and pass on the returned | ||
824 | * values to 'L'. | ||
825 | * | ||
826 | * 'linda': deep Linda pointer (used only as a unique table key, first parameter) | ||
827 | * 'starting_index': first of the rest of parameters (none if 0) | ||
828 | * | ||
829 | * Returns: number of return values (pushed to 'L') or -1 in case of error | ||
830 | */ | ||
831 | int keeper_call(Universe* U, lua_State* K, keeper_api_t func_, lua_State* L, void* linda, int starting_index) | ||
832 | { | ||
833 | int const args{ starting_index ? (lua_gettop(L) - starting_index + 1) : 0 }; | ||
834 | int const Ktos{ lua_gettop(K) }; | ||
835 | int retvals = -1; | ||
836 | |||
837 | STACK_GROW(K, 2); | ||
838 | |||
839 | PUSH_KEEPER_FUNC(K, func_); | ||
840 | |||
841 | lua_pushlightuserdata(K, linda); | ||
842 | |||
843 | if ((args == 0) || luaG_inter_copy(U, Source{ L }, Dest{ K }, args, LookupMode::ToKeeper) == InterCopyResult::Success) // L->K | ||
844 | { | ||
845 | lua_call(K, 1 + args, LUA_MULTRET); | ||
846 | retvals = lua_gettop(K) - Ktos; | ||
847 | // note that this can raise a luaL_error while the keeper state (and its mutex) is acquired | ||
848 | // this may interrupt a lane, causing the destruction of the underlying OS thread | ||
849 | // after this, another lane making use of this keeper can get an error code from the mutex-locking function | ||
850 | // when attempting to grab the mutex again (WINVER <= 0x400 does this, but locks just fine, I don't know about pthread) | ||
851 | if ((retvals > 0) && luaG_inter_move(U, Source{ K }, Dest{ L }, retvals, LookupMode::FromKeeper) != InterCopyResult::Success) // K->L | ||
852 | { | ||
853 | retvals = -1; | ||
854 | } | ||
855 | } | ||
856 | // whatever happens, restore the stack to where it was at the origin | ||
857 | lua_settop(K, Ktos); | ||
858 | |||
859 | // don't do this for this particular function, as it is only called during Linda destruction, and we don't want to raise an error, ever | ||
860 | if (func_ != KEEPER_API(clear)) [[unlikely]] | ||
861 | { | ||
862 | // since keeper state GC is stopped, let's run a step once in a while if required | ||
863 | int const gc_threshold{ U->keepers->gc_threshold }; | ||
864 | if (gc_threshold == 0) [[unlikely]] | ||
865 | { | ||
866 | lua_gc(K, LUA_GCSTEP, 0); | ||
867 | } | ||
868 | else if (gc_threshold > 0) [[likely]] | ||
869 | { | ||
870 | int const gc_usage{ lua_gc(K, LUA_GCCOUNT, 0) }; | ||
871 | if (gc_usage >= gc_threshold) | ||
872 | { | ||
873 | lua_gc(K, LUA_GCCOLLECT, 0); | ||
874 | int const gc_usage_after{ lua_gc(K, LUA_GCCOUNT, 0) }; | ||
875 | if (gc_usage_after > gc_threshold) [[unlikely]] | ||
876 | { | ||
877 | luaL_error(L, "Keeper GC threshold is too low, need at least %d", gc_usage_after); | ||
878 | } | ||
879 | } | ||
880 | } | ||
881 | } | ||
882 | |||
883 | return retvals; | ||
884 | } | ||
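The GC throttling at the end of keeper_call() boils down to a three-way policy driven by the keepers_gc_threshold setting: a negative value leaves the keeper's GC running automatically (it is never stopped in init_keepers()), zero stops it and runs an incremental step after every keeper operation, and a positive value stops it and triggers a full collection once the keeper's memory usage (in KB, as reported by lua_gc(LUA_GCCOUNT)) reaches the threshold. Restated as a standalone decision function; the names are ours, the behavior mirrors the code above.

enum class KeeperGcAction { None, Step, FullCollect };

[[nodiscard]] inline KeeperGcAction keeper_gc_action(int gc_threshold_, int gc_usage_kb_)
{
    if (gc_threshold_ == 0)
        return KeeperGcAction::Step;        // GC is stopped: step manually after each call
    if (gc_threshold_ > 0 && gc_usage_kb_ >= gc_threshold_)
        return KeeperGcAction::FullCollect; // usage crossed the threshold: full collect
    return KeeperGcAction::None;            // negative threshold: automatic GC was never stopped
}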
diff --git a/src/keeper.h b/src/keeper.h index 7c55809..627c7ea 100644 --- a/src/keeper.h +++ b/src/keeper.h | |||
@@ -1,58 +1,60 @@ | |||
1 | #if !defined( __keeper_h__) | 1 | #pragma once |
2 | #define __keeper_h__ 1 | ||
3 | 2 | ||
3 | #ifdef __cplusplus | ||
4 | extern "C" { | ||
5 | #endif // __cplusplus | ||
4 | #include "lua.h" | 6 | #include "lua.h" |
7 | #ifdef __cplusplus | ||
8 | } | ||
9 | #endif // __cplusplus | ||
10 | |||
5 | #include "threading.h" | 11 | #include "threading.h" |
6 | #include "uniquekey.h" | 12 | #include "uniquekey.h" |
7 | 13 | ||
14 | #include <mutex> | ||
15 | |||
8 | // forwards | 16 | // forwards |
9 | struct s_Universe; | 17 | enum class LookupMode; |
10 | typedef struct s_Universe Universe; | 18 | class Universe; |
11 | enum eLookupMode; | ||
12 | typedef enum eLookupMode LookupMode; | ||
13 | 19 | ||
14 | struct s_Keeper | 20 | struct Keeper |
15 | { | 21 | { |
16 | MUTEX_T keeper_cs; | 22 | std::mutex m_mutex; |
17 | lua_State* L; | 23 | lua_State* L{ nullptr }; |
18 | //int count; | 24 | // int count; |
19 | }; | 25 | }; |
20 | typedef struct s_Keeper Keeper; | ||
21 | 26 | ||
22 | struct s_Keepers | 27 | struct Keepers |
23 | { | 28 | { |
24 | int gc_threshold; | 29 | int gc_threshold{ 0 }; |
25 | int nb_keepers; | 30 | int nb_keepers{ 0 }; |
26 | Keeper keeper_array[1]; | 31 | Keeper keeper_array[1]; |
27 | }; | 32 | }; |
28 | typedef struct s_Keepers Keepers; | ||
29 | 33 | ||
30 | void init_keepers( Universe* U, lua_State* L); | 34 | static constexpr uintptr_t KEEPER_MAGIC_SHIFT{ 3 }; |
31 | void close_keepers( Universe* U); | 35 | // crc64/we of string "NIL_SENTINEL" generated at http://www.nitrxgen.net/hashgen/ |
36 | static constexpr UniqueKey NIL_SENTINEL{ 0x7eaafa003a1d11a1ull }; | ||
32 | 37 | ||
33 | Keeper* which_keeper( Keepers* keepers_, uintptr_t magic_); | 38 | void init_keepers(Universe* U, lua_State* L); |
34 | Keeper* keeper_acquire( Keepers* keepers_, uintptr_t magic_); | 39 | void close_keepers(Universe* U); |
35 | #define KEEPER_MAGIC_SHIFT 3 | ||
36 | void keeper_release( Keeper* K_); | ||
37 | void keeper_toggle_nil_sentinels( lua_State* L, int val_i_, LookupMode const mode_); | ||
38 | int keeper_push_linda_storage( Universe* U, lua_State* L, void* ptr_, uintptr_t magic_); | ||
39 | 40 | ||
40 | // crc64/we of string "NIL_SENTINEL" generated at http://www.nitrxgen.net/hashgen/ | 41 | [[nodiscard]] Keeper* which_keeper(Keepers* keepers_, uintptr_t magic_); |
41 | static DECLARE_CONST_UNIQUE_KEY( NIL_SENTINEL, 0x7eaafa003a1d11a1); | 42 | [[nodiscard]] Keeper* keeper_acquire(Keepers* keepers_, uintptr_t magic_); |
43 | void keeper_release(Keeper* K_); | ||
44 | void keeper_toggle_nil_sentinels(lua_State* L, int val_i_, LookupMode const mode_); | ||
45 | [[nodiscard]] int keeper_push_linda_storage(Universe* U, Dest L, void* ptr_, uintptr_t magic_); | ||
42 | 46 | ||
43 | typedef lua_CFunction keeper_api_t; | 47 | using keeper_api_t = lua_CFunction; |
44 | #define KEEPER_API( _op) keepercall_ ## _op | 48 | #define KEEPER_API(_op) keepercall_##_op |
45 | #define PUSH_KEEPER_FUNC lua_pushcfunction | 49 | #define PUSH_KEEPER_FUNC lua_pushcfunction |
46 | // lua_Cfunctions to run inside a keeper state (formerly implemented in Lua) | 50 | // lua_Cfunctions to run inside a keeper state |
47 | int keepercall_clear( lua_State* L); | 51 | [[nodiscard]] int keepercall_clear(lua_State* L); |
48 | int keepercall_send( lua_State* L); | 52 | [[nodiscard]] int keepercall_send(lua_State* L); |
49 | int keepercall_receive( lua_State* L); | 53 | [[nodiscard]] int keepercall_receive(lua_State* L); |
50 | int keepercall_receive_batched( lua_State* L); | 54 | [[nodiscard]] int keepercall_receive_batched(lua_State* L); |
51 | int keepercall_limit( lua_State* L); | 55 | [[nodiscard]] int keepercall_limit(lua_State* L); |
52 | int keepercall_get( lua_State* L); | 56 | [[nodiscard]] int keepercall_get(lua_State* L); |
53 | int keepercall_set( lua_State* L); | 57 | [[nodiscard]] int keepercall_set(lua_State* L); |
54 | int keepercall_count( lua_State* L); | 58 | [[nodiscard]] int keepercall_count(lua_State* L); |
55 | 59 | ||
56 | int keeper_call( Universe* U, lua_State* K, keeper_api_t _func, lua_State* L, void* linda, uint_t starting_index); | 60 | [[nodiscard]] int keeper_call(Universe* U, lua_State* K, keeper_api_t _func, lua_State* L, void* linda, int starting_index); |
57 | |||
58 | #endif // __keeper_h__ \ No newline at end of file | ||
diff --git a/src/lanes.c b/src/lanes.c deleted file mode 100644 index ca2b53a..0000000 --- a/src/lanes.c +++ /dev/null | |||
@@ -1,2147 +0,0 @@ | |||
1 | /* | ||
2 | * LANES.C Copyright (c) 2007-08, Asko Kauppi | ||
3 | * Copyright (C) 2009-19, Benoit Germain | ||
4 | * | ||
5 | * Multithreading in Lua. | ||
6 | * | ||
7 | * History: | ||
8 | * See CHANGES | ||
9 | * | ||
10 | * Platforms (tested internally): | ||
11 | * OS X (10.5.7 PowerPC/Intel) | ||
12 | * Linux x86 (Ubuntu 8.04) | ||
13 | * Win32 (Windows XP Home SP2, Visual C++ 2005/2008 Express) | ||
14 | * | ||
15 | * Platforms (tested externally): | ||
16 | * Win32 (MSYS) by Ross Berteig. | ||
17 | * | ||
18 | * Platforms (testers appreciated): | ||
19 | * Win64 - should work??? | ||
20 | * Linux x64 - should work | ||
21 | * FreeBSD - should work | ||
22 | * QNX - porting shouldn't be hard | ||
23 | * Sun Solaris - porting shouldn't be hard | ||
24 | * | ||
25 | * References: | ||
26 | * "Porting multithreaded applications from Win32 to Mac OS X": | ||
27 | * <http://developer.apple.com/macosx/multithreadedprogramming.html> | ||
28 | * | ||
29 | * Pthreads: | ||
30 | * <http://vergil.chemistry.gatech.edu/resources/programming/threads.html> | ||
31 | * | ||
32 | * MSDN: <http://msdn2.microsoft.com/en-us/library/ms686679.aspx> | ||
33 | * | ||
34 | * <http://ridiculousfish.com/blog/archives/2007/02/17/barrier> | ||
35 | * | ||
36 | * Defines: | ||
37 | * -DLINUX_SCHED_RR: all threads are lifted to SCHED_RR category, to | ||
38 | * allow negative priorities [-3,-1] be used. Even without this, | ||
39 | * using priorities will require 'sudo' privileges on Linux. | ||
40 | * | ||
41 | * -DUSE_PTHREAD_TIMEDJOIN: use 'pthread_timedjoin_np()' for waiting | ||
42 | * for threads with a timeout. This changes the thread cleanup | ||
43 | * mechanism slightly (cleans up at the join, not once the thread | ||
44 | * has finished). May or may not be a good idea to use it. | ||
45 | * Available only in selected operating systems (Linux). | ||
46 | * | ||
47 | * Bugs: | ||
48 | * | ||
49 | * To-do: | ||
50 | * | ||
51 | * Make waiting threads cancellable. | ||
52 | * ... | ||
53 | */ | ||
54 | |||
55 | /* | ||
56 | =============================================================================== | ||
57 | |||
58 | Copyright (C) 2007-10 Asko Kauppi <akauppi@gmail.com> | ||
59 | 2011-19 Benoit Germain <bnt.germain@gmail.com> | ||
60 | |||
61 | Permission is hereby granted, free of charge, to any person obtaining a copy | ||
62 | of this software and associated documentation files (the "Software"), to deal | ||
63 | in the Software without restriction, including without limitation the rights | ||
64 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||
65 | copies of the Software, and to permit persons to whom the Software is | ||
66 | furnished to do so, subject to the following conditions: | ||
67 | |||
68 | The above copyright notice and this permission notice shall be included in | ||
69 | all copies or substantial portions of the Software. | ||
70 | |||
71 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
72 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
73 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
74 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
75 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
76 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||
77 | THE SOFTWARE. | ||
78 | |||
79 | =============================================================================== | ||
80 | */ | ||
81 | |||
82 | #include <string.h> | ||
83 | #include <stdio.h> | ||
84 | #include <stdlib.h> | ||
85 | #include <ctype.h> | ||
86 | #include <assert.h> | ||
87 | |||
88 | #include "lanes.h" | ||
89 | #include "threading.h" | ||
90 | #include "compat.h" | ||
91 | #include "tools.h" | ||
92 | #include "state.h" | ||
93 | #include "universe.h" | ||
94 | #include "keeper.h" | ||
95 | #include "lanes_private.h" | ||
96 | |||
97 | #if !(defined( PLATFORM_XBOX) || defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC)) | ||
98 | # include <sys/time.h> | ||
99 | #endif | ||
100 | |||
101 | /* geteuid() */ | ||
102 | #ifdef PLATFORM_LINUX | ||
103 | # include <unistd.h> | ||
104 | # include <sys/types.h> | ||
105 | #endif | ||
106 | |||
107 | /* Do you want full call stacks, or just the line where the error happened? | ||
108 | * | ||
109 | * TBD: The full stack feature does not seem to work (try 'make error'). | ||
110 | */ | ||
111 | #define ERROR_FULL_STACK 1 // must be either 0 or 1 as we do some index arithmetic with it! | ||
112 | |||
113 | // intern the debug name in the specified lua state so that the pointer remains valid when the lane's state is closed | ||
114 | static void securize_debug_threadname( lua_State* L, Lane* s) | ||
115 | { | ||
116 | STACK_CHECK( L, 0); | ||
117 | STACK_GROW( L, 3); | ||
118 | lua_getiuservalue( L, 1, 1); | ||
119 | lua_newtable( L); | ||
120 | // Lua 5.1 can't do 's->debug_name = lua_pushstring( L, s->debug_name);' | ||
121 | lua_pushstring( L, s->debug_name); | ||
122 | s->debug_name = lua_tostring( L, -1); | ||
123 | lua_rawset( L, -3); | ||
124 | lua_pop( L, 1); | ||
125 | STACK_END( L, 0); | ||
126 | } | ||
127 | |||
128 | #if ERROR_FULL_STACK | ||
129 | static int lane_error( lua_State* L); | ||
130 | // crc64/we of string "STACKTRACE_REGKEY" generated at http://www.nitrxgen.net/hashgen/ | ||
131 | static DECLARE_CONST_UNIQUE_KEY( STACKTRACE_REGKEY, 0x534af7d3226a429f); | ||
132 | #endif // ERROR_FULL_STACK | ||
133 | |||
134 | /* | ||
135 | * registry[FINALIZER_REG_KEY] is either nil (no finalizers) or a table | ||
136 | * of functions that Lanes will call after the executing 'pcall' has ended. | ||
137 | * | ||
138 | * We're NOT using the GC system for finalizers, mainly because providing the | ||
139 | * error (and maybe stack trace) parameters to the finalizer functions would | ||
140 | * complicate that approach anyway. | ||
141 | */ | ||
142 | // crc64/we of string "FINALIZER_REGKEY" generated at http://www.nitrxgen.net/hashgen/ | ||
143 | static DECLARE_CONST_UNIQUE_KEY( FINALIZER_REGKEY, 0x188fccb8bf348e09); | ||
144 | |||
145 | struct s_Linda; | ||
146 | |||
147 | /* | ||
148 | * Push a table stored in registry onto Lua stack. | ||
149 | * | ||
150 | * If there is no existing table, create one if 'create' is TRUE. | ||
151 | * | ||
152 | * Returns: TRUE if a table was pushed | ||
153 | * FALSE if no table found, not created, and nothing pushed | ||
154 | */ | ||
155 | static bool_t push_registry_table( lua_State* L, UniqueKey key, bool_t create) | ||
156 | { | ||
157 | STACK_GROW( L, 3); | ||
158 | STACK_CHECK( L, 0); | ||
159 | |||
160 | REGISTRY_GET( L, key); // ? | ||
161 | if( lua_isnil( L, -1)) // nil? | ||
162 | { | ||
163 | lua_pop( L, 1); // | ||
164 | |||
165 | if( !create) | ||
166 | { | ||
167 | return FALSE; | ||
168 | } | ||
169 | |||
170 | lua_newtable( L); // t | ||
171 | REGISTRY_SET( L, key, lua_pushvalue( L, -2)); | ||
172 | } | ||
173 | STACK_END( L, 1); | ||
174 | return TRUE; // table pushed | ||
175 | } | ||
176 | |||
177 | #if HAVE_LANE_TRACKING() | ||
178 | |||
179 | // The chain is ended by '(Lane*)(-1)', not NULL: | ||
180 | // 'tracking_first -> ... -> ... -> (-1)' | ||
181 | #define TRACKING_END ((Lane *)(-1)) | ||
182 | |||
183 | /* | ||
184 | * Add the lane to tracking chain; the ones still running at the end of the | ||
185 | * whole process will be cancelled. | ||
186 | */ | ||
187 | static void tracking_add( Lane* s) | ||
188 | { | ||
189 | |||
190 | MUTEX_LOCK( &s->U->tracking_cs); | ||
191 | { | ||
192 | assert( s->tracking_next == NULL); | ||
193 | |||
194 | s->tracking_next = s->U->tracking_first; | ||
195 | s->U->tracking_first = s; | ||
196 | } | ||
197 | MUTEX_UNLOCK( &s->U->tracking_cs); | ||
198 | } | ||
199 | |||
200 | /* | ||
201 | * A free-running lane has ended; remove it from tracking chain | ||
202 | */ | ||
203 | static bool_t tracking_remove( Lane* s) | ||
204 | { | ||
205 | bool_t found = FALSE; | ||
206 | MUTEX_LOCK( &s->U->tracking_cs); | ||
207 | { | ||
208 | // Make sure (within the MUTEX) that we actually are in the chain | ||
209 | // still (at process exit they will remove us from chain and then | ||
210 | // cancel/kill). | ||
211 | // | ||
212 | if( s->tracking_next != NULL) | ||
213 | { | ||
214 | Lane** ref = (Lane**) &s->U->tracking_first; | ||
215 | |||
216 | while( *ref != TRACKING_END) | ||
217 | { | ||
218 | if( *ref == s) | ||
219 | { | ||
220 | *ref = s->tracking_next; | ||
221 | s->tracking_next = NULL; | ||
222 | found = TRUE; | ||
223 | break; | ||
224 | } | ||
225 | ref = (Lane**) &((*ref)->tracking_next); | ||
226 | } | ||
227 | assert( found); | ||
228 | } | ||
229 | } | ||
230 | MUTEX_UNLOCK( &s->U->tracking_cs); | ||
231 | return found; | ||
232 | } | ||
233 | |||
234 | #endif // HAVE_LANE_TRACKING() | ||
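The tracking chain is an intrusive singly-linked list whose terminator is (Lane*)(-1) rather than NULL, so that a null tracking_next can still mean "this lane is not enrolled at all", even for the last element of the chain. A minimal, self-contained model of that idiom (Node/Chain are illustrative names, not the real Lane structures):

#include <cassert>

struct Node { Node* next{ nullptr }; };                       // next == nullptr <=> not enrolled
static Node* const CHAIN_END{ reinterpret_cast<Node*>(-1) };  // same trick as TRACKING_END

struct Chain
{
    Node* first{ CHAIN_END };

    void add(Node* n_)                    // mirrors tracking_add()
    {
        assert(n_->next == nullptr);
        n_->next = first;
        first = n_;
    }

    bool remove(Node* n_)                 // mirrors tracking_remove()
    {
        if (n_->next == nullptr)          // not in the chain: nothing to do
            return false;
        for (Node** ref = &first; *ref != CHAIN_END; ref = &(*ref)->next)
        {
            if (*ref == n_)
            {
                *ref = n_->next;
                n_->next = nullptr;
                return true;
            }
        }
        return false;
    }
};

int main()
{
    Chain c;
    Node a, b;
    c.add(&a);
    c.add(&b);
    assert(c.remove(&a) && !c.remove(&a)); // second removal: 'a' is no longer enrolled
    assert(c.remove(&b) && c.first == CHAIN_END);
    return 0;
}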
235 | |||
236 | //--- | ||
237 | // low-level cleanup | ||
238 | |||
239 | static void lane_cleanup( Lane* s) | ||
240 | { | ||
241 | // Clean up after a (finished) thread | ||
242 | // | ||
243 | #if THREADWAIT_METHOD == THREADWAIT_CONDVAR | ||
244 | SIGNAL_FREE( &s->done_signal); | ||
245 | MUTEX_FREE( &s->done_lock); | ||
246 | #endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR | ||
247 | |||
248 | #if HAVE_LANE_TRACKING() | ||
249 | if( s->U->tracking_first != NULL) | ||
250 | { | ||
251 | // Lane was cleaned up, no need to handle at process termination | ||
252 | tracking_remove( s); | ||
253 | } | ||
254 | #endif // HAVE_LANE_TRACKING() | ||
255 | |||
256 | { | ||
257 | AllocatorDefinition* const allocD = &s->U->internal_allocator; | ||
258 | (void) allocD->allocF(allocD->allocUD, s, sizeof(Lane), 0); | ||
259 | } | ||
260 | } | ||
261 | |||
262 | /* | ||
263 | * ############################################################################################### | ||
264 | * ########################################## Finalizer ########################################## | ||
265 | * ############################################################################################### | ||
266 | */ | ||
267 | |||
268 | //--- | ||
269 | // void= finalizer( finalizer_func ) | ||
270 | // | ||
271 | // finalizer_func( [err, stack_tbl] ) | ||
272 | // | ||
273 | // Add a function that will be called when exiting the lane, either via | ||
274 | // normal return or an error. | ||
275 | // | ||
276 | LUAG_FUNC( set_finalizer) | ||
277 | { | ||
278 | luaL_argcheck( L, lua_isfunction( L, 1), 1, "finalizer should be a function"); | ||
279 | luaL_argcheck( L, lua_gettop( L) == 1, 1, "too many arguments"); | ||
280 | // Get the current finalizer table (if any) | ||
281 | push_registry_table( L, FINALIZER_REGKEY, TRUE /*do create if none*/); // finalizer {finalisers} | ||
282 | STACK_GROW( L, 2); | ||
283 | lua_pushinteger( L, lua_rawlen( L, -1) + 1); // finalizer {finalisers} idx | ||
284 | lua_pushvalue( L, 1); // finalizer {finalisers} idx finalizer | ||
285 | lua_rawset( L, -3); // finalizer {finalisers} | ||
286 | lua_pop( L, 2); // | ||
287 | return 0; | ||
288 | } | ||
289 | |||
290 | |||
291 | //--- | ||
292 | // Run finalizers - if any - with the given parameters | ||
293 | // | ||
294 | // If 'rc' is nonzero, error message and stack index (the latter only when ERROR_FULL_STACK == 1) are available as: | ||
295 | // [-1]: stack trace (table) | ||
296 | // [-2]: error message (any type) | ||
297 | // | ||
298 | // Returns: | ||
299 | // 0 if finalizers were run without error (or there were none) | ||
300 | // LUA_ERRxxx return code if any of the finalizers failed | ||
301 | // | ||
302 | // TBD: should we add stack trace on failing finalizer, wouldn't be hard.. | ||
303 | // | ||
304 | static void push_stack_trace( lua_State* L, int rc_, int stk_base_); | ||
305 | |||
306 | static int run_finalizers( lua_State* L, int lua_rc) | ||
307 | { | ||
308 | int finalizers_index; | ||
309 | int n; | ||
310 | int err_handler_index = 0; | ||
311 | int rc = LUA_OK; // ... | ||
312 | if( !push_registry_table( L, FINALIZER_REGKEY, FALSE)) // ... finalizers? | ||
313 | { | ||
314 | return 0; // no finalizers | ||
315 | } | ||
316 | |||
317 | STACK_GROW( L, 5); | ||
318 | |||
319 | finalizers_index = lua_gettop( L); | ||
320 | |||
321 | #if ERROR_FULL_STACK | ||
322 | lua_pushcfunction( L, lane_error); // ... finalizers lane_error | ||
323 | err_handler_index = lua_gettop( L); | ||
324 | #endif // ERROR_FULL_STACK | ||
325 | |||
326 | for( n = (int) lua_rawlen( L, finalizers_index); n > 0; -- n) | ||
327 | { | ||
328 | int args = 0; | ||
329 | lua_pushinteger( L, n); // ... finalizers lane_error n | ||
330 | lua_rawget( L, finalizers_index); // ... finalizers lane_error finalizer | ||
331 | ASSERT_L( lua_isfunction( L, -1)); | ||
332 | if( lua_rc != LUA_OK) // we have an error message and an optional stack trace at the bottom of the stack | ||
333 | { | ||
334 | ASSERT_L( finalizers_index == 2 || finalizers_index == 3); | ||
335 | //char const* err_msg = lua_tostring( L, 1); | ||
336 | lua_pushvalue( L, 1); // ... finalizers lane_error finalizer err_msg | ||
337 | // note we don't always have a stack trace for example when CANCEL_ERROR, or when we got an error that doesn't call our handler, such as LUA_ERRMEM | ||
338 | if( finalizers_index == 3) | ||
339 | { | ||
340 | lua_pushvalue( L, 2); // ... finalizers lane_error finalizer err_msg stack_trace | ||
341 | } | ||
342 | args = finalizers_index - 1; | ||
343 | } | ||
344 | |||
345 | // if no error from the main body, finalizer doesn't receive any argument, else it gets the error message and optional stack trace | ||
346 | rc = lua_pcall( L, args, 0, err_handler_index); // ... finalizers lane_error err_msg2? | ||
347 | if( rc != LUA_OK) | ||
348 | { | ||
349 | push_stack_trace( L, rc, lua_gettop( L)); | ||
350 | // If one finalizer fails, don't run the others. Return this | ||
351 | // as the 'real' error, replacing what we could have had (or not) | ||
352 | // from the actual code. | ||
353 | break; | ||
354 | } | ||
355 | // no error, proceed to next finalizer // ... finalizers lane_error | ||
356 | } | ||
357 | |||
358 | if( rc != LUA_OK) | ||
359 | { | ||
360 | // ERROR_FULL_STACK accounts for the presence of lane_error on the stack | ||
361 | int nb_err_slots = lua_gettop( L) - finalizers_index - ERROR_FULL_STACK; | ||
362 | // a finalizer generated an error, this is what we leave of the stack | ||
363 | for( n = nb_err_slots; n > 0; -- n) | ||
364 | { | ||
365 | lua_replace( L, n); | ||
366 | } | ||
367 | // leave on the stack only the error and optional stack trace produced by the error in the finalizer | ||
368 | lua_settop( L, nb_err_slots); | ||
369 | } | ||
370 | else // no error from the finalizers, make sure only the original return values from the lane body remain on the stack | ||
371 | { | ||
372 | lua_settop( L, finalizers_index - 1); | ||
373 | } | ||
374 | |||
375 | return rc; | ||
376 | } | ||
377 | |||
378 | /* | ||
379 | * ############################################################################################### | ||
380 | * ########################################### Threads ########################################### | ||
381 | * ############################################################################################### | ||
382 | */ | ||
383 | |||
384 | // | ||
385 | // Protects modifying the selfdestruct chain | ||
386 | |||
387 | #define SELFDESTRUCT_END ((Lane*)(-1)) | ||
388 | // | ||
389 | // The chain is ended by '(Lane*)(-1)', not NULL: | ||
390 | // 'selfdestruct_first -> ... -> ... -> (-1)' | ||
391 | |||
392 | /* | ||
393 | * Add the lane to selfdestruct chain; the ones still running at the end of the | ||
394 | * whole process will be cancelled. | ||
395 | */ | ||
396 | static void selfdestruct_add( Lane* s) | ||
397 | { | ||
398 | MUTEX_LOCK( &s->U->selfdestruct_cs); | ||
399 | assert( s->selfdestruct_next == NULL); | ||
400 | |||
401 | s->selfdestruct_next = s->U->selfdestruct_first; | ||
402 | s->U->selfdestruct_first= s; | ||
403 | MUTEX_UNLOCK( &s->U->selfdestruct_cs); | ||
404 | } | ||
405 | |||
406 | /* | ||
407 | * A free-running lane has ended; remove it from selfdestruct chain | ||
408 | */ | ||
409 | static bool_t selfdestruct_remove( Lane* s) | ||
410 | { | ||
411 | bool_t found = FALSE; | ||
412 | MUTEX_LOCK( &s->U->selfdestruct_cs); | ||
413 | { | ||
414 | // Make sure (within the MUTEX) that we actually are in the chain | ||
415 | // still (at process exit they will remove us from chain and then | ||
416 | // cancel/kill). | ||
417 | // | ||
418 | if( s->selfdestruct_next != NULL) | ||
419 | { | ||
420 | Lane** ref = (Lane**) &s->U->selfdestruct_first; | ||
421 | |||
422 | while( *ref != SELFDESTRUCT_END ) | ||
423 | { | ||
424 | if( *ref == s) | ||
425 | { | ||
426 | *ref = s->selfdestruct_next; | ||
427 | s->selfdestruct_next = NULL; | ||
428 | // the terminal shutdown should wait until the lane is done with its lua_close() | ||
429 | ++ s->U->selfdestructing_count; | ||
430 | found = TRUE; | ||
431 | break; | ||
432 | } | ||
433 | ref = (Lane**) &((*ref)->selfdestruct_next); | ||
434 | } | ||
435 | assert( found); | ||
436 | } | ||
437 | } | ||
438 | MUTEX_UNLOCK( &s->U->selfdestruct_cs); | ||
439 | return found; | ||
440 | } | ||
441 | |||
442 | /* | ||
443 | * Process end; cancel any still free-running threads | ||
444 | */ | ||
445 | static int universe_gc( lua_State* L) | ||
446 | { | ||
447 | Universe* U = (Universe*) lua_touserdata( L, 1); | ||
448 | |||
449 | while( U->selfdestruct_first != SELFDESTRUCT_END) // true at most once! | ||
450 | { | ||
451 | // Signal _all_ still running threads to exit (including the timer thread) | ||
452 | // | ||
453 | MUTEX_LOCK( &U->selfdestruct_cs); | ||
454 | { | ||
455 | Lane* s = U->selfdestruct_first; | ||
456 | while( s != SELFDESTRUCT_END) | ||
457 | { | ||
458 | // attempt a regular unforced hard cancel with a small timeout | ||
459 | bool_t cancelled = THREAD_ISNULL( s->thread) || (thread_cancel( L, s, CO_Hard, 0.0001, FALSE, 0.0) != CR_Timeout); | ||
460 | // if we failed, and we know the thread is waiting on a linda | ||
461 | if( cancelled == FALSE && s->status == WAITING && s->waiting_on != NULL) | ||
462 | { | ||
463 | // signal the linda to wake up the thread so that it can react to the cancel query | ||
464 | // let us hope we never land here with a pointer to a linda that has been destroyed... | ||
465 | SIGNAL_T* waiting_on = s->waiting_on; | ||
466 | //s->waiting_on = NULL; // useful, or not? | ||
467 | SIGNAL_ALL( waiting_on); | ||
468 | } | ||
469 | s = s->selfdestruct_next; | ||
470 | } | ||
471 | } | ||
472 | MUTEX_UNLOCK( &U->selfdestruct_cs); | ||
473 | |||
474 | // When noticing their cancel, the lanes will remove themselves from | ||
475 | // the selfdestruct chain. | ||
476 | |||
477 | // TBD: Not sure if Windows (multi core) will require the timed approach, | ||
478 | // or single Yield. I don't have a machine to test that (so leaving | ||
479 | // for timed approach). -- AKa 25-Oct-2008 | ||
480 | |||
481 | // OS X 10.5 (Intel) needs more to avoid segfaults. | ||
482 | // | ||
483 | // "make test" is okay. 100's of "make require" are okay. | ||
484 | // | ||
485 | // Tested on MacBook Core Duo 2GHz and 10.5.5: | ||
486 | // -- AKa 25-Oct-2008 | ||
487 | // | ||
488 | { | ||
489 | lua_Number const shutdown_timeout = lua_tonumber( L, lua_upvalueindex( 1)); | ||
490 | double const t_until = now_secs() + shutdown_timeout; | ||
491 | |||
492 | while( U->selfdestruct_first != SELFDESTRUCT_END) | ||
493 | { | ||
494 | YIELD(); // give threads time to act on their cancel | ||
495 | { | ||
496 | // count the number of cancelled threads that didn't have time to act yet | ||
497 | int n = 0; | ||
498 | double t_now = 0.0; | ||
499 | MUTEX_LOCK( &U->selfdestruct_cs); | ||
500 | { | ||
501 | Lane* s = U->selfdestruct_first; | ||
502 | while( s != SELFDESTRUCT_END) | ||
503 | { | ||
504 | if( s->cancel_request == CANCEL_HARD) | ||
505 | ++ n; | ||
506 | s = s->selfdestruct_next; | ||
507 | } | ||
508 | } | ||
509 | MUTEX_UNLOCK( &U->selfdestruct_cs); | ||
510 | // if timeout elapsed, or we know all threads have acted, stop waiting | ||
511 | t_now = now_secs(); | ||
512 | if( n == 0 || (t_now >= t_until)) | ||
513 | { | ||
514 | DEBUGSPEW_CODE( fprintf( stderr, "%d uncancelled lane(s) remain after waiting %fs at process end.\n", n, shutdown_timeout - (t_until - t_now))); | ||
515 | break; | ||
516 | } | ||
517 | } | ||
518 | } | ||
519 | } | ||
520 | |||
521 | // If some lanes are currently cleaning after themselves, wait until they are done. | ||
522 | // They are no longer listed in the selfdestruct chain, but they still have to lua_close(). | ||
523 | while( U->selfdestructing_count > 0) | ||
524 | { | ||
525 | YIELD(); | ||
526 | } | ||
527 | |||
528 | //--- | ||
529 | // Kill the still free running threads | ||
530 | // | ||
531 | if( U->selfdestruct_first != SELFDESTRUCT_END) | ||
532 | { | ||
533 | unsigned int n = 0; | ||
534 | // first thing we did was to raise the linda signals the threads were waiting on (if any) | ||
535 | // therefore, any well-behaved thread should be in CANCELLED state | ||
536 | // these are not running, and the state can be closed | ||
537 | MUTEX_LOCK( &U->selfdestruct_cs); | ||
538 | { | ||
539 | Lane* s = U->selfdestruct_first; | ||
540 | while( s != SELFDESTRUCT_END) | ||
541 | { | ||
542 | Lane* next_s = s->selfdestruct_next; | ||
543 | s->selfdestruct_next = NULL; // detach from selfdestruct chain | ||
544 | if( !THREAD_ISNULL( s->thread)) // can be NULL if previous 'soft' termination succeeded | ||
545 | { | ||
546 | THREAD_KILL( &s->thread); | ||
547 | #if THREADAPI == THREADAPI_PTHREAD | ||
548 | // pthread: make sure the thread is really stopped! | ||
549 | THREAD_WAIT( &s->thread, -1, &s->done_signal, &s->done_lock, &s->status); | ||
550 | #endif // THREADAPI == THREADAPI_PTHREAD | ||
551 | } | ||
552 | // NO lua_close() in this case because we don't know where execution of the state was interrupted | ||
553 | lane_cleanup( s); | ||
554 | s = next_s; | ||
555 | ++ n; | ||
556 | } | ||
557 | U->selfdestruct_first = SELFDESTRUCT_END; | ||
558 | } | ||
559 | MUTEX_UNLOCK( &U->selfdestruct_cs); | ||
560 | |||
561 | DEBUGSPEW_CODE( fprintf( stderr, "Killed %d lane(s) at process end.\n", n)); | ||
562 | } | ||
563 | } | ||
564 | |||
565 | // If some lanes are currently cleaning after themselves, wait until they are done. | ||
566 | // They are no longer listed in the selfdestruct chain, but they still have to lua_close(). | ||
567 | while( U->selfdestructing_count > 0) | ||
568 | { | ||
569 | YIELD(); | ||
570 | } | ||
571 | |||
572 | // necessary so that calling free_deep_prelude doesn't crash because linda_id expects a linda lightuserdata at absolute slot 1 | ||
573 | lua_settop( L, 0); | ||
574 | // no need to mutex-protect this as all threads in the universe are gone at that point | ||
575 | if( U->timer_deep != NULL) // test in case some early internal error prevented Lanes from creating the deep timer | ||
576 | { | ||
577 | -- U->timer_deep->refcount; // should be 0 now | ||
578 | free_deep_prelude( L, (DeepPrelude*) U->timer_deep); | ||
579 | U->timer_deep = NULL; | ||
580 | } | ||
581 | |||
582 | close_keepers( U); | ||
583 | |||
584 | // remove the protected allocator, if any | ||
585 | cleanup_allocator_function( U, L); | ||
586 | |||
587 | #if HAVE_LANE_TRACKING() | ||
588 | MUTEX_FREE( &U->tracking_cs); | ||
589 | #endif // HAVE_LANE_TRACKING() | ||
590 | // Linked chains handling | ||
591 | MUTEX_FREE( &U->selfdestruct_cs); | ||
592 | MUTEX_FREE( &U->require_cs); | ||
593 | // Locks for 'tools.c' inc/dec counters | ||
594 | MUTEX_FREE( &U->deep_lock); | ||
595 | MUTEX_FREE( &U->mtid_lock); | ||
596 | // universe is no longer available (nor necessary) | ||
597 | // we need to do this in case some deep userdata objects were created before Lanes was initialized, | ||
598 | // as potentially they will be garbage collected after Lanes at application shutdown | ||
599 | universe_store( L, NULL); | ||
600 | return 0; | ||
601 | } | ||
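The shutdown timeout read from the upvalue above comes from the Lua-side configuration; a minimal sketch, assuming the documented `shutdown_timeout` option of `lanes.configure`:

    -- give lingering lanes up to 2 seconds to react to the cancel request
    -- issued by universe_gc() at process shutdown (the default is much shorter)
    local lanes = require "lanes".configure{ shutdown_timeout = 2 }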
602 | |||
603 | |||
604 | //--- | ||
605 | // void = set_singlethreaded( [cores_uint=1] ) | ||
606 | // | ||
607 | // Limits the process to use only 'cores' CPU cores. To be used for performance | ||
608 | // testing on multicore devices. DEBUGGING ONLY! | ||
609 | // | ||
610 | LUAG_FUNC( set_singlethreaded) | ||
611 | { | ||
612 | lua_Integer cores = luaG_optunsigned( L, 1, 1); | ||
613 | (void) cores; // prevent "unused" warning | ||
614 | |||
615 | #ifdef PLATFORM_OSX | ||
616 | #ifdef _UTILBINDTHREADTOCPU | ||
617 | if( cores > 1) | ||
618 | { | ||
619 | return luaL_error( L, "Limiting to N>1 cores not possible"); | ||
620 | } | ||
621 | // requires 'chudInitialize()' | ||
622 | utilBindThreadToCPU(0); // # of CPU to run on (we cannot limit to 2..N CPUs?) | ||
623 | return 0; | ||
624 | #else | ||
625 | return luaL_error( L, "Not available: compile with _UTILBINDTHREADTOCPU"); | ||
626 | #endif | ||
627 | #else | ||
628 | return luaL_error( L, "not implemented"); | ||
629 | #endif | ||
630 | } | ||
631 | |||
632 | |||
633 | /* | ||
634 | * str= lane_error( error_val|str ) | ||
635 | * | ||
636 | * Called if there's an error in some lane; add call stack to error message | ||
637 | * just like 'lua.c' normally does. | ||
638 | * | ||
639 | * ".. will be called with the error message and its return value will be the | ||
640 | * message returned on the stack by lua_pcall." | ||
641 | * | ||
642 | * Note: Rather than modifying the error message itself, it would be better | ||
643 | * to provide the call stack (as string) completely separated. This would | ||
644 | * work great with non-string error values as well (current system does not). | ||
645 | * (This is NOT possible with the Lua 5.1 'lua_pcall()'; we could of course | ||
646 | * implement a Lanes-specific 'pcall' of our own that does this). TBD!!! :) | ||
647 | * --AKa 22-Jan-2009 | ||
648 | */ | ||
649 | #if ERROR_FULL_STACK | ||
650 | |||
651 | // crc64/we of string "EXTENDED_STACKTRACE_REGKEY" generated at http://www.nitrxgen.net/hashgen/ | ||
652 | static DECLARE_CONST_UNIQUE_KEY( EXTENDED_STACKTRACE_REGKEY, 0x2357c69a7c92c936); // used as registry key | ||
653 | |||
654 | LUAG_FUNC( set_error_reporting) | ||
655 | { | ||
656 | luaL_checktype(L, 1, LUA_TSTRING); | ||
657 | char const* mode = lua_tostring(L, 1); | ||
658 | bool_t const extended = (strcmp(mode, "extended") == 0); | ||
659 | bool_t const basic = (strcmp(mode, "basic") == 0); | ||
660 | if (!extended && !basic) | ||
661 | { | ||
662 | return luaL_error(L, "unsupported error reporting model %s", mode); | ||
663 | } | ||
664 | |||
665 | REGISTRY_SET( L, EXTENDED_STACKTRACE_REGKEY, lua_pushboolean( L, extended ? 1 : 0)); | ||
666 | return 0; | ||
667 | } | ||
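On the Lua side this is exposed inside a lane as the global `set_error_reporting` (tied into the state by lane_main further down); a hedged sketch of the two modes, using the documented lanes.gen/join API:

    local lanes = require "lanes".configure()
    local h = lanes.gen("*", function()
        set_error_reporting("extended")  -- "basic" (default): stack entries are "source:line" strings
                                         -- "extended": entries are tables with source/currentline/name/namewhat/what
        error("oops")
    end)()
    local _, err, trace = h:join()       -- nil, error value, stack trace table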
668 | |||
669 | static int lane_error( lua_State* L) | ||
670 | { | ||
671 | lua_Debug ar; | ||
672 | int n; | ||
673 | bool_t extended; | ||
674 | |||
675 | // error message (any type) | ||
676 | STACK_CHECK_ABS( L, 1); // some_error | ||
677 | |||
678 | // Don't do stack survey for cancelled lanes. | ||
679 | // | ||
680 | if( equal_unique_key( L, 1, CANCEL_ERROR)) | ||
681 | { | ||
682 | return 1; // just pass on | ||
683 | } | ||
684 | |||
685 | STACK_GROW( L, 3); | ||
686 | REGISTRY_GET( L, EXTENDED_STACKTRACE_REGKEY); // some_error basic|extended | ||
687 | extended = lua_toboolean( L, -1); | ||
688 | lua_pop( L, 1); // some_error | ||
689 | |||
690 | // Place stack trace at 'registry[lane_error]' for the 'lua_pcall()' | ||
691 | // caller to fetch. This bypasses the Lua 5.1 limitation of only one | ||
692 | // return value from error handler to 'lua_pcall()' caller. | ||
693 | |||
694 | // It's adequate to push stack trace as a table. This gives the receiver | ||
695 | // of the stack best means to format it to their liking. Also, it allows | ||
696 | // us to add more stack info later, if needed. | ||
697 | // | ||
698 | // table of { "sourcefile.lua:<line>", ... } | ||
699 | // | ||
700 | lua_newtable( L); // some_error {} | ||
701 | |||
702 | // Best to start from level 1, but in some cases it might be a C function | ||
703 | // and we don't get '.currentline' for that. It's okay - just keep level | ||
704 | // and table index growing separate. --AKa 22-Jan-2009 | ||
705 | // | ||
706 | for( n = 1; lua_getstack( L, n, &ar); ++ n) | ||
707 | { | ||
708 | lua_getinfo( L, extended ? "Sln" : "Sl", &ar); | ||
709 | if( extended) | ||
710 | { | ||
711 | lua_newtable( L); // some_error {} {} | ||
712 | |||
713 | lua_pushstring( L, ar.source); // some_error {} {} source | ||
714 | lua_setfield( L, -2, "source"); // some_error {} {} | ||
715 | |||
716 | lua_pushinteger( L, ar.currentline); // some_error {} {} currentline | ||
717 | lua_setfield( L, -2, "currentline"); // some_error {} {} | ||
718 | |||
719 | lua_pushstring( L, ar.name); // some_error {} {} name | ||
720 | lua_setfield( L, -2, "name"); // some_error {} {} | ||
721 | |||
722 | lua_pushstring( L, ar.namewhat); // some_error {} {} namewhat | ||
723 | lua_setfield( L, -2, "namewhat"); // some_error {} {} | ||
724 | |||
725 | lua_pushstring( L, ar.what); // some_error {} {} what | ||
726 | lua_setfield( L, -2, "what"); // some_error {} {} | ||
727 | } | ||
728 | else if( ar.currentline > 0) | ||
729 | { | ||
730 | lua_pushfstring( L, "%s:%d", ar.short_src, ar.currentline); // some_error {} "blah:blah" | ||
731 | } | ||
732 | else | ||
733 | { | ||
734 | lua_pushfstring( L, "%s:?", ar.short_src); // some_error {} "blah" | ||
735 | } | ||
736 | lua_rawseti( L, -2, (lua_Integer) n); // some_error {} | ||
737 | } | ||
738 | |||
739 | REGISTRY_SET( L, STACKTRACE_REGKEY, lua_insert( L, -2)); // some_error | ||
740 | |||
741 | STACK_END( L, 1); | ||
742 | return 1; // the untouched error value | ||
743 | } | ||
744 | #endif // ERROR_FULL_STACK | ||
745 | |||
746 | static void push_stack_trace( lua_State* L, int rc_, int stk_base_) | ||
747 | { | ||
748 | // Lua 5.1 error handler is limited to one return value; it stored the stack trace in the registry | ||
749 | switch( rc_) | ||
750 | { | ||
751 | case LUA_OK: // no error, body return values are on the stack | ||
752 | break; | ||
753 | |||
754 | case LUA_ERRRUN: // cancellation or a runtime error | ||
755 | #if ERROR_FULL_STACK // when ERROR_FULL_STACK, we installed a handler | ||
756 | { | ||
757 | STACK_CHECK( L, 0); | ||
758 | // fetch the call stack table from the registry where the handler stored it | ||
759 | STACK_GROW( L, 1); | ||
760 | // yields nil if no stack was generated (in case of cancellation for example) | ||
761 | REGISTRY_GET( L, STACKTRACE_REGKEY); // err trace|nil | ||
762 | STACK_END( L, 1); | ||
763 | |||
764 | // For cancellation the error message is CANCEL_ERROR, and a stack trace isn't placed | ||
765 | // For other errors, the message can be whatever was thrown, and we should have a stack trace table | ||
766 | ASSERT_L( lua_type( L, 1 + stk_base_) == (equal_unique_key( L, stk_base_, CANCEL_ERROR) ? LUA_TNIL : LUA_TTABLE)); | ||
767 | // Just leaving the stack trace table on the stack is enough to get it through to the master. | ||
768 | break; | ||
769 | } | ||
770 | #endif // fall through if not ERROR_FULL_STACK | ||
771 | |||
772 | case LUA_ERRMEM: // memory allocation error (handler not called) | ||
773 | case LUA_ERRERR: // error while running the error handler (if any, for example an out-of-memory condition) | ||
774 | default: | ||
775 | // we should have a single value which is either a string (the error message) or CANCEL_ERROR | ||
776 | ASSERT_L( (lua_gettop( L) == stk_base_) && ((lua_type( L, stk_base_) == LUA_TSTRING) || equal_unique_key( L, stk_base_, CANCEL_ERROR))); | ||
777 | break; | ||
778 | } | ||
779 | } | ||
780 | |||
781 | LUAG_FUNC( set_debug_threadname) | ||
782 | { | ||
783 | // fnv164 of string "debug_threadname" generated at https://www.pelock.com/products/hash-calculator | ||
784 | static DECLARE_CONST_UNIQUE_KEY( hidden_regkey, 0x79C0669AAAE04440); | ||
785 | // C s_lane structure is a light userdata upvalue | ||
786 | Lane* s = lua_touserdata( L, lua_upvalueindex( 1)); | ||
787 | luaL_checktype( L, -1, LUA_TSTRING); // "name" | ||
788 | lua_settop( L, 1); | ||
789 | STACK_CHECK_ABS( L, 1); | ||
790 | // store a hidden reference in the registry to make sure the string is kept around even if a lane decides to manually change the "decoda_name" global... | ||
791 | REGISTRY_SET( L, hidden_regkey, lua_pushvalue( L, -2)); | ||
792 | STACK_MID( L, 1); | ||
793 | s->debug_name = lua_tostring( L, -1); | ||
794 | // keep a direct pointer on the string | ||
795 | THREAD_SETNAME( s->debug_name); | ||
796 | // to see VM name in Decoda debugger Virtual Machine window | ||
797 | lua_setglobal( L, "decoda_name"); // | ||
798 | STACK_END( L, 0); | ||
799 | return 0; | ||
800 | } | ||
801 | |||
802 | LUAG_FUNC( get_debug_threadname) | ||
803 | { | ||
804 | Lane* const s = lua_toLane( L, 1); | ||
805 | luaL_argcheck( L, lua_gettop( L) == 1, 2, "too many arguments"); | ||
806 | lua_pushstring( L, s->debug_name); | ||
807 | return 1; | ||
808 | } | ||
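A short usage sketch for these two entry points: `set_debug_threadname` is a global inside the lane, and the reader is assumed here to be reachable as the documented `lane:get_debug_threadname()` handle method:

    local lanes = require "lanes".configure()
    local h = lanes.gen("*", function()
        set_debug_threadname("worker #1")   -- shows up in native debuggers and in lanes.threads()
        return true
    end)()
    h:join()
    print(h:get_debug_threadname())          -- "worker #1"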
809 | |||
810 | LUAG_FUNC( set_thread_priority) | ||
811 | { | ||
812 | int const prio = (int) luaL_checkinteger( L, 1); | ||
813 | // public Lanes API accepts a generic range -3/+3 | ||
814 | // that will be remapped into the platform-specific scheduler priority scheme | ||
815 | // On some platforms, -3 is equivalent to -2 and +3 to +2 | ||
816 | if( prio < THREAD_PRIO_MIN || prio > THREAD_PRIO_MAX) | ||
817 | { | ||
818 | return luaL_error( L, "priority out of range: %d..+%d (%d)", THREAD_PRIO_MIN, THREAD_PRIO_MAX, prio); | ||
819 | } | ||
820 | THREAD_SET_PRIORITY( prio); | ||
821 | return 0; | ||
822 | } | ||
823 | |||
824 | LUAG_FUNC( set_thread_affinity) | ||
825 | { | ||
826 | lua_Integer affinity = luaL_checkinteger( L, 1); | ||
827 | if( affinity <= 0) | ||
828 | { | ||
829 | return luaL_error( L, "invalid affinity (%d)", affinity); | ||
830 | } | ||
831 | THREAD_SET_AFFINITY( (unsigned int) affinity); | ||
832 | return 0; | ||
833 | } | ||
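Both functions act on the calling thread; they are assumed here to be exposed as the documented `lanes.set_thread_priority` and `lanes.set_thread_affinity` module functions. A sketch from the master state (lanes themselves usually get their priority through the `priority` option of lanes.gen instead):

    local lanes = require "lanes".configure()
    lanes.set_thread_priority(2)   -- generic -3..+3 range, remapped to the platform scheduler
    lanes.set_thread_affinity(1)   -- strictly positive integer, passed to the OS as-is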
834 | |||
835 | #if USE_DEBUG_SPEW() | ||
836 | // can't use direct LUA_x errcode indexing because the sequence is not the same between Lua 5.1 and 5.2 :-( | ||
837 | // LUA_ERRERR doesn't have the same value | ||
838 | struct errcode_name | ||
839 | { | ||
840 | int code; | ||
841 | char const* name; | ||
842 | }; | ||
843 | |||
844 | static struct errcode_name s_errcodes[] = | ||
845 | { | ||
846 | { LUA_OK, "LUA_OK"}, | ||
847 | { LUA_YIELD, "LUA_YIELD"}, | ||
848 | { LUA_ERRRUN, "LUA_ERRRUN"}, | ||
849 | { LUA_ERRSYNTAX, "LUA_ERRSYNTAX"}, | ||
850 | { LUA_ERRMEM, "LUA_ERRMEM"}, | ||
851 | { LUA_ERRGCMM, "LUA_ERRGCMM"}, | ||
852 | { LUA_ERRERR, "LUA_ERRERR"}, | ||
853 | }; | ||
854 | static char const* get_errcode_name( int _code) | ||
855 | { | ||
856 | int i; | ||
857 | for( i = 0; i < 7; ++ i) | ||
858 | { | ||
859 | if( s_errcodes[i].code == _code) | ||
860 | { | ||
861 | return s_errcodes[i].name; | ||
862 | } | ||
863 | } | ||
864 | return "<NULL>"; | ||
865 | } | ||
866 | #endif // USE_DEBUG_SPEW() | ||
867 | |||
868 | #if THREADWAIT_METHOD == THREADWAIT_CONDVAR // implies THREADAPI == THREADAPI_PTHREAD | ||
869 | static void thread_cleanup_handler( void* opaque) | ||
870 | { | ||
871 | Lane* s= (Lane*) opaque; | ||
872 | MUTEX_LOCK( &s->done_lock); | ||
873 | s->status = CANCELLED; | ||
874 | SIGNAL_ONE( &s->done_signal); // wake up master (while 's->done_lock' is on) | ||
875 | MUTEX_UNLOCK( &s->done_lock); | ||
876 | } | ||
877 | #endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR | ||
878 | |||
879 | static THREAD_RETURN_T THREAD_CALLCONV lane_main( void* vs) | ||
880 | { | ||
881 | Lane* s = (Lane*) vs; | ||
882 | int rc, rc2; | ||
883 | lua_State* L = s->L; | ||
884 | // Called with the lane function and arguments on the stack | ||
885 | int const nargs = lua_gettop( L) - 1; | ||
886 | DEBUGSPEW_CODE( Universe* U = universe_get( L)); | ||
887 | THREAD_MAKE_ASYNCH_CANCELLABLE(); | ||
888 | THREAD_CLEANUP_PUSH( thread_cleanup_handler, s); | ||
889 | s->status = RUNNING; // PENDING -> RUNNING | ||
890 | |||
891 | // Tie "set_finalizer()" to the state | ||
892 | lua_pushcfunction( L, LG_set_finalizer); | ||
893 | populate_func_lookup_table( L, -1, "set_finalizer"); | ||
894 | lua_setglobal( L, "set_finalizer"); | ||
895 | |||
896 | // Tie "set_debug_threadname()" to the state | ||
897 | // But don't register it in the lookup database because of the s_lane pointer upvalue | ||
898 | lua_pushlightuserdata( L, s); | ||
899 | lua_pushcclosure( L, LG_set_debug_threadname, 1); | ||
900 | lua_setglobal( L, "set_debug_threadname"); | ||
901 | |||
902 | // Tie "cancel_test()" to the state | ||
903 | lua_pushcfunction( L, LG_cancel_test); | ||
904 | populate_func_lookup_table( L, -1, "cancel_test"); | ||
905 | lua_setglobal( L, "cancel_test"); | ||
906 | |||
907 | // this could be done in lane_new before the lane body function is pushed on the stack to avoid unnecessary stack slot shifting around | ||
908 | #if ERROR_FULL_STACK | ||
909 | // Tie "set_error_reporting()" to the state | ||
910 | lua_pushcfunction( L, LG_set_error_reporting); | ||
911 | populate_func_lookup_table( L, -1, "set_error_reporting"); | ||
912 | lua_setglobal( L, "set_error_reporting"); | ||
913 | |||
914 | STACK_GROW( L, 1); | ||
915 | lua_pushcfunction( L, lane_error); // func args handler | ||
916 | lua_insert( L, 1); // handler func args | ||
917 | #endif // ERROR_FULL_STACK | ||
918 | |||
919 | rc = lua_pcall( L, nargs, LUA_MULTRET, ERROR_FULL_STACK); // retvals|err | ||
920 | |||
921 | #if ERROR_FULL_STACK | ||
922 | lua_remove( L, 1); // retvals|error | ||
923 | # endif // ERROR_FULL_STACK | ||
924 | |||
925 | // in case of error and if it exists, fetch stack trace from registry and push it | ||
926 | push_stack_trace( L, rc, 1); // retvals|error [trace] | ||
927 | |||
928 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "Lane %p body: %s (%s)\n" INDENT_END, L, get_errcode_name( rc), equal_unique_key( L, 1, CANCEL_ERROR) ? "cancelled" : lua_typename( L, lua_type( L, 1)))); | ||
929 | //STACK_DUMP(L); | ||
930 | // Call finalizers, if the script has set them up. | ||
931 | // | ||
932 | rc2 = run_finalizers( L, rc); | ||
933 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "Lane %p finalizer: %s\n" INDENT_END, L, get_errcode_name( rc2))); | ||
934 | if( rc2 != LUA_OK) // Error within a finalizer! | ||
935 | { | ||
936 | // the finalizer generated an error, and left its own error message [and stack trace] on the stack | ||
937 | rc = rc2; // we're overruling the earlier script error or normal return | ||
938 | } | ||
939 | s->waiting_on = NULL; // just in case | ||
940 | if( selfdestruct_remove( s)) // check and remove (under lock!) | ||
941 | { | ||
942 | // We're a free-running thread and no-one's there to clean us up. | ||
943 | // | ||
944 | lua_close( s->L); | ||
945 | |||
946 | MUTEX_LOCK( &s->U->selfdestruct_cs); | ||
947 | // done with lua_close(), terminal shutdown sequence may proceed | ||
948 | -- s->U->selfdestructing_count; | ||
949 | MUTEX_UNLOCK( &s->U->selfdestruct_cs); | ||
950 | |||
951 | lane_cleanup( s); // s is freed at this point | ||
952 | } | ||
953 | else | ||
954 | { | ||
955 | // leave results (1..top) or error message + stack trace (1..2) on the stack - master will copy them | ||
956 | |||
957 | enum e_status st = (rc == 0) ? DONE : equal_unique_key( L, 1, CANCEL_ERROR) ? CANCELLED : ERROR_ST; | ||
958 | |||
959 | // Posix no PTHREAD_TIMEDJOIN: | ||
960 | // 'done_lock' protects the -> DONE|ERROR_ST|CANCELLED state change | ||
961 | // | ||
962 | #if THREADWAIT_METHOD == THREADWAIT_CONDVAR | ||
963 | MUTEX_LOCK( &s->done_lock); | ||
964 | { | ||
965 | #endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR | ||
966 | s->status = st; | ||
967 | #if THREADWAIT_METHOD == THREADWAIT_CONDVAR | ||
968 | SIGNAL_ONE( &s->done_signal); // wake up master (while 's->done_lock' is on) | ||
969 | } | ||
970 | MUTEX_UNLOCK( &s->done_lock); | ||
971 | #endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR | ||
972 | } | ||
973 | THREAD_CLEANUP_POP( FALSE); | ||
974 | return 0; // ignored | ||
975 | } | ||
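lane_main() above ties `set_finalizer`, `set_debug_threadname` and `cancel_test` into the lane's globals before running the body; a sketch of a lane that polls `cancel_test()` so a soft cancel can stop it (cancel argument forms as per the docs):

    local lanes = require "lanes".configure()
    local h = lanes.gen("*", function()
        set_debug_threadname("poller")
        while not cancel_test() do
            -- do a slice of work, then check for a pending cancellation request
        end
        return "stopped"
    end)()
    h:cancel("soft", 0.1)   -- request a soft cancel, wait briefly for the lane to notice
    print(h:join())         -- "stopped"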
976 | |||
977 | // --- If a client wants to transfer functions of a given module from the current state to another lane, the module must be required | ||
978 | // with lanes.require, which calls the regular 'require' and then populates the lookup database in the source lane | ||
979 | // module = lanes.require( "modname") | ||
980 | // upvalue[1]: _G.require | ||
981 | LUAG_FUNC( require) | ||
982 | { | ||
983 | char const* name = lua_tostring( L, 1); | ||
984 | int const nargs = lua_gettop( L); | ||
985 | DEBUGSPEW_CODE( Universe* U = universe_get( L)); | ||
986 | STACK_CHECK( L, 0); | ||
987 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.require %s BEGIN\n" INDENT_END, name)); | ||
988 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); | ||
989 | lua_pushvalue( L, lua_upvalueindex(1)); // "name" require | ||
990 | lua_insert( L, 1); // require "name" | ||
991 | lua_call( L, nargs, 1); // module | ||
992 | populate_func_lookup_table( L, -1, name); | ||
993 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.require %s END\n" INDENT_END, name)); | ||
994 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); | ||
995 | STACK_END( L, 0); | ||
996 | return 1; | ||
997 | } | ||
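Lua-side usage sketch: requiring a binary module through `lanes.require` records its functions in the lookup database so that values referencing them can later be transferred into a lane; the module name "lfs" is purely illustrative:

    local lanes = require "lanes".configure()
    local lfs = lanes.require "lfs"            -- regular require + lookup database population
    local h = lanes.gen("*", { required = { "lfs" } }, function(dir)
        -- lfs is an upvalue holding C functions: transferable because both the
        -- source state (lanes.require) and this lane (required list) know them
        return lfs.attributes(dir, "mode")
    end)(".")
    print(h[1])                                 -- "directory"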
998 | |||
999 | |||
1000 | // --- If a client wants to transfer functions of a previously required module from the current state to another lane, the module must be registered | ||
1001 | // to populate the lookup database in the source lane (and in the destination too, of course) | ||
1002 | // lanes.register( "modname", module) | ||
1003 | LUAG_FUNC( register) | ||
1004 | { | ||
1005 | char const* name = luaL_checkstring( L, 1); | ||
1006 | int const mod_type = lua_type( L, 2); | ||
1007 | // ignore extra parameters, just in case | ||
1008 | lua_settop( L, 2); | ||
1009 | luaL_argcheck( L, (mod_type == LUA_TTABLE) || (mod_type == LUA_TFUNCTION), 2, "unexpected module type"); | ||
1010 | DEBUGSPEW_CODE( Universe* U = universe_get( L)); | ||
1011 | STACK_CHECK( L, 0); // "name" mod_table | ||
1012 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.register %s BEGIN\n" INDENT_END, name)); | ||
1013 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); | ||
1014 | populate_func_lookup_table( L, -1, name); | ||
1015 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.register %s END\n" INDENT_END, name)); | ||
1016 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); | ||
1017 | STACK_END( L, 0); | ||
1018 | return 0; | ||
1019 | } | ||
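The companion sketch for modules that were already required the normal way ("socket" is only an illustrative name):

    local lanes = require "lanes".configure()
    local socket = require "socket"       -- required without lanes.require...
    lanes.register("socket", socket)      -- ...so register it after the fact to populate the lookup database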
1020 | |||
1021 | // crc64/we of string "GCCB_KEY" generated at http://www.nitrxgen.net/hashgen/ | ||
1022 | static DECLARE_CONST_UNIQUE_KEY( GCCB_KEY, 0xcfb1f046ef074e88); | ||
1023 | |||
1024 | //--- | ||
1025 | // lane_ud = lane_new( function | ||
1026 | // , [libs_str] | ||
1027 | // , [priority_int=0] | ||
1028 | // , [globals_tbl] | ||
1029 | // , [package_tbl] | ||
1030 | // , [required_tbl] | ||
1031 | // , [gc_cb_func] | ||
1032 | // [, ... args ...]) | ||
1033 | // | ||
1034 | // Upvalues: metatable to use for 'lane_ud' | ||
1035 | // | ||
1036 | LUAG_FUNC( lane_new) | ||
1037 | { | ||
1038 | lua_State* L2; | ||
1039 | Lane* s; | ||
1040 | Lane** ud; | ||
1041 | |||
1042 | char const* libs_str = lua_tostring( L, 2); | ||
1043 | bool_t const have_priority = !lua_isnoneornil( L, 3); | ||
1044 | int const priority = have_priority ? (int) lua_tointeger( L, 3) : THREAD_PRIO_DEFAULT; | ||
1045 | int const globals_idx = lua_isnoneornil( L, 4) ? 0 : 4; | ||
1046 | int const package_idx = lua_isnoneornil( L, 5) ? 0 : 5; | ||
1047 | int const required_idx = lua_isnoneornil( L, 6) ? 0 : 6; | ||
1048 | int const gc_cb_idx = lua_isnoneornil( L, 7) ? 0 : 7; | ||
1049 | |||
1050 | #define FIXED_ARGS 7 | ||
1051 | int const nargs = lua_gettop(L) - FIXED_ARGS; | ||
1052 | Universe* const U = universe_get( L); | ||
1053 | ASSERT_L( nargs >= 0); | ||
1054 | |||
1055 | // public Lanes API accepts a generic range -3/+3 | ||
1056 | // that will be remapped into the platform-specific scheduler priority scheme | ||
1057 | // On some platforms, -3 is equivalent to -2 and +3 to +2 | ||
1058 | if( have_priority && (priority < THREAD_PRIO_MIN || priority > THREAD_PRIO_MAX)) | ||
1059 | { | ||
1060 | return luaL_error( L, "Priority out of range: %d..+%d (%d)", THREAD_PRIO_MIN, THREAD_PRIO_MAX, priority); | ||
1061 | } | ||
1062 | |||
1063 | /* --- Create and prepare the sub state --- */ | ||
1064 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: setup\n" INDENT_END)); | ||
1065 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); | ||
1066 | |||
1067 | // populate with selected libraries at the same time | ||
1068 | L2 = luaG_newstate( U, L, libs_str); // L // L2 | ||
1069 | |||
1070 | STACK_GROW( L2, nargs + 3); // | ||
1071 | STACK_CHECK( L2, 0); | ||
1072 | |||
1073 | STACK_GROW( L, 3); // func libs priority globals package required gc_cb [... args ...] | ||
1074 | STACK_CHECK( L, 0); | ||
1075 | |||
1076 | // give a default "Lua" name to the thread to see VM name in Decoda debugger | ||
1077 | lua_pushfstring( L2, "Lane #%p", L2); // "..." | ||
1078 | lua_setglobal( L2, "decoda_name"); // | ||
1079 | ASSERT_L( lua_gettop( L2) == 0); | ||
1080 | |||
1081 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: update 'package'\n" INDENT_END)); | ||
1082 | // package | ||
1083 | if( package_idx != 0) | ||
1084 | { | ||
1085 | // when copying with mode eLM_LaneBody, should raise an error in case of a problem, not leave it on the stack | ||
1086 | InterCopyResult const ret = luaG_inter_copy_package( U, L, L2, package_idx, eLM_LaneBody); | ||
1087 | ASSERT_L(ret == eICR_Success); // either all went well, or we should not even get here | ||
1088 | } | ||
1089 | |||
1090 | // modules to require in the target lane *before* the function is transferred! | ||
1091 | |||
1092 | if( required_idx != 0) | ||
1093 | { | ||
1094 | int nbRequired = 1; | ||
1095 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: require 'required' list\n" INDENT_END)); | ||
1096 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); | ||
1097 | // should not happen, was checked in lanes.lua before calling lane_new() | ||
1098 | if( lua_type( L, required_idx) != LUA_TTABLE) | ||
1099 | { | ||
1100 | return luaL_error( L, "expected required module list as a table, got %s", luaL_typename( L, required_idx)); | ||
1101 | } | ||
1102 | |||
1103 | lua_pushnil( L); // func libs priority globals package required gc_cb [... args ...] nil | ||
1104 | while( lua_next( L, required_idx) != 0) // func libs priority globals package required gc_cb [... args ...] n "modname" | ||
1105 | { | ||
1106 | if( lua_type( L, -1) != LUA_TSTRING || lua_type( L, -2) != LUA_TNUMBER || lua_tonumber( L, -2) != nbRequired) | ||
1107 | { | ||
1108 | return luaL_error( L, "required module list should be a list of strings"); | ||
1109 | } | ||
1110 | else | ||
1111 | { | ||
1112 | // require the module in the target state, and populate the lookup table there too | ||
1113 | size_t len; | ||
1114 | char const* name = lua_tolstring( L, -1, &len); | ||
1115 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: require '%s'\n" INDENT_END, name)); | ||
1116 | |||
1117 | // require the module in the target lane | ||
1118 | lua_getglobal( L2, "require"); // require()? | ||
1119 | if( lua_isnil( L2, -1)) | ||
1120 | { | ||
1121 | lua_pop( L2, 1); // | ||
1122 | luaL_error( L, "cannot pre-require modules without loading 'package' library first"); | ||
1123 | } | ||
1124 | else | ||
1125 | { | ||
1126 | lua_pushlstring( L2, name, len); // require() name | ||
1127 | if( lua_pcall( L2, 1, 1, 0) != LUA_OK) // ret/errcode | ||
1128 | { | ||
1129 | // propagate error to main state if any | ||
1130 | luaG_inter_move( U, L2, L, 1, eLM_LaneBody); // func libs priority globals package required gc_cb [... args ...] n "modname" error | ||
1131 | return lua_error( L); | ||
1132 | } | ||
1133 | // after requiring the module, register the functions it exported in our name<->function database | ||
1134 | populate_func_lookup_table( L2, -1, name); | ||
1135 | lua_pop( L2, 1); // | ||
1136 | } | ||
1137 | } | ||
1138 | lua_pop( L, 1); // func libs priority globals package required gc_cb [... args ...] n | ||
1139 | ++ nbRequired; | ||
1140 | } // func libs priority globals package required gc_cb [... args ...] | ||
1141 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); | ||
1142 | } | ||
1143 | STACK_MID( L, 0); | ||
1144 | STACK_MID( L2, 0); // | ||
1145 | |||
1146 | // Appending the specified globals to the global environment | ||
1147 | // *after* stdlibs have been loaded and modules required, in case we transfer references to native functions they exposed... | ||
1148 | // | ||
1149 | if( globals_idx != 0) | ||
1150 | { | ||
1151 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: transfer globals\n" INDENT_END)); | ||
1152 | if( !lua_istable( L, globals_idx)) | ||
1153 | { | ||
1154 | return luaL_error( L, "Expected table, got %s", luaL_typename( L, globals_idx)); | ||
1155 | } | ||
1156 | |||
1157 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); | ||
1158 | lua_pushnil( L); // func libs priority globals package required gc_cb [... args ...] nil | ||
1159 | // Lua 5.2 wants us to push the globals table on the stack | ||
1160 | lua_pushglobaltable( L2); // _G | ||
1161 | while( lua_next( L, globals_idx)) // func libs priority globals package required gc_cb [... args ...] k v | ||
1162 | { | ||
1163 | luaG_inter_copy( U, L, L2, 2, eLM_LaneBody); // _G k v | ||
1164 | // assign it in L2's globals table | ||
1165 | lua_rawset( L2, -3); // _G | ||
1166 | lua_pop( L, 1); // func libs priority globals package required gc_cb [... args ...] k | ||
1167 | } // func libs priority globals package required gc_cb [... args ...] | ||
1168 | lua_pop( L2, 1); // | ||
1169 | |||
1170 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); | ||
1171 | } | ||
1172 | STACK_MID( L, 0); | ||
1173 | STACK_MID( L2, 0); | ||
1174 | |||
1175 | // Lane main function | ||
1176 | { | ||
1177 | int const func_type = lua_type(L, 1); | ||
1178 | if (func_type == LUA_TFUNCTION) | ||
1179 | { | ||
1180 | InterCopyResult res; | ||
1181 | DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lane_new: transfer lane body\n" INDENT_END)); | ||
1182 | DEBUGSPEW_CODE(++U->debugspew_indent_depth); | ||
1183 | lua_pushvalue(L, 1); // func libs priority globals package required gc_cb [... args ...] func | ||
1184 | res = luaG_inter_move(U, L, L2, 1, eLM_LaneBody); // func libs priority globals package required gc_cb [... args ...] // func | ||
1185 | DEBUGSPEW_CODE(--U->debugspew_indent_depth); | ||
1186 | if (res != eICR_Success) | ||
1187 | { | ||
1188 | return luaL_error(L, "tried to copy unsupported types"); | ||
1189 | } | ||
1190 | } | ||
1191 | else if (func_type == LUA_TSTRING) | ||
1192 | { | ||
1193 | // compile the string | ||
1194 | if (luaL_loadstring(L2, lua_tostring(L, 1)) != 0) // func | ||
1195 | { | ||
1196 | return luaL_error(L, "error when parsing lane function code"); | ||
1197 | } | ||
1198 | } | ||
1199 | else | ||
1200 | { | ||
1201 | luaL_error(L, "Expected function, got %s", lua_typename(L, func_type)); // doesn't return | ||
1202 | } | ||
1203 | } | ||
1204 | STACK_MID( L, 0); | ||
1205 | STACK_MID( L2, 1); | ||
1206 | ASSERT_L( lua_isfunction( L2, 1)); | ||
1207 | |||
1208 | // revive arguments | ||
1209 | if( nargs > 0) | ||
1210 | { | ||
1211 | InterCopyResult res; | ||
1212 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: transfer lane arguments\n" INDENT_END)); | ||
1213 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); | ||
1214 | res = luaG_inter_move( U, L, L2, nargs, eLM_LaneBody); // func libs priority globals package required gc_cb // func [... args ...] | ||
1215 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); | ||
1216 | if( res != eICR_Success) | ||
1217 | { | ||
1218 | return luaL_error( L, "tried to copy unsupported types"); | ||
1219 | } | ||
1220 | } | ||
1221 | STACK_END( L, -nargs); | ||
1222 | ASSERT_L( lua_gettop( L) == FIXED_ARGS); | ||
1223 | STACK_CHECK( L, 0); | ||
1224 | STACK_MID( L2, 1 + nargs); | ||
1225 | |||
1226 | // 's' is allocated from heap, not Lua, since its life span may surpass the handle's (if free running thread) | ||
1227 | // | ||
1228 | // a Lane full userdata needs a single uservalue | ||
1229 | ud = lua_newuserdatauv( L, sizeof( Lane*), 1); // func libs priority globals package required gc_cb lane | ||
1230 | { | ||
1231 | AllocatorDefinition* const allocD = &U->internal_allocator; | ||
1232 | s = *ud = (Lane*) allocD->allocF(allocD->allocUD, NULL, 0, sizeof(Lane)); | ||
1233 | } | ||
1234 | if( s == NULL) | ||
1235 | { | ||
1236 | return luaL_error( L, "could not create lane: out of memory"); | ||
1237 | } | ||
1238 | |||
1239 | s->L = L2; | ||
1240 | s->U = U; | ||
1241 | s->status = PENDING; | ||
1242 | s->waiting_on = NULL; | ||
1243 | s->debug_name = "<unnamed>"; | ||
1244 | s->cancel_request = CANCEL_NONE; | ||
1245 | |||
1246 | #if THREADWAIT_METHOD == THREADWAIT_CONDVAR | ||
1247 | MUTEX_INIT( &s->done_lock); | ||
1248 | SIGNAL_INIT( &s->done_signal); | ||
1249 | #endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR | ||
1250 | s->mstatus = NORMAL; | ||
1251 | s->selfdestruct_next = NULL; | ||
1252 | #if HAVE_LANE_TRACKING() | ||
1253 | s->tracking_next = NULL; | ||
1254 | if( s->U->tracking_first) | ||
1255 | { | ||
1256 | tracking_add( s); | ||
1257 | } | ||
1258 | #endif // HAVE_LANE_TRACKING() | ||
1259 | |||
1260 | // Set metatable for the userdata | ||
1261 | // | ||
1262 | lua_pushvalue( L, lua_upvalueindex( 1)); // func libs priority globals package required gc_cb lane mt | ||
1263 | lua_setmetatable( L, -2); // func libs priority globals package required gc_cb lane | ||
1264 | STACK_MID( L, 1); | ||
1265 | |||
1266 | // Create uservalue for the userdata | ||
1267 | // (this is where lane body return values will be stored when the handle is indexed by a numeric key) | ||
1268 | lua_newtable( L); // func libs priority globals package required gc_cb lane uv | ||
1269 | |||
1270 | // Store the gc_cb callback in the uservalue | ||
1271 | if( gc_cb_idx > 0) | ||
1272 | { | ||
1273 | push_unique_key( L, GCCB_KEY); // func libs priority globals package required gc_cb lane uv k | ||
1274 | lua_pushvalue( L, gc_cb_idx); // func libs priority globals package required gc_cb lane uv k gc_cb | ||
1275 | lua_rawset( L, -3); // func libs priority globals package required gc_cb lane uv | ||
1276 | } | ||
1277 | |||
1278 | lua_setiuservalue( L, -2, 1); // func libs priority globals package required gc_cb lane | ||
1279 | |||
1280 | // Store 's' in the lane's registry, for 'cancel_test()' (we do cancel tests at pending send/receive). | ||
1281 | REGISTRY_SET( L2, LANE_POINTER_REGKEY, lua_pushlightuserdata( L2, s)); // func [... args ...] | ||
1282 | |||
1283 | STACK_END( L, 1); | ||
1284 | STACK_END( L2, 1 + nargs); | ||
1285 | |||
1286 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: launching thread\n" INDENT_END)); | ||
1287 | THREAD_CREATE( &s->thread, lane_main, s, priority); | ||
1288 | |||
1289 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); | ||
1290 | return 1; | ||
1291 | } | ||
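lane_new() is reached through the Lua-side generator; a sketch showing how the fixed arguments above (libs, priority, globals, gc_cb) map onto the documented `lanes.gen` call, with the remaining values becoming the lane body's arguments:

    local lanes = require "lanes".configure()
    local gen = lanes.gen("base,string",                    -- libs_str
    {
        priority = 1,                                       -- -3..+3, see the range check above
        globals  = { role = "worker" },                     -- appended to the lane's _G
        gc_cb    = function(name, status) print(name, status) end,
    },
    function(a, b)                                          -- lane body (or a source string)
        return a + b, role
    end)
    local h = gen(2, 3)                                     -- [... args ...]
    print(h[1], h[2])                                       -- 5   "worker"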
1292 | |||
1293 | |||
1294 | //--- | ||
1295 | // = thread_gc( lane_ud ) | ||
1296 | // | ||
1297 | // Cleanup for a thread userdata. If the thread is still executing, leave it | ||
1298 | // alive as a free-running thread (will clean up itself). | ||
1299 | // | ||
1300 | // * Why NOT cancel/kill a loose thread: | ||
1301 | // | ||
1302 | // At least the timer system uses a free-running thread, so such lanes should be left alone, | ||
1303 | // and cancelling/killing threads at gc is not very nice either | ||
1304 | // (it would easily cause waits during the gc cycle, which we don't want). | ||
1305 | // | ||
1306 | LUAG_FUNC( thread_gc) | ||
1307 | { | ||
1308 | bool_t have_gc_cb = FALSE; | ||
1309 | Lane* s = lua_toLane( L, 1); // ud | ||
1310 | |||
1311 | // is there a gc callback? | ||
1312 | lua_getiuservalue( L, 1, 1); // ud uservalue | ||
1313 | push_unique_key( L, GCCB_KEY); // ud uservalue __gc | ||
1314 | lua_rawget( L, -2); // ud uservalue gc_cb|nil | ||
1315 | if( !lua_isnil( L, -1)) | ||
1316 | { | ||
1317 | lua_remove( L, -2); // ud gc_cb|nil | ||
1318 | lua_pushstring( L, s->debug_name); // ud gc_cb name | ||
1319 | have_gc_cb = TRUE; | ||
1320 | } | ||
1321 | else | ||
1322 | { | ||
1323 | lua_pop( L, 2); // ud | ||
1324 | } | ||
1325 | |||
1326 | // We can read 's->status' without locks, but not wait for it | ||
1327 | // test KILLED state first, as it doesn't need to enter the selfdestruct chain | ||
1328 | if( s->mstatus == KILLED) | ||
1329 | { | ||
1330 | // Make sure a kill has proceeded, before cleaning up the data structure. | ||
1331 | // | ||
1332 | // NO lua_close() in this case because we don't know where execution of the state was interrupted | ||
1333 | DEBUGSPEW_CODE( fprintf( stderr, "** Joining with a killed thread (needs testing) **")); | ||
1334 | // make sure the thread is no longer running, just like thread_join() | ||
1335 | if(! THREAD_ISNULL( s->thread)) | ||
1336 | { | ||
1337 | THREAD_WAIT( &s->thread, -1, &s->done_signal, &s->done_lock, &s->status); | ||
1338 | } | ||
1339 | if( s->status >= DONE && s->L) | ||
1340 | { | ||
1341 | // we know the thread was killed while the Lua VM was not doing anything: we should be able to close it without crashing | ||
1342 | // now, thread_cancel() will not forcefully kill a lane with s->status >= DONE, so I am not sure it can ever happen | ||
1343 | lua_close( s->L); | ||
1344 | s->L = 0; | ||
1345 | // just in case, but s will be freed soon so... | ||
1346 | s->debug_name = "<gc>"; | ||
1347 | } | ||
1348 | DEBUGSPEW_CODE( fprintf( stderr, "** Joined ok **")); | ||
1349 | } | ||
1350 | else if( s->status < DONE) | ||
1351 | { | ||
1352 | // still running: will have to be cleaned up later | ||
1353 | selfdestruct_add( s); | ||
1354 | assert( s->selfdestruct_next); | ||
1355 | if( have_gc_cb) | ||
1356 | { | ||
1357 | lua_pushliteral( L, "selfdestruct"); // ud gc_cb name status | ||
1358 | lua_call( L, 2, 0); // ud | ||
1359 | } | ||
1360 | return 0; | ||
1361 | } | ||
1362 | else if( s->L) | ||
1363 | { | ||
1364 | // no longer accessing the Lua VM: we can close right now | ||
1365 | lua_close( s->L); | ||
1366 | s->L = 0; | ||
1367 | // just in case, but s will be freed soon so... | ||
1368 | s->debug_name = "<gc>"; | ||
1369 | } | ||
1370 | |||
1371 | // Clean up after a (finished) thread | ||
1372 | lane_cleanup( s); | ||
1373 | |||
1374 | // do this after lane cleanup in case the callback triggers an error | ||
1375 | if( have_gc_cb) | ||
1376 | { | ||
1377 | lua_pushliteral( L, "closed"); // ud gc_cb name status | ||
1378 | lua_call( L, 2, 0); // ud | ||
1379 | } | ||
1380 | return 0; | ||
1381 | } | ||
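What this boils down to on the Lua side: if the handle of a still-running lane is collected, the lane is left free-running on the selfdestruct chain and the optional gc_cb is told about it. A hedged sketch:

    local lanes = require "lanes".configure()
    lanes.gen("*",
        { gc_cb = function(name, status)
            -- status is "closed" for a finished lane, "selfdestruct" when the
            -- handle dies while the lane is still running
            io.stderr:write(name, ": ", status, "\n")
        end },
        function() for _ = 1, 1e9 do end return true end)()  -- handle not kept anywhere
    collectgarbage()      -- handle collected -> gc_cb("<unnamed>", "selfdestruct")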
1382 | |||
1383 | //--- | ||
1384 | // str= thread_status( lane ) | ||
1385 | // | ||
1386 | // Returns: "pending" not started yet | ||
1387 | // -> "running" started, doing its work.. | ||
1388 | // <-> "waiting" blocked in a receive() | ||
1389 | // -> "done" finished, results are there | ||
1390 | // / "error" finished at an error, error value is there | ||
1391 | // / "cancelled" execution cancelled by M (state gone) | ||
1392 | // | ||
1393 | static char const * thread_status_string( Lane* s) | ||
1394 | { | ||
1395 | enum e_status st = s->status; // read just once (volatile) | ||
1396 | char const* str = | ||
1397 | (s->mstatus == KILLED) ? "killed" : // new to v3.3.0! | ||
1398 | (st == PENDING) ? "pending" : | ||
1399 | (st == RUNNING) ? "running" : // like in 'co.status()' | ||
1400 | (st == WAITING) ? "waiting" : | ||
1401 | (st == DONE) ? "done" : | ||
1402 | (st == ERROR_ST) ? "error" : | ||
1403 | (st == CANCELLED) ? "cancelled" : NULL; | ||
1404 | return str; | ||
1405 | } | ||
1406 | |||
1407 | int push_thread_status( lua_State* L, Lane* s) | ||
1408 | { | ||
1409 | char const* const str = thread_status_string( s); | ||
1410 | ASSERT_L( str); | ||
1411 | |||
1412 | lua_pushstring( L, str); | ||
1413 | return 1; | ||
1414 | } | ||
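These strings surface as the `status` field of the lane handle; a quick sketch of watching them change:

    local lanes = require "lanes".configure()
    local linda = lanes.linda()
    local h = lanes.gen("*", function() return linda:receive(10, "go") end)()
    print(h.status)          -- "running" or "waiting" while blocked in the receive
    linda:send("go", true)
    h:join()
    print(h.status)          -- "done" (or "error" / "cancelled" / "killed" in the other cases)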
1415 | |||
1416 | |||
1417 | //--- | ||
1418 | // [...] | [nil, err_any, stack_tbl]= thread_join( lane_ud [, wait_secs=-1] ) | ||
1419 | // | ||
1420 | // timeout: returns nil | ||
1421 | // done: returns return values (0..N) | ||
1422 | // error: returns nil + error value [+ stack table] | ||
1423 | // cancelled: returns nil | ||
1424 | // | ||
1425 | LUAG_FUNC( thread_join) | ||
1426 | { | ||
1427 | Lane* const s = lua_toLane( L, 1); | ||
1428 | double wait_secs = luaL_optnumber( L, 2, -1.0); | ||
1429 | lua_State* L2 = s->L; | ||
1430 | int ret; | ||
1431 | bool_t done = THREAD_ISNULL( s->thread) || THREAD_WAIT( &s->thread, wait_secs, &s->done_signal, &s->done_lock, &s->status); | ||
1432 | if( !done || !L2) | ||
1433 | { | ||
1434 | STACK_GROW( L, 2); | ||
1435 | lua_pushnil( L); | ||
1436 | lua_pushliteral( L, "timeout"); | ||
1437 | return 2; | ||
1438 | } | ||
1439 | |||
1440 | STACK_CHECK( L, 0); | ||
1441 | // Thread is DONE/ERROR_ST/CANCELLED; all ours now | ||
1442 | |||
1443 | if( s->mstatus == KILLED) // OS thread was killed if thread_cancel was forced | ||
1444 | { | ||
1445 | // in that case, even if the thread was killed while DONE/ERROR_ST/CANCELLED, ignore regular return values | ||
1446 | STACK_GROW( L, 2); | ||
1447 | lua_pushnil( L); | ||
1448 | lua_pushliteral( L, "killed"); | ||
1449 | ret = 2; | ||
1450 | } | ||
1451 | else | ||
1452 | { | ||
1453 | Universe* U = universe_get( L); | ||
1454 | // debug_name is a pointer to a string possibly interned in the lane's state, which no longer exists once the state is closed | ||
1455 | // so store it in the userdata uservalue at a key that can't possibly collide | ||
1456 | securize_debug_threadname( L, s); | ||
1457 | switch( s->status) | ||
1458 | { | ||
1459 | case DONE: | ||
1460 | { | ||
1461 | int n = lua_gettop( L2); // whole L2 stack | ||
1462 | if( (n > 0) && (luaG_inter_move( U, L2, L, n, eLM_LaneBody) != eICR_Success)) | ||
1463 | { | ||
1464 | return luaL_error( L, "tried to copy unsupported types"); | ||
1465 | } | ||
1466 | ret = n; | ||
1467 | } | ||
1468 | break; | ||
1469 | |||
1470 | case ERROR_ST: | ||
1471 | { | ||
1472 | int const n = lua_gettop( L2); | ||
1473 | STACK_GROW( L, 3); | ||
1474 | lua_pushnil( L); | ||
1475 | // even when ERROR_FULL_STACK, if the error is not LUA_ERRRUN, the handler wasn't called, and we only have 1 error message on the stack ... | ||
1476 | if( luaG_inter_move( U, L2, L, n, eLM_LaneBody) != eICR_Success) // nil "err" [trace] | ||
1477 | { | ||
1478 | return luaL_error( L, "tried to copy unsupported types: %s", lua_tostring( L, -n)); | ||
1479 | } | ||
1480 | ret = 1 + n; | ||
1481 | } | ||
1482 | break; | ||
1483 | |||
1484 | case CANCELLED: | ||
1485 | ret = 0; | ||
1486 | break; | ||
1487 | |||
1488 | default: | ||
1489 | DEBUGSPEW_CODE( fprintf( stderr, "Status: %d\n", s->status)); | ||
1490 | ASSERT_L( FALSE); | ||
1491 | ret = 0; | ||
1492 | } | ||
1493 | lua_close( L2); | ||
1494 | } | ||
1495 | s->L = 0; | ||
1496 | STACK_END( L, ret); | ||
1497 | return ret; | ||
1498 | } | ||
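A Lua-side sketch of the return conventions implemented above:

    local lanes = require "lanes".configure()
    local h = lanes.gen("*", function(x)
        if x < 0 then error("negative input") end
        return math.sqrt(x), x
    end)(9)

    local r1, r2 = h:join()          -- success: the lane's return values (3, 9)
    -- on error:   h:join() -> nil, error_value [, stack_trace_table]
    -- on timeout: h:join(wait_secs) -> nil, "timeout"
    -- if the OS thread was killed:   nil, "killed"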
1499 | |||
1500 | |||
1501 | //--- | ||
1502 | // thread_index( ud, key) -> value | ||
1503 | // | ||
1504 | // If key is found in the environment, return it | ||
1505 | // If key is numeric, wait until the thread returns and populate the environment with the return values | ||
1506 | // If the return values signal an error, propagate it | ||
1507 | // If key is "status" return the thread status | ||
1508 | // Else raise an error | ||
1509 | LUAG_FUNC( thread_index) | ||
1510 | { | ||
1511 | int const UD = 1; | ||
1512 | int const KEY = 2; | ||
1513 | int const USR = 3; | ||
1514 | Lane* const s = lua_toLane( L, UD); | ||
1515 | ASSERT_L( lua_gettop( L) == 2); | ||
1516 | |||
1517 | STACK_GROW( L, 8); // up to 8 positions are needed in case of error propagation | ||
1518 | |||
1519 | // If key is numeric, wait until the thread returns and populate the environment with the return values | ||
1520 | if( lua_type( L, KEY) == LUA_TNUMBER) | ||
1521 | { | ||
1522 | // first, check that we don't already have an environment that holds the requested value | ||
1523 | { | ||
1524 | // If key is found in the uservalue, return it | ||
1525 | lua_getiuservalue( L, UD, 1); | ||
1526 | lua_pushvalue( L, KEY); | ||
1527 | lua_rawget( L, USR); | ||
1528 | if( !lua_isnil( L, -1)) | ||
1529 | { | ||
1530 | return 1; | ||
1531 | } | ||
1532 | lua_pop( L, 1); | ||
1533 | } | ||
1534 | { | ||
1535 | // check if we already fetched the values from the thread or not | ||
1536 | bool_t fetched; | ||
1537 | lua_Integer key = lua_tointeger( L, KEY); | ||
1538 | lua_pushinteger( L, 0); | ||
1539 | lua_rawget( L, USR); | ||
1540 | fetched = !lua_isnil( L, -1); | ||
1541 | lua_pop( L, 1); // back to our 2 args + uservalue on the stack | ||
1542 | if( !fetched) | ||
1543 | { | ||
1544 | lua_pushinteger( L, 0); | ||
1545 | lua_pushboolean( L, 1); | ||
1546 | lua_rawset( L, USR); | ||
1547 | // wait until thread has completed | ||
1548 | lua_pushcfunction( L, LG_thread_join); | ||
1549 | lua_pushvalue( L, UD); | ||
1550 | lua_call( L, 1, LUA_MULTRET); // all return values are on the stack, at slots 4+ | ||
1551 | switch( s->status) | ||
1552 | { | ||
1553 | default: | ||
1554 | if( s->mstatus != KILLED) | ||
1555 | { | ||
1556 | // this is an internal error, we probably never get here | ||
1557 | lua_settop( L, 0); | ||
1558 | lua_pushliteral( L, "Unexpected status: "); | ||
1559 | lua_pushstring( L, thread_status_string( s)); | ||
1560 | lua_concat( L, 2); | ||
1561 | lua_error( L); | ||
1562 | break; | ||
1563 | } | ||
1564 | // fall through if we are killed, as we got nil, "killed" on the stack | ||
1565 | |||
1566 | case DONE: // got regular return values | ||
1567 | { | ||
1568 | int i, nvalues = lua_gettop( L) - 3; | ||
1569 | for( i = nvalues; i > 0; -- i) | ||
1570 | { | ||
1571 | // pop the last element of the stack, to store it in the uservalue at its proper index | ||
1572 | lua_rawseti( L, USR, i); | ||
1573 | } | ||
1574 | } | ||
1575 | break; | ||
1576 | |||
1577 | case ERROR_ST: // got 3 values: nil, errstring, callstack table | ||
1578 | // me[-2] could carry the stack table, but even | ||
1579 | // me[-1] is rather unnecessary (and undocumented); | ||
1580 | // use ':join()' instead. --AKa 22-Jan-2009 | ||
1581 | ASSERT_L( lua_isnil( L, 4) && !lua_isnil( L, 5) && lua_istable( L, 6)); | ||
1582 | // store errstring at key -1 | ||
1583 | lua_pushnumber( L, -1); | ||
1584 | lua_pushvalue( L, 5); | ||
1585 | lua_rawset( L, USR); | ||
1586 | break; | ||
1587 | |||
1588 | case CANCELLED: | ||
1589 | // do nothing | ||
1590 | break; | ||
1591 | } | ||
1592 | } | ||
1593 | lua_settop( L, 3); // UD KEY ENV | ||
1594 | if( key != -1) | ||
1595 | { | ||
1596 | lua_pushnumber( L, -1); // UD KEY ENV -1 | ||
1597 | lua_rawget( L, USR); // UD KEY ENV "error" | ||
1598 | if( !lua_isnil( L, -1)) // an error was stored | ||
1599 | { | ||
1600 | // Note: Lua 5.1 interpreter is not prepared to show | ||
1601 | // non-string errors, so we use 'tostring()' here | ||
1602 | // to get meaningful output. --AKa 22-Jan-2009 | ||
1603 | // | ||
1604 | // Also, the stack dump we get is no good; it only | ||
1605 | // lists our internal Lanes functions. There seems | ||
1606 | // to be no way to switch it off, though. | ||
1607 | // | ||
1608 | // Level 3 should show the line where 'h[x]' was read | ||
1609 | // but this only seems to work for string messages | ||
1610 | // (Lua 5.1.4). No idea, why. --AKa 22-Jan-2009 | ||
1611 | lua_getmetatable( L, UD); // UD KEY ENV "error" mt | ||
1612 | lua_getfield( L, -1, "cached_error"); // UD KEY ENV "error" mt error() | ||
1613 | lua_getfield( L, -2, "cached_tostring"); // UD KEY ENV "error" mt error() tostring() | ||
1614 | lua_pushvalue( L, 4); // UD KEY ENV "error" mt error() tostring() "error" | ||
1615 | lua_call( L, 1, 1); // tostring( errstring) -- just in case // UD KEY ENV "error" mt error() "error" | ||
1616 | lua_pushinteger( L, 3); // UD KEY ENV "error" mt error() "error" 3 | ||
1617 | lua_call( L, 2, 0); // error( tostring( errstring), 3) // UD KEY ENV "error" mt | ||
1618 | } | ||
1619 | else | ||
1620 | { | ||
1621 | lua_pop( L, 1); // back to our 3 arguments on the stack | ||
1622 | } | ||
1623 | } | ||
1624 | lua_rawgeti( L, USR, (int)key); | ||
1625 | } | ||
1626 | return 1; | ||
1627 | } | ||
1628 | if( lua_type( L, KEY) == LUA_TSTRING) | ||
1629 | { | ||
1630 | char const * const keystr = lua_tostring( L, KEY); | ||
1631 | lua_settop( L, 2); // keep only our original arguments on the stack | ||
1632 | if( strcmp( keystr, "status") == 0) | ||
1633 | { | ||
1634 | return push_thread_status( L, s); // push the string representing the status | ||
1635 | } | ||
1636 | // return UD.metatable[key] | ||
1637 | lua_getmetatable( L, UD); // UD KEY mt | ||
1638 | lua_replace( L, -3); // mt KEY | ||
1639 | lua_rawget( L, -2); // mt value | ||
1640 | // only "cancel" and "join" are registered as functions, any other string will raise an error | ||
1641 | if( lua_iscfunction( L, -1)) | ||
1642 | { | ||
1643 | return 1; | ||
1644 | } | ||
1645 | return luaL_error( L, "can't index a lane with '%s'", keystr); | ||
1646 | } | ||
1647 | // unknown key | ||
1648 | lua_getmetatable( L, UD); | ||
1649 | lua_getfield( L, -1, "cached_error"); | ||
1650 | lua_pushliteral( L, "Unknown key: "); | ||
1651 | lua_pushvalue( L, KEY); | ||
1652 | lua_concat( L, 2); | ||
1653 | lua_call( L, 1, 0); // error( "Unknown key: " .. key) -> doesn't return | ||
1654 | return 0; | ||
1655 | } | ||
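In Lua terms, this is what indexing a lane handle does; a short sketch:

    local lanes = require "lanes".configure()
    local h = lanes.gen("*", function() return "a", "b" end)()
    print(h[1], h[2])   -- blocks until the lane is done, then reads the cached return values
    print(h.status)     -- "done" once the results have been fetched
    -- indexing an errored lane re-raises its error in the calling state;
    -- prefer h:join() when you want to handle the error value yourself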
1656 | |||
1657 | #if HAVE_LANE_TRACKING() | ||
1658 | //--- | ||
1659 | // threads() -> {}|nil | ||
1660 | // | ||
1661 | // Return a list of all known lanes | ||
1662 | LUAG_FUNC( threads) | ||
1663 | { | ||
1664 | int const top = lua_gettop( L); | ||
1665 | Universe* U = universe_get( L); | ||
1666 | |||
1667 | // List _all_ still running threads | ||
1668 | // | ||
1669 | MUTEX_LOCK( &U->tracking_cs); | ||
1670 | if( U->tracking_first && U->tracking_first != TRACKING_END) | ||
1671 | { | ||
1672 | Lane* s = U->tracking_first; | ||
1673 | int index = 0; | ||
1674 | lua_newtable( L); // {} | ||
1675 | while( s != TRACKING_END) | ||
1676 | { | ||
1677 | // insert a { name, status } tuple, so that several lanes with the same name can't clobber each other | ||
1678 | lua_newtable( L); // {} {} | ||
1679 | lua_pushstring( L, s->debug_name); // {} {} "name" | ||
1680 | lua_setfield( L, -2, "name"); // {} {} | ||
1681 | push_thread_status( L, s); // {} {} "status" | ||
1682 | lua_setfield( L, -2, "status"); // {} {} | ||
1683 | lua_rawseti( L, -2, ++ index); // {} | ||
1684 | s = s->tracking_next; | ||
1685 | } | ||
1686 | } | ||
1687 | MUTEX_UNLOCK( &U->tracking_cs); | ||
1688 | return lua_gettop( L) - top; // 0 or 1 | ||
1689 | } | ||
1690 | #endif // HAVE_LANE_TRACKING() | ||
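// Usage sketch for the tracking feature above (a Lua chunk embedded in a C++ raw
// string, purely illustrative): lane tracking must be enabled at configure time,
// after which lanes.threads() returns an array of { name, status } entries, or
// nothing when no lane is known.
static char const* const example_track_lanes = R"lua(
    local lanes = require "lanes".configure{ track_lanes = true }
    for _, t in ipairs(lanes.threads() or {}) do
        print(t.name, t.status)
    end
)lua";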
1691 | |||
1692 | /* | ||
1693 | * ############################################################################################### | ||
1694 | * ######################################## Timer support ######################################## | ||
1695 | * ############################################################################################### | ||
1696 | */ | ||
1697 | |||
1698 | /* | ||
1699 | * secs= now_secs() | ||
1700 | * | ||
1701 | * Returns the current time, as seconds (millisecond resolution). | ||
1702 | */ | ||
1703 | LUAG_FUNC( now_secs ) | ||
1704 | { | ||
1705 | lua_pushnumber( L, now_secs() ); | ||
1706 | return 1; | ||
1707 | } | ||
1708 | |||
1709 | /* | ||
1710 | * wakeup_at_secs= wakeup_conv( date_tbl ) | ||
1711 | */ | ||
1712 | LUAG_FUNC( wakeup_conv ) | ||
1713 | { | ||
1714 | int year, month, day, hour, min, sec, isdst; | ||
1715 | struct tm t; | ||
1716 | memset( &t, 0, sizeof( t)); | ||
1717 | // | ||
1718 | // .year (four digits) | ||
1719 | // .month (1..12) | ||
1720 | // .day (1..31) | ||
1721 | // .hour (0..23) | ||
1722 | // .min (0..59) | ||
1723 | // .sec (0..61) | ||
1724 | // .yday (day of the year) | ||
1725 | // .isdst (daylight saving on/off) | ||
1726 | |||
1727 | STACK_CHECK( L, 0); | ||
1728 | lua_getfield( L, 1, "year" ); year= (int)lua_tointeger(L,-1); lua_pop(L,1); | ||
1729 | lua_getfield( L, 1, "month" ); month= (int)lua_tointeger(L,-1); lua_pop(L,1); | ||
1730 | lua_getfield( L, 1, "day" ); day= (int)lua_tointeger(L,-1); lua_pop(L,1); | ||
1731 | lua_getfield( L, 1, "hour" ); hour= (int)lua_tointeger(L,-1); lua_pop(L,1); | ||
1732 | lua_getfield( L, 1, "min" ); min= (int)lua_tointeger(L,-1); lua_pop(L,1); | ||
1733 | lua_getfield( L, 1, "sec" ); sec= (int)lua_tointeger(L,-1); lua_pop(L,1); | ||
1734 | |||
1735 | // If Lua table has '.isdst' we trust that. If it does not, we'll let | ||
1736 | // 'mktime' decide on whether the time is within DST or not (value -1). | ||
1737 | // | ||
1738 | lua_getfield( L, 1, "isdst" ); | ||
1739 | isdst= lua_isboolean(L,-1) ? lua_toboolean(L,-1) : -1; | ||
1740 | lua_pop(L,1); | ||
1741 | STACK_END( L, 0); | ||
1742 | |||
1743 | t.tm_year= year-1900; | ||
1744 | t.tm_mon= month-1; // 0..11 | ||
1745 | t.tm_mday= day; // 1..31 | ||
1746 | t.tm_hour= hour; // 0..23 | ||
1747 | t.tm_min= min; // 0..59 | ||
1748 | t.tm_sec= sec; // 0..60 | ||
1749 | t.tm_isdst= isdst; // 0/1/negative | ||
1750 | |||
1751 | lua_pushnumber( L, (double) mktime( &t)); // ms=0 | ||
1752 | return 1; | ||
1753 | } | ||
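// Standalone sketch of the conversion performed by wakeup_conv() above: the same
// field adjustments fed to mktime(), with illustrative parameters (this is not a
// Lanes API, just the shape of the computation).
#include <ctime>

static double date_fields_to_epoch_secs(int year, int month, int day, int hour, int min, int sec, int isdst)
{
    struct tm t = {};          // zeroed, like the memset() above
    t.tm_year = year - 1900;   // years since 1900
    t.tm_mon = month - 1;      // 0..11
    t.tm_mday = day;           // 1..31
    t.tm_hour = hour;          // 0..23
    t.tm_min = min;            // 0..59
    t.tm_sec = sec;            // 0..60
    t.tm_isdst = isdst;        // -1 lets mktime() decide whether DST applies
    return (double) mktime(&t);
}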
1754 | |||
1755 | /* | ||
1756 | * ############################################################################################### | ||
1757 | * ######################################## Module linkage ####################################### | ||
1758 | * ############################################################################################### | ||
1759 | */ | ||
1760 | |||
1761 | extern int LG_linda( lua_State* L); | ||
1762 | static const struct luaL_Reg lanes_functions [] = { | ||
1763 | {"linda", LG_linda}, | ||
1764 | {"now_secs", LG_now_secs}, | ||
1765 | {"wakeup_conv", LG_wakeup_conv}, | ||
1766 | {"set_thread_priority", LG_set_thread_priority}, | ||
1767 | {"set_thread_affinity", LG_set_thread_affinity}, | ||
1768 | {"nameof", luaG_nameof}, | ||
1769 | {"register", LG_register}, | ||
1770 | {"set_singlethreaded", LG_set_singlethreaded}, | ||
1771 | {NULL, NULL} | ||
1772 | }; | ||
1773 | |||
1774 | /* | ||
1775 | * One-time initializations | ||
1776 | * settings table is at position 1 on the stack | ||
1777 | * pushes an error string on the stack in case of problem | ||
1778 | */ | ||
1779 | static void init_once_LOCKED( void) | ||
1780 | { | ||
1781 | #if (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) | ||
1782 | now_secs(); // initialize 'now_secs()' internal offset | ||
1783 | #endif | ||
1784 | |||
1785 | #if (defined PLATFORM_OSX) && (defined _UTILBINDTHREADTOCPU) | ||
1786 | chudInitialize(); | ||
1787 | #endif | ||
1788 | |||
1789 | //--- | ||
1790 | // Linux needs SCHED_RR to change thread priorities, and that is only | ||
1791 | // allowed for sudo'ers. SCHED_OTHER (default) has no priorities. | ||
1792 | // SCHED_OTHER threads are always lower priority than SCHED_RR. | ||
1793 | // | ||
1794 | // ^-- those apply to the 2.6 kernel. If (wishful thinking) these | ||
1795 | // constraints are relaxed in the future, non-sudo priorities could | ||
1796 | // be enabled on Linux as well. | ||
1797 | // | ||
1798 | #ifdef PLATFORM_LINUX | ||
1799 | sudo = (geteuid() == 0); // are we root? | ||
1800 | |||
1801 | // If lower priorities (-2..-1) are wanted, we need to lift the main | ||
1802 | // thread to SCHED_RR and 50 (medium) level. Otherwise, we're always below | ||
1803 | // the launched threads (even -2). | ||
1804 | // | ||
1805 | #ifdef LINUX_SCHED_RR | ||
1806 | if( sudo) | ||
1807 | { | ||
1808 | struct sched_param sp; | ||
1809 | sp.sched_priority = _PRIO_0; | ||
1810 | PT_CALL( pthread_setschedparam( pthread_self(), SCHED_RR, &sp)); | ||
1811 | } | ||
1812 | #endif // LINUX_SCHED_RR | ||
1813 | #endif // PLATFORM_LINUX | ||
1814 | } | ||
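// Hedged sketch of the SCHED_RR lift described above: move the calling thread to
// the round-robin scheduling class at a mid-range priority. Requires root (or the
// proper capability); the priority value 50 is illustrative, Lanes uses its own
// _PRIO_0 constant.
#include <pthread.h>
#include <sched.h>

static bool lift_main_thread_to_sched_rr(void)
{
    struct sched_param sp = {};
    sp.sched_priority = 50;
    return pthread_setschedparam(pthread_self(), SCHED_RR, &sp) == 0;
}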
1815 | |||
1816 | static volatile long s_initCount = 0; | ||
1817 | |||
1818 | // upvalue 1: module name | ||
1819 | // upvalue 2: module table | ||
1820 | // param 1: settings table | ||
1821 | LUAG_FUNC( configure) | ||
1822 | { | ||
1823 | Universe* U = universe_get( L); | ||
1824 | bool_t const from_master_state = (U == NULL); | ||
1825 | char const* name = luaL_checkstring( L, lua_upvalueindex( 1)); | ||
1826 | _ASSERT_L( L, lua_type( L, 1) == LUA_TTABLE); | ||
1827 | |||
1828 | /* | ||
1829 | ** Making one-time initializations. | ||
1830 | ** | ||
1831 | ** When the host application is single-threaded (and all threading happens via Lanes) | ||
1832 | ** there is no problem. But if the host is multithreaded, we need to lock around the | ||
1833 | ** initializations. | ||
1834 | */ | ||
1835 | #if THREADAPI == THREADAPI_WINDOWS | ||
1836 | { | ||
1837 | static volatile int /*bool*/ go_ahead; // = 0 | ||
1838 | if( InterlockedCompareExchange( &s_initCount, 1, 0) == 0) | ||
1839 | { | ||
1840 | init_once_LOCKED(); | ||
1841 | go_ahead = 1; // let others pass | ||
1842 | } | ||
1843 | else | ||
1844 | { | ||
1845 | while( !go_ahead) { Sleep(1); } // let other threads run | ||
1846 | } | ||
1847 | } | ||
1848 | #else // THREADAPI == THREADAPI_PTHREAD | ||
1849 | if( s_initCount == 0) | ||
1850 | { | ||
1851 | static pthread_mutex_t my_lock = PTHREAD_MUTEX_INITIALIZER; | ||
1852 | pthread_mutex_lock( &my_lock); | ||
1853 | { | ||
1854 | // Recheck now that we're within the lock | ||
1855 | // | ||
1856 | if( s_initCount == 0) | ||
1857 | { | ||
1858 | init_once_LOCKED(); | ||
1859 | s_initCount = 1; | ||
1860 | } | ||
1861 | } | ||
1862 | pthread_mutex_unlock( &my_lock); | ||
1863 | } | ||
1864 | #endif // THREADAPI == THREADAPI_PTHREAD | ||
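// The two branches above implement the same "run init_once_LOCKED() exactly once,
// even from concurrent callers" guarantee. A minimal modern-C++ sketch of that
// pattern (illustration only, not what the code above compiles to):
#include <mutex>

static std::once_flag s_init_flag;

static void ensure_initialized_once(void)
{
    std::call_once(s_init_flag, []() { init_once_LOCKED(); });
}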
1865 | |||
1866 | STACK_GROW( L, 4); | ||
1867 | STACK_CHECK_ABS( L, 1); // settings | ||
1868 | |||
1869 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() BEGIN\n" INDENT_END, L)); | ||
1870 | DEBUGSPEW_CODE( if( U) ++ U->debugspew_indent_depth); | ||
1871 | |||
1872 | if( U == NULL) | ||
1873 | { | ||
1874 | U = universe_create( L); // settings universe | ||
1875 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); | ||
1876 | lua_newtable( L); // settings universe mt | ||
1877 | lua_getfield( L, 1, "shutdown_timeout"); // settings universe mt shutdown_timeout | ||
1878 | lua_pushcclosure( L, universe_gc, 1); // settings universe mt universe_gc | ||
1879 | lua_setfield( L, -2, "__gc"); // settings universe mt | ||
1880 | lua_setmetatable( L, -2); // settings universe | ||
1881 | lua_pop( L, 1); // settings | ||
1882 | lua_getfield( L, 1, "verbose_errors"); // settings verbose_errors | ||
1883 | U->verboseErrors = lua_toboolean( L, -1); | ||
1884 | lua_pop( L, 1); // settings | ||
1885 | lua_getfield( L, 1, "demote_full_userdata"); // settings demote_full_userdata | ||
1886 | U->demoteFullUserdata = lua_toboolean( L, -1); | ||
1887 | lua_pop( L, 1); // settings | ||
1888 | #if HAVE_LANE_TRACKING() | ||
1889 | MUTEX_INIT( &U->tracking_cs); | ||
1890 | lua_getfield( L, 1, "track_lanes"); // settings track_lanes | ||
1891 | U->tracking_first = lua_toboolean( L, -1) ? TRACKING_END : NULL; | ||
1892 | lua_pop( L, 1); // settings | ||
1893 | #endif // HAVE_LANE_TRACKING() | ||
1894 | // Linked chains handling | ||
1895 | MUTEX_INIT( &U->selfdestruct_cs); | ||
1896 | MUTEX_RECURSIVE_INIT( &U->require_cs); | ||
1897 | // Locks for 'tools.c' inc/dec counters | ||
1898 | MUTEX_INIT( &U->deep_lock); | ||
1899 | MUTEX_INIT( &U->mtid_lock); | ||
1900 | U->selfdestruct_first = SELFDESTRUCT_END; | ||
1901 | initialize_allocator_function( U, L); | ||
1902 | initialize_on_state_create( U, L); | ||
1903 | init_keepers( U, L); | ||
1904 | STACK_MID( L, 1); | ||
1905 | |||
1906 | // Initialize 'timer_deep'; a common Linda object shared by all states | ||
1907 | lua_pushcfunction( L, LG_linda); // settings lanes.linda | ||
1908 | lua_pushliteral( L, "lanes-timer"); // settings lanes.linda "lanes-timer" | ||
1909 | lua_call( L, 1, 1); // settings linda | ||
1910 | STACK_MID( L, 2); | ||
1911 | |||
1912 | // Proxy userdata contents are only a 'DEEP_PRELUDE*' pointer | ||
1913 | U->timer_deep = *(DeepPrelude**) lua_touserdata( L, -1); | ||
1914 | // increment refcount so that this linda remains alive as long as the universe exists. | ||
1915 | ++ U->timer_deep->refcount; | ||
1916 | lua_pop( L, 1); // settings | ||
1917 | } | ||
1918 | STACK_MID( L, 1); | ||
1919 | |||
1920 | // Serialize calls to 'require' from now on, also in the primary state | ||
1921 | serialize_require( DEBUGSPEW_PARAM_COMMA( U) L); | ||
1922 | |||
1923 | // Retrieve main module interface table | ||
1924 | lua_pushvalue( L, lua_upvalueindex( 2)); // settings M | ||
1925 | // remove configure() (this function) from the module interface | ||
1926 | lua_pushnil( L); // settings M nil | ||
1927 | lua_setfield( L, -2, "configure"); // settings M | ||
1928 | // add functions to the module's table | ||
1929 | luaG_registerlibfuncs( L, lanes_functions); | ||
1930 | #if HAVE_LANE_TRACKING() | ||
1931 | // register core.threads() only if settings say it should be available | ||
1932 | if( U->tracking_first != NULL) | ||
1933 | { | ||
1934 | lua_pushcfunction( L, LG_threads); // settings M LG_threads() | ||
1935 | lua_setfield( L, -2, "threads"); // settings M | ||
1936 | } | ||
1937 | #endif // HAVE_LANE_TRACKING() | ||
1938 | STACK_MID( L, 2); | ||
1939 | |||
1940 | { | ||
1941 | char const* errmsg; | ||
1942 | errmsg = push_deep_proxy( U, L, (DeepPrelude*) U->timer_deep, 0, eLM_LaneBody); // settings M timer_deep | ||
1943 | if( errmsg != NULL) | ||
1944 | { | ||
1945 | return luaL_error( L, errmsg); | ||
1946 | } | ||
1947 | lua_setfield( L, -2, "timer_gateway"); // settings M | ||
1948 | } | ||
1949 | STACK_MID( L, 2); | ||
1950 | |||
1951 | // prepare the metatable for threads | ||
1952 | // contains keys: { __gc, __index, cached_error, cached_tostring, cancel, join, get_debug_threadname } | ||
1953 | // | ||
1954 | if( luaL_newmetatable( L, "Lane")) // settings M mt | ||
1955 | { | ||
1956 | lua_pushcfunction( L, LG_thread_gc); // settings M mt LG_thread_gc | ||
1957 | lua_setfield( L, -2, "__gc"); // settings M mt | ||
1958 | lua_pushcfunction( L, LG_thread_index); // settings M mt LG_thread_index | ||
1959 | lua_setfield( L, -2, "__index"); // settings M mt | ||
1960 | lua_getglobal( L, "error"); // settings M mt error | ||
1961 | ASSERT_L( lua_isfunction( L, -1)); | ||
1962 | lua_setfield( L, -2, "cached_error"); // settings M mt | ||
1963 | lua_getglobal( L, "tostring"); // settings M mt tostring | ||
1964 | ASSERT_L( lua_isfunction( L, -1)); | ||
1965 | lua_setfield( L, -2, "cached_tostring"); // settings M mt | ||
1966 | lua_pushcfunction( L, LG_thread_join); // settings M mt LG_thread_join | ||
1967 | lua_setfield( L, -2, "join"); // settings M mt | ||
1968 | lua_pushcfunction( L, LG_get_debug_threadname); // settings M mt LG_get_debug_threadname | ||
1969 | lua_setfield( L, -2, "get_debug_threadname"); // settings M mt | ||
1970 | lua_pushcfunction( L, LG_thread_cancel); // settings M mt LG_thread_cancel | ||
1971 | lua_setfield( L, -2, "cancel"); // settings M mt | ||
1972 | lua_pushliteral( L, "Lane"); // settings M mt "Lane" | ||
1973 | lua_setfield( L, -2, "__metatable"); // settings M mt | ||
1974 | } | ||
1975 | |||
1976 | lua_pushcclosure( L, LG_lane_new, 1); // settings M lane_new | ||
1977 | lua_setfield( L, -2, "lane_new"); // settings M | ||
1978 | |||
1979 | // we can't register 'lanes.require' normally because we want to create an upvalued closure | ||
1980 | lua_getglobal( L, "require"); // settings M require | ||
1981 | lua_pushcclosure( L, LG_require, 1); // settings M lanes.require | ||
1982 | lua_setfield( L, -2, "require"); // settings M | ||
1983 | |||
1984 | lua_pushfstring( | ||
1985 | L, "%d.%d.%d" | ||
1986 | , LANES_VERSION_MAJOR, LANES_VERSION_MINOR, LANES_VERSION_PATCH | ||
1987 | ); // settings M VERSION | ||
1988 | lua_setfield( L, -2, "version"); // settings M | ||
1989 | |||
1990 | lua_pushinteger(L, THREAD_PRIO_MAX); // settings M THREAD_PRIO_MAX | ||
1991 | lua_setfield( L, -2, "max_prio"); // settings M | ||
1992 | |||
1993 | push_unique_key( L, CANCEL_ERROR); // settings M CANCEL_ERROR | ||
1994 | lua_setfield( L, -2, "cancel_error"); // settings M | ||
1995 | |||
1996 | STACK_MID( L, 2); // reference stack contains only the function argument 'settings' | ||
1997 | // we'll need this every time we transfer some C function from/to this state | ||
1998 | REGISTRY_SET( L, LOOKUP_REGKEY, lua_newtable( L)); | ||
1999 | STACK_MID( L, 2); | ||
2000 | |||
2001 | // register all native functions found in that module in the transferable functions database | ||
2002 | // we process it before _G because we don't want to find the module when scanning _G (this would generate longer names) | ||
2003 | // for example in package.loaded["lanes.core"].* | ||
2004 | populate_func_lookup_table( L, -1, name); | ||
2005 | STACK_MID( L, 2); | ||
2006 | |||
2007 | // record all existing C/JIT-fast functions | ||
2008 | // Lua 5.2 no longer has LUA_GLOBALSINDEX: we must push globals table on the stack | ||
2009 | if( from_master_state) | ||
2010 | { | ||
2011 | // don't do this when called during the initialization of a new lane, | ||
2012 | // because we will do it after on_state_create() is called, | ||
2013 | // and we don't want to skip _G because of caching in case globals are created then | ||
2014 | lua_pushglobaltable( L); // settings M _G | ||
2015 | populate_func_lookup_table( L, -1, NULL); | ||
2016 | lua_pop( L, 1); // settings M | ||
2017 | } | ||
2018 | lua_pop( L, 1); // settings | ||
2019 | |||
2020 | // set _R[CONFIG_REGKEY] = settings | ||
2021 | REGISTRY_SET( L, CONFIG_REGKEY, lua_pushvalue( L, -2)); // -2 because CONFIG_REGKEY is pushed before the value itself | ||
2022 | STACK_END( L, 1); | ||
2023 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() END\n" INDENT_END, L)); | ||
2024 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); | ||
2025 | // Return the settings table | ||
2026 | return 1; | ||
2027 | } | ||
2028 | |||
2029 | #if defined PLATFORM_WIN32 && !defined NDEBUG | ||
2030 | #include <signal.h> | ||
2031 | #include <conio.h> | ||
2032 | |||
2033 | void signal_handler( int signal) | ||
2034 | { | ||
2035 | if( signal == SIGABRT) | ||
2036 | { | ||
2037 | _cprintf( "caught abnormal termination!"); | ||
2038 | abort(); | ||
2039 | } | ||
2040 | } | ||
2041 | |||
2042 | // helper to get correct call stacks when a Win32 process crashes on 64-bit Windows | ||
2043 | // don't forget to toggle Debug/Exceptions/Win32 in Visual Studio too! | ||
2044 | static volatile long s_ecoc_initCount = 0; | ||
2045 | static volatile int s_ecoc_go_ahead = 0; | ||
2046 | static void EnableCrashingOnCrashes( void) | ||
2047 | { | ||
2048 | if( InterlockedCompareExchange( &s_ecoc_initCount, 1, 0) == 0) | ||
2049 | { | ||
2050 | typedef BOOL (WINAPI* tGetPolicy)( LPDWORD lpFlags); | ||
2051 | typedef BOOL (WINAPI* tSetPolicy)( DWORD dwFlags); | ||
2052 | const DWORD EXCEPTION_SWALLOWING = 0x1; | ||
2053 | |||
2054 | HMODULE kernel32 = LoadLibraryA("kernel32.dll"); | ||
2055 | if (kernel32) | ||
2056 | { | ||
2057 | tGetPolicy pGetPolicy = (tGetPolicy)GetProcAddress(kernel32, "GetProcessUserModeExceptionPolicy"); | ||
2058 | tSetPolicy pSetPolicy = (tSetPolicy)GetProcAddress(kernel32, "SetProcessUserModeExceptionPolicy"); | ||
2059 | if( pGetPolicy && pSetPolicy) | ||
2060 | { | ||
2061 | DWORD dwFlags; | ||
2062 | if( pGetPolicy( &dwFlags)) | ||
2063 | { | ||
2064 | // Turn off the filter | ||
2065 | pSetPolicy( dwFlags & ~EXCEPTION_SWALLOWING); | ||
2066 | } | ||
2067 | } | ||
2068 | FreeLibrary(kernel32); | ||
2069 | } | ||
2070 | //typedef void (* SignalHandlerPointer)( int); | ||
2071 | /*SignalHandlerPointer previousHandler =*/ signal( SIGABRT, signal_handler); | ||
2072 | |||
2073 | s_ecoc_go_ahead = 1; // let others pass | ||
2074 | } | ||
2075 | else | ||
2076 | { | ||
2077 | while( !s_ecoc_go_ahead) { Sleep(1); } // let other threads run | ||
2078 | } | ||
2079 | } | ||
2080 | #endif // PLATFORM_WIN32 && !defined NDEBUG | ||
2081 | |||
2082 | int LANES_API luaopen_lanes_core( lua_State* L) | ||
2083 | { | ||
2084 | #if defined PLATFORM_WIN32 && !defined NDEBUG | ||
2085 | EnableCrashingOnCrashes(); | ||
2086 | #endif // defined PLATFORM_WIN32 && !defined NDEBUG | ||
2087 | |||
2088 | STACK_GROW( L, 4); | ||
2089 | STACK_CHECK( L, 0); | ||
2090 | |||
2091 | // Prevent PUC-Lua/LuaJIT mismatch. Hopefully this works for MoonJIT too | ||
2092 | lua_getglobal( L, "jit"); // {jit?} | ||
2093 | #if LUAJIT_FLAVOR() == 0 | ||
2094 | if (!lua_isnil( L, -1)) | ||
2095 | return luaL_error( L, "Lanes is built for PUC-Lua, don't run from LuaJIT"); | ||
2096 | #else | ||
2097 | if (lua_isnil( L, -1)) | ||
2098 | return luaL_error( L, "Lanes is built for LuaJIT, don't run from PUC-Lua"); | ||
2099 | #endif | ||
2100 | lua_pop( L, 1); // | ||
2101 | |||
2102 | // Create main module interface table | ||
2103 | // we only have 1 closure, which must be called to configure Lanes | ||
2104 | lua_newtable( L); // M | ||
2105 | lua_pushvalue( L, 1); // M "lanes.core" | ||
2106 | lua_pushvalue( L, -2); // M "lanes.core" M | ||
2107 | lua_pushcclosure( L, LG_configure, 2); // M LG_configure() | ||
2108 | REGISTRY_GET( L, CONFIG_REGKEY); // M LG_configure() settings | ||
2109 | if( !lua_isnil( L, -1)) // this is not the first require "lanes.core": call configure() immediately | ||
2110 | { | ||
2111 | lua_pushvalue( L, -1); // M LG_configure() settings settings | ||
2112 | lua_setfield( L, -4, "settings"); // M LG_configure() settings | ||
2113 | lua_call( L, 1, 0); // M | ||
2114 | } | ||
2115 | else | ||
2116 | { | ||
2117 | // will do nothing on first invocation, as we haven't stored settings in the registry yet | ||
2118 | lua_setfield( L, -3, "settings"); // M LG_configure() | ||
2119 | lua_setfield( L, -2, "configure"); // M | ||
2120 | } | ||
2121 | |||
2122 | STACK_END( L, 1); | ||
2123 | return 1; | ||
2124 | } | ||
2125 | |||
2126 | static int default_luaopen_lanes( lua_State* L) | ||
2127 | { | ||
2128 | int rc = luaL_loadfile( L, "lanes.lua") || lua_pcall( L, 0, 1, 0); | ||
2129 | if( rc != LUA_OK) | ||
2130 | { | ||
2131 | return luaL_error( L, "failed to initialize embedded Lanes"); | ||
2132 | } | ||
2133 | return 1; | ||
2134 | } | ||
2135 | |||
2136 | // call this instead of luaopen_lanes_core() when embedding Lua and Lanes in a custom application | ||
2137 | void LANES_API luaopen_lanes_embedded( lua_State* L, lua_CFunction _luaopen_lanes) | ||
2138 | { | ||
2139 | STACK_CHECK( L, 0); | ||
2140 | // pre-require lanes.core so that when lanes.lua calls require "lanes.core" it finds it is already loaded | ||
2141 | luaL_requiref( L, "lanes.core", luaopen_lanes_core, 0); // ... lanes.core | ||
2142 | lua_pop( L, 1); // ... | ||
2143 | STACK_MID( L, 0); | ||
2144 | // call user-provided function that runs the chunk "lanes.lua" from wherever they stored it | ||
2145 | luaL_requiref( L, "lanes", _luaopen_lanes ? _luaopen_lanes : default_luaopen_lanes, 0); // ... lanes | ||
2146 | STACK_END( L, 1); | ||
2147 | } | ||
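// Hedged usage sketch for embedders, based on the comment above: the host
// pre-loads "lanes.core" and provides its own loader for the lanes.lua chunk.
// 'bundled_lanes_lua' is a hypothetical host variable, not a Lanes symbol.
#include "lanes.h"
#include "lua.hpp"

static char const* bundled_lanes_lua; // set by the host to the contents of lanes.lua

static int load_bundled_lanes(lua_State* L)
{
    if (luaL_dostring(L, bundled_lanes_lua) != 0)
    {
        return luaL_error(L, "failed to run bundled lanes.lua");
    }
    return 1; // the module table returned by lanes.lua
}

int main(void)
{
    lua_State* L = luaL_newstate();
    luaL_openlibs(L);
    luaopen_lanes_embedded(L, load_bundled_lanes); // ... lanes
    lua_pop(L, 1);                                 // ...
    // ... run host scripts; require "lanes" now resolves to the embedded module ...
    lua_close(L);
    return 0;
}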
diff --git a/src/lanes.cpp b/src/lanes.cpp new file mode 100644 index 0000000..1f795cc --- /dev/null +++ b/src/lanes.cpp | |||
@@ -0,0 +1,2054 @@ | |||
1 | /* | ||
2 | * LANES.CPP Copyright (c) 2007-08, Asko Kauppi | ||
3 | * Copyright (C) 2009-24, Benoit Germain | ||
4 | * | ||
5 | * Multithreading in Lua. | ||
6 | * | ||
7 | * History: | ||
8 | * See CHANGES | ||
9 | * | ||
10 | * Platforms (tested internally): | ||
11 | * OS X (10.5.7 PowerPC/Intel) | ||
12 | * Linux x86 (Ubuntu 8.04) | ||
13 | * Win32 (Windows XP Home SP2, Visual C++ 2005/2008 Express) | ||
14 | * | ||
15 | * Platforms (tested externally): | ||
16 | * Win32 (MSYS) by Ross Berteig. | ||
17 | * | ||
18 | * Platforms (testers appreciated): | ||
19 | * Win64 - should work??? | ||
20 | * Linux x64 - should work | ||
21 | * FreeBSD - should work | ||
22 | * QNX - porting shouldn't be hard | ||
23 | * Sun Solaris - porting shouldn't be hard | ||
24 | * | ||
25 | * References: | ||
26 | * "Porting multithreaded applications from Win32 to Mac OS X": | ||
27 | * <http://developer.apple.com/macosx/multithreadedprogramming.html> | ||
28 | * | ||
29 | * Pthreads: | ||
30 | * <http://vergil.chemistry.gatech.edu/resources/programming/threads.html> | ||
31 | * | ||
32 | * MSDN: <http://msdn2.microsoft.com/en-us/library/ms686679.aspx> | ||
33 | * | ||
34 | * <http://ridiculousfish.com/blog/archives/2007/02/17/barrier> | ||
35 | * | ||
36 | * Defines: | ||
37 | * -DLINUX_SCHED_RR: all threads are lifted to SCHED_RR category, to | ||
38 | * allow negative priorities [-3,-1] be used. Even without this, | ||
39 | * using priorities will require 'sudo' privileges on Linux. | ||
40 | * | ||
41 | * -DUSE_PTHREAD_TIMEDJOIN: use 'pthread_timedjoin_np()' for waiting | ||
42 | * for threads with a timeout. This changes the thread cleanup | ||
43 | * mechanism slightly (cleans up at the join, not once the thread | ||
44 | * has finished). May or may not be a good idea to use it. | ||
45 | * Available only in selected operating systems (Linux). | ||
46 | * | ||
47 | * Bugs: | ||
48 | * | ||
49 | * To-do: | ||
50 | * | ||
51 | * Make waiting threads cancellable. | ||
52 | * ... | ||
53 | */ | ||
54 | |||
55 | /* | ||
56 | =============================================================================== | ||
57 | |||
58 | Copyright (C) 2007-10 Asko Kauppi <akauppi@gmail.com> | ||
59 | 2011-24 Benoit Germain <bnt.germain@gmail.com> | ||
60 | |||
61 | Permission is hereby granted, free of charge, to any person obtaining a copy | ||
62 | of this software and associated documentation files (the "Software"), to deal | ||
63 | in the Software without restriction, including without limitation the rights | ||
64 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||
65 | copies of the Software, and to permit persons to whom the Software is | ||
66 | furnished to do so, subject to the following conditions: | ||
67 | |||
68 | The above copyright notice and this permission notice shall be included in | ||
69 | all copies or substantial portions of the Software. | ||
70 | |||
71 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
72 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
73 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
74 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
75 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
76 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||
77 | THE SOFTWARE. | ||
78 | |||
79 | =============================================================================== | ||
80 | */ | ||
81 | |||
82 | #include "lanes.h" | ||
83 | |||
84 | #include "compat.h" | ||
85 | #include "keeper.h" | ||
86 | #include "lanes_private.h" | ||
87 | #include "state.h" | ||
88 | #include "threading.h" | ||
89 | #include "tools.h" | ||
90 | #include "universe.h" | ||
91 | |||
92 | #if !(defined(PLATFORM_XBOX) || defined(PLATFORM_WIN32) || defined(PLATFORM_POCKETPC)) | ||
93 | # include <sys/time.h> | ||
94 | #endif | ||
95 | |||
96 | /* geteuid() */ | ||
97 | #ifdef PLATFORM_LINUX | ||
98 | # include <unistd.h> | ||
99 | # include <sys/types.h> | ||
100 | #endif | ||
101 | |||
102 | #include <atomic> | ||
103 | |||
104 | // forwarding (will do things better later) | ||
105 | static void tracking_add(Lane* lane_); | ||
106 | |||
107 | Lane::Lane(Universe* U_, lua_State* L_) | ||
108 | : U{ U_ } | ||
109 | , L{ L_ } | ||
110 | { | ||
111 | #if HAVE_LANE_TRACKING() | ||
112 | if (U->tracking_first) | ||
113 | { | ||
114 | tracking_add(this); | ||
115 | } | ||
116 | #endif // HAVE_LANE_TRACKING() | ||
117 | } | ||
118 | |||
119 | bool Lane::waitForCompletion(lua_Duration duration_) | ||
120 | { | ||
121 | std::chrono::time_point<std::chrono::steady_clock> until{ std::chrono::time_point<std::chrono::steady_clock>::max() }; | ||
122 | if (duration_.count() >= 0.0) | ||
123 | { | ||
124 | until = std::chrono::steady_clock::now() + std::chrono::duration_cast<std::chrono::steady_clock::duration>(duration_); | ||
125 | } | ||
126 | |||
127 | std::unique_lock lock{ m_done_mutex }; | ||
128 | //std::stop_token token{ m_thread.get_stop_token() }; | ||
129 | //return m_done_signal.wait_until(lock, token, secs_, [this](){ return m_status >= Lane::Done; }); | ||
130 | return m_done_signal.wait_until(lock, until, [this](){ return m_status >= Lane::Done; }); | ||
131 | } | ||
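// Minimal sketch of the wait pattern used by waitForCompletion(): turn a relative
// duration into a steady_clock deadline, then do a predicate wait on a condition
// variable. Types and names here are illustrative, not the actual Lane members.
#include <chrono>
#include <condition_variable>
#include <mutex>

struct DoneFlag
{
    std::mutex m;
    std::condition_variable cv;
    bool done{ false };

    bool wait_for_done(std::chrono::duration<double> timeout_)
    {
        auto const until_{ std::chrono::steady_clock::now() + std::chrono::duration_cast<std::chrono::steady_clock::duration>(timeout_) };
        std::unique_lock<std::mutex> lock{ m };
        return cv.wait_until(lock, until_, [this]() { return done; });
    }
};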
132 | |||
133 | static void lane_main(Lane* lane); | ||
134 | void Lane::startThread(int priority_) | ||
135 | { | ||
136 | m_thread = std::jthread([this]() { lane_main(this); }); | ||
137 | if (priority_ != THREAD_PRIO_DEFAULT) | ||
138 | { | ||
139 | JTHREAD_SET_PRIORITY(m_thread, priority_, U->m_sudo); | ||
140 | } | ||
141 | } | ||
142 | |||
143 | /* Do you want full call stacks, or just the line where the error happened? | ||
144 | * | ||
145 | * TBD: The full stack feature does not seem to work (try 'make error'). | ||
146 | */ | ||
147 | #define ERROR_FULL_STACK 1 // must be either 0 or 1 as we do some index arithmetics with it! | ||
148 | |||
149 | // intern the debug name in the specified lua state so that the pointer remains valid when the lane's state is closed | ||
150 | static void securize_debug_threadname(lua_State* L, Lane* lane_) | ||
151 | { | ||
152 | STACK_CHECK_START_REL(L, 0); | ||
153 | STACK_GROW(L, 3); | ||
154 | lua_getiuservalue(L, 1, 1); | ||
155 | lua_newtable(L); | ||
156 | // Lua 5.1 can't do 'lane_->debug_name = lua_pushstring(L, lane_->debug_name);' | ||
157 | lua_pushstring(L, lane_->debug_name); | ||
158 | lane_->debug_name = lua_tostring(L, -1); | ||
159 | lua_rawset(L, -3); | ||
160 | lua_pop(L, 1); | ||
161 | STACK_CHECK(L, 0); | ||
162 | } | ||
163 | |||
164 | #if ERROR_FULL_STACK | ||
165 | [[nodiscard]] static int lane_error(lua_State* L); | ||
166 | // crc64/we of string "STACKTRACE_REGKEY" generated at http://www.nitrxgen.net/hashgen/ | ||
167 | static constexpr UniqueKey STACKTRACE_REGKEY{ 0x534af7d3226a429full }; | ||
168 | #endif // ERROR_FULL_STACK | ||
169 | |||
170 | /* | ||
171 | * registry[FINALIZER_REG_KEY] is either nil (no finalizers) or a table | ||
172 | * of functions that Lanes will call after the executing 'pcall' has ended. | ||
173 | * | ||
174 | * We're NOT using the GC system for finalizer mainly because providing the | ||
175 | * error (and maybe stack trace) parameters to the finalizer functions would | ||
176 | * anyways complicate that approach. | ||
177 | */ | ||
178 | // crc64/we of string "FINALIZER_REGKEY" generated at http://www.nitrxgen.net/hashgen/ | ||
179 | static constexpr UniqueKey FINALIZER_REGKEY{ 0x188fccb8bf348e09ull }; | ||
180 | |||
181 | // ################################################################################################# | ||
182 | |||
183 | /* | ||
184 | * Push a table stored in registry onto Lua stack. | ||
185 | * | ||
186 | * If there is no existing table, create one if 'create' is true. | ||
187 | * | ||
188 | * Returns: true if a table was pushed | ||
189 | * false if no table found, not created, and nothing pushed | ||
190 | */ | ||
191 | [[nodiscard]] static bool push_registry_table(lua_State* L, UniqueKey key, bool create) | ||
192 | { | ||
193 | STACK_GROW(L, 3); | ||
194 | STACK_CHECK_START_REL(L, 0); | ||
195 | |||
196 | key.pushValue(L); // ? | ||
197 | if (lua_isnil(L, -1)) // nil? | ||
198 | { | ||
199 | lua_pop(L, 1); // | ||
200 | STACK_CHECK(L, 0); | ||
201 | |||
202 | if (!create) | ||
203 | { | ||
204 | return false; | ||
205 | } | ||
206 | |||
207 | lua_newtable(L); // t | ||
208 | key.setValue(L, [](lua_State* L) { lua_pushvalue(L, -2); }); | ||
209 | } | ||
210 | STACK_CHECK(L, 1); | ||
211 | return true; // table pushed | ||
212 | } | ||
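// For comparison, the same "get or create a table stored in the registry" dance
// expressed with a plain string key and the stock Lua C API (the code above uses
// a UniqueKey instead; this sketch only shows the shape of the logic).
static bool push_named_registry_table(lua_State* L, char const* key_, bool create_)
{
    lua_getfield(L, LUA_REGISTRYINDEX, key_);       // t|nil
    if (lua_isnil(L, -1))
    {
        lua_pop(L, 1);                              //
        if (!create_)
        {
            return false;
        }
        lua_newtable(L);                            // t
        lua_pushvalue(L, -1);                       // t t
        lua_setfield(L, LUA_REGISTRYINDEX, key_);   // t
    }
    return true;                                    // table left on the stack
}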
213 | |||
214 | // ################################################################################################# | ||
215 | |||
216 | #if HAVE_LANE_TRACKING() | ||
217 | |||
218 | // The chain is ended by '(Lane*)(-1)', not nullptr: | ||
219 | // 'tracking_first -> ... -> ... -> (-1)' | ||
220 | #define TRACKING_END ((Lane *)(-1)) | ||
221 | |||
222 | /* | ||
223 | * Add the lane to tracking chain; the ones still running at the end of the | ||
224 | * whole process will be cancelled. | ||
225 | */ | ||
226 | static void tracking_add(Lane* lane_) | ||
227 | { | ||
228 | std::lock_guard<std::mutex> guard{ lane_->U->tracking_cs }; | ||
229 | assert(lane_->tracking_next == nullptr); | ||
230 | |||
231 | lane_->tracking_next = lane_->U->tracking_first; | ||
232 | lane_->U->tracking_first = lane_; | ||
233 | } | ||
234 | |||
235 | // ################################################################################################# | ||
236 | |||
237 | /* | ||
238 | * A free-running lane has ended; remove it from tracking chain | ||
239 | */ | ||
240 | [[nodiscard]] static bool tracking_remove(Lane* lane_) | ||
241 | { | ||
242 | bool found{ false }; | ||
243 | std::lock_guard<std::mutex> guard{ lane_->U->tracking_cs }; | ||
244 | // Make sure (within the MUTEX) that we actually are in the chain | ||
245 | // still (at process exit they will remove us from chain and then | ||
246 | // cancel/kill). | ||
247 | // | ||
248 | if (lane_->tracking_next != nullptr) | ||
249 | { | ||
250 | Lane** ref = (Lane**) &lane_->U->tracking_first; | ||
251 | |||
252 | while( *ref != TRACKING_END) | ||
253 | { | ||
254 | if (*ref == lane_) | ||
255 | { | ||
256 | *ref = lane_->tracking_next; | ||
257 | lane_->tracking_next = nullptr; | ||
258 | found = true; | ||
259 | break; | ||
260 | } | ||
261 | ref = (Lane**) &((*ref)->tracking_next); | ||
262 | } | ||
263 | assert( found); | ||
264 | } | ||
265 | return found; | ||
266 | } | ||
267 | |||
268 | #endif // HAVE_LANE_TRACKING() | ||
269 | |||
270 | // ################################################################################################# | ||
271 | |||
272 | Lane::~Lane() | ||
273 | { | ||
274 | // Clean up after a (finished) thread | ||
275 | // | ||
276 | #if HAVE_LANE_TRACKING() | ||
277 | if (U->tracking_first != nullptr) | ||
278 | { | ||
279 | // Lane was cleaned up, no need to handle at process termination | ||
280 | std::ignore = tracking_remove(this); | ||
281 | } | ||
282 | #endif // HAVE_LANE_TRACKING() | ||
283 | } | ||
284 | |||
285 | /* | ||
286 | * ############################################################################################### | ||
287 | * ########################################## Finalizer ########################################## | ||
288 | * ############################################################################################### | ||
289 | */ | ||
290 | |||
291 | //--- | ||
292 | // void= finalizer( finalizer_func ) | ||
293 | // | ||
294 | // finalizer_func( [err, stack_tbl] ) | ||
295 | // | ||
296 | // Add a function that will be called when exiting the lane, either via | ||
297 | // normal return or an error. | ||
298 | // | ||
299 | LUAG_FUNC( set_finalizer) | ||
300 | { | ||
301 | luaL_argcheck(L, lua_isfunction(L, 1), 1, "finalizer should be a function"); | ||
302 | luaL_argcheck(L, lua_gettop( L) == 1, 1, "too many arguments"); | ||
303 | // Get the current finalizer table (if any), create one if it doesn't exist | ||
304 | std::ignore = push_registry_table(L, FINALIZER_REGKEY, true); // finalizer {finalisers} | ||
305 | STACK_GROW(L, 2); | ||
306 | lua_pushinteger(L, lua_rawlen(L, -1) + 1); // finalizer {finalisers} idx | ||
307 | lua_pushvalue(L, 1); // finalizer {finalisers} idx finalizer | ||
308 | lua_rawset(L, -3); // finalizer {finalisers} | ||
309 | lua_pop(L, 2); // | ||
310 | return 0; | ||
311 | } | ||
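// Usage sketch of the function registered above, as seen from a lane body (a Lua
// chunk embedded in a C++ raw string, illustrative only). The finalizer receives
// the error value and, when available, the stack trace table.
static char const* const example_lane_body = R"lua(
    set_finalizer(function(err, stack_tbl)
        if err then
            print("lane ended with error:", tostring(err))
        else
            print("lane ended normally")
        end
    end)
    -- ... actual lane work goes here ...
)lua";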
312 | |||
313 | |||
314 | //--- | ||
315 | // Run finalizers - if any - with the given parameters | ||
316 | // | ||
317 | // If 'rc' is nonzero, error message and stack index (the latter only when ERROR_FULL_STACK == 1) are available as: | ||
318 | // [-1]: stack trace (table) | ||
319 | // [-2]: error message (any type) | ||
320 | // | ||
321 | // Returns: | ||
322 | // 0 if finalizers were run without error (or there were none) | ||
323 | // LUA_ERRxxx return code if any of the finalizers failed | ||
324 | // | ||
325 | // TBD: should we add a stack trace on a failing finalizer? it wouldn't be hard. | ||
326 | // | ||
327 | static void push_stack_trace( lua_State* L, int rc_, int stk_base_); | ||
328 | |||
329 | [[nodiscard]] static int run_finalizers(lua_State* L, int lua_rc) | ||
330 | { | ||
331 | int finalizers_index; | ||
332 | int n; | ||
333 | int err_handler_index = 0; | ||
334 | int rc = LUA_OK; // ... | ||
335 | if (!push_registry_table(L, FINALIZER_REGKEY, false)) // ... finalizers? | ||
336 | { | ||
337 | return 0; // no finalizers | ||
338 | } | ||
339 | |||
340 | STACK_GROW(L, 5); | ||
341 | |||
342 | finalizers_index = lua_gettop( L); | ||
343 | |||
344 | #if ERROR_FULL_STACK | ||
345 | lua_pushcfunction(L, lane_error); // ... finalizers lane_error | ||
346 | err_handler_index = lua_gettop( L); | ||
347 | #endif // ERROR_FULL_STACK | ||
348 | |||
349 | for( n = (int) lua_rawlen(L, finalizers_index); n > 0; -- n) | ||
350 | { | ||
351 | int args = 0; | ||
352 | lua_pushinteger(L, n); // ... finalizers lane_error n | ||
353 | lua_rawget(L, finalizers_index); // ... finalizers lane_error finalizer | ||
354 | ASSERT_L( lua_isfunction(L, -1)); | ||
355 | if (lua_rc != LUA_OK) // we have an error message and an optional stack trace at the bottom of the stack | ||
356 | { | ||
357 | ASSERT_L( finalizers_index == 2 || finalizers_index == 3); | ||
358 | //char const* err_msg = lua_tostring(L, 1); | ||
359 | lua_pushvalue(L, 1); // ... finalizers lane_error finalizer err_msg | ||
360 | // note we don't always have a stack trace: for example when the error is CANCEL_ERROR, or when the error didn't go through our handler (such as LUA_ERRMEM) | ||
361 | if (finalizers_index == 3) | ||
362 | { | ||
363 | lua_pushvalue(L, 2); // ... finalizers lane_error finalizer err_msg stack_trace | ||
364 | } | ||
365 | args = finalizers_index - 1; | ||
366 | } | ||
367 | |||
368 | // if no error from the main body, finalizer doesn't receive any argument, else it gets the error message and optional stack trace | ||
369 | rc = lua_pcall(L, args, 0, err_handler_index); // ... finalizers lane_error err_msg2? | ||
370 | if (rc != LUA_OK) | ||
371 | { | ||
372 | push_stack_trace(L, rc, lua_gettop( L)); | ||
373 | // If one finalizer fails, don't run the others. Return this | ||
374 | // as the 'real' error, replacing what we could have had (or not) | ||
375 | // from the actual code. | ||
376 | break; | ||
377 | } | ||
378 | // no error, proceed to next finalizer // ... finalizers lane_error | ||
379 | } | ||
380 | |||
381 | if (rc != LUA_OK) | ||
382 | { | ||
383 | // ERROR_FULL_STACK accounts for the presence of lane_error on the stack | ||
384 | int nb_err_slots = lua_gettop( L) - finalizers_index - ERROR_FULL_STACK; | ||
385 | // a finalizer generated an error, this is what we leave of the stack | ||
386 | for( n = nb_err_slots; n > 0; -- n) | ||
387 | { | ||
388 | lua_replace(L, n); | ||
389 | } | ||
390 | // leave on the stack only the error and optional stack trace produced by the error in the finalizer | ||
391 | lua_settop(L, nb_err_slots); | ||
392 | } | ||
393 | else // no error from the finalizers, make sure only the original return values from the lane body remain on the stack | ||
394 | { | ||
395 | lua_settop(L, finalizers_index - 1); | ||
396 | } | ||
397 | |||
398 | return rc; | ||
399 | } | ||
400 | |||
401 | /* | ||
402 | * ############################################################################################### | ||
403 | * ########################################### Threads ########################################### | ||
404 | * ############################################################################################### | ||
405 | */ | ||
406 | |||
407 | // | ||
408 | // Protects modifying the selfdestruct chain | ||
409 | |||
410 | #define SELFDESTRUCT_END ((Lane*)(-1)) | ||
411 | // | ||
412 | // The chain is ended by '(Lane*)(-1)', not nullptr: | ||
413 | // 'selfdestruct_first -> ... -> ... -> (-1)' | ||
414 | |||
415 | /* | ||
416 | * Add the lane to selfdestruct chain; the ones still running at the end of the | ||
417 | * whole process will be cancelled. | ||
418 | */ | ||
419 | static void selfdestruct_add(Lane* lane_) | ||
420 | { | ||
421 | std::lock_guard<std::mutex> guard{ lane_->U->selfdestruct_cs }; | ||
422 | assert(lane_->selfdestruct_next == nullptr); | ||
423 | |||
424 | lane_->selfdestruct_next = lane_->U->selfdestruct_first; | ||
425 | lane_->U->selfdestruct_first = lane_; | ||
426 | } | ||
427 | |||
428 | // ############################################################################################### | ||
429 | |||
430 | /* | ||
431 | * A free-running lane has ended; remove it from selfdestruct chain | ||
432 | */ | ||
433 | [[nodiscard]] static bool selfdestruct_remove(Lane* lane_) | ||
434 | { | ||
435 | bool found{ false }; | ||
436 | std::lock_guard<std::mutex> guard{ lane_->U->selfdestruct_cs }; | ||
437 | // Make sure (within the MUTEX) that we actually are in the chain | ||
438 | // still (at process exit they will remove us from chain and then | ||
439 | // cancel/kill). | ||
440 | // | ||
441 | if (lane_->selfdestruct_next != nullptr) | ||
442 | { | ||
443 | Lane** ref = (Lane**) &lane_->U->selfdestruct_first; | ||
444 | |||
445 | while (*ref != SELFDESTRUCT_END) | ||
446 | { | ||
447 | if (*ref == lane_) | ||
448 | { | ||
449 | *ref = lane_->selfdestruct_next; | ||
450 | lane_->selfdestruct_next = nullptr; | ||
451 | // the terminal shutdown should wait until the lane is done with its lua_close() | ||
452 | lane_->U->selfdestructing_count.fetch_add(1, std::memory_order_release); | ||
453 | found = true; | ||
454 | break; | ||
455 | } | ||
456 | ref = (Lane**) &((*ref)->selfdestruct_next); | ||
457 | } | ||
458 | assert(found); | ||
459 | } | ||
460 | return found; | ||
461 | } | ||
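// Both the tracking and selfdestruct chains are intrusive singly-linked lists
// terminated by a sentinel value rather than nullptr. A generic sketch of the
// unlink walk used above (illustrative node type, not the actual Lane layout):
struct Node
{
    Node* next{ nullptr };
};

static Node* const kChainEnd = reinterpret_cast<Node*>(-1);

static bool chain_remove(Node*& first_, Node* node_)
{
    for (Node** ref = &first_; *ref != kChainEnd; ref = &(*ref)->next)
    {
        if (*ref == node_)
        {
            *ref = node_->next;
            node_->next = nullptr;
            return true;
        }
    }
    return false;
}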
462 | |||
463 | // ############################################################################################### | ||
464 | |||
465 | /* | ||
466 | * Process end; cancel any still free-running threads | ||
467 | */ | ||
468 | [[nodiscard]] static int universe_gc(lua_State* L) | ||
469 | { | ||
470 | Universe* const U{ lua_tofulluserdata<Universe>(L, 1) }; | ||
471 | lua_Duration const shutdown_timeout{ lua_tonumber(L, lua_upvalueindex(1)) }; | ||
472 | [[maybe_unused]] char const* const op_string{ lua_tostring(L, lua_upvalueindex(2)) }; | ||
473 | CancelOp const op{ which_cancel_op(op_string) }; | ||
474 | |||
475 | if (U->selfdestruct_first != SELFDESTRUCT_END) | ||
476 | { | ||
477 | |||
478 | // Signal _all_ still running threads to exit (including the timer thread) | ||
479 | // | ||
480 | { | ||
481 | std::lock_guard<std::mutex> guard{ U->selfdestruct_cs }; | ||
482 | Lane* lane{ U->selfdestruct_first }; | ||
483 | lua_Duration timeout{ 1us }; | ||
484 | while (lane != SELFDESTRUCT_END) | ||
485 | { | ||
486 | // attempt the requested cancel with a small timeout. | ||
487 | // if waiting on a linda, they will raise a cancel_error. | ||
488 | // if a cancellation hook is desired, it will be installed to try to raise an error | ||
489 | if (lane->m_thread.joinable()) | ||
490 | { | ||
491 | std::ignore = thread_cancel(lane, op, 1, timeout, true); | ||
492 | } | ||
493 | lane = lane->selfdestruct_next; | ||
494 | } | ||
495 | } | ||
496 | |||
497 | // When noticing their cancel, the lanes will remove themselves from | ||
498 | // the selfdestruct chain. | ||
499 | { | ||
500 | std::chrono::time_point<std::chrono::steady_clock> t_until{ std::chrono::steady_clock::now() + std::chrono::duration_cast<std::chrono::steady_clock::duration>(shutdown_timeout) }; | ||
501 | |||
502 | while (U->selfdestruct_first != SELFDESTRUCT_END) | ||
503 | { | ||
504 | // give threads time to act on their cancel | ||
505 | std::this_thread::yield(); | ||
506 | // count the number of cancelled threads that didn't have time to act yet | ||
507 | int n{ 0 }; | ||
508 | { | ||
509 | std::lock_guard<std::mutex> guard{ U->selfdestruct_cs }; | ||
510 | Lane* lane{ U->selfdestruct_first }; | ||
511 | while (lane != SELFDESTRUCT_END) | ||
512 | { | ||
513 | if (lane->cancel_request != CancelRequest::None) | ||
514 | ++n; | ||
515 | lane = lane->selfdestruct_next; | ||
516 | } | ||
517 | } | ||
518 | // if timeout elapsed, or we know all threads have acted, stop waiting | ||
519 | std::chrono::time_point<std::chrono::steady_clock> t_now = std::chrono::steady_clock::now(); | ||
520 | if (n == 0 || (t_now >= t_until)) | ||
521 | { | ||
522 | DEBUGSPEW_CODE(fprintf(stderr, "%d uncancelled lane(s) remain after waiting %fs at process end.\n", n, shutdown_timeout.count())); | ||
523 | break; | ||
524 | } | ||
525 | } | ||
526 | } | ||
527 | |||
528 | // If some lanes are currently cleaning after themselves, wait until they are done. | ||
529 | // They are no longer listed in the selfdestruct chain, but they still have to lua_close(). | ||
530 | while (U->selfdestructing_count.load(std::memory_order_acquire) > 0) | ||
531 | { | ||
532 | std::this_thread::yield(); | ||
533 | } | ||
534 | } | ||
535 | |||
536 | // If after all this we still have some free-running lanes, it's an external user error: they should have been stopped appropriately | ||
537 | { | ||
538 | std::lock_guard<std::mutex> guard{ U->selfdestruct_cs }; | ||
539 | Lane* lane{ U->selfdestruct_first }; | ||
540 | if (lane != SELFDESTRUCT_END) | ||
541 | { | ||
542 | // this causes a leak because we don't call U's destructor (which could be bad if the still running lanes are accessing it) | ||
543 | luaL_error(L, "Zombie thread %s refuses to die!", lane->debug_name); // doesn't return | ||
544 | } | ||
545 | } | ||
546 | |||
547 | // necessary so that calling free_deep_prelude doesn't crash because linda_id expects a linda lightuserdata at absolute slot 1 | ||
548 | lua_settop(L, 0); | ||
549 | // no need to mutex-protect this as all threads in the universe are gone at that point | ||
550 | if (U->timer_deep != nullptr) // test in case some early internal error prevented Lanes from creating the deep timer | ||
551 | { | ||
552 | [[maybe_unused]] int const prev_ref_count{ U->timer_deep->m_refcount.fetch_sub(1, std::memory_order_relaxed) }; | ||
553 | ASSERT_L(prev_ref_count == 1); // this should be the last reference | ||
554 | free_deep_prelude(L, U->timer_deep); | ||
555 | U->timer_deep = nullptr; | ||
556 | } | ||
557 | |||
558 | close_keepers(U); | ||
559 | |||
560 | // remove the protected allocator, if any | ||
561 | U->protected_allocator.removeFrom(L); | ||
562 | |||
563 | U->Universe::~Universe(); | ||
564 | |||
565 | // universe is no longer available (nor necessary) | ||
566 | // we need to do this in case some deep userdata objects were created before Lanes was initialized, | ||
567 | // as potentially they will be garbage collected after Lanes at application shutdown | ||
568 | universe_store(L, nullptr); | ||
569 | return 0; | ||
570 | } | ||
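// The shutdown wait above reduces to "poll until a deadline expires or until no
// lane still has a pending cancel request". A compact sketch of that loop shape
// (the predicate is illustrative, not the actual Universe bookkeeping):
#include <chrono>
#include <functional>
#include <thread>

static bool poll_until(std::chrono::duration<double> timeout_, std::function<bool()> all_done_)
{
    auto const until_{ std::chrono::steady_clock::now() + std::chrono::duration_cast<std::chrono::steady_clock::duration>(timeout_) };
    while (!all_done_())
    {
        if (std::chrono::steady_clock::now() >= until_)
        {
            return false; // timed out with work still pending
        }
        std::this_thread::yield();
    }
    return true;
}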
571 | |||
572 | // ############################################################################################### | ||
573 | |||
574 | //--- | ||
575 | // = _single( [cores_uint=1] ) | ||
576 | // | ||
577 | // Limits the process to use only 'cores' CPU cores. To be used for performance | ||
578 | // testing on multicore devices. DEBUGGING ONLY! | ||
579 | // | ||
580 | LUAG_FUNC( set_singlethreaded) | ||
581 | { | ||
582 | lua_Integer cores = luaL_optinteger(L, 1, 1); | ||
583 | (void) cores; // prevent "unused" warning | ||
584 | |||
585 | #ifdef PLATFORM_OSX | ||
586 | #ifdef _UTILBINDTHREADTOCPU | ||
587 | if (cores > 1) | ||
588 | { | ||
589 | return luaL_error(L, "Limiting to N>1 cores not possible"); | ||
590 | } | ||
591 | // requires 'chudInitialize()' | ||
592 | utilBindThreadToCPU(0); // # of CPU to run on (we cannot limit to 2..N CPUs?) | ||
593 | return 0; | ||
594 | #else | ||
595 | return luaL_error(L, "Not available: compile with _UTILBINDTHREADTOCPU"); | ||
596 | #endif | ||
597 | #else | ||
598 | return luaL_error(L, "not implemented"); | ||
599 | #endif | ||
600 | } | ||
601 | |||
602 | // ############################################################################################### | ||
603 | |||
604 | /* | ||
605 | * str= lane_error( error_val|str ) | ||
606 | * | ||
607 | * Called if there's an error in some lane; add call stack to error message | ||
608 | * just like 'lua.c' normally does. | ||
609 | * | ||
610 | * ".. will be called with the error message and its return value will be the | ||
611 | * message returned on the stack by lua_pcall." | ||
612 | * | ||
613 | * Note: Rather than modifying the error message itself, it would be better | ||
614 | * to provide the call stack (as string) completely separated. This would | ||
615 | * work great with non-string error values as well (current system does not). | ||
616 | * (This is NOT possible with the Lua 5.1 'lua_pcall()'; we could of course | ||
617 | * implement a Lanes-specific 'pcall' of our own that does this). TBD!!! :) | ||
618 | * --AKa 22-Jan-2009 | ||
619 | */ | ||
620 | #if ERROR_FULL_STACK | ||
621 | |||
622 | // crc64/we of string "EXTENDED_STACKTRACE_REGKEY" generated at http://www.nitrxgen.net/hashgen/ | ||
623 | static constexpr UniqueKey EXTENDED_STACKTRACE_REGKEY{ 0x2357c69a7c92c936ull }; // used as registry key | ||
624 | |||
625 | LUAG_FUNC( set_error_reporting) | ||
626 | { | ||
627 | luaL_checktype(L, 1, LUA_TSTRING); | ||
628 | char const* mode{ lua_tostring(L, 1) }; | ||
629 | lua_pushliteral(L, "extended"); | ||
630 | bool const extended{ strcmp(mode, "extended") == 0 }; | ||
631 | bool const basic{ strcmp(mode, "basic") == 0 }; | ||
632 | if (!extended && !basic) | ||
633 | { | ||
634 | return luaL_error(L, "unsupported error reporting model %s", mode); | ||
635 | } | ||
636 | |||
637 | EXTENDED_STACKTRACE_REGKEY.setValue(L, [extended](lua_State* L) { lua_pushboolean(L, extended ? 1 : 0); }); | ||
638 | return 0; | ||
639 | } | ||
640 | |||
641 | [[nodiscard]] static int lane_error(lua_State* L) | ||
642 | { | ||
643 | // error message (any type) | ||
644 | STACK_CHECK_START_ABS(L, 1); // some_error | ||
645 | |||
646 | // Don't do stack survey for cancelled lanes. | ||
647 | // | ||
648 | if (CANCEL_ERROR.equals(L, 1)) | ||
649 | { | ||
650 | return 1; // just pass on | ||
651 | } | ||
652 | |||
653 | STACK_GROW(L, 3); | ||
654 | bool const extended{ EXTENDED_STACKTRACE_REGKEY.readBoolValue(L) }; | ||
655 | STACK_CHECK(L, 1); | ||
656 | |||
657 | // Place stack trace at 'registry[STACKTRACE_REGKEY]' for the 'lua_pcall()' | ||
658 | // caller to fetch. This bypasses the Lua 5.1 limitation of only one | ||
659 | // return value from error handler to 'lua_pcall()' caller. | ||
660 | |||
661 | // It's adequate to push stack trace as a table. This gives the receiver | ||
662 | // of the stack best means to format it to their liking. Also, it allows | ||
663 | // us to add more stack info later, if needed. | ||
664 | // | ||
665 | // table of { "sourcefile.lua:<line>", ... } | ||
666 | // | ||
667 | lua_newtable(L); // some_error {} | ||
668 | |||
669 | // Best to start from level 1, but in some cases it might be a C function | ||
670 | // and we don't get '.currentline' for that. It's okay - just keep level | ||
671 | // and table index growing separate. --AKa 22-Jan-2009 | ||
672 | // | ||
673 | lua_Debug ar; | ||
674 | for (int n = 1; lua_getstack(L, n, &ar); ++n) | ||
675 | { | ||
676 | lua_getinfo(L, extended ? "Sln" : "Sl", &ar); | ||
677 | if (extended) | ||
678 | { | ||
679 | lua_newtable(L); // some_error {} {} | ||
680 | |||
681 | lua_pushstring(L, ar.source); // some_error {} {} source | ||
682 | lua_setfield(L, -2, "source"); // some_error {} {} | ||
683 | |||
684 | lua_pushinteger(L, ar.currentline); // some_error {} {} currentline | ||
685 | lua_setfield(L, -2, "currentline"); // some_error {} {} | ||
686 | |||
687 | lua_pushstring(L, ar.name); // some_error {} {} name | ||
688 | lua_setfield(L, -2, "name"); // some_error {} {} | ||
689 | |||
690 | lua_pushstring(L, ar.namewhat); // some_error {} {} namewhat | ||
691 | lua_setfield(L, -2, "namewhat"); // some_error {} {} | ||
692 | |||
693 | lua_pushstring(L, ar.what); // some_error {} {} what | ||
694 | lua_setfield(L, -2, "what"); // some_error {} {} | ||
695 | } | ||
696 | else if (ar.currentline > 0) | ||
697 | { | ||
698 | lua_pushfstring(L, "%s:%d", ar.short_src, ar.currentline); // some_error {} "blah:blah" | ||
699 | } | ||
700 | else | ||
701 | { | ||
702 | lua_pushfstring(L, "%s:?", ar.short_src); // some_error {} "blah" | ||
703 | } | ||
704 | lua_rawseti(L, -2, (lua_Integer) n); // some_error {} | ||
705 | } | ||
706 | |||
707 | // store the stack trace table in the registry | ||
708 | STACKTRACE_REGKEY.setValue(L, [](lua_State* L) { lua_insert(L, -2); }); // some_error | ||
709 | |||
710 | STACK_CHECK(L, 1); | ||
711 | return 1; // the untouched error value | ||
712 | } | ||
713 | #endif // ERROR_FULL_STACK | ||
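// Minimal sketch of the technique lane_error() implements: a message handler that
// leaves the error value untouched and stashes a { "source:line", ... } table in
// the registry, so the lua_pcall() caller can retrieve it afterwards (a plain
// string key is used here for illustration; Lanes uses a UniqueKey).
static int sketch_error_handler(lua_State* L)
{
    lua_newtable(L);                                               // err {}
    lua_Debug ar;
    for (int level = 1; lua_getstack(L, level, &ar); ++level)
    {
        lua_getinfo(L, "Sl", &ar);
        lua_pushfstring(L, "%s:%d", ar.short_src, ar.currentline); // err {} "source:line"
        lua_rawseti(L, -2, level);                                 // err {}
    }
    lua_setfield(L, LUA_REGISTRYINDEX, "sketch_stacktrace");       // err
    return 1; // lua_pcall() receives the original error value
}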
714 | |||
715 | static void push_stack_trace( lua_State* L, int rc_, int stk_base_) | ||
716 | { | ||
717 | // the Lua 5.1 error handler is limited to one return value, so the stack trace was stored in the registry instead | ||
718 | switch( rc_) | ||
719 | { | ||
720 | case LUA_OK: // no error, body return values are on the stack | ||
721 | break; | ||
722 | |||
723 | case LUA_ERRRUN: // cancellation or a runtime error | ||
724 | #if ERROR_FULL_STACK // when ERROR_FULL_STACK, we installed a handler | ||
725 | { | ||
726 | STACK_CHECK_START_REL(L, 0); | ||
727 | // fetch the call stack table from the registry where the handler stored it | ||
728 | STACK_GROW(L, 1); | ||
729 | // yields nil if no stack was generated (in case of cancellation for example) | ||
730 | STACKTRACE_REGKEY.pushValue(L); // err trace|nil | ||
731 | STACK_CHECK(L, 1); | ||
732 | |||
733 | // For cancellation the error message is CANCEL_ERROR, and a stack trace isn't placed | ||
734 | // For other errors, the message can be whatever was thrown, and we should have a stack trace table | ||
735 | ASSERT_L(lua_type(L, 1 + stk_base_) == (CANCEL_ERROR.equals(L, stk_base_) ? LUA_TNIL : LUA_TTABLE)); | ||
736 | // Just leaving the stack trace table on the stack is enough to get it through to the master. | ||
737 | break; | ||
738 | } | ||
739 | #endif // fall through if not ERROR_FULL_STACK | ||
740 | |||
741 | case LUA_ERRMEM: // memory allocation error (handler not called) | ||
742 | case LUA_ERRERR: // error while running the error handler (if any, for example an out-of-memory condition) | ||
743 | default: | ||
744 | // we should have a single value which is either a string (the error message) or CANCEL_ERROR | ||
745 | ASSERT_L((lua_gettop(L) == stk_base_) && ((lua_type(L, stk_base_) == LUA_TSTRING) || CANCEL_ERROR.equals(L, stk_base_))); | ||
746 | break; | ||
747 | } | ||
748 | } | ||
749 | |||
750 | // ################################################################################################# | ||
751 | |||
752 | LUAG_FUNC(set_debug_threadname) | ||
753 | { | ||
754 | // fnv164 of string "debug_threadname" generated at https://www.pelock.com/products/hash-calculator | ||
755 | constexpr UniqueKey hidden_regkey{ 0x79C0669AAAE04440ull }; | ||
756 | // C s_lane structure is a light userdata upvalue | ||
757 | Lane* const lane{ lua_tolightuserdata<Lane>(L, lua_upvalueindex(1)) }; | ||
758 | luaL_checktype(L, -1, LUA_TSTRING); // "name" | ||
759 | lua_settop(L, 1); | ||
760 | STACK_CHECK_START_ABS(L, 1); | ||
761 | // store a hidden reference in the registry to make sure the string is kept around even if a lane decides to manually change the "decoda_name" global... | ||
762 | hidden_regkey.setValue(L, [](lua_State* L) { lua_pushvalue(L, -2); }); | ||
763 | STACK_CHECK(L, 1); | ||
764 | lane->debug_name = lua_tostring(L, -1); | ||
765 | // keep a direct pointer to the string | ||
766 | THREAD_SETNAME(lane->debug_name); | ||
767 | // to see VM name in Decoda debugger Virtual Machine window | ||
768 | lua_setglobal(L, "decoda_name"); // | ||
769 | STACK_CHECK(L, 0); | ||
770 | return 0; | ||
771 | } | ||
772 | |||
773 | // ################################################################################################# | ||
774 | |||
775 | LUAG_FUNC(get_debug_threadname) | ||
776 | { | ||
777 | Lane* const lane{ lua_toLane(L, 1) }; | ||
778 | luaL_argcheck(L, lua_gettop(L) == 1, 2, "too many arguments"); | ||
779 | lua_pushstring(L, lane->debug_name); | ||
780 | return 1; | ||
781 | } | ||
782 | |||
783 | // ################################################################################################# | ||
784 | |||
785 | LUAG_FUNC(set_thread_priority) | ||
786 | { | ||
787 | lua_Integer const prio{ luaL_checkinteger(L, 1) }; | ||
788 | // public Lanes API accepts a generic range -3/+3 | ||
789 | // that will be remapped into the platform-specific scheduler priority scheme | ||
790 | // On some platforms, -3 is equivalent to -2 and +3 to +2 | ||
791 | if (prio < THREAD_PRIO_MIN || prio > THREAD_PRIO_MAX) | ||
792 | { | ||
793 | return luaL_error(L, "priority out of range: %d..+%d (%d)", THREAD_PRIO_MIN, THREAD_PRIO_MAX, prio); | ||
794 | } | ||
795 | THREAD_SET_PRIORITY(static_cast<int>(prio), universe_get(L)->m_sudo); | ||
796 | return 0; | ||
797 | } | ||
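// Illustrative remap of the public -3..+3 range onto a native scheduler range;
// the real mapping is platform-specific and happens inside THREAD_SET_PRIORITY,
// so this is only a sketch of the idea.
static int remap_prio(int prio_ /* -3..+3 */, int native_min_, int native_max_)
{
    double const t{ (prio_ + 3) / 6.0 }; // normalize to 0..1
    return native_min_ + static_cast<int>(t * (native_max_ - native_min_) + 0.5);
}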
798 | |||
799 | // ################################################################################################# | ||
800 | |||
801 | LUAG_FUNC(set_thread_affinity) | ||
802 | { | ||
803 | lua_Integer const affinity{ luaL_checkinteger(L, 1) }; | ||
804 | if (affinity <= 0) | ||
805 | { | ||
806 | return luaL_error(L, "invalid affinity (%d)", affinity); | ||
807 | } | ||
808 | THREAD_SET_AFFINITY( static_cast<unsigned int>(affinity)); | ||
809 | return 0; | ||
810 | } | ||
811 | |||
812 | #if USE_DEBUG_SPEW() | ||
813 | // can't use direct LUA_x errcode indexing because the sequence is not the same between Lua 5.1 and 5.2 :-( | ||
814 | // LUA_ERRERR doesn't have the same value | ||
815 | struct errcode_name | ||
816 | { | ||
817 | int code; | ||
818 | char const* name; | ||
819 | }; | ||
820 | |||
821 | static struct errcode_name s_errcodes[] = | ||
822 | { | ||
823 | { LUA_OK, "LUA_OK"}, | ||
824 | { LUA_YIELD, "LUA_YIELD"}, | ||
825 | { LUA_ERRRUN, "LUA_ERRRUN"}, | ||
826 | { LUA_ERRSYNTAX, "LUA_ERRSYNTAX"}, | ||
827 | { LUA_ERRMEM, "LUA_ERRMEM"}, | ||
828 | { LUA_ERRGCMM, "LUA_ERRGCMM"}, | ||
829 | { LUA_ERRERR, "LUA_ERRERR"}, | ||
830 | }; | ||
831 | static char const* get_errcode_name( int _code) | ||
832 | { | ||
833 | int i; | ||
834 | for( i = 0; i < 7; ++ i) | ||
835 | { | ||
836 | if (s_errcodes[i].code == _code) | ||
837 | { | ||
838 | return s_errcodes[i].name; | ||
839 | } | ||
840 | } | ||
841 | return "<nullptr>"; | ||
842 | } | ||
843 | #endif // USE_DEBUG_SPEW() | ||
844 | |||
845 | static void lane_main(Lane* lane) | ||
846 | { | ||
847 | lua_State* const L{ lane->L }; | ||
848 | // wait until the launching thread has finished preparing L | ||
849 | lane->m_ready.wait(); | ||
850 | int rc{ LUA_ERRRUN }; | ||
851 | if (lane->m_status == Lane::Pending) // nothing wrong happened during preparation, we can work | ||
852 | { | ||
853 | // At this point, the lane function and arguments are on the stack | ||
854 | int const nargs{ lua_gettop(L) - 1 }; | ||
855 | DEBUGSPEW_CODE(Universe* U = universe_get(L)); | ||
856 | lane->m_status = Lane::Running; // Pending -> Running | ||
857 | |||
858 | // Tie "set_finalizer()" to the state | ||
859 | lua_pushcfunction(L, LG_set_finalizer); | ||
860 | populate_func_lookup_table(L, -1, "set_finalizer"); | ||
861 | lua_setglobal(L, "set_finalizer"); | ||
862 | |||
863 | // Tie "set_debug_threadname()" to the state | ||
864 | // But don't register it in the lookup database because of the Lane pointer upvalue | ||
865 | lua_pushlightuserdata(L, lane); | ||
866 | lua_pushcclosure(L, LG_set_debug_threadname, 1); | ||
867 | lua_setglobal(L, "set_debug_threadname"); | ||
868 | |||
869 | // Tie "cancel_test()" to the state | ||
870 | lua_pushcfunction(L, LG_cancel_test); | ||
871 | populate_func_lookup_table(L, -1, "cancel_test"); | ||
872 | lua_setglobal(L, "cancel_test"); | ||
873 | |||
874 | // this could be done in lane_new before the lane body function is pushed on the stack to avoid unnecessary stack slot shifting around | ||
875 | #if ERROR_FULL_STACK | ||
876 | // Tie "set_error_reporting()" to the state | ||
877 | lua_pushcfunction(L, LG_set_error_reporting); | ||
878 | populate_func_lookup_table(L, -1, "set_error_reporting"); | ||
879 | lua_setglobal(L, "set_error_reporting"); | ||
880 | |||
881 | STACK_GROW(L, 1); | ||
882 | lua_pushcfunction(L, lane_error); // func args handler | ||
883 | lua_insert(L, 1); // handler func args | ||
884 | #endif // ERROR_FULL_STACK | ||
885 | |||
886 | rc = lua_pcall(L, nargs, LUA_MULTRET, ERROR_FULL_STACK); // retvals|err | ||
887 | |||
888 | #if ERROR_FULL_STACK | ||
889 | lua_remove(L, 1); // retvals|error | ||
890 | #endif // ERROR_FULL_STACK | ||
891 | |||
892 | // in case of error and if it exists, fetch stack trace from registry and push it | ||
893 | push_stack_trace(L, rc, 1); // retvals|error [trace] | ||
894 | |||
895 | DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "Lane %p body: %s (%s)\n" INDENT_END, L, get_errcode_name(rc), CANCEL_ERROR.equals(L, 1) ? "cancelled" : lua_typename(L, lua_type(L, 1)))); | ||
896 | // STACK_DUMP(L); | ||
897 | // Call finalizers, if the script has set them up. | ||
898 | // | ||
899 | int rc2{ run_finalizers(L, rc) }; | ||
900 | DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "Lane %p finalizer: %s\n" INDENT_END, L, get_errcode_name(rc2))); | ||
901 | if (rc2 != LUA_OK) // Error within a finalizer! | ||
902 | { | ||
903 | // the finalizer generated an error, and left its own error message [and stack trace] on the stack | ||
904 | rc = rc2; // we're overruling the earlier script error or normal return | ||
905 | } | ||
906 | lane->m_waiting_on = nullptr; // just in case | ||
907 | if (selfdestruct_remove(lane)) // check and remove (under lock!) | ||
908 | { | ||
909 | // We're a free-running thread and no-one's there to clean us up. | ||
910 | lua_close(lane->L); | ||
911 | lane->L = nullptr; // just in case | ||
912 | lane->U->selfdestruct_cs.lock(); | ||
913 | // done with lua_close(), terminal shutdown sequence may proceed | ||
914 | lane->U->selfdestructing_count.fetch_sub(1, std::memory_order_release); | ||
915 | lane->U->selfdestruct_cs.unlock(); | ||
916 | |||
917 | // we are destroying our own jthread member from inside the thread body: detach instead of joining, since a thread cannot join itself | ||
918 | lane->m_thread.detach(); | ||
919 | delete lane; | ||
920 | lane = nullptr; | ||
921 | } | ||
922 | } | ||
923 | if (lane) | ||
924 | { | ||
925 | // leave results (1..top) or error message + stack trace (1..2) on the stack - master will copy them | ||
926 | |||
927 | Lane::Status st = (rc == LUA_OK) ? Lane::Done : CANCEL_ERROR.equals(L, 1) ? Lane::Cancelled : Lane::Error; | ||
928 | |||
929 | { | ||
930 | // 'm_done_mutex' protects the -> Done|Error|Cancelled state change | ||
931 | std::lock_guard lock{ lane->m_done_mutex }; | ||
932 | lane->m_status = st; | ||
933 | lane->m_done_signal.notify_one();// wake up master (while 'lane->m_done_mutex' is on) | ||
934 | } | ||
935 | } | ||
936 | } | ||
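The globals injected above (set_finalizer, set_debug_threadname, cancel_test) are what a lane body actually calls; a minimal sketch, assuming the standard module setup:

    local lanes = require "lanes".configure()
    lanes.gen("*", function()
        set_finalizer(function(err, stack_tbl)
            -- runs after the body returns or errors; 'err' is nil on a clean return
            if err then io.stderr:write("lane failed: ", tostring(err), "\n") end
        end)
        repeat
            -- do a slice of work, then poll for a pending soft cancel request
        until cancel_test()
    end)()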
937 | |||
938 | // ################################################################################################# | ||
939 | |||
940 | // --- If a client wants to transfer functions of a given module from the current state to another lane, the module must be required | ||
941 | // with lanes.require, which calls the regular 'require' and then populates the lookup database in the source lane | ||
942 | // module = lanes.require( "modname") | ||
943 | // upvalue[1]: _G.require | ||
944 | LUAG_FUNC(require) | ||
945 | { | ||
946 | char const* name = lua_tostring(L, 1); | ||
947 | int const nargs = lua_gettop(L); | ||
948 | DEBUGSPEW_CODE(Universe* U = universe_get(L)); | ||
949 | STACK_CHECK_START_REL(L, 0); | ||
950 | DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lanes.require %s BEGIN\n" INDENT_END, name)); | ||
951 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed)); | ||
952 | lua_pushvalue(L, lua_upvalueindex(1)); // "name" require | ||
953 | lua_insert(L, 1); // require "name" | ||
954 | lua_call(L, nargs, 1); // module | ||
955 | populate_func_lookup_table(L, -1, name); | ||
956 | DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lanes.require %s END\n" INDENT_END, name)); | ||
957 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed)); | ||
958 | STACK_CHECK(L, 0); | ||
959 | return 1; | ||
960 | } | ||
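Typical call site, so that the module's functions can later be transferred into lanes ("socket" is just an example module name):

    local lanes = require "lanes".configure()
    local socket = lanes.require "socket"  -- regular require, plus lookup-database registration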
961 | |||
962 | // ################################################################################################# | ||
963 | |||
964 | // --- If a client wants to transfer functions of a previously required module from the current state to another lane, the module must be registered | ||
965 | // to populate the lookup database in the source lane (and in the destination too, of course) | ||
966 | // lanes.register( "modname", module) | ||
967 | LUAG_FUNC(register) | ||
968 | { | ||
969 | char const* name = luaL_checkstring(L, 1); | ||
970 | LuaType const mod_type{ lua_type_as_enum(L, 2) }; | ||
971 | // ignore extra parameters, just in case | ||
972 | lua_settop(L, 2); | ||
973 | luaL_argcheck(L, (mod_type == LuaType::TABLE) || (mod_type == LuaType::FUNCTION), 2, "unexpected module type"); | ||
974 | DEBUGSPEW_CODE(Universe* U = universe_get(L)); | ||
975 | STACK_CHECK_START_REL(L, 0); // "name" mod_table | ||
976 | DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lanes.register %s BEGIN\n" INDENT_END, name)); | ||
977 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed)); | ||
978 | populate_func_lookup_table(L, -1, name); | ||
979 | DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lanes.register %s END\n" INDENT_END, name)); | ||
980 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed)); | ||
981 | STACK_CHECK(L, 0); | ||
982 | return 0; | ||
983 | } | ||
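For a module that was already loaded with plain require, registration is explicit ("mymodule" is a hypothetical name):

    local lanes = require "lanes".configure()
    local mymodule = require "mymodule"
    lanes.register("mymodule", mymodule)  -- make its functions transferable to lanes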
984 | |||
985 | // ################################################################################################# | ||
986 | |||
987 | // crc64/we of string "GCCB_KEY" generated at http://www.nitrxgen.net/hashgen/ | ||
988 | static constexpr UniqueKey GCCB_KEY{ 0xcfb1f046ef074e88ull }; | ||
989 | |||
990 | //--- | ||
991 | // lane_ud = lane_new( function | ||
992 | // , [libs_str] | ||
993 | // , [priority_int=0] | ||
994 | // , [globals_tbl] | ||
995 | // , [package_tbl] | ||
996 | // , [required_tbl] | ||
997 | // , [gc_cb_func] | ||
998 | // [, ... args ...]) | ||
999 | // | ||
1000 | // Upvalues: metatable to use for 'lane_ud' | ||
1001 | // | ||
1002 | LUAG_FUNC(lane_new) | ||
1003 | { | ||
1004 | char const* const libs_str{ lua_tostring(L, 2) }; | ||
1005 | bool const have_priority{ !lua_isnoneornil(L, 3) }; | ||
1006 | int const priority{ have_priority ? (int) lua_tointeger(L, 3) : THREAD_PRIO_DEFAULT }; | ||
1007 | int const globals_idx{ lua_isnoneornil(L, 4) ? 0 : 4 }; | ||
1008 | int const package_idx{ lua_isnoneornil(L, 5) ? 0 : 5 }; | ||
1009 | int const required_idx{ lua_isnoneornil(L, 6) ? 0 : 6 }; | ||
1010 | int const gc_cb_idx{ lua_isnoneornil(L, 7) ? 0 : 7 }; | ||
1011 | |||
1012 | static constexpr int FIXED_ARGS{ 7 }; | ||
1013 | int const nargs{ lua_gettop(L) - FIXED_ARGS }; | ||
1014 | Universe* const U{ universe_get(L) }; | ||
1015 | ASSERT_L( nargs >= 0); | ||
1016 | |||
1017 | // public Lanes API accepts a generic range -3/+3 | ||
1018 | // that will be remapped into the platform-specific scheduler priority scheme | ||
1019 | // On some platforms, -3 is equivalent to -2 and +3 to +2 | ||
1020 | if (have_priority && (priority < THREAD_PRIO_MIN || priority > THREAD_PRIO_MAX)) | ||
1021 | { | ||
1022 | return luaL_error(L, "Priority out of range: %d..+%d (%d)", THREAD_PRIO_MIN, THREAD_PRIO_MAX, priority); | ||
1023 | } | ||
1024 | |||
1025 | /* --- Create and prepare the sub state --- */ | ||
1026 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: setup\n" INDENT_END)); | ||
1027 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed)); | ||
1028 | |||
1029 | // populate with selected libraries at the same time | ||
1030 | lua_State* const L2{ luaG_newstate(U, Source{ L }, libs_str) }; // L // L2 | ||
1031 | |||
1032 | // 'lane' is allocated from heap, not Lua, since its life span may surpass the handle's (if free running thread) | ||
1033 | Lane* const lane{ new (U) Lane{ U, L2 } }; | ||
1034 | if (lane == nullptr) | ||
1035 | { | ||
1036 | return luaL_error(L, "could not create lane: out of memory"); | ||
1037 | } | ||
1038 | |||
1039 | class OnExit | ||
1040 | { | ||
1041 | private: | ||
1042 | |||
1043 | lua_State* const m_L; | ||
1044 | Lane* m_lane{ nullptr }; | ||
1045 | int const m_gc_cb_idx; | ||
1046 | DEBUGSPEW_CODE(Universe* const U); // for DEBUGSPEW only (hence the absence of m_ prefix) | ||
1047 | |||
1048 | public: | ||
1049 | |||
1050 | OnExit(lua_State* L_, Lane* lane_, int gc_cb_idx_ DEBUGSPEW_COMMA_PARAM(Universe* U_)) | ||
1051 | : m_L{ L_ } | ||
1052 | , m_lane{ lane_ } | ||
1053 | , m_gc_cb_idx{ gc_cb_idx_ } | ||
1054 | DEBUGSPEW_COMMA_PARAM(U{ U_ }) | ||
1055 | {} | ||
1056 | |||
1057 | ~OnExit() | ||
1058 | { | ||
1059 | if (m_lane) | ||
1060 | { | ||
1061 | // we still need a full userdata so that garbage collection can do its thing | ||
1062 | prepareUserData(); | ||
1063 | // leave a single cancel_error on the stack for the caller | ||
1064 | lua_settop(m_lane->L, 0); | ||
1065 | CANCEL_ERROR.pushKey(m_lane->L); | ||
1066 | { | ||
1067 | std::lock_guard lock{ m_lane->m_done_mutex }; | ||
1068 | m_lane->m_status = Lane::Cancelled; | ||
1069 | m_lane->m_done_signal.notify_one(); // wake up master (while 'lane->m_done_mutex' is on) | ||
1070 | } | ||
1071 | // unblock the thread so that it can terminate gracefully | ||
1072 | m_lane->m_ready.count_down(); | ||
1073 | } | ||
1074 | } | ||
1075 | |||
1076 | private: | ||
1077 | |||
1078 | void prepareUserData() | ||
1079 | { | ||
1080 | DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lane_new: preparing lane userdata\n" INDENT_END)); | ||
1081 | STACK_CHECK_START_REL(m_L, 0); | ||
1082 | // a Lane full userdata needs a single uservalue | ||
1083 | Lane** const ud{ lua_newuserdatauv<Lane*>(m_L, 1) }; // ... lane | ||
1084 | *ud = m_lane; // don't forget to store the pointer in the userdata! | ||
1085 | |||
1086 | // Set metatable for the userdata | ||
1087 | // | ||
1088 | lua_pushvalue(m_L, lua_upvalueindex(1)); // ... lane mt | ||
1089 | lua_setmetatable(m_L, -2); // ... lane | ||
1090 | STACK_CHECK(m_L, 1); | ||
1091 | |||
1092 | // Create uservalue for the userdata | ||
1093 | // (this is where lane body return values will be stored when the handle is indexed by a numeric key) | ||
1094 | lua_newtable(m_L); // ... lane uv | ||
1095 | |||
1096 | // Store the gc_cb callback in the uservalue | ||
1097 | if (m_gc_cb_idx > 0) | ||
1098 | { | ||
1099 | GCCB_KEY.pushKey(m_L); // ... lane uv k | ||
1100 | lua_pushvalue(m_L, m_gc_cb_idx); // ... lane uv k gc_cb | ||
1101 | lua_rawset(m_L, -3); // ... lane uv | ||
1102 | } | ||
1103 | |||
1104 | lua_setiuservalue(m_L, -2, 1); // ... lane | ||
1105 | STACK_CHECK(m_L, 1); | ||
1106 | } | ||
1107 | |||
1108 | public: | ||
1109 | |||
1110 | void success() | ||
1111 | { | ||
1112 | prepareUserData(); | ||
1113 | m_lane->m_ready.count_down(); | ||
1114 | m_lane = nullptr; | ||
1115 | } | ||
1116 | } onExit{ L, lane, gc_cb_idx DEBUGSPEW_COMMA_PARAM(U) }; | ||
1117 | // launch the thread early, it will sync with a std::latch to parallelize OS thread warmup and L2 preparation | ||
1118 | DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lane_new: launching thread\n" INDENT_END)); | ||
1119 | lane->startThread(priority); | ||
1120 | |||
1121 | STACK_GROW( L2, nargs + 3); // | ||
1122 | STACK_CHECK_START_REL(L2, 0); | ||
1123 | |||
1124 | STACK_GROW(L, 3); // func libs priority globals package required gc_cb [... args ...] | ||
1125 | STACK_CHECK_START_REL(L, 0); | ||
1126 | |||
1127 | // give the thread a default name ("Lane #<address>") so the VM shows up in the Decoda debugger | ||
1128 | lua_pushfstring( L2, "Lane #%p", L2); // "..." | ||
1129 | lua_setglobal( L2, "decoda_name"); // | ||
1130 | ASSERT_L( lua_gettop( L2) == 0); | ||
1131 | |||
1132 | // package | ||
1133 | if (package_idx != 0) | ||
1134 | { | ||
1135 | DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lane_new: update 'package'\n" INDENT_END)); | ||
1136 | // when copying with mode LookupMode::LaneBody, should raise an error in case of problem, not leave it on the stack | ||
1137 | [[maybe_unused]] InterCopyResult const ret{ luaG_inter_copy_package(U, Source{ L }, Dest{ L2 }, package_idx, LookupMode::LaneBody) }; | ||
1138 | ASSERT_L(ret == InterCopyResult::Success); // either all went well, or we should not even get here | ||
1139 | } | ||
1140 | |||
1141 | // modules to require in the target lane *before* the function is transferred! | ||
1142 | if (required_idx != 0) | ||
1143 | { | ||
1144 | int nbRequired = 1; | ||
1145 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: require 'required' list\n" INDENT_END)); | ||
1146 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed)); | ||
1147 | // should not happen, was checked in lanes.lua before calling lane_new() | ||
1148 | if (lua_type(L, required_idx) != LUA_TTABLE) | ||
1149 | { | ||
1150 | luaL_error(L, "expected required module list as a table, got %s", luaL_typename(L, required_idx)); // doesn't return | ||
1151 | } | ||
1152 | |||
1153 | lua_pushnil(L); // func libs priority globals package required gc_cb [... args ...] nil | ||
1154 | while (lua_next(L, required_idx) != 0) // func libs priority globals package required gc_cb [... args ...] n "modname" | ||
1155 | { | ||
1156 | if (lua_type(L, -1) != LUA_TSTRING || lua_type(L, -2) != LUA_TNUMBER || lua_tonumber(L, -2) != nbRequired) | ||
1157 | { | ||
1158 | luaL_error(L, "required module list should be a list of strings"); // doesn't return | ||
1159 | } | ||
1160 | else | ||
1161 | { | ||
1162 | // require the module in the target state, and populate the lookup table there too | ||
1163 | size_t len; | ||
1164 | char const* name = lua_tolstring(L, -1, &len); | ||
1165 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: require '%s'\n" INDENT_END, name)); | ||
1166 | |||
1167 | // require the module in the target lane | ||
1168 | lua_getglobal( L2, "require"); // require()? | ||
1169 | if (lua_isnil( L2, -1)) | ||
1170 | { | ||
1171 | lua_pop( L2, 1); // | ||
1172 | luaL_error(L, "cannot pre-require modules without loading 'package' library first"); // doesn't return | ||
1173 | } | ||
1174 | else | ||
1175 | { | ||
1176 | lua_pushlstring( L2, name, len); // require() name | ||
1177 | if (lua_pcall( L2, 1, 1, 0) != LUA_OK) // ret/errcode | ||
1178 | { | ||
1179 | // propagate error to main state if any | ||
1180 | std::ignore = luaG_inter_move(U | ||
1181 | , Source{ L2 }, Dest{ L } | ||
1182 | , 1, LookupMode::LaneBody | ||
1183 | ); // func libs priority globals package required gc_cb [... args ...] n "modname" error | ||
1184 | raise_lua_error(L); | ||
1185 | } | ||
1186 | // after requiring the module, register the functions it exported in our name<->function database | ||
1187 | populate_func_lookup_table( L2, -1, name); | ||
1188 | lua_pop( L2, 1); // | ||
1189 | } | ||
1190 | } | ||
1191 | lua_pop(L, 1); // func libs priority globals package required gc_cb [... args ...] n | ||
1192 | ++ nbRequired; | ||
1193 | } // func libs priority globals package required gc_cb [... args ...] | ||
1194 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed)); | ||
1195 | } | ||
1196 | STACK_CHECK(L, 0); | ||
1197 | STACK_CHECK(L2, 0); // | ||
1198 | |||
1199 | // Appending the specified globals to the global environment | ||
1200 | // *after* stdlibs have been loaded and modules required, in case we transfer references to native functions they exposed... | ||
1201 | // | ||
1202 | if (globals_idx != 0) | ||
1203 | { | ||
1204 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: transfer globals\n" INDENT_END)); | ||
1205 | if (!lua_istable(L, globals_idx)) | ||
1206 | { | ||
1207 | luaL_error(L, "Expected table, got %s", luaL_typename(L, globals_idx)); // doesn't return | ||
1208 | } | ||
1209 | |||
1210 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed)); | ||
1211 | lua_pushnil(L); // func libs priority globals package required gc_cb [... args ...] nil | ||
1212 | // Lua 5.2 wants us to push the globals table on the stack | ||
1213 | lua_pushglobaltable(L2); // _G | ||
1214 | while( lua_next(L, globals_idx)) // func libs priority globals package required gc_cb [... args ...] k v | ||
1215 | { | ||
1216 | std::ignore = luaG_inter_copy(U, Source{ L }, Dest{ L2 }, 2, LookupMode::LaneBody); // _G k v | ||
1217 | // assign it in L2's globals table | ||
1218 | lua_rawset(L2, -3); // _G | ||
1219 | lua_pop(L, 1); // func libs priority globals package required gc_cb [... args ...] k | ||
1220 | } // func libs priority globals package required gc_cb [... args ...] | ||
1221 | lua_pop( L2, 1); // | ||
1222 | |||
1223 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed)); | ||
1224 | } | ||
1225 | STACK_CHECK(L, 0); | ||
1226 | STACK_CHECK(L2, 0); | ||
1227 | |||
1228 | // Lane main function | ||
1229 | LuaType const func_type{ lua_type_as_enum(L, 1) }; | ||
1230 | if (func_type == LuaType::FUNCTION) | ||
1231 | { | ||
1232 | DEBUGSPEW_CODE(fprintf( stderr, INDENT_BEGIN "lane_new: transfer lane body\n" INDENT_END)); | ||
1233 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed)); | ||
1234 | lua_pushvalue(L, 1); // func libs priority globals package required gc_cb [... args ...] func | ||
1235 | InterCopyResult const res{ luaG_inter_move(U, Source{ L }, Dest{ L2 }, 1, LookupMode::LaneBody) }; // func libs priority globals package required gc_cb [... args ...] // func | ||
1236 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed)); | ||
1237 | if (res != InterCopyResult::Success) | ||
1238 | { | ||
1239 | luaL_error(L, "tried to copy unsupported types"); // doesn't return | ||
1240 | } | ||
1241 | } | ||
1242 | else if (func_type == LuaType::STRING) | ||
1243 | { | ||
1244 | DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lane_new: compile lane body\n" INDENT_END)); | ||
1245 | // compile the string | ||
1246 | if (luaL_loadstring(L2, lua_tostring(L, 1)) != 0) // func | ||
1247 | { | ||
1248 | luaL_error(L, "error when parsing lane function code"); // doesn't return | ||
1249 | } | ||
1250 | } | ||
1251 | else | ||
1252 | { | ||
1253 | luaL_error(L, "Expected function, got %s", lua_typename(L, func_type)); // doesn't return | ||
1254 | } | ||
1255 | STACK_CHECK(L, 0); | ||
1256 | STACK_CHECK(L2, 1); | ||
1257 | ASSERT_L(lua_isfunction(L2, 1)); | ||
1258 | |||
1259 | // revive arguments | ||
1260 | if (nargs > 0) | ||
1261 | { | ||
1262 | DEBUGSPEW_CODE(fprintf( stderr, INDENT_BEGIN "lane_new: transfer lane arguments\n" INDENT_END)); | ||
1263 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed)); | ||
1264 | InterCopyResult const res{ luaG_inter_move(U, Source{ L }, Dest{ L2 }, nargs, LookupMode::LaneBody) }; // func libs priority globals package required gc_cb // func [... args ...] | ||
1265 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed)); | ||
1266 | if (res != InterCopyResult::Success) | ||
1267 | { | ||
1268 | luaL_error(L, "tried to copy unsupported types"); // doesn't return | ||
1269 | } | ||
1270 | } | ||
1271 | STACK_CHECK(L, -nargs); | ||
1272 | ASSERT_L(lua_gettop( L) == FIXED_ARGS); | ||
1273 | |||
1274 | // Store 'lane' in the lane's registry, for 'cancel_test()' (we do cancel tests at pending send/receive). | ||
1275 | LANE_POINTER_REGKEY.setValue(L2, [lane](lua_State* L) { lua_pushlightuserdata(L, lane); }); // func [... args ...] | ||
1276 | STACK_CHECK(L2, 1 + nargs); | ||
1277 | |||
1278 | STACK_CHECK_RESET_REL(L, 0); | ||
1279 | // all went well, the lane's thread can start working | ||
1280 | onExit.success(); | ||
1281 | // we should have the lane userdata on top of the stack | ||
1282 | STACK_CHECK(L, 1); | ||
1283 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed)); | ||
1284 | return 1; | ||
1285 | } | ||
1286 | |||
1287 | // ################################################################################################# | ||
1288 | |||
1289 | //--- | ||
1290 | // = thread_gc( lane_ud ) | ||
1291 | // | ||
1292 | // Cleanup for a thread userdata. If the thread is still executing, leave it | ||
1293 | // alive as a free-running thread (will clean up itself). | ||
1294 | // | ||
1295 | // * Why NOT cancel/kill a loose thread: | ||
1296 | // | ||
1297 | // At least timer system uses a free-running thread, they should be handy | ||
1298 | // and the issue of canceling/killing threads at gc is not very nice, either | ||
1299 | // (would easily cause waits at gc cycle, which we don't want). | ||
1300 | // | ||
1301 | [[nodiscard]] static int lane_gc(lua_State* L) | ||
1302 | { | ||
1303 | bool have_gc_cb{ false }; | ||
1304 | Lane* const lane{ lua_toLane(L, 1) }; // ud | ||
1305 | |||
1306 | // is there a gc callback? | ||
1307 | lua_getiuservalue(L, 1, 1); // ud uservalue | ||
1308 | GCCB_KEY.pushKey(L); // ud uservalue __gc | ||
1309 | lua_rawget(L, -2); // ud uservalue gc_cb|nil | ||
1310 | if (!lua_isnil(L, -1)) | ||
1311 | { | ||
1312 | lua_remove(L, -2); // ud gc_cb|nil | ||
1313 | lua_pushstring(L, lane->debug_name); // ud gc_cb name | ||
1314 | have_gc_cb = true; | ||
1315 | } | ||
1316 | else | ||
1317 | { | ||
1318 | lua_pop(L, 2); // ud | ||
1319 | } | ||
1320 | |||
1321 | // We can read 'lane->status' without locks, but not wait for it | ||
1322 | if (lane->m_status < Lane::Done) | ||
1323 | { | ||
1324 | // still running: will have to be cleaned up later | ||
1325 | selfdestruct_add(lane); | ||
1326 | assert(lane->selfdestruct_next); | ||
1327 | if (have_gc_cb) | ||
1328 | { | ||
1329 | lua_pushliteral(L, "selfdestruct"); // ud gc_cb name status | ||
1330 | lua_call(L, 2, 0); // ud | ||
1331 | } | ||
1332 | return 0; | ||
1333 | } | ||
1334 | else if (lane->L) | ||
1335 | { | ||
1336 | // no longer accessing the Lua VM: we can close right now | ||
1337 | lua_close(lane->L); | ||
1338 | lane->L = nullptr; | ||
1339 | // just in case, but the lane will be freed soon so... | ||
1340 | lane->debug_name = "<gc>"; | ||
1341 | } | ||
1342 | |||
1343 | // Clean up after a (finished) thread | ||
1344 | delete lane; | ||
1345 | |||
1346 | // do this after lane cleanup in case the callback triggers an error | ||
1347 | if (have_gc_cb) | ||
1348 | { | ||
1349 | lua_pushliteral(L, "closed"); // ud gc_cb name status | ||
1350 | lua_call(L, 2, 0); // ud | ||
1351 | } | ||
1352 | return 0; | ||
1353 | } | ||
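The callback invoked here is the gc_cb stored by lane_new under GCCB_KEY; a hedged sketch of wiring it up through the generator options table:

    local lanes = require "lanes".configure()
    local gen = lanes.gen("*", {
        gc_cb = function(name, status)
            -- status is "closed" for a finished lane, "selfdestruct" for one still running at gc time
            print("collected lane", name, status)
        end,
    }, function() return 1 end)
    gen()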
1354 | |||
1355 | // ################################################################################################# | ||
1356 | |||
1357 | //--- | ||
1358 | // str= thread_status( lane ) | ||
1359 | // | ||
1360 | // Returns: "pending" not started yet | ||
1361 | // -> "running" started, doing its work.. | ||
1362 | // <-> "waiting" blocked in a receive() | ||
1363 | // -> "done" finished, results are there | ||
1364 | // / "error" finished at an error, error value is there | ||
1365 | // / "cancelled" execution cancelled by M (state gone) | ||
1366 | // | ||
1367 | [[nodiscard]] static char const* thread_status_string(Lane* lane_) | ||
1368 | { | ||
1369 | Lane::Status const st{ lane_->m_status }; // read just once (volatile) | ||
1370 | char const* str = | ||
1371 | (st == Lane::Pending) ? "pending" : | ||
1372 | (st == Lane::Running) ? "running" : // like in 'co.status()' | ||
1373 | (st == Lane::Waiting) ? "waiting" : | ||
1374 | (st == Lane::Done) ? "done" : | ||
1375 | (st == Lane::Error) ? "error" : | ||
1376 | (st == Lane::Cancelled) ? "cancelled" : nullptr; | ||
1377 | return str; | ||
1378 | } | ||
1379 | |||
1380 | // ################################################################################################# | ||
1381 | |||
1382 | void push_thread_status(lua_State* L, Lane* lane_) | ||
1383 | { | ||
1384 | char const* const str{ thread_status_string(lane_) }; | ||
1385 | ASSERT_L(str); | ||
1386 | |||
1387 | std::ignore = lua_pushstring(L, str); | ||
1388 | } | ||
1389 | |||
1390 | // ################################################################################################# | ||
1391 | |||
1392 | //--- | ||
1393 | // [...] | [nil, err_any, stack_tbl]= thread_join( lane_ud [, wait_secs=-1] ) | ||
1394 | // | ||
1395 | // timeout: returns nil | ||
1396 | // done: returns return values (0..N) | ||
1397 | // error: returns nil + error value [+ stack table] | ||
1398 | // cancelled: returns nil | ||
1399 | // | ||
1400 | LUAG_FUNC(thread_join) | ||
1401 | { | ||
1402 | Lane* const lane{ lua_toLane(L, 1) }; | ||
1403 | lua_Duration const duration{ luaL_optnumber(L, 2, -1.0) }; | ||
1404 | lua_State* const L2{ lane->L }; | ||
1405 | |||
1406 | bool const done{ !lane->m_thread.joinable() || lane->waitForCompletion(duration) }; | ||
1407 | if (!done || !L2) | ||
1408 | { | ||
1409 | STACK_GROW(L, 2); | ||
1410 | lua_pushnil(L); | ||
1411 | lua_pushliteral(L, "timeout"); | ||
1412 | return 2; | ||
1413 | } | ||
1414 | |||
1415 | STACK_CHECK_START_REL(L, 0); | ||
1416 | // Thread is Done/Error/Cancelled; all ours now | ||
1417 | |||
1418 | int ret{ 0 }; | ||
1419 | Universe* const U{ lane->U }; | ||
1420 | // debug_name is a pointer to a string possibly interned in the lane's state, which no longer exists once that state is closed | ||
1421 | // so store it in the userdata uservalue at a key that can't possibly collide | ||
1422 | securize_debug_threadname(L, lane); | ||
1423 | switch (lane->m_status) | ||
1424 | { | ||
1425 | case Lane::Done: | ||
1426 | { | ||
1427 | int const n{ lua_gettop(L2) }; // whole L2 stack | ||
1428 | if ((n > 0) && (luaG_inter_move(U, Source{ L2 }, Dest{ L }, n, LookupMode::LaneBody) != InterCopyResult::Success)) | ||
1429 | { | ||
1430 | luaL_error(L, "tried to copy unsupported types"); // doesn't return | ||
1431 | } | ||
1432 | ret = n; | ||
1433 | } | ||
1434 | break; | ||
1435 | |||
1436 | case Lane::Error: | ||
1437 | { | ||
1438 | int const n{ lua_gettop(L2) }; | ||
1439 | STACK_GROW(L, 3); | ||
1440 | lua_pushnil(L); | ||
1441 | // even when ERROR_FULL_STACK, if the error is not LUA_ERRRUN, the handler wasn't called, and we only have 1 error message on the stack ... | ||
1442 | if (luaG_inter_move(U, Source{ L2 }, Dest{ L }, n, LookupMode::LaneBody) != InterCopyResult::Success) // nil "err" [trace] | ||
1443 | { | ||
1444 | luaL_error(L, "tried to copy unsupported types: %s", lua_tostring(L, -n)); // doesn't return | ||
1445 | } | ||
1446 | ret = 1 + n; | ||
1447 | } | ||
1448 | break; | ||
1449 | |||
1450 | case Lane::Cancelled: | ||
1451 | ret = 0; | ||
1452 | break; | ||
1453 | |||
1454 | default: | ||
1455 | DEBUGSPEW_CODE(fprintf(stderr, "Status: %d\n", lane->m_status)); | ||
1456 | ASSERT_L(false); | ||
1457 | ret = 0; | ||
1458 | } | ||
1459 | lua_close(L2); | ||
1460 | lane->L = nullptr; | ||
1461 | STACK_CHECK(L, ret); | ||
1462 | return ret; | ||
1463 | } | ||
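Seen from Lua (the 10-second timeout is an arbitrary example):

    local lanes = require "lanes".configure()
    local h = lanes.gen("*", function() return 1, 2 end)()
    local a, b = h:join(10)   -- lane results on success...
    if a == nil then
        print(b)              -- ...or nil + "timeout", or nil + error value [+ stack table]
    end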
1464 | |||
1465 | |||
1466 | //--- | ||
1467 | // thread_index( ud, key) -> value | ||
1468 | // | ||
1469 | // If key is found in the environment, return it | ||
1470 | // If key is numeric, wait until the thread returns and populate the environment with the return values | ||
1471 | // If the return values signal an error, propagate it | ||
1472 | // If key is "status" return the thread status | ||
1473 | // Else raise an error | ||
1474 | LUAG_FUNC(thread_index) | ||
1475 | { | ||
1476 | static constexpr int UD{ 1 }; | ||
1477 | static constexpr int KEY{ 2 }; | ||
1478 | static constexpr int USR{ 3 }; | ||
1479 | Lane* const lane{ lua_toLane(L, UD) }; | ||
1480 | ASSERT_L(lua_gettop(L) == 2); | ||
1481 | |||
1482 | STACK_GROW(L, 8); // up to 8 positions are needed in case of error propagation | ||
1483 | |||
1484 | // If key is numeric, wait until the thread returns and populate the environment with the return values | ||
1485 | if (lua_type(L, KEY) == LUA_TNUMBER) | ||
1486 | { | ||
1487 | // first, check that we don't already have an environment that holds the requested value | ||
1488 | { | ||
1489 | // If key is found in the uservalue, return it | ||
1490 | lua_getiuservalue(L, UD, 1); | ||
1491 | lua_pushvalue(L, KEY); | ||
1492 | lua_rawget(L, USR); | ||
1493 | if (!lua_isnil(L, -1)) | ||
1494 | { | ||
1495 | return 1; | ||
1496 | } | ||
1497 | lua_pop(L, 1); | ||
1498 | } | ||
1499 | { | ||
1500 | // check if we already fetched the values from the thread or not | ||
1501 | lua_Integer key = lua_tointeger(L, KEY); | ||
1502 | lua_pushinteger(L, 0); | ||
1503 | lua_rawget(L, USR); | ||
1504 | bool const fetched{ !lua_isnil(L, -1) }; | ||
1505 | lua_pop(L, 1); // back to our 2 args + uservalue on the stack | ||
1506 | if (!fetched) | ||
1507 | { | ||
1508 | lua_pushinteger(L, 0); | ||
1509 | lua_pushboolean(L, 1); | ||
1510 | lua_rawset(L, USR); | ||
1511 | // wait until thread has completed | ||
1512 | lua_pushcfunction(L, LG_thread_join); | ||
1513 | lua_pushvalue(L, UD); | ||
1514 | lua_call(L, 1, LUA_MULTRET); // all return values are on the stack, at slots 4+ | ||
1515 | switch (lane->m_status) | ||
1516 | { | ||
1517 | default: | ||
1518 | // this is an internal error, we probably never get here | ||
1519 | lua_settop(L, 0); | ||
1520 | lua_pushliteral(L, "Unexpected status: "); | ||
1521 | lua_pushstring(L, thread_status_string(lane)); | ||
1522 | lua_concat(L, 2); | ||
1523 | raise_lua_error(L); | ||
1524 | [[fallthrough]]; // fall through if we are killed, as we got nil, "killed" on the stack | ||
1525 | |||
1526 | case Lane::Done: // got regular return values | ||
1527 | { | ||
1528 | int const nvalues{ lua_gettop(L) - 3 }; | ||
1529 | for (int i = nvalues; i > 0; --i) | ||
1530 | { | ||
1531 | // pop the last element of the stack, to store it in the uservalue at its proper index | ||
1532 | lua_rawseti(L, USR, i); | ||
1533 | } | ||
1534 | } | ||
1535 | break; | ||
1536 | |||
1537 | case Lane::Error: // got 3 values: nil, errstring, callstack table | ||
1538 | // me[-2] could carry the stack table, but even | ||
1539 | // me[-1] is rather unnecessary (and undocumented); | ||
1540 | // use ':join()' instead. --AKa 22-Jan-2009 | ||
1541 | ASSERT_L(lua_isnil(L, 4) && !lua_isnil(L, 5) && lua_istable(L, 6)); | ||
1542 | // store errstring at key -1 | ||
1543 | lua_pushnumber(L, -1); | ||
1544 | lua_pushvalue(L, 5); | ||
1545 | lua_rawset(L, USR); | ||
1546 | break; | ||
1547 | |||
1548 | case Lane::Cancelled: | ||
1549 | // do nothing | ||
1550 | break; | ||
1551 | } | ||
1552 | } | ||
1553 | lua_settop(L, 3); // UD KEY ENV | ||
1554 | if (key != -1) | ||
1555 | { | ||
1556 | lua_pushnumber(L, -1); // UD KEY ENV -1 | ||
1557 | lua_rawget(L, USR); // UD KEY ENV "error" | ||
1558 | if (!lua_isnil(L, -1)) // an error was stored | ||
1559 | { | ||
1560 | // Note: Lua 5.1 interpreter is not prepared to show | ||
1561 | // non-string errors, so we use 'tostring()' here | ||
1562 | // to get meaningful output. --AKa 22-Jan-2009 | ||
1563 | // | ||
1564 | // Also, the stack dump we get is no good; it only | ||
1565 | // lists our internal Lanes functions. There seems | ||
1566 | // to be no way to switch it off, though. | ||
1567 | // | ||
1568 | // Level 3 should show the line where 'h[x]' was read | ||
1569 | // but this only seems to work for string messages | ||
1570 | // (Lua 5.1.4). No idea, why. --AKa 22-Jan-2009 | ||
1571 | lua_getmetatable(L, UD); // UD KEY ENV "error" mt | ||
1572 | lua_getfield(L, -1, "cached_error"); // UD KEY ENV "error" mt error() | ||
1573 | lua_getfield(L, -2, "cached_tostring"); // UD KEY ENV "error" mt error() tostring() | ||
1574 | lua_pushvalue(L, 4); // UD KEY ENV "error" mt error() tostring() "error" | ||
1575 | lua_call(L, 1, 1); // tostring( errstring) -- just in case // UD KEY ENV "error" mt error() "error" | ||
1576 | lua_pushinteger(L, 3); // UD KEY ENV "error" mt error() "error" 3 | ||
1577 | lua_call(L, 2, 0); // error( tostring( errstring), 3) // UD KEY ENV "error" mt | ||
1578 | } | ||
1579 | else | ||
1580 | { | ||
1581 | lua_pop(L, 1); // back to our 3 arguments on the stack | ||
1582 | } | ||
1583 | } | ||
1584 | lua_rawgeti(L, USR, (int)key); | ||
1585 | } | ||
1586 | return 1; | ||
1587 | } | ||
1588 | if (lua_type(L, KEY) == LUA_TSTRING) | ||
1589 | { | ||
1590 | char const* const keystr{ lua_tostring(L, KEY) }; | ||
1591 | lua_settop(L, 2); // keep only our original arguments on the stack | ||
1592 | if (strcmp( keystr, "status") == 0) | ||
1593 | { | ||
1594 | push_thread_status(L, lane); // push the string representing the status | ||
1595 | return 1; | ||
1596 | } | ||
1597 | // return UD.metatable[key] | ||
1598 | lua_getmetatable(L, UD); // UD KEY mt | ||
1599 | lua_replace(L, -3); // mt KEY | ||
1600 | lua_rawget(L, -2); // mt value | ||
1601 | // only "cancel", "join" and "get_debug_threadname" are registered as functions, any other string will raise an error | ||
1602 | if (lua_iscfunction(L, -1)) | ||
1603 | { | ||
1604 | return 1; | ||
1605 | } | ||
1606 | return luaL_error(L, "can't index a lane with '%s'", keystr); | ||
1607 | } | ||
1608 | // unknown key | ||
1609 | lua_getmetatable(L, UD); | ||
1610 | lua_getfield(L, -1, "cached_error"); | ||
1611 | lua_pushliteral(L, "Unknown key: "); | ||
1612 | lua_pushvalue(L, KEY); | ||
1613 | lua_concat(L, 2); | ||
1614 | lua_call(L, 1, 0); // error( "Unknown key: " .. key) -> doesn't return | ||
1615 | return 0; | ||
1616 | } | ||
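This metamethod is what makes status queries and result indexing work on a lane handle; a short sketch:

    local lanes = require "lanes".configure()
    local h = lanes.gen("*", function() return "done!" end)()
    print(h.status) -- "pending", "running", "done", "error", "cancelled", ...
    print(h[1])     -- waits for completion if needed, then returns the first return value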
1617 | |||
1618 | #if HAVE_LANE_TRACKING() | ||
1619 | //--- | ||
1620 | // threads() -> {}|nil | ||
1621 | // | ||
1622 | // Return a list of all known lanes | ||
1623 | LUAG_FUNC(threads) | ||
1624 | { | ||
1625 | int const top{ lua_gettop(L) }; | ||
1626 | Universe* const U{ universe_get(L) }; | ||
1627 | |||
1628 | // List _all_ still running threads | ||
1629 | // | ||
1630 | std::lock_guard<std::mutex> guard{ U->tracking_cs }; | ||
1631 | if (U->tracking_first && U->tracking_first != TRACKING_END) | ||
1632 | { | ||
1633 | Lane* lane{ U->tracking_first }; | ||
1634 | int index = 0; | ||
1635 | lua_newtable(L); // {} | ||
1636 | while (lane != TRACKING_END) | ||
1637 | { | ||
1638 | // insert a { name, status } tuple, so that several lanes with the same name can't clobber each other | ||
1639 | lua_newtable(L); // {} {} | ||
1640 | lua_pushstring(L, lane->debug_name); // {} {} "name" | ||
1641 | lua_setfield(L, -2, "name"); // {} {} | ||
1642 | push_thread_status(L, lane); // {} {} "status" | ||
1643 | lua_setfield(L, -2, "status"); // {} {} | ||
1644 | lua_rawseti(L, -2, ++index); // {} | ||
1645 | lane = lane->tracking_next; | ||
1646 | } | ||
1647 | } | ||
1648 | return lua_gettop(L) - top; // 0 or 1 | ||
1649 | } | ||
1650 | #endif // HAVE_LANE_TRACKING() | ||
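threads() is only registered when the track_lanes setting is enabled (see configure below); a sketch:

    local lanes = require "lanes".configure{ track_lanes = true }
    for _, t in ipairs(lanes.threads() or {}) do
        print(t.name, t.status)
    end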
1651 | |||
1652 | /* | ||
1653 | * ############################################################################################### | ||
1654 | * ######################################## Timer support ######################################## | ||
1655 | * ############################################################################################### | ||
1656 | */ | ||
1657 | |||
1658 | /* | ||
1659 | * secs = now_secs() | ||
1660 | * | ||
1661 | * Returns the current time, as seconds. Resolution depends on std::system_clock implementation | ||
1662 | * Can't use std::chrono::steady_clock because we need the same baseline as std::mktime | ||
1663 | */ | ||
1664 | LUAG_FUNC(now_secs) | ||
1665 | { | ||
1666 | auto const now{ std::chrono::system_clock::now() }; | ||
1667 | lua_Duration duration { now.time_since_epoch() }; | ||
1668 | |||
1669 | lua_pushnumber(L, duration.count()); | ||
1670 | return 1; | ||
1671 | } | ||
1672 | |||
1673 | // ################################################################################################# | ||
1674 | |||
1675 | /* | ||
1676 | * wakeup_at_secs= wakeup_conv(date_tbl) | ||
1677 | */ | ||
1678 | LUAG_FUNC(wakeup_conv) | ||
1679 | { | ||
1680 | // date_tbl | ||
1681 | // .year (four digits) | ||
1682 | // .month (1..12) | ||
1683 | // .day (1..31) | ||
1684 | // .hour (0..23) | ||
1685 | // .min (0..59) | ||
1686 | // .sec (0..61) | ||
1687 | // .yday (day of the year) | ||
1688 | // .isdst (daylight saving on/off) | ||
1689 | |||
1690 | STACK_CHECK_START_REL(L, 0); | ||
1691 | auto readInteger = [L](char const* name_) | ||
1692 | { | ||
1693 | lua_getfield(L, 1, name_); | ||
1694 | lua_Integer const val{ lua_tointeger(L, -1) }; | ||
1695 | lua_pop(L, 1); | ||
1696 | return static_cast<int>(val); | ||
1697 | }; | ||
1698 | int const year{ readInteger("year") }; | ||
1699 | int const month{ readInteger("month") }; | ||
1700 | int const day{ readInteger("day") }; | ||
1701 | int const hour{ readInteger("hour") }; | ||
1702 | int const min{ readInteger("min") }; | ||
1703 | int const sec{ readInteger("sec") }; | ||
1704 | STACK_CHECK(L, 0); | ||
1705 | |||
1706 | // If Lua table has '.isdst' we trust that. If it does not, we'll let | ||
1707 | // 'mktime' decide on whether the time is within DST or not (value -1). | ||
1708 | // | ||
1709 | lua_getfield(L, 1, "isdst" ); | ||
1710 | int const isdst{ lua_isboolean(L, -1) ? lua_toboolean(L, -1) : -1 }; | ||
1711 | lua_pop(L,1); | ||
1712 | STACK_CHECK(L, 0); | ||
1713 | |||
1714 | std::tm t{}; | ||
1715 | t.tm_year = year - 1900; | ||
1716 | t.tm_mon= month-1; // 0..11 | ||
1717 | t.tm_mday= day; // 1..31 | ||
1718 | t.tm_hour= hour; // 0..23 | ||
1719 | t.tm_min= min; // 0..59 | ||
1720 | t.tm_sec= sec; // 0..60 | ||
1721 | t.tm_isdst= isdst; // 0/1/negative | ||
1722 | |||
1723 | lua_pushnumber(L, static_cast<lua_Number>(std::mktime(&t))); // resolution: 1 second | ||
1724 | return 1; | ||
1725 | } | ||
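The accepted field layout is the one os.time() consumes; for comparison, the stock-Lua equivalent of the conversion performed here (field values are arbitrary):

    local secs = os.time{ year = 2024, month = 4, day = 11, hour = 12, min = 30, sec = 0 }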
1726 | |||
1727 | /* | ||
1728 | * ############################################################################################### | ||
1729 | * ######################################## Module linkage ####################################### | ||
1730 | * ############################################################################################### | ||
1731 | */ | ||
1732 | |||
1733 | extern int LG_linda(lua_State* L); | ||
1734 | static struct luaL_Reg const lanes_functions[] = | ||
1735 | { | ||
1736 | { "linda", LG_linda }, | ||
1737 | { "now_secs", LG_now_secs }, | ||
1738 | { "wakeup_conv", LG_wakeup_conv }, | ||
1739 | { "set_thread_priority", LG_set_thread_priority }, | ||
1740 | { "set_thread_affinity", LG_set_thread_affinity }, | ||
1741 | { "nameof", luaG_nameof }, | ||
1742 | { "register", LG_register }, | ||
1743 | { "set_singlethreaded", LG_set_singlethreaded }, | ||
1744 | { nullptr, nullptr } | ||
1745 | }; | ||
1746 | |||
1747 | // ################################################################################################# | ||
1748 | |||
1749 | // upvalue 1: module name | ||
1750 | // upvalue 2: module table | ||
1751 | // param 1: settings table | ||
1752 | LUAG_FUNC(configure) | ||
1753 | { | ||
1754 | // start with one-time initializations. | ||
1755 | { | ||
1756 | // C++ guarantees that the static variable initialization is threadsafe. | ||
1757 | static auto _ = std::invoke( | ||
1758 | []() | ||
1759 | { | ||
1760 | #if (defined PLATFORM_OSX) && (defined _UTILBINDTHREADTOCPU) | ||
1761 | chudInitialize(); | ||
1762 | #endif | ||
1763 | return false; | ||
1764 | } | ||
1765 | ); | ||
1766 | } | ||
1767 | |||
1768 | Universe* U = universe_get(L); | ||
1769 | bool const from_master_state{ U == nullptr }; | ||
1770 | char const* name = luaL_checkstring(L, lua_upvalueindex(1)); | ||
1771 | ASSERT_L(lua_type(L, 1) == LUA_TTABLE); | ||
1772 | |||
1773 | STACK_GROW(L, 4); | ||
1774 | STACK_CHECK_START_ABS(L, 1); // settings | ||
1775 | |||
1776 | DEBUGSPEW_CODE(fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() BEGIN\n" INDENT_END, L)); | ||
1777 | DEBUGSPEW_CODE(if (U) U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed)); | ||
1778 | |||
1779 | if (U == nullptr) | ||
1780 | { | ||
1781 | U = universe_create(L); // settings universe | ||
1782 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed)); | ||
1783 | lua_newtable( L); // settings universe mt | ||
1784 | lua_getfield(L, 1, "shutdown_timeout"); // settings universe mt shutdown_timeout | ||
1785 | lua_getfield(L, 1, "shutdown_mode"); // settings universe mt shutdown_timeout shutdown_mode | ||
1786 | lua_pushcclosure(L, universe_gc, 2); // settings universe mt universe_gc | ||
1787 | lua_setfield(L, -2, "__gc"); // settings universe mt | ||
1788 | lua_setmetatable(L, -2); // settings universe | ||
1789 | lua_pop(L, 1); // settings | ||
1790 | lua_getfield(L, 1, "verbose_errors"); // settings verbose_errors | ||
1791 | U->verboseErrors = lua_toboolean(L, -1) ? true : false; | ||
1792 | lua_pop(L, 1); // settings | ||
1793 | lua_getfield(L, 1, "demote_full_userdata"); // settings demote_full_userdata | ||
1794 | U->demoteFullUserdata = lua_toboolean(L, -1) ? true : false; | ||
1795 | lua_pop(L, 1); // settings | ||
1796 | #if HAVE_LANE_TRACKING() | ||
1797 | lua_getfield(L, 1, "track_lanes"); // settings track_lanes | ||
1798 | U->tracking_first = lua_toboolean(L, -1) ? TRACKING_END : nullptr; | ||
1799 | lua_pop(L, 1); // settings | ||
1800 | #endif // HAVE_LANE_TRACKING() | ||
1801 | // Linked chains handling | ||
1802 | U->selfdestruct_first = SELFDESTRUCT_END; | ||
1803 | initialize_allocator_function( U, L); | ||
1804 | initialize_on_state_create( U, L); | ||
1805 | init_keepers( U, L); | ||
1806 | STACK_CHECK(L, 1); | ||
1807 | |||
1808 | // Initialize 'timer_deep'; a common Linda object shared by all states | ||
1809 | lua_pushcfunction(L, LG_linda); // settings lanes.linda | ||
1810 | lua_pushliteral(L, "lanes-timer"); // settings lanes.linda "lanes-timer" | ||
1811 | lua_call(L, 1, 1); // settings linda | ||
1812 | STACK_CHECK(L, 2); | ||
1813 | |||
1814 | // The proxy userdata's only content is a 'DeepPrelude*' pointer | ||
1815 | U->timer_deep = *lua_tofulluserdata<DeepPrelude*>(L, -1); | ||
1816 | // increment refcount so that this linda remains alive as long as the universe exists. | ||
1817 | U->timer_deep->m_refcount.fetch_add(1, std::memory_order_relaxed); | ||
1818 | lua_pop(L, 1); // settings | ||
1819 | } | ||
1820 | STACK_CHECK(L, 1); | ||
1821 | |||
1822 | // Serialize calls to 'require' from now on, also in the primary state | ||
1823 | serialize_require( DEBUGSPEW_PARAM_COMMA( U) L); | ||
1824 | |||
1825 | // Retrieve main module interface table | ||
1826 | lua_pushvalue(L, lua_upvalueindex( 2)); // settings M | ||
1827 | // remove configure() (this function) from the module interface | ||
1828 | lua_pushnil( L); // settings M nil | ||
1829 | lua_setfield(L, -2, "configure"); // settings M | ||
1830 | // add functions to the module's table | ||
1831 | luaG_registerlibfuncs(L, lanes_functions); | ||
1832 | #if HAVE_LANE_TRACKING() | ||
1833 | // register core.threads() only if settings say it should be available | ||
1834 | if (U->tracking_first != nullptr) | ||
1835 | { | ||
1836 | lua_pushcfunction(L, LG_threads); // settings M LG_threads() | ||
1837 | lua_setfield(L, -2, "threads"); // settings M | ||
1838 | } | ||
1839 | #endif // HAVE_LANE_TRACKING() | ||
1840 | STACK_CHECK(L, 2); | ||
1841 | |||
1842 | { | ||
1843 | char const* errmsg{ push_deep_proxy(Dest{ L }, U->timer_deep, 0, LookupMode::LaneBody) }; // settings M timer_deep | ||
1844 | if (errmsg != nullptr) | ||
1845 | { | ||
1846 | return luaL_error(L, errmsg); | ||
1847 | } | ||
1848 | lua_setfield(L, -2, "timer_gateway"); // settings M | ||
1849 | } | ||
1850 | STACK_CHECK(L, 2); | ||
1851 | |||
1852 | // prepare the metatable for threads | ||
1853 | // contains keys: { __gc, __index, cached_error, cached_tostring, cancel, join, get_debug_threadname } | ||
1854 | // | ||
1855 | if (luaL_newmetatable(L, "Lane")) // settings M mt | ||
1856 | { | ||
1857 | lua_pushcfunction(L, lane_gc); // settings M mt lane_gc | ||
1858 | lua_setfield(L, -2, "__gc"); // settings M mt | ||
1859 | lua_pushcfunction(L, LG_thread_index); // settings M mt LG_thread_index | ||
1860 | lua_setfield(L, -2, "__index"); // settings M mt | ||
1861 | lua_getglobal(L, "error"); // settings M mt error | ||
1862 | ASSERT_L( lua_isfunction(L, -1)); | ||
1863 | lua_setfield(L, -2, "cached_error"); // settings M mt | ||
1864 | lua_getglobal(L, "tostring"); // settings M mt tostring | ||
1865 | ASSERT_L( lua_isfunction(L, -1)); | ||
1866 | lua_setfield(L, -2, "cached_tostring"); // settings M mt | ||
1867 | lua_pushcfunction(L, LG_thread_join); // settings M mt LG_thread_join | ||
1868 | lua_setfield(L, -2, "join"); // settings M mt | ||
1869 | lua_pushcfunction(L, LG_get_debug_threadname); // settings M mt LG_get_debug_threadname | ||
1870 | lua_setfield(L, -2, "get_debug_threadname"); // settings M mt | ||
1871 | lua_pushcfunction(L, LG_thread_cancel); // settings M mt LG_thread_cancel | ||
1872 | lua_setfield(L, -2, "cancel"); // settings M mt | ||
1873 | lua_pushliteral(L, "Lane"); // settings M mt "Lane" | ||
1874 | lua_setfield(L, -2, "__metatable"); // settings M mt | ||
1875 | } | ||
1876 | |||
1877 | lua_pushcclosure(L, LG_lane_new, 1); // settings M lane_new | ||
1878 | lua_setfield(L, -2, "lane_new"); // settings M | ||
1879 | |||
1880 | // we can't register 'lanes.require' normally because we want to create an upvalued closure | ||
1881 | lua_getglobal(L, "require"); // settings M require | ||
1882 | lua_pushcclosure(L, LG_require, 1); // settings M lanes.require | ||
1883 | lua_setfield(L, -2, "require"); // settings M | ||
1884 | |||
1885 | lua_pushfstring( | ||
1886 | L, "%d.%d.%d" | ||
1887 | , LANES_VERSION_MAJOR, LANES_VERSION_MINOR, LANES_VERSION_PATCH | ||
1888 | ); // settings M VERSION | ||
1889 | lua_setfield(L, -2, "version"); // settings M | ||
1890 | |||
1891 | lua_pushinteger(L, THREAD_PRIO_MAX); // settings M THREAD_PRIO_MAX | ||
1892 | lua_setfield(L, -2, "max_prio"); // settings M | ||
1893 | |||
1894 | CANCEL_ERROR.pushKey(L); // settings M CANCEL_ERROR | ||
1895 | lua_setfield(L, -2, "cancel_error"); // settings M | ||
1896 | |||
1897 | STACK_CHECK(L, 2); // reference stack contains only the function argument 'settings' | ||
1898 | // we'll need this every time we transfer some C function from/to this state | ||
1899 | LOOKUP_REGKEY.setValue(L, [](lua_State* L) { lua_newtable(L); }); // settings M | ||
1900 | STACK_CHECK(L, 2); | ||
1901 | |||
1902 | // register all native functions found in that module in the transferable functions database | ||
1903 | // we process it before _G because we don't want to find the module when scanning _G (this would generate longer names) | ||
1904 | // for example in package.loaded["lanes.core"].* | ||
1905 | populate_func_lookup_table(L, -1, name); | ||
1906 | STACK_CHECK(L, 2); | ||
1907 | |||
1908 | // record all existing C/JIT-fast functions | ||
1909 | // Lua 5.2 no longer has LUA_GLOBALSINDEX: we must push globals table on the stack | ||
1910 | if (from_master_state) | ||
1911 | { | ||
1912 | // don't do this when called during the initialization of a new lane, | ||
1913 | // because we will do it after on_state_create() is called, | ||
1914 | // and we don't want to skip _G because of caching in case globals are created then | ||
1915 | lua_pushglobaltable( L); // settings M _G | ||
1916 | populate_func_lookup_table(L, -1, nullptr); | ||
1917 | lua_pop(L, 1); // settings M | ||
1918 | } | ||
1919 | lua_pop(L, 1); // settings | ||
1920 | |||
1921 | // set _R[CONFIG_REGKEY] = settings | ||
1922 | CONFIG_REGKEY.setValue(L, [](lua_State* L) { lua_pushvalue(L, -2); }); | ||
1923 | STACK_CHECK(L, 1); | ||
1924 | DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "%p: lanes.configure() END\n" INDENT_END, L)); | ||
1925 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed)); | ||
1926 | // Return the settings table | ||
1927 | return 1; | ||
1928 | } | ||
1929 | |||
1930 | // ################################################################################################# | ||
1931 | |||
1932 | #if defined PLATFORM_WIN32 && !defined NDEBUG | ||
1933 | #include <signal.h> | ||
1934 | #include <conio.h> | ||
1935 | |||
1936 | void signal_handler(int signal) | ||
1937 | { | ||
1938 | if (signal == SIGABRT) | ||
1939 | { | ||
1940 | _cprintf("caught abnormal termination!"); | ||
1941 | abort(); | ||
1942 | } | ||
1943 | } | ||
1944 | |||
1945 | // helper to get correct callstacks when a Win32 process crashes on 64-bit Windows | ||
1946 | // don't forget to toggle Debug/Exceptions/Win32 in Visual Studio too! | ||
1947 | static volatile long s_ecoc_initCount = 0; | ||
1948 | static volatile int s_ecoc_go_ahead = 0; | ||
1949 | static void EnableCrashingOnCrashes(void) | ||
1950 | { | ||
1951 | if (InterlockedCompareExchange(&s_ecoc_initCount, 1, 0) == 0) | ||
1952 | { | ||
1953 | typedef BOOL(WINAPI * tGetPolicy)(LPDWORD lpFlags); | ||
1954 | typedef BOOL(WINAPI * tSetPolicy)(DWORD dwFlags); | ||
1955 | const DWORD EXCEPTION_SWALLOWING = 0x1; | ||
1956 | |||
1957 | HMODULE kernel32 = LoadLibraryA("kernel32.dll"); | ||
1958 | if (kernel32) | ||
1959 | { | ||
1960 | tGetPolicy pGetPolicy = (tGetPolicy) GetProcAddress(kernel32, "GetProcessUserModeExceptionPolicy"); | ||
1961 | tSetPolicy pSetPolicy = (tSetPolicy) GetProcAddress(kernel32, "SetProcessUserModeExceptionPolicy"); | ||
1962 | if (pGetPolicy && pSetPolicy) | ||
1963 | { | ||
1964 | DWORD dwFlags; | ||
1965 | if (pGetPolicy(&dwFlags)) | ||
1966 | { | ||
1967 | // Turn off the filter | ||
1968 | pSetPolicy(dwFlags & ~EXCEPTION_SWALLOWING); | ||
1969 | } | ||
1970 | } | ||
1971 | FreeLibrary(kernel32); | ||
1972 | } | ||
1973 | // typedef void (* SignalHandlerPointer)( int); | ||
1974 | /*SignalHandlerPointer previousHandler =*/signal(SIGABRT, signal_handler); | ||
1975 | |||
1976 | s_ecoc_go_ahead = 1; // let others pass | ||
1977 | } | ||
1978 | else | ||
1979 | { | ||
1980 | while (!s_ecoc_go_ahead) | ||
1981 | { | ||
1982 | Sleep(1); | ||
1983 | } // changes threads | ||
1984 | } | ||
1985 | } | ||
1986 | #endif // PLATFORM_WIN32 && !defined NDEBUG | ||
1987 | |||
1988 | LANES_API int luaopen_lanes_core( lua_State* L) | ||
1989 | { | ||
1990 | #if defined PLATFORM_WIN32 && !defined NDEBUG | ||
1991 | EnableCrashingOnCrashes(); | ||
1992 | #endif // defined PLATFORM_WIN32 && !defined NDEBUG | ||
1993 | |||
1994 | STACK_GROW(L, 4); | ||
1995 | STACK_CHECK_START_REL(L, 0); | ||
1996 | |||
1997 | // Prevent PUC-Lua/LuaJIT mismatch. Hopefully this works for MoonJIT too | ||
1998 | lua_getglobal(L, "jit"); // {jit?} | ||
1999 | #if LUAJIT_FLAVOR() == 0 | ||
2000 | if (!lua_isnil(L, -1)) | ||
2001 | return luaL_error(L, "Lanes is built for PUC-Lua, don't run from LuaJIT"); | ||
2002 | #else | ||
2003 | if (lua_isnil(L, -1)) | ||
2004 | return luaL_error(L, "Lanes is built for LuaJIT, don't run from PUC-Lua"); | ||
2005 | #endif | ||
2006 | lua_pop(L, 1); // | ||
2007 | STACK_CHECK(L, 0); | ||
2008 | |||
2009 | // Create main module interface table | ||
2010 | // we only have 1 closure, which must be called to configure Lanes | ||
2011 | lua_newtable(L); // M | ||
2012 | lua_pushvalue(L, 1); // M "lanes.core" | ||
2013 | lua_pushvalue(L, -2); // M "lanes.core" M | ||
2014 | lua_pushcclosure(L, LG_configure, 2); // M LG_configure() | ||
2015 | CONFIG_REGKEY.pushValue(L); // M LG_configure() settings | ||
2016 | if (!lua_isnil(L, -1)) // this is not the first require "lanes.core": call configure() immediately | ||
2017 | { | ||
2018 | lua_pushvalue(L, -1); // M LG_configure() settings settings | ||
2019 | lua_setfield(L, -4, "settings"); // M LG_configure() settings | ||
2020 | lua_call(L, 1, 0); // M | ||
2021 | } | ||
2022 | else | ||
2023 | { | ||
2024 | // will do nothing on first invocation, as we haven't stored settings in the registry yet | ||
2025 | lua_setfield(L, -3, "settings"); // M LG_configure() | ||
2026 | lua_setfield(L, -2, "configure"); // M | ||
2027 | } | ||
2028 | |||
2029 | STACK_CHECK(L, 1); | ||
2030 | return 1; | ||
2031 | } | ||
2032 | |||
2033 | [[nodiscard]] static int default_luaopen_lanes(lua_State* L) | ||
2034 | { | ||
2035 | int const rc{ luaL_loadfile(L, "lanes.lua") || lua_pcall(L, 0, 1, 0) }; | ||
2036 | if (rc != LUA_OK) | ||
2037 | { | ||
2038 | return luaL_error(L, "failed to initialize embedded Lanes"); | ||
2039 | } | ||
2040 | return 1; | ||
2041 | } | ||
2042 | |||
2043 | // call this instead of luaopen_lanes_core() when embedding Lua and Lanes in a custom application | ||
2044 | LANES_API void luaopen_lanes_embedded( lua_State* L, lua_CFunction _luaopen_lanes) | ||
2045 | { | ||
2046 | STACK_CHECK_START_REL(L, 0); | ||
2047 | // pre-require lanes.core so that when lanes.lua calls require "lanes.core" it finds it is already loaded | ||
2048 | luaL_requiref(L, "lanes.core", luaopen_lanes_core, 0); // ... lanes.core | ||
2049 | lua_pop(L, 1); // ... | ||
2050 | STACK_CHECK(L, 0); | ||
2051 | // call user-provided function that runs the chunk "lanes.lua" from wherever they stored it | ||
2052 | luaL_requiref(L, "lanes", _luaopen_lanes ? _luaopen_lanes : default_luaopen_lanes, 0); // ... lanes | ||
2053 | STACK_CHECK(L, 1); | ||
2054 | } | ||
diff --git a/src/lanes.h b/src/lanes.h index 62b9ea9..bc8de55 100644 --- a/src/lanes.h +++ b/src/lanes.h | |||
@@ -1,17 +1,17 @@ | |||
1 | #if !defined( __lanes_h__) | 1 | #pragma once |
2 | #define __lanes_h__ 1 | ||
3 | 2 | ||
3 | #ifdef __cplusplus | ||
4 | extern "C" { | ||
5 | #endif // __cplusplus | ||
4 | #include "lua.h" | 6 | #include "lua.h" |
5 | #include "platform.h" | 7 | #ifdef __cplusplus |
8 | } | ||
9 | #endif // __cplusplus | ||
6 | 10 | ||
7 | #if (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) | 11 | #include "lanesconf.h" |
8 | #define LANES_API __declspec(dllexport) | ||
9 | #else | ||
10 | #define LANES_API | ||
11 | #endif // (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) | ||
12 | 12 | ||
13 | #define LANES_VERSION_MAJOR 3 | 13 | #define LANES_VERSION_MAJOR 4 |
14 | #define LANES_VERSION_MINOR 17 | 14 | #define LANES_VERSION_MINOR 0 |
15 | #define LANES_VERSION_PATCH 0 | 15 | #define LANES_VERSION_PATCH 0 |
16 | 16 | ||
17 | #define LANES_MIN_VERSION_REQUIRED(MAJOR, MINOR, PATCH) ((LANES_VERSION_MAJOR>MAJOR) || (LANES_VERSION_MAJOR==MAJOR && (LANES_VERSION_MINOR>MINOR || (LANES_VERSION_MINOR==MINOR && LANES_VERSION_PATCH>=PATCH)))) | 17 | #define LANES_MIN_VERSION_REQUIRED(MAJOR, MINOR, PATCH) ((LANES_VERSION_MAJOR>MAJOR) || (LANES_VERSION_MAJOR==MAJOR && (LANES_VERSION_MINOR>MINOR || (LANES_VERSION_MINOR==MINOR && LANES_VERSION_PATCH>=PATCH)))) |
@@ -20,9 +20,7 @@ | |||
20 | #define LANES_VERSION_GREATER_THAN(MAJOR, MINOR, PATCH) ((LANES_VERSION_MAJOR>MAJOR) || (LANES_VERSION_MAJOR==MAJOR && (LANES_VERSION_MINOR>MINOR || (LANES_VERSION_MINOR==MINOR && LANES_VERSION_PATCH>PATCH)))) | 20 | #define LANES_VERSION_GREATER_THAN(MAJOR, MINOR, PATCH) ((LANES_VERSION_MAJOR>MAJOR) || (LANES_VERSION_MAJOR==MAJOR && (LANES_VERSION_MINOR>MINOR || (LANES_VERSION_MINOR==MINOR && LANES_VERSION_PATCH>PATCH)))) |
21 | #define LANES_VERSION_GREATER_OR_EQUAL(MAJOR, MINOR, PATCH) ((LANES_VERSION_MAJOR>MAJOR) || (LANES_VERSION_MAJOR==MAJOR && (LANES_VERSION_MINOR>MINOR || (LANES_VERSION_MINOR==MINOR && LANES_VERSION_PATCH>=PATCH)))) | 21 | #define LANES_VERSION_GREATER_OR_EQUAL(MAJOR, MINOR, PATCH) ((LANES_VERSION_MAJOR>MAJOR) || (LANES_VERSION_MAJOR==MAJOR && (LANES_VERSION_MINOR>MINOR || (LANES_VERSION_MINOR==MINOR && LANES_VERSION_PATCH>=PATCH)))) |
22 | 22 | ||
23 | extern int LANES_API luaopen_lanes_core( lua_State* L); | 23 | LANES_API [[nodiscard]] int luaopen_lanes_core(lua_State* L); |
24 | 24 | ||
25 | // Call this to work with embedded Lanes instead of calling luaopen_lanes_core() | 25 | // Call this to work with embedded Lanes instead of calling luaopen_lanes_core() |
26 | extern void LANES_API luaopen_lanes_embedded( lua_State* L, lua_CFunction _luaopen_lanes); | 26 | LANES_API void luaopen_lanes_embedded(lua_State* L, lua_CFunction _luaopen_lanes); |
27 | |||
28 | #endif // __lanes_h__ \ No newline at end of file | ||
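The version macros above lend themselves to compile-time checks in host code. A small illustrative sketch (the #error message and the feature flag are invented):

#include "lanes.h"

// refuse to build against anything older than the 4.0.0 C++ implementation
#if !LANES_MIN_VERSION_REQUIRED(4, 0, 0)
#error "this host requires Lanes >= 4.0.0"
#endif

// pick a code path depending on the Lanes version the headers describe
#if LANES_VERSION_GREATER_OR_EQUAL(4, 0, 0)
[[maybe_unused]] static constexpr bool kHaveCppLanes{ true }; // hypothetical host-side flag
#else
[[maybe_unused]] static constexpr bool kHaveCppLanes{ false };
#endif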
diff --git a/src/lanes.lua b/src/lanes.lua index 49900f9..fd3d22b 100644 --- a/src/lanes.lua +++ b/src/lanes.lua | |||
@@ -73,6 +73,7 @@ lanes.configure = function( settings_) | |||
73 | keepers_gc_threshold = -1, | 73 | keepers_gc_threshold = -1, |
74 | on_state_create = nil, | 74 | on_state_create = nil, |
75 | shutdown_timeout = 0.25, | 75 | shutdown_timeout = 0.25, |
76 | shutdown_mode = "hard", | ||
76 | with_timers = true, | 77 | with_timers = true, |
77 | track_lanes = false, | 78 | track_lanes = false, |
78 | demote_full_userdata = nil, | 79 | demote_full_userdata = nil, |
@@ -113,6 +114,11 @@ lanes.configure = function( settings_) | |||
113 | -- shutdown_timeout should be a number >= 0 | 114 | -- shutdown_timeout should be a number >= 0 |
114 | return type( val_) == "number" and val_ >= 0 | 115 | return type( val_) == "number" and val_ >= 0 |
115 | end, | 116 | end, |
117 | shutdown_mode = function( val_) | ||
118 | local valid_hooks = { soft = true, hard = true, call = true, ret = true, line = true, count = true } | ||
119 | -- shutdown_mode should be a known hook mask | ||
120 | return valid_hooks[val_] | ||
121 | end, | ||
116 | track_lanes = boolean_param_checker, | 122 | track_lanes = boolean_param_checker, |
117 | demote_full_userdata = boolean_param_checker, | 123 | demote_full_userdata = boolean_param_checker, |
118 | verbose_errors = boolean_param_checker | 124 | verbose_errors = boolean_param_checker |
@@ -367,7 +373,6 @@ lanes.configure = function( settings_) | |||
367 | 373 | ||
368 | 374 | ||
369 | if settings.with_timers ~= false then | 375 | if settings.with_timers ~= false then |
370 | |||
371 | -- | 376 | -- |
372 | -- On first 'require "lanes"', a timer lane is spawned that will maintain | 377 | -- On first 'require "lanes"', a timer lane is spawned that will maintain |
373 | -- timer tables and sleep in between the timer events. All interaction with | 378 | -- timer tables and sleep in between the timer events. All interaction with |
diff --git a/src/lanes_private.h b/src/lanes_private.h index 8143216..18e55fd 100644 --- a/src/lanes_private.h +++ b/src/lanes_private.h | |||
@@ -1,102 +1,106 @@ | |||
1 | #if !defined __lanes_private_h__ | 1 | #pragma once |
2 | #define __lanes_private_h__ 1 | ||
3 | 2 | ||
4 | #include "uniquekey.h" | ||
5 | #include "cancel.h" | 3 | #include "cancel.h" |
4 | #include "uniquekey.h" | ||
6 | #include "universe.h" | 5 | #include "universe.h" |
7 | 6 | ||
7 | #include <chrono> | ||
8 | #include <condition_variable> | ||
9 | #include <latch> | ||
10 | #include <stop_token> | ||
11 | #include <thread> | ||
12 | |||
8 | // NOTE: values to be changed by either thread, during execution, without | 13 | // NOTE: values to be changed by either thread, during execution, without |
9 | // locking, are marked "volatile" | 14 | // locking, are marked "volatile" |
10 | // | 15 | // |
11 | struct s_Lane | 16 | class Lane |
12 | { | 17 | { |
13 | THREAD_T thread; | 18 | public: |
19 | |||
20 | /* | ||
21 | Pending: The Lua VM hasn't done anything yet. | ||
22 | Running, Waiting: Thread is inside the Lua VM. If the thread is forcefully stopped, we can't lua_close() the Lua State. | ||
23 | Done, Error, Cancelled: Thread execution is outside the Lua VM. It can be lua_close()d. | ||
24 | */ | ||
25 | enum class Status | ||
26 | { | ||
27 | Pending, | ||
28 | Running, | ||
29 | Waiting, | ||
30 | Done, | ||
31 | Error, | ||
32 | Cancelled | ||
33 | }; | ||
34 | using enum Status; | ||
35 | |||
36 | // the thread | ||
37 | std::jthread m_thread; | ||
38 | // a latch to wait for the lua_State to be ready | ||
39 | std::latch m_ready{ 1 }; | ||
40 | // to wait for stop requests through m_thread's stop_source | ||
41 | std::mutex m_done_mutex; | ||
42 | std::condition_variable m_done_signal; // use condition_variable_any if waiting for a stop_token | ||
14 | // | 43 | // |
15 | // M: sub-thread OS thread | 44 | // M: sub-thread OS thread |
16 | // S: not used | 45 | // S: not used |
17 | 46 | ||
18 | char const* debug_name; | 47 | char const* debug_name{ "<unnamed>" }; |
19 | 48 | ||
49 | Universe* const U; | ||
20 | lua_State* L; | 50 | lua_State* L; |
21 | Universe* U; | ||
22 | // | 51 | // |
23 | // M: prepares the state, and reads results | 52 | // M: prepares the state, and reads results |
24 | // S: while S is running, M must keep out of modifying the state | 53 | // S: while S is running, M must keep out of modifying the state |
25 | 54 | ||
26 | volatile enum e_status status; | 55 | Status volatile m_status{ Pending }; |
27 | // | 56 | // |
28 | // M: sets to PENDING (before launching) | 57 | // M: sets to Pending (before launching) |
29 | // S: updates -> RUNNING/WAITING -> DONE/ERROR_ST/CANCELLED | 58 | // S: updates -> Running/Waiting -> Done/Error/Cancelled |
30 | 59 | ||
31 | SIGNAL_T* volatile waiting_on; | 60 | std::condition_variable* volatile m_waiting_on{ nullptr }; |
32 | // | 61 | // |
33 | // When status is WAITING, points to the linda's signal the thread waits on, else NULL | 62 | // When status is Waiting, points to the linda's signal the thread waits on, else nullptr |
34 | 63 | ||
35 | volatile enum e_cancel_request cancel_request; | 64 | CancelRequest volatile cancel_request{ CancelRequest::None }; |
36 | // | 65 | // |
37 | // M: sets to FALSE, flags TRUE for cancel request | 66 | // M: sets to false, flags true for cancel request |
38 | // S: reads to see if cancel is requested | 67 | // S: reads to see if cancel is requested |
39 | 68 | ||
40 | #if THREADWAIT_METHOD == THREADWAIT_CONDVAR | 69 | Lane* volatile selfdestruct_next{ nullptr }; |
41 | SIGNAL_T done_signal; | ||
42 | // | ||
43 | // M: Waited upon at lane ending (if Posix with no PTHREAD_TIMEDJOIN) | ||
44 | // S: sets the signal once cancellation is noticed (avoids a kill) | ||
45 | |||
46 | MUTEX_T done_lock; | ||
47 | // | ||
48 | // Lock required by 'done_signal' condition variable, protecting | ||
49 | // lane status changes to DONE/ERROR_ST/CANCELLED. | ||
50 | #endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR | ||
51 | |||
52 | volatile enum | ||
53 | { | ||
54 | NORMAL, // normal master side state | ||
55 | KILLED // issued an OS kill | ||
56 | } mstatus; | ||
57 | // | ||
58 | // M: sets to NORMAL, if issued a kill changes to KILLED | ||
59 | // S: not used | ||
60 | |||
61 | struct s_Lane* volatile selfdestruct_next; | ||
62 | // | 70 | // |
63 | // M: sets to non-NULL if facing lane handle '__gc' cycle but the lane | 71 | // M: sets to non-nullptr if facing lane handle '__gc' cycle but the lane |
64 | // is still running | 72 | // is still running |
65 | // S: cleans up after itself if non-NULL at lane exit | 73 | // S: cleans up after itself if non-nullptr at lane exit |
66 | 74 | ||
67 | #if HAVE_LANE_TRACKING() | 75 | #if HAVE_LANE_TRACKING() |
68 | struct s_Lane* volatile tracking_next; | 76 | Lane* volatile tracking_next{ nullptr }; |
69 | #endif // HAVE_LANE_TRACKING() | 77 | #endif // HAVE_LANE_TRACKING() |
70 | // | 78 | // |
71 | // For tracking only | 79 | // For tracking only |
80 | |||
81 | [[nodiscard]] static void* operator new(size_t size_, Universe* U_) noexcept { return U_->internal_allocator.alloc(size_); } | ||
82 | // can't actually delete the operator because the compiler generates stack unwinding code that could call it in case of exception | ||
83 | static void operator delete(void* p_, Universe* U_) { U_->internal_allocator.free(p_, sizeof(Lane)); } | ||
84 | // this one is for us, to make sure memory is freed by the correct allocator | ||
85 | static void operator delete(void* p_) { static_cast<Lane*>(p_)->U->internal_allocator.free(p_, sizeof(Lane)); } | ||
86 | |||
87 | Lane(Universe* U_, lua_State* L_); | ||
88 | ~Lane(); | ||
89 | |||
90 | [[nodiscard]] bool waitForCompletion(lua_Duration duration_); | ||
91 | void startThread(int priority_); | ||
72 | }; | 92 | }; |
73 | typedef struct s_Lane Lane; | ||
74 | 93 | ||
75 | // xxh64 of string "LANE_POINTER_REGKEY" generated at https://www.pelock.com/products/hash-calculator | 94 | // xxh64 of string "LANE_POINTER_REGKEY" generated at https://www.pelock.com/products/hash-calculator |
76 | static DECLARE_CONST_UNIQUE_KEY( LANE_POINTER_REGKEY, 0xB3022205633743BC); // used as registry key | 95 | static constexpr UniqueKey LANE_POINTER_REGKEY{ 0xB3022205633743BCull }; // used as registry key |
77 | 96 | ||
78 | // To allow free-running threads (longer lifespan than the handle's) | 97 | // To allow free-running threads (longer lifespan than the handle's) |
79 | // 'Lane' are malloc/free'd and the handle only carries a pointer. | 98 | // 'Lane' are malloc/free'd and the handle only carries a pointer. |
80 | // This is not deep userdata since the handle's not portable among lanes. | 99 | // This is not deep userdata since the handle's not portable among lanes. |
81 | // | 100 | // |
82 | inline Lane* lua_toLane(lua_State* L, int i_) | 101 | [[nodiscard]] inline Lane* lua_toLane(lua_State* L, int i_) |
83 | { | 102 | { |
84 | return *(Lane**)(luaL_checkudata(L, i_, "Lane")); | 103 | return *(static_cast<Lane**>(luaL_checkudata(L, i_, "Lane"))); |
85 | } | 104 | } |
86 | 105 | ||
87 | static inline Lane* get_lane_from_registry( lua_State* L) | 106 | void push_thread_status(lua_State* L, Lane* lane_); |
88 | { | ||
89 | Lane* s; | ||
90 | STACK_GROW( L, 1); | ||
91 | STACK_CHECK( L, 0); | ||
92 | REGISTRY_GET( L, LANE_POINTER_REGKEY); | ||
93 | s = lua_touserdata( L, -1); // lightuserdata (true 's_lane' pointer) / nil | ||
94 | lua_pop( L, 1); | ||
95 | STACK_END( L, 0); | ||
96 | return s; | ||
97 | } | ||
98 | |||
99 | int push_thread_status( lua_State* L, Lane* s); | ||
100 | |||
101 | |||
102 | #endif // __lanes_private_h__ \ No newline at end of file | ||
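Both Lane above and Linda further down route their allocations through the Universe's internal allocator via class-specific operator new/delete. A stripped-down, self-contained sketch of that pattern follows; FakeAllocator and FakeUniverse are stand-ins, since the real AllocatorDefinition is not shown in this diff.

#include <cstddef>
#include <new>

// stand-in for Universe::internal_allocator
struct FakeAllocator
{
    void* alloc(std::size_t size_) { return ::operator new(size_); }
    void free(void* p_, std::size_t /*size_*/) { ::operator delete(p_); }
};

struct FakeUniverse
{
    FakeAllocator internal_allocator;
};

class Widget
{
    FakeUniverse* const U;

public:

    // placement-style operator new: take memory from the universe's allocator
    [[nodiscard]] static void* operator new(std::size_t size_, FakeUniverse* U_) noexcept { return U_->internal_allocator.alloc(size_); }
    // matching operator delete: only invoked by the compiler if the constructor throws
    static void operator delete(void* p_, FakeUniverse* U_) { U_->internal_allocator.free(p_, sizeof(Widget)); }
    // regular operator delete: hand the memory back to the allocator it came from (same trick as Lane/Linda)
    static void operator delete(void* p_) { static_cast<Widget*>(p_)->U->internal_allocator.free(p_, sizeof(Widget)); }

    Widget(FakeUniverse* U_) : U{ U_ } {}
};

int main()
{
    FakeUniverse universe;
    Widget* const w{ new (&universe) Widget{ &universe } }; // goes through the custom operator new
    delete w;                                               // goes through the custom operator delete(void*)
    return 0;
}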
diff --git a/src/lanesconf.h b/src/lanesconf.h new file mode 100644 index 0000000..fb4a601 --- /dev/null +++ b/src/lanesconf.h | |||
@@ -0,0 +1,17 @@ | |||
1 | #pragma once | ||
2 | |||
3 | #include "platform.h" | ||
4 | |||
5 | #if (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) | ||
6 | #ifdef __cplusplus | ||
7 | #define LANES_API extern "C" __declspec(dllexport) | ||
8 | #else | ||
9 | #define LANES_API extern __declspec(dllexport) | ||
10 | #endif // __cplusplus | ||
11 | #else | ||
12 | #ifdef __cplusplus | ||
13 | #define LANES_API extern "C" | ||
14 | #else | ||
15 | #define LANES_API extern | ||
16 | #endif // __cplusplus | ||
17 | #endif // (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) | ||
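As a quick illustration of the export macro above: a hypothetical translation unit that uses it (luaopen_my_module is an invented name; only LANES_API itself comes from lanesconf.h).

#include "lanesconf.h"

struct lua_State; // a forward declaration is enough for the prototype

// On Windows builds LANES_API expands to extern "C" __declspec(dllexport) (plain extern in C),
// elsewhere to extern "C" (or extern), giving the symbol C linkage and, where needed, DLL export.
LANES_API int luaopen_my_module(lua_State* L);

LANES_API int luaopen_my_module(lua_State*)
{
    return 0; // placeholder body
}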
diff --git a/src/linda.c b/src/linda.c deleted file mode 100644 index 2128520..0000000 --- a/src/linda.c +++ /dev/null | |||
@@ -1,948 +0,0 @@ | |||
1 | /* | ||
2 | * LINDA.C Copyright (c) 2018, Benoit Germain | ||
3 | * | ||
4 | * Linda deep userdata. | ||
5 | */ | ||
6 | |||
7 | /* | ||
8 | =============================================================================== | ||
9 | |||
10 | Copyright (C) 2018 benoit Germain <bnt.germain@gmail.com> | ||
11 | |||
12 | Permission is hereby granted, free of charge, to any person obtaining a copy | ||
13 | of this software and associated documentation files (the "Software"), to deal | ||
14 | in the Software without restriction, including without limitation the rights | ||
15 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||
16 | copies of the Software, and to permit persons to whom the Software is | ||
17 | furnished to do so, subject to the following conditions: | ||
18 | |||
19 | The above copyright notice and this permission notice shall be included in | ||
20 | all copies or substantial portions of the Software. | ||
21 | |||
22 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
23 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
24 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
25 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
26 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
27 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||
28 | THE SOFTWARE. | ||
29 | |||
30 | =============================================================================== | ||
31 | */ | ||
32 | |||
33 | #include <stdlib.h> | ||
34 | #include <string.h> | ||
35 | #include <assert.h> | ||
36 | |||
37 | #include "threading.h" | ||
38 | #include "compat.h" | ||
39 | #include "tools.h" | ||
40 | #include "universe.h" | ||
41 | #include "keeper.h" | ||
42 | #include "deep.h" | ||
43 | #include "lanes_private.h" | ||
44 | |||
45 | /* | ||
46 | * Actual data is kept within a keeper state, which is hashed by the 's_Linda' | ||
47 | * pointer (which is the same for all userdatas pointing to it). | ||
48 | */ | ||
49 | struct s_Linda | ||
50 | { | ||
51 | DeepPrelude prelude; // Deep userdata MUST start with this header | ||
52 | SIGNAL_T read_happened; | ||
53 | SIGNAL_T write_happened; | ||
54 | Universe* U; // the universe this linda belongs to | ||
55 | uintptr_t group; // a group to control keeper allocation between lindas | ||
56 | enum e_cancel_request simulate_cancel; | ||
57 | char name[1]; | ||
58 | }; | ||
59 | #define LINDA_KEEPER_HASHSEED( linda) (linda->group ? linda->group : (uintptr_t)linda) | ||
60 | |||
61 | static void* linda_id( lua_State*, DeepOp); | ||
62 | |||
63 | static inline struct s_Linda* lua_toLinda( lua_State* L, int idx_) | ||
64 | { | ||
65 | struct s_Linda* linda = (struct s_Linda*) luaG_todeep( L, linda_id, idx_); | ||
66 | luaL_argcheck( L, linda != NULL, idx_, "expecting a linda object"); | ||
67 | return linda; | ||
68 | } | ||
69 | |||
70 | static void check_key_types( lua_State* L, int start_, int end_) | ||
71 | { | ||
72 | int i; | ||
73 | for( i = start_; i <= end_; ++ i) | ||
74 | { | ||
75 | int t = lua_type( L, i); | ||
76 | if( t == LUA_TBOOLEAN || t == LUA_TNUMBER || t == LUA_TSTRING || t == LUA_TLIGHTUSERDATA) | ||
77 | { | ||
78 | continue; | ||
79 | } | ||
80 | (void) luaL_error( L, "argument #%d: invalid key type (not a boolean, string, number or light userdata)", i); | ||
81 | } | ||
82 | } | ||
83 | |||
84 | LUAG_FUNC( linda_protected_call) | ||
85 | { | ||
86 | int rc = LUA_OK; | ||
87 | struct s_Linda* linda = lua_toLinda( L, 1); | ||
88 | |||
89 | // acquire the keeper | ||
90 | Keeper* K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED(linda)); | ||
91 | lua_State* KL = K ? K->L : NULL; // need to do this for 'STACK_CHECK' | ||
92 | if( KL == NULL) return 0; | ||
93 | |||
94 | // retrieve the actual function to be called and move it before the arguments | ||
95 | lua_pushvalue( L, lua_upvalueindex( 1)); | ||
96 | lua_insert( L, 1); | ||
97 | // do a protected call | ||
98 | rc = lua_pcall( L, lua_gettop( L) - 1, LUA_MULTRET, 0); | ||
99 | |||
100 | // release the keeper | ||
101 | keeper_release( K); | ||
102 | |||
103 | // if there was an error, forward it | ||
104 | if( rc != LUA_OK) | ||
105 | { | ||
106 | return lua_error( L); | ||
107 | } | ||
108 | // return whatever the actual operation provided | ||
109 | return lua_gettop( L); | ||
110 | } | ||
111 | |||
112 | /* | ||
113 | * bool= linda_send( linda_ud, [timeout_secs=-1,] [linda.null,] key_num|str|bool|lightuserdata, ... ) | ||
114 | * | ||
115 | * Send one or more values to a Linda. If there is a limit, all values must fit. | ||
116 | * | ||
117 | * Returns: 'true' if the value was queued | ||
118 | * 'false' for timeout (only happens when the queue size is limited) | ||
119 | * nil, CANCEL_ERROR if cancelled | ||
120 | */ | ||
121 | LUAG_FUNC( linda_send) | ||
122 | { | ||
123 | struct s_Linda* linda = lua_toLinda( L, 1); | ||
124 | bool_t ret = FALSE; | ||
125 | enum e_cancel_request cancel = CANCEL_NONE; | ||
126 | int pushed; | ||
127 | time_d timeout = -1.0; | ||
128 | int key_i = 2; // index of first key, if timeout not there | ||
129 | bool_t as_nil_sentinel; // when true, send() will silently send a single nil if nothing is provided | ||
130 | |||
131 | if( lua_type( L, 2) == LUA_TNUMBER) // we don't want to use lua_isnumber() because of autocoercion | ||
132 | { | ||
133 | timeout = SIGNAL_TIMEOUT_PREPARE( lua_tonumber( L, 2)); | ||
134 | ++ key_i; | ||
135 | } | ||
136 | else if( lua_isnil( L, 2)) // alternate explicit "no timeout" by passing nil before the key | ||
137 | { | ||
138 | ++ key_i; | ||
139 | } | ||
140 | |||
141 | as_nil_sentinel = equal_unique_key( L, key_i, NIL_SENTINEL); | ||
142 | if( as_nil_sentinel) | ||
143 | { | ||
144 | // the real key to send data to is after the NIL_SENTINEL marker | ||
145 | ++ key_i; | ||
146 | } | ||
147 | |||
148 | // make sure the key is of a valid type | ||
149 | check_key_types( L, key_i, key_i); | ||
150 | |||
151 | STACK_GROW( L, 1); | ||
152 | |||
153 | // make sure there is something to send | ||
154 | if( lua_gettop( L) == key_i) | ||
155 | { | ||
156 | if( as_nil_sentinel) | ||
157 | { | ||
158 | // send a single nil if nothing is provided | ||
159 | push_unique_key( L, NIL_SENTINEL); | ||
160 | } | ||
161 | else | ||
162 | { | ||
163 | return luaL_error( L, "no data to send"); | ||
164 | } | ||
165 | } | ||
166 | |||
167 | // convert nils to some special non-nil sentinel in sent values | ||
168 | keeper_toggle_nil_sentinels( L, key_i + 1, eLM_ToKeeper); | ||
169 | |||
170 | { | ||
171 | bool_t try_again = TRUE; | ||
172 | Lane* const s = get_lane_from_registry( L); | ||
173 | Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); | ||
174 | lua_State* KL = K ? K->L : NULL; // need to do this for 'STACK_CHECK' | ||
175 | if( KL == NULL) return 0; | ||
176 | STACK_CHECK( KL, 0); | ||
177 | for( ;;) | ||
178 | { | ||
179 | if( s != NULL) | ||
180 | { | ||
181 | cancel = s->cancel_request; | ||
182 | } | ||
183 | cancel = (cancel != CANCEL_NONE) ? cancel : linda->simulate_cancel; | ||
184 | // if user wants to cancel, or looped because of a timeout, the call returns without sending anything | ||
185 | if( !try_again || cancel != CANCEL_NONE) | ||
186 | { | ||
187 | pushed = 0; | ||
188 | break; | ||
189 | } | ||
190 | |||
191 | STACK_MID( KL, 0); | ||
192 | pushed = keeper_call( linda->U, KL, KEEPER_API( send), L, linda, key_i); | ||
193 | if( pushed < 0) | ||
194 | { | ||
195 | break; | ||
196 | } | ||
197 | ASSERT_L( pushed == 1); | ||
198 | |||
199 | ret = lua_toboolean( L, -1); | ||
200 | lua_pop( L, 1); | ||
201 | |||
202 | if( ret) | ||
203 | { | ||
204 | // Wake up ALL waiting threads | ||
205 | SIGNAL_ALL( &linda->write_happened); | ||
206 | break; | ||
207 | } | ||
208 | |||
209 | // instant timeout to bypass the wait syscall | ||
210 | if( timeout == 0.0) | ||
211 | { | ||
212 | break; /* no wait; instant timeout */ | ||
213 | } | ||
214 | |||
215 | // storage limit hit, wait until timeout or signalled that we should try again | ||
216 | { | ||
217 | enum e_status prev_status = ERROR_ST; // prevent 'might be used uninitialized' warnings | ||
218 | if( s != NULL) | ||
219 | { | ||
220 | // change status of lane to "waiting" | ||
221 | prev_status = s->status; // RUNNING, most likely | ||
222 | ASSERT_L( prev_status == RUNNING); // but check, just in case | ||
223 | s->status = WAITING; | ||
224 | ASSERT_L( s->waiting_on == NULL); | ||
225 | s->waiting_on = &linda->read_happened; | ||
226 | } | ||
227 | // could not send because no room: wait until some data was read before trying again, or until timeout is reached | ||
228 | try_again = SIGNAL_WAIT( &linda->read_happened, &K->keeper_cs, timeout); | ||
229 | if( s != NULL) | ||
230 | { | ||
231 | s->waiting_on = NULL; | ||
232 | s->status = prev_status; | ||
233 | } | ||
234 | } | ||
235 | } | ||
236 | STACK_END( KL, 0); | ||
237 | } | ||
238 | |||
239 | if( pushed < 0) | ||
240 | { | ||
241 | return luaL_error( L, "tried to copy unsupported types"); | ||
242 | } | ||
243 | |||
244 | switch( cancel) | ||
245 | { | ||
246 | case CANCEL_SOFT: | ||
247 | // if user wants to soft-cancel, the call returns lanes.cancel_error | ||
248 | push_unique_key( L, CANCEL_ERROR); | ||
249 | return 1; | ||
250 | |||
251 | case CANCEL_HARD: | ||
252 | // raise an error interrupting execution only in case of hard cancel | ||
253 | return cancel_error( L); // raises an error and doesn't return | ||
254 | |||
255 | default: | ||
256 | lua_pushboolean( L, ret); // true (success) or false (timeout) | ||
257 | return 1; | ||
258 | } | ||
259 | } | ||
260 | |||
261 | |||
262 | /* | ||
263 | * 2 modes of operation | ||
264 | * [val, key]= linda_receive( linda_ud, [timeout_secs_num=-1], key_num|str|bool|lightuserdata [, ...] ) | ||
265 | * Consumes a single value from the Linda, in any key. | ||
266 | * Returns: received value (which is consumed from the slot), and the key which had it | ||
267 | |||
268 | * [val1, ... valCOUNT]= linda_receive( linda_ud, [timeout_secs_num=-1], linda.batched, key_num|str|bool|lightuserdata, min_COUNT[, max_COUNT]) | ||
269 | * Consumes between min_COUNT and max_COUNT values from the linda, from a single key. | ||
270 | * returns the actual consumed values, or nil if there weren't enough values to consume | ||
271 | * | ||
272 | */ | ||
273 | // xxh64 of string "BATCH_SENTINEL" generated at https://www.pelock.com/products/hash-calculator | ||
274 | DECLARE_CONST_UNIQUE_KEY(BATCH_SENTINEL, 0x2DDFEE0968C62AA7); | ||
275 | LUAG_FUNC( linda_receive) | ||
276 | { | ||
277 | struct s_Linda* linda = lua_toLinda( L, 1); | ||
278 | int pushed, expected_pushed_min, expected_pushed_max; | ||
279 | enum e_cancel_request cancel = CANCEL_NONE; | ||
280 | keeper_api_t selected_keeper_receive; | ||
281 | |||
282 | time_d timeout = -1.0; | ||
283 | int key_i = 2; | ||
284 | |||
285 | if( lua_type( L, 2) == LUA_TNUMBER) // we don't want to use lua_isnumber() because of autocoercion | ||
286 | { | ||
287 | timeout = SIGNAL_TIMEOUT_PREPARE( lua_tonumber( L, 2)); | ||
288 | ++ key_i; | ||
289 | } | ||
290 | else if( lua_isnil( L, 2)) // alternate explicit "no timeout" by passing nil before the key | ||
291 | { | ||
292 | ++ key_i; | ||
293 | } | ||
294 | |||
295 | // are we in batched mode? | ||
296 | { | ||
297 | int is_batched; | ||
298 | push_unique_key( L, BATCH_SENTINEL); | ||
299 | is_batched = lua501_equal( L, key_i, -1); | ||
300 | lua_pop( L, 1); | ||
301 | if( is_batched) | ||
302 | { | ||
303 | // no need to pass linda.batched in the keeper state | ||
304 | ++ key_i; | ||
305 | // make sure the keys are of a valid type | ||
306 | check_key_types( L, key_i, key_i); | ||
307 | // receive multiple values from a single slot | ||
308 | selected_keeper_receive = KEEPER_API( receive_batched); | ||
309 | // we expect a user-defined number of return values | ||
310 | expected_pushed_min = (int)luaL_checkinteger( L, key_i + 1); | ||
311 | expected_pushed_max = (int)luaL_optinteger( L, key_i + 2, expected_pushed_min); | ||
312 | // don't forget to count the key in addition to the values | ||
313 | ++ expected_pushed_min; | ||
314 | ++ expected_pushed_max; | ||
315 | if( expected_pushed_min > expected_pushed_max) | ||
316 | { | ||
317 | return luaL_error( L, "batched min/max error"); | ||
318 | } | ||
319 | } | ||
320 | else | ||
321 | { | ||
322 | // make sure the keys are of a valid type | ||
323 | check_key_types( L, key_i, lua_gettop( L)); | ||
324 | // receive a single value, checking multiple slots | ||
325 | selected_keeper_receive = KEEPER_API( receive); | ||
326 | // we expect a single (value, key) pair of returned values | ||
327 | expected_pushed_min = expected_pushed_max = 2; | ||
328 | } | ||
329 | } | ||
330 | |||
331 | { | ||
332 | bool_t try_again = TRUE; | ||
333 | Lane* const s = get_lane_from_registry( L); | ||
334 | Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); | ||
335 | if( K == NULL) return 0; | ||
336 | for( ;;) | ||
337 | { | ||
338 | if( s != NULL) | ||
339 | { | ||
340 | cancel = s->cancel_request; | ||
341 | } | ||
342 | cancel = (cancel != CANCEL_NONE) ? cancel : linda->simulate_cancel; | ||
343 | // if user wants to cancel, or looped because of a timeout, the call returns without sending anything | ||
344 | if( !try_again || cancel != CANCEL_NONE) | ||
345 | { | ||
346 | pushed = 0; | ||
347 | break; | ||
348 | } | ||
349 | |||
350 | // all arguments of receive() but the first are passed to the keeper's receive function | ||
351 | pushed = keeper_call( linda->U, K->L, selected_keeper_receive, L, linda, key_i); | ||
352 | if( pushed < 0) | ||
353 | { | ||
354 | break; | ||
355 | } | ||
356 | if( pushed > 0) | ||
357 | { | ||
358 | ASSERT_L( pushed >= expected_pushed_min && pushed <= expected_pushed_max); | ||
359 | // replace sentinels with real nils | ||
360 | keeper_toggle_nil_sentinels( L, lua_gettop( L) - pushed, eLM_FromKeeper); | ||
361 | // To be done from within the 'K' locking area | ||
362 | // | ||
363 | SIGNAL_ALL( &linda->read_happened); | ||
364 | break; | ||
365 | } | ||
366 | |||
367 | if( timeout == 0.0) | ||
368 | { | ||
369 | break; /* instant timeout */ | ||
370 | } | ||
371 | |||
372 | // nothing received, wait until timeout or signalled that we should try again | ||
373 | { | ||
374 | enum e_status prev_status = ERROR_ST; // prevent 'might be used uninitialized' warnings | ||
375 | if( s != NULL) | ||
376 | { | ||
377 | // change status of lane to "waiting" | ||
378 | prev_status = s->status; // RUNNING, most likely | ||
379 | ASSERT_L( prev_status == RUNNING); // but check, just in case | ||
380 | s->status = WAITING; | ||
381 | ASSERT_L( s->waiting_on == NULL); | ||
382 | s->waiting_on = &linda->write_happened; | ||
383 | } | ||
384 | // not enough data to read: wake up when data is sent, or when the timeout is reached | ||
385 | try_again = SIGNAL_WAIT( &linda->write_happened, &K->keeper_cs, timeout); | ||
386 | if( s != NULL) | ||
387 | { | ||
388 | s->waiting_on = NULL; | ||
389 | s->status = prev_status; | ||
390 | } | ||
391 | } | ||
392 | } | ||
393 | } | ||
394 | |||
395 | if( pushed < 0) | ||
396 | { | ||
397 | return luaL_error( L, "tried to copy unsupported types"); | ||
398 | } | ||
399 | |||
400 | switch( cancel) | ||
401 | { | ||
402 | case CANCEL_SOFT: | ||
403 | // if user wants to soft-cancel, the call returns CANCEL_ERROR | ||
404 | push_unique_key( L, CANCEL_ERROR); | ||
405 | return 1; | ||
406 | |||
407 | case CANCEL_HARD: | ||
408 | // raise an error interrupting execution only in case of hard cancel | ||
409 | return cancel_error( L); // raises an error and doesn't return | ||
410 | |||
411 | default: | ||
412 | return pushed; | ||
413 | } | ||
414 | } | ||
415 | |||
416 | |||
417 | /* | ||
418 | * [true|lanes.cancel_error] = linda_set( linda_ud, key_num|str|bool|lightuserdata [, value [, ...]]) | ||
419 | * | ||
420 | * Set one or more value to Linda. | ||
421 | * TODO: what do we do if we set to non-nil and limit is 0? | ||
422 | * | ||
423 | * Existing slot value is replaced, and possible queued entries removed. | ||
424 | */ | ||
425 | LUAG_FUNC( linda_set) | ||
426 | { | ||
427 | struct s_Linda* const linda = lua_toLinda( L, 1); | ||
428 | int pushed; | ||
429 | bool_t has_value = lua_gettop( L) > 2; | ||
430 | |||
431 | // make sure the key is of a valid type (throws an error if not the case) | ||
432 | check_key_types( L, 2, 2); | ||
433 | |||
434 | { | ||
435 | Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); | ||
436 | |||
437 | if( linda->simulate_cancel == CANCEL_NONE) | ||
438 | { | ||
439 | if( has_value) | ||
440 | { | ||
441 | // convert nils to some special non-nil sentinel in sent values | ||
442 | keeper_toggle_nil_sentinels( L, 3, eLM_ToKeeper); | ||
443 | } | ||
444 | pushed = keeper_call( linda->U, K->L, KEEPER_API( set), L, linda, 2); | ||
445 | if( pushed >= 0) // no error? | ||
446 | { | ||
447 | ASSERT_L( pushed == 0 || pushed == 1); | ||
448 | |||
449 | if( has_value) | ||
450 | { | ||
451 | // we put some data in the slot, tell readers that they should wake | ||
452 | SIGNAL_ALL( &linda->write_happened); // To be done from within the 'K' locking area | ||
453 | } | ||
454 | if( pushed == 1) | ||
455 | { | ||
456 | // the key was full, but it is no longer the case, tell writers they should wake | ||
457 | ASSERT_L( lua_type( L, -1) == LUA_TBOOLEAN && lua_toboolean( L, -1) == 1); | ||
458 | SIGNAL_ALL( &linda->read_happened); // To be done from within the 'K' locking area | ||
459 | } | ||
460 | } | ||
461 | } | ||
462 | else // linda is cancelled | ||
463 | { | ||
464 | // do nothing and return lanes.cancel_error | ||
465 | push_unique_key( L, CANCEL_ERROR); | ||
466 | pushed = 1; | ||
467 | } | ||
468 | } | ||
469 | |||
470 | // must trigger any error after keeper state has been released | ||
471 | return (pushed < 0) ? luaL_error( L, "tried to copy unsupported types") : pushed; | ||
472 | } | ||
473 | |||
474 | |||
475 | /* | ||
476 | * [val] = linda_count( linda_ud, [key [, ...]]) | ||
477 | * | ||
478 | * Get a count of the pending elements in the specified keys | ||
479 | */ | ||
480 | LUAG_FUNC( linda_count) | ||
481 | { | ||
482 | struct s_Linda* linda = lua_toLinda( L, 1); | ||
483 | int pushed; | ||
484 | |||
485 | // make sure the keys are of a valid type | ||
486 | check_key_types( L, 2, lua_gettop( L)); | ||
487 | |||
488 | { | ||
489 | Keeper* K = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); | ||
490 | pushed = keeper_call( linda->U, K->L, KEEPER_API( count), L, linda, 2); | ||
491 | if( pushed < 0) | ||
492 | { | ||
493 | return luaL_error( L, "tried to count an invalid key"); | ||
494 | } | ||
495 | } | ||
496 | return pushed; | ||
497 | } | ||
498 | |||
499 | |||
500 | /* | ||
501 | * [val [, ...]] = linda_get( linda_ud, key_num|str|bool|lightuserdata [, count = 1]) | ||
502 | * | ||
503 | * Get one or more values from Linda. | ||
504 | */ | ||
505 | LUAG_FUNC( linda_get) | ||
506 | { | ||
507 | struct s_Linda* const linda = lua_toLinda( L, 1); | ||
508 | int pushed; | ||
509 | lua_Integer count = luaL_optinteger( L, 3, 1); | ||
510 | luaL_argcheck( L, count >= 1, 3, "count should be >= 1"); | ||
511 | luaL_argcheck( L, lua_gettop( L) <= 3, 4, "too many arguments"); | ||
512 | |||
513 | // make sure the key is of a valid type (throws an error if not the case) | ||
514 | check_key_types( L, 2, 2); | ||
515 | |||
516 | if( linda->simulate_cancel == CANCEL_NONE) | ||
517 | { | ||
518 | Keeper* const K = which_keeper(linda->U->keepers, LINDA_KEEPER_HASHSEED(linda)); | ||
519 | pushed = keeper_call( linda->U, K->L, KEEPER_API( get), L, linda, 2); | ||
520 | if( pushed > 0) | ||
521 | { | ||
522 | keeper_toggle_nil_sentinels( L, lua_gettop( L) - pushed, eLM_FromKeeper); | ||
523 | } | ||
524 | } | ||
525 | else // linda is cancelled | ||
526 | { | ||
527 | // do nothing and return lanes.cancel_error | ||
528 | push_unique_key( L, CANCEL_ERROR); | ||
529 | pushed = 1; | ||
530 | } | ||
531 | // an error can be raised if we attempt to read an unregistered function | ||
532 | if( pushed < 0) | ||
533 | { | ||
534 | return luaL_error( L, "tried to copy unsupported types"); | ||
535 | } | ||
536 | |||
537 | return pushed; | ||
538 | } | ||
539 | |||
540 | |||
541 | /* | ||
542 | * [true] = linda_limit( linda_ud, key_num|str|bool|lightuserdata, int) | ||
543 | * | ||
544 | * Set a limit on a single Linda key. | ||
545 | * Optionally wake threads waiting to write on the linda, in case the limit enables them to do so | ||
546 | */ | ||
547 | LUAG_FUNC( linda_limit) | ||
548 | { | ||
549 | struct s_Linda* linda = lua_toLinda( L, 1); | ||
550 | int pushed; | ||
551 | |||
552 | // make sure we got 3 arguments: the linda, a key and a limit | ||
553 | luaL_argcheck( L, lua_gettop( L) == 3, 2, "wrong number of arguments"); | ||
554 | // make sure we got a numeric limit | ||
555 | luaL_checknumber( L, 3); | ||
556 | // make sure the key is of a valid type | ||
557 | check_key_types( L, 2, 2); | ||
558 | |||
559 | if( linda->simulate_cancel == CANCEL_NONE) | ||
560 | { | ||
561 | Keeper* const K = which_keeper(linda->U->keepers, LINDA_KEEPER_HASHSEED(linda)); | ||
562 | pushed = keeper_call( linda->U, K->L, KEEPER_API( limit), L, linda, 2); | ||
563 | ASSERT_L( pushed == 0 || pushed == 1); // no error, optional boolean value saying if we should wake blocked writer threads | ||
564 | if( pushed == 1) | ||
565 | { | ||
566 | ASSERT_L( lua_type( L, -1) == LUA_TBOOLEAN && lua_toboolean( L, -1) == 1); | ||
567 | SIGNAL_ALL( &linda->read_happened); // To be done from within the 'K' locking area | ||
568 | } | ||
569 | } | ||
570 | else // linda is cancelled | ||
571 | { | ||
572 | // do nothing and return lanes.cancel_error | ||
573 | push_unique_key( L, CANCEL_ERROR); | ||
574 | pushed = 1; | ||
575 | } | ||
576 | // propagate pushed boolean if any | ||
577 | return pushed; | ||
578 | } | ||
579 | |||
580 | |||
581 | /* | ||
582 | * (void) = linda_cancel( linda_ud, "read"|"write"|"both"|"none") | ||
583 | * | ||
584 | * Signal linda so that waiting threads wake up as if their own lane was cancelled | ||
585 | */ | ||
586 | LUAG_FUNC( linda_cancel) | ||
587 | { | ||
588 | struct s_Linda* linda = lua_toLinda( L, 1); | ||
589 | char const* who = luaL_optstring( L, 2, "both"); | ||
590 | |||
591 | // make sure we got at most 2 arguments: the linda and an optional wake mode | ||
592 | luaL_argcheck( L, lua_gettop( L) <= 2, 2, "wrong number of arguments"); | ||
593 | |||
594 | linda->simulate_cancel = CANCEL_SOFT; | ||
595 | if( strcmp( who, "both") == 0) // tell both blocked readers and writers to wake up | ||
596 | { | ||
597 | SIGNAL_ALL( &linda->write_happened); | ||
598 | SIGNAL_ALL( &linda->read_happened); | ||
599 | } | ||
600 | else if( strcmp( who, "none") == 0) // reset flag | ||
601 | { | ||
602 | linda->simulate_cancel = CANCEL_NONE; | ||
603 | } | ||
604 | else if( strcmp( who, "read") == 0) // tell blocked readers to wake up | ||
605 | { | ||
606 | SIGNAL_ALL( &linda->write_happened); | ||
607 | } | ||
608 | else if( strcmp( who, "write") == 0) // tell blocked writers to wake up | ||
609 | { | ||
610 | SIGNAL_ALL( &linda->read_happened); | ||
611 | } | ||
612 | else | ||
613 | { | ||
614 | return luaL_error( L, "unknown wake hint '%s'", who); | ||
615 | } | ||
616 | return 0; | ||
617 | } | ||
618 | |||
619 | |||
620 | /* | ||
621 | * lightuserdata= linda_deep( linda_ud ) | ||
622 | * | ||
623 | * Return the 'deep' userdata pointer, identifying the Linda. | ||
624 | * | ||
625 | * This is needed for using Lindas as key indices (timer system needs it); | ||
626 | * separately created proxies of the same underlying deep object will have | ||
627 | * different userdata and won't be known to be essentially the same deep one | ||
628 | * without this. | ||
629 | */ | ||
630 | LUAG_FUNC( linda_deep) | ||
631 | { | ||
632 | struct s_Linda* linda= lua_toLinda( L, 1); | ||
633 | lua_pushlightuserdata( L, linda); // just the address | ||
634 | return 1; | ||
635 | } | ||
636 | |||
637 | |||
638 | /* | ||
639 | * string = linda:__tostring( linda_ud) | ||
640 | * | ||
641 | * Return the stringification of a linda | ||
642 | * | ||
643 | * Useful for concatenation or debugging purposes | ||
644 | */ | ||
645 | |||
646 | static int linda_tostring( lua_State* L, int idx_, bool_t opt_) | ||
647 | { | ||
648 | struct s_Linda* linda = (struct s_Linda*) luaG_todeep( L, linda_id, idx_); | ||
649 | if( !opt_) | ||
650 | { | ||
651 | luaL_argcheck( L, linda, idx_, "expecting a linda object"); | ||
652 | } | ||
653 | if( linda != NULL) | ||
654 | { | ||
655 | char text[128]; | ||
656 | int len; | ||
657 | if( linda->name[0]) | ||
658 | len = sprintf( text, "Linda: %.*s", (int)sizeof(text) - 8, linda->name); | ||
659 | else | ||
660 | len = sprintf( text, "Linda: %p", linda); | ||
661 | lua_pushlstring( L, text, len); | ||
662 | return 1; | ||
663 | } | ||
664 | return 0; | ||
665 | } | ||
666 | |||
667 | LUAG_FUNC( linda_tostring) | ||
668 | { | ||
669 | return linda_tostring( L, 1, FALSE); | ||
670 | } | ||
671 | |||
672 | |||
673 | /* | ||
674 | * string = linda:__concat( a, b) | ||
675 | * | ||
676 | * Return the concatenation of a pair of items, one of them being a linda | ||
677 | * | ||
678 | * Useful for concatenation or debugging purposes | ||
679 | */ | ||
680 | LUAG_FUNC( linda_concat) | ||
681 | { // linda1? linda2? | ||
682 | bool_t atLeastOneLinda = FALSE; | ||
683 | // Lua semantics enforce that one of the 2 arguments is a Linda, but not necessarily both. | ||
684 | if( linda_tostring( L, 1, TRUE)) | ||
685 | { | ||
686 | atLeastOneLinda = TRUE; | ||
687 | lua_replace( L, 1); | ||
688 | } | ||
689 | if( linda_tostring( L, 2, TRUE)) | ||
690 | { | ||
691 | atLeastOneLinda = TRUE; | ||
692 | lua_replace( L, 2); | ||
693 | } | ||
694 | if( !atLeastOneLinda) // should not be possible | ||
695 | { | ||
696 | return luaL_error( L, "internal error: linda_concat called on non-Linda"); | ||
697 | } | ||
698 | lua_concat( L, 2); | ||
699 | return 1; | ||
700 | } | ||
701 | |||
702 | /* | ||
703 | * table = linda:dump() | ||
704 | * return a table listing all pending data inside the linda | ||
705 | */ | ||
706 | LUAG_FUNC( linda_dump) | ||
707 | { | ||
708 | struct s_Linda* linda = lua_toLinda( L, 1); | ||
709 | ASSERT_L( linda->U == universe_get( L)); | ||
710 | return keeper_push_linda_storage( linda->U, L, linda, LINDA_KEEPER_HASHSEED( linda)); | ||
711 | } | ||
712 | |||
713 | /* | ||
714 | * table = linda:__towatch() | ||
715 | * return a table listing all pending data inside the linda (for the Decoda debugger) | ||
716 | */ | ||
717 | LUAG_FUNC( linda_towatch) | ||
718 | { | ||
719 | struct s_Linda* linda = lua_toLinda( L, 1); | ||
720 | int pushed; | ||
721 | ASSERT_L( linda->U == universe_get( L)); | ||
722 | pushed = keeper_push_linda_storage( linda->U, L, linda, LINDA_KEEPER_HASHSEED( linda)); | ||
723 | if( pushed == 0) | ||
724 | { | ||
725 | // if the linda is empty, don't return nil | ||
726 | pushed = linda_tostring( L, 1, FALSE); | ||
727 | } | ||
728 | return pushed; | ||
729 | } | ||
730 | |||
731 | /* | ||
732 | * Identity function of a shared userdata object. | ||
733 | * | ||
734 | * lightuserdata= linda_id( "new" [, ...] ) | ||
735 | * = linda_id( "delete", lightuserdata ) | ||
736 | * | ||
737 | * Creation and cleanup of actual 'deep' objects. 'luaG_...' will wrap them into | ||
738 | * regular userdata proxies, per each state using the deep data. | ||
739 | * | ||
740 | * tbl= linda_id( "metatable" ) | ||
741 | * | ||
742 | * Returns a metatable for the proxy objects ('__gc' method not needed; will | ||
743 | * be added by 'luaG_...') | ||
744 | * | ||
745 | * string= linda_id( "module") | ||
746 | * | ||
747 | * Returns the name of the module that a state should require | ||
748 | * in order to keep a handle on the shared library that exported the idfunc | ||
749 | * | ||
750 | * = linda_id( str, ... ) | ||
751 | * | ||
752 | * For any other strings, the ID function must not react at all. This allows | ||
753 | * future extensions of the system. | ||
754 | */ | ||
755 | static void* linda_id( lua_State* L, DeepOp op_) | ||
756 | { | ||
757 | switch( op_) | ||
758 | { | ||
759 | case eDO_new: | ||
760 | { | ||
761 | Universe* const U = universe_get(L); | ||
762 | struct s_Linda* s; | ||
763 | size_t name_len = 0; | ||
764 | char const* linda_name = NULL; | ||
765 | unsigned long linda_group = 0; | ||
766 | // should have a string and/or a number of the stack as parameters (name and group) | ||
767 | switch( lua_gettop( L)) | ||
768 | { | ||
769 | default: // 0 | ||
770 | break; | ||
771 | |||
772 | case 1: // 1 parameter, either a name or a group | ||
773 | if( lua_type( L, -1) == LUA_TSTRING) | ||
774 | { | ||
775 | linda_name = lua_tolstring( L, -1, &name_len); | ||
776 | } | ||
777 | else | ||
778 | { | ||
779 | linda_group = (unsigned long) lua_tointeger( L, -1); | ||
780 | } | ||
781 | break; | ||
782 | |||
783 | case 2: // 2 parameters, a name and group, in that order | ||
784 | linda_name = lua_tolstring( L, -2, &name_len); | ||
785 | linda_group = (unsigned long) lua_tointeger( L, -1); | ||
786 | break; | ||
787 | } | ||
788 | |||
789 | /* The deep data is allocated separately from the Lua stack; we might no | ||
790 | * longer be around when the last reference to it is released. | ||
791 | * One can use any memory allocation scheme. | ||
792 | * just don't use L's allocF because we don't know which state will get the honor of GCing the linda | ||
793 | */ | ||
794 | { | ||
795 | AllocatorDefinition* const allocD = &U->internal_allocator; | ||
796 | s = (struct s_Linda*) allocD->allocF(allocD->allocUD, NULL, 0, sizeof(struct s_Linda) + name_len); // terminating 0 is already included | ||
797 | } | ||
798 | if( s) | ||
799 | { | ||
800 | s->prelude.magic.value = DEEP_VERSION.value; | ||
801 | SIGNAL_INIT( &s->read_happened); | ||
802 | SIGNAL_INIT( &s->write_happened); | ||
803 | s->U = U; | ||
804 | s->simulate_cancel = CANCEL_NONE; | ||
805 | s->group = linda_group << KEEPER_MAGIC_SHIFT; | ||
806 | s->name[0] = 0; | ||
807 | memcpy( s->name, linda_name, name_len ? name_len + 1 : 0); | ||
808 | } | ||
809 | return s; | ||
810 | } | ||
811 | |||
812 | case eDO_delete: | ||
813 | { | ||
814 | Keeper* myK; | ||
815 | struct s_Linda* linda = lua_touserdata( L, 1); | ||
816 | ASSERT_L( linda); | ||
817 | |||
818 | // Clean associated structures in the keeper state. | ||
819 | myK = which_keeper( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); | ||
820 | if (myK) | ||
821 | { | ||
822 | // if collected from my own keeper, we can't acquire/release it | ||
823 | // because we are already inside a protected area, and trying to do so would deadlock! | ||
824 | bool_t const need_acquire_release = (myK->L != L); | ||
825 | // Clean associated structures in the keeper state. | ||
826 | Keeper* const K = need_acquire_release ? keeper_acquire(linda->U->keepers, LINDA_KEEPER_HASHSEED(linda)) : myK; | ||
827 | // hopefully this won't ever raise an error as we would jump to the closest pcall site while forgetting to release the keeper mutex... | ||
828 | keeper_call(linda->U, K->L, KEEPER_API(clear), L, linda, 0); | ||
829 | if(need_acquire_release) | ||
830 | { | ||
831 | keeper_release(K); | ||
832 | } | ||
833 | } | ||
834 | |||
835 | // There aren't any lanes waiting on these lindas, since all proxies have been gc'ed. Right? | ||
836 | SIGNAL_FREE( &linda->read_happened); | ||
837 | SIGNAL_FREE( &linda->write_happened); | ||
838 | { | ||
839 | AllocatorDefinition* const allocD = &linda->U->internal_allocator; | ||
840 | (void) allocD->allocF(allocD->allocUD, linda, sizeof(struct s_Linda) + strlen(linda->name), 0); | ||
841 | } | ||
842 | return NULL; | ||
843 | } | ||
844 | |||
845 | case eDO_metatable: | ||
846 | { | ||
847 | |||
848 | STACK_CHECK( L, 0); | ||
849 | lua_newtable( L); | ||
850 | // metatable is its own index | ||
851 | lua_pushvalue( L, -1); | ||
852 | lua_setfield( L, -2, "__index"); | ||
853 | |||
854 | // protect metatable from external access | ||
855 | lua_pushliteral( L, "Linda"); | ||
856 | lua_setfield( L, -2, "__metatable"); | ||
857 | |||
858 | lua_pushcfunction( L, LG_linda_tostring); | ||
859 | lua_setfield( L, -2, "__tostring"); | ||
860 | |||
861 | // Decoda __towatch support | ||
862 | lua_pushcfunction( L, LG_linda_towatch); | ||
863 | lua_setfield( L, -2, "__towatch"); | ||
864 | |||
865 | lua_pushcfunction( L, LG_linda_concat); | ||
866 | lua_setfield( L, -2, "__concat"); | ||
867 | |||
868 | // protected calls, to ensure associated keeper is always released even in case of error | ||
869 | // all function are the protected call wrapper, where the actual operation is provided as upvalue | ||
870 | // note that this kind of thing can break function lookup as we use the function pointer here and there | ||
871 | |||
872 | lua_pushcfunction( L, LG_linda_send); | ||
873 | lua_pushcclosure( L, LG_linda_protected_call, 1); | ||
874 | lua_setfield( L, -2, "send"); | ||
875 | |||
876 | lua_pushcfunction( L, LG_linda_receive); | ||
877 | lua_pushcclosure( L, LG_linda_protected_call, 1); | ||
878 | lua_setfield( L, -2, "receive"); | ||
879 | |||
880 | lua_pushcfunction( L, LG_linda_limit); | ||
881 | lua_pushcclosure( L, LG_linda_protected_call, 1); | ||
882 | lua_setfield( L, -2, "limit"); | ||
883 | |||
884 | lua_pushcfunction( L, LG_linda_set); | ||
885 | lua_pushcclosure( L, LG_linda_protected_call, 1); | ||
886 | lua_setfield( L, -2, "set"); | ||
887 | |||
888 | lua_pushcfunction( L, LG_linda_count); | ||
889 | lua_pushcclosure( L, LG_linda_protected_call, 1); | ||
890 | lua_setfield( L, -2, "count"); | ||
891 | |||
892 | lua_pushcfunction( L, LG_linda_get); | ||
893 | lua_pushcclosure( L, LG_linda_protected_call, 1); | ||
894 | lua_setfield( L, -2, "get"); | ||
895 | |||
896 | lua_pushcfunction( L, LG_linda_cancel); | ||
897 | lua_setfield( L, -2, "cancel"); | ||
898 | |||
899 | lua_pushcfunction( L, LG_linda_deep); | ||
900 | lua_setfield( L, -2, "deep"); | ||
901 | |||
902 | lua_pushcfunction( L, LG_linda_dump); | ||
903 | lua_pushcclosure( L, LG_linda_protected_call, 1); | ||
904 | lua_setfield( L, -2, "dump"); | ||
905 | |||
906 | // some constants | ||
907 | push_unique_key( L, BATCH_SENTINEL); | ||
908 | lua_setfield( L, -2, "batched"); | ||
909 | |||
910 | push_unique_key( L, NIL_SENTINEL); | ||
911 | lua_setfield( L, -2, "null"); | ||
912 | |||
913 | STACK_END( L, 1); | ||
914 | return NULL; | ||
915 | } | ||
916 | |||
917 | case eDO_module: | ||
918 | // linda is a special case because we know lanes must be loaded from the main lua state | ||
919 | // to be able to ever get here, so we know it will remain loaded as long as the main state is around | ||
920 | // in other words, forever. | ||
921 | default: | ||
922 | { | ||
923 | return NULL; | ||
924 | } | ||
925 | } | ||
926 | } | ||
927 | |||
928 | /* | ||
929 | * ud = lanes.linda( [name[,group]]) | ||
930 | * | ||
931 | * returns a linda object, or raises an error if creation failed | ||
932 | */ | ||
933 | LUAG_FUNC( linda) | ||
934 | { | ||
935 | int const top = lua_gettop( L); | ||
936 | luaL_argcheck( L, top <= 2, top, "too many arguments"); | ||
937 | if( top == 1) | ||
938 | { | ||
939 | int const t = lua_type( L, 1); | ||
940 | luaL_argcheck( L, t == LUA_TSTRING || t == LUA_TNUMBER, 1, "wrong parameter (should be a string or a number)"); | ||
941 | } | ||
942 | else if( top == 2) | ||
943 | { | ||
944 | luaL_checktype( L, 1, LUA_TSTRING); | ||
945 | luaL_checktype( L, 2, LUA_TNUMBER); | ||
946 | } | ||
947 | return luaG_newdeepuserdata( L, linda_id, 0); | ||
948 | } | ||
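The comment block above linda_id() documents the generic deep-userdata protocol rather than anything Linda-specific, so a compact illustration may help: a hedged sketch of an idfunc for a hypothetical 'Counter' deep object, written against the C-era API shown in this file (DeepPrelude, DeepOp, the eDO_* operations, DEEP_VERSION and luaG_newdeepuserdata). The C++ port below keeps the same linda_id() entry point, but may spell parts of this API differently.

extern "C" {
#include "lua.h"
}
#include "deep.h" // DeepPrelude, DeepOp, luaG_newdeepuserdata, as used by linda.c above

#include <cstdlib>

struct Counter
{
    DeepPrelude prelude; // deep userdata MUST start with this header
    int value;
};

static void* counter_id(lua_State* L, DeepOp op_)
{
    switch (op_)
    {
        case eDO_new: // allocate the shared object; it may outlive the state that created it
        {
            Counter* const c = static_cast<Counter*>(malloc(sizeof(Counter)));
            if (c)
            {
                c->prelude.magic.value = DEEP_VERSION.value; // same stamping as linda_id above
                c->value = 0;
            }
            return c;
        }

        case eDO_delete: // the last proxy anywhere was collected: free the shared object
            free(lua_touserdata(L, 1));
            return nullptr;

        case eDO_metatable: // push a metatable for the per-state proxies ('__gc' is added by Lanes)
            lua_newtable(L);
            // proxy methods would be registered here
            return nullptr;

        case eDO_module: // per the contract above, the name of the module to require so the exporting library stays loaded
            return (void*) "counter"; // (linda itself returns nullptr here because lanes.core is always loaded)

        default: // unknown operations must be ignored, to allow future extensions
            return nullptr;
    }
}

// factory exposed to Lua: returns a proxy userdata bound to a fresh Counter
static int counter_new(lua_State* L)
{
    return luaG_newdeepuserdata(L, counter_id, 0);
}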
diff --git a/src/linda.cpp b/src/linda.cpp new file mode 100644 index 0000000..e749f52 --- /dev/null +++ b/src/linda.cpp | |||
@@ -0,0 +1,1025 @@ | |||
1 | /* | ||
2 | * LINDA.CPP Copyright (c) 2018-2024, Benoit Germain | ||
3 | * | ||
4 | * Linda deep userdata. | ||
5 | */ | ||
6 | |||
7 | /* | ||
8 | =============================================================================== | ||
9 | |||
10 | Copyright (C) 2018 benoit Germain <bnt.germain@gmail.com> | ||
11 | |||
12 | Permission is hereby granted, free of charge, to any person obtaining a copy | ||
13 | of this software and associated documentation files (the "Software"), to deal | ||
14 | in the Software without restriction, including without limitation the rights | ||
15 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||
16 | copies of the Software, and to permit persons to whom the Software is | ||
17 | furnished to do so, subject to the following conditions: | ||
18 | |||
19 | The above copyright notice and this permission notice shall be included in | ||
20 | all copies or substantial portions of the Software. | ||
21 | |||
22 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
23 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
24 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
25 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
26 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
27 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||
28 | THE SOFTWARE. | ||
29 | |||
30 | =============================================================================== | ||
31 | */ | ||
32 | |||
33 | #include "compat.h" | ||
34 | #include "deep.h" | ||
35 | #include "keeper.h" | ||
36 | #include "lanes_private.h" | ||
37 | #include "threading.h" | ||
38 | #include "tools.h" | ||
39 | #include "universe.h" | ||
40 | |||
41 | #include <array> | ||
42 | #include <variant> | ||
43 | |||
44 | /* | ||
45 | * Actual data is kept within a keeper state, which is hashed by the 'Linda' | ||
46 | * pointer (which is the same for all userdatas pointing to it). | ||
47 | */ | ||
48 | class Linda : public DeepPrelude // Deep userdata MUST start with this header | ||
49 | { | ||
50 | private: | ||
51 | |||
52 | static constexpr size_t kEmbeddedNameLength = 24; | ||
53 | using EmbeddedName = std::array<char, kEmbeddedNameLength>; | ||
54 | struct AllocatedName | ||
55 | { | ||
56 | size_t len{ 0 }; | ||
57 | char* name{ nullptr }; | ||
58 | }; | ||
59 | // depending on the name length, it is either embedded inside the Linda, or allocated separately | ||
60 | std::variant<AllocatedName, EmbeddedName> m_name; | ||
61 | |||
62 | public: | ||
63 | |||
64 | std::condition_variable m_read_happened; | ||
65 | std::condition_variable m_write_happened; | ||
66 | Universe* const U; // the universe this linda belongs to | ||
67 | uintptr_t const group; // a group to control keeper allocation between lindas | ||
68 | CancelRequest simulate_cancel{ CancelRequest::None }; | ||
69 | |||
70 | public: | ||
71 | |||
72 | // the Linda deep object is allocated through the Universe's internal allocator | ||
73 | [[nodiscard]] static void* operator new(size_t size_, Universe* U_) noexcept { return U_->internal_allocator.alloc(size_); } | ||
74 | // always embedded somewhere else or "in-place constructed" as a full userdata | ||
75 | // can't actually delete the operator because the compiler generates stack unwinding code that could call it in case of exception | ||
76 | static void operator delete(void* p_, Universe* U_) { U_->internal_allocator.free(p_, sizeof(Linda)); } | ||
77 | // this one is for us, to make sure memory is freed by the correct allocator | ||
78 | static void operator delete(void* p_) { static_cast<Linda*>(p_)->U->internal_allocator.free(p_, sizeof(Linda)); } | ||
79 | |||
80 | Linda(Universe* U_, uintptr_t group_, char const* name_, size_t len_) | ||
81 | : U{ U_ } | ||
82 | , group{ group_ << KEEPER_MAGIC_SHIFT } | ||
83 | { | ||
84 | setName(name_, len_); | ||
85 | } | ||
86 | |||
87 | ~Linda() | ||
88 | { | ||
89 | if (std::holds_alternative<AllocatedName>(m_name)) | ||
90 | { | ||
91 | AllocatedName& name = std::get<AllocatedName>(m_name); | ||
92 | U->internal_allocator.free(name.name, name.len); | ||
93 | } | ||
94 | } | ||
95 | |||
96 | private: | ||
97 | |||
98 | void setName(char const* name_, size_t len_) | ||
99 | { | ||
100 | // keep default | ||
101 | if (!name_ || len_ == 0) | ||
102 | { | ||
103 | return; | ||
104 | } | ||
105 | ++len_; // don't forget terminating 0 | ||
106 | if (len_ < kEmbeddedNameLength) | ||
107 | { | ||
108 | m_name.emplace<EmbeddedName>(); | ||
109 | char* const name{ std::get<EmbeddedName>(m_name).data() }; | ||
110 | memcpy(name, name_, len_); | ||
111 | } | ||
112 | else | ||
113 | { | ||
114 | AllocatedName& name = std::get<AllocatedName>(m_name); | ||
115 | name.name = static_cast<char*>(U->internal_allocator.alloc(len_)); | ||
116 | name.len = len_; | ||
117 | memcpy(name.name, name_, len_); | ||
118 | } | ||
119 | } | ||
120 | |||
121 | public: | ||
122 | |||
123 | uintptr_t hashSeed() const { return group ? group : std::bit_cast<uintptr_t>(this); } | ||
124 | |||
125 | char const* getName() const | ||
126 | { | ||
127 | if (std::holds_alternative<AllocatedName>(m_name)) | ||
128 | { | ||
129 | AllocatedName const& name = std::get<AllocatedName>(m_name); | ||
130 | return name.name; | ||
131 | } | ||
132 | if (std::holds_alternative<EmbeddedName>(m_name)) | ||
133 | { | ||
134 | char const* const name{ std::get<EmbeddedName>(m_name).data() }; | ||
135 | return name; | ||
136 | } | ||
137 | return nullptr; | ||
138 | } | ||
139 | }; | ||
140 | [[nodiscard]] static void* linda_id(lua_State*, DeepOp); | ||
141 | |||
142 | template<bool OPT> | ||
143 | [[nodiscard]] static inline Linda* lua_toLinda(lua_State* L, int idx_) | ||
144 | { | ||
145 | Linda* const linda{ static_cast<Linda*>(luaG_todeep(L, linda_id, idx_)) }; | ||
146 | if (!OPT) | ||
147 | { | ||
148 | luaL_argcheck(L, linda != nullptr, idx_, "expecting a linda object"); | ||
149 | } | ||
150 | ASSERT_L(linda->U == universe_get(L)); | ||
151 | return linda; | ||
152 | } | ||
153 | |||
154 | // ################################################################################################# | ||
155 | |||
156 | static void check_key_types(lua_State* L, int start_, int end_) | ||
157 | { | ||
158 | for (int i{ start_ }; i <= end_; ++i) | ||
159 | { | ||
160 | int const t{ lua_type(L, i) }; | ||
161 | if (t == LUA_TBOOLEAN || t == LUA_TNUMBER || t == LUA_TSTRING || t == LUA_TLIGHTUSERDATA) | ||
162 | { | ||
163 | continue; | ||
164 | } | ||
165 | luaL_error(L, "argument #%d: invalid key type (not a boolean, string, number or light userdata)", i); // doesn't return | ||
166 | } | ||
167 | } | ||
168 | |||
169 | // ################################################################################################# | ||
170 | |||
171 | LUAG_FUNC(linda_protected_call) | ||
172 | { | ||
173 | Linda* const linda{ lua_toLinda<false>(L, 1) }; | ||
174 | |||
175 | // acquire the keeper | ||
176 | Keeper* const K{ keeper_acquire(linda->U->keepers, linda->hashSeed()) }; | ||
177 | lua_State* const KL{ K ? K->L : nullptr }; | ||
178 | if (KL == nullptr) | ||
179 | return 0; | ||
180 | |||
181 | // retrieve the actual function to be called and move it before the arguments | ||
182 | lua_pushvalue(L, lua_upvalueindex(1)); | ||
183 | lua_insert(L, 1); | ||
184 | // do a protected call | ||
185 | int const rc{ lua_pcall(L, lua_gettop(L) - 1, LUA_MULTRET, 0) }; | ||
186 | |||
187 | // release the keeper | ||
188 | keeper_release(K); | ||
189 | |||
190 | // if there was an error, forward it | ||
191 | if (rc != LUA_OK) | ||
192 | { | ||
193 | raise_lua_error(L); | ||
194 | } | ||
195 | // return whatever the actual operation provided | ||
196 | return lua_gettop(L); | ||
197 | } | ||
198 | |||
199 | // ################################################################################################# | ||
200 | |||
201 | /* | ||
202 | * bool= linda_send( linda_ud, [timeout_secs=-1,] [linda.null,] key_num|str|bool|lightuserdata, ... ) | ||
203 | * | ||
204 | * Send one or more values to a Linda. If there is a limit, all values must fit. | ||
205 | * | ||
206 | * Returns: 'true' if the value was queued | ||
207 | * 'false' for timeout (only happens when the queue size is limited) | ||
208 | * nil, CANCEL_ERROR if cancelled | ||
209 | */ | ||
210 | LUAG_FUNC(linda_send) | ||
211 | { | ||
212 | Linda* const linda{ lua_toLinda<false>(L, 1) }; | ||
213 | std::chrono::time_point<std::chrono::steady_clock> until{ std::chrono::time_point<std::chrono::steady_clock>::max() }; | ||
214 | int key_i{ 2 }; // index of first key, if timeout not there | ||
215 | |||
216 | if (lua_type(L, 2) == LUA_TNUMBER) // we don't want to use lua_isnumber() because of autocoercion | ||
217 | { | ||
218 | lua_Duration const duration{ lua_tonumber(L, 2) }; | ||
219 | if (duration.count() >= 0.0) | ||
220 | { | ||
221 | until = std::chrono::steady_clock::now() + std::chrono::duration_cast<std::chrono::steady_clock::duration>(duration); | ||
222 | } | ||
223 | ++key_i; | ||
224 | } | ||
225 | else if (lua_isnil(L, 2)) // alternate explicit "infinite timeout" by passing nil before the key | ||
226 | { | ||
227 | ++key_i; | ||
228 | } | ||
229 | |||
230 | bool const as_nil_sentinel{ NIL_SENTINEL.equals(L, key_i) }; // if true, send() will silently send a single nil if nothing is provided | ||
231 | if (as_nil_sentinel) | ||
232 | { | ||
233 | // the real key to send data to is after the NIL_SENTINEL marker | ||
234 | ++key_i; | ||
235 | } | ||
236 | |||
237 | // make sure the key is of a valid type | ||
238 | check_key_types(L, key_i, key_i); | ||
239 | |||
240 | STACK_GROW(L, 1); | ||
241 | |||
242 | // make sure there is something to send | ||
243 | if (lua_gettop(L) == key_i) | ||
244 | { | ||
245 | if (as_nil_sentinel) | ||
246 | { | ||
247 | // send a single nil if nothing is provided | ||
248 | NIL_SENTINEL.pushKey(L); | ||
249 | } | ||
250 | else | ||
251 | { | ||
252 | return luaL_error(L, "no data to send"); | ||
253 | } | ||
254 | } | ||
255 | |||
256 | // convert nils to some special non-nil sentinel in sent values | ||
257 | keeper_toggle_nil_sentinels(L, key_i + 1, LookupMode::ToKeeper); | ||
258 | bool ret{ false }; | ||
259 | CancelRequest cancel{ CancelRequest::None }; | ||
260 | int pushed{ 0 }; | ||
261 | { | ||
262 | Lane* const lane{ LANE_POINTER_REGKEY.readLightUserDataValue<Lane>(L) }; | ||
263 | Keeper* const K{ which_keeper(linda->U->keepers, linda->hashSeed()) }; | ||
264 | lua_State* const KL{ K ? K->L : nullptr }; | ||
265 | if (KL == nullptr) | ||
266 | return 0; | ||
267 | |||
268 | STACK_CHECK_START_REL(KL, 0); | ||
269 | for (bool try_again{ true };;) | ||
270 | { | ||
271 | if (lane != nullptr) | ||
272 | { | ||
273 | cancel = lane->cancel_request; | ||
274 | } | ||
275 | cancel = (cancel != CancelRequest::None) ? cancel : linda->simulate_cancel; | ||
276 | // if user wants to cancel, or looped because of a timeout, the call returns without sending anything | ||
277 | if (!try_again || cancel != CancelRequest::None) | ||
278 | { | ||
279 | pushed = 0; | ||
280 | break; | ||
281 | } | ||
282 | |||
283 | STACK_CHECK(KL, 0); | ||
284 | pushed = keeper_call(linda->U, KL, KEEPER_API(send), L, linda, key_i); | ||
285 | if (pushed < 0) | ||
286 | { | ||
287 | break; | ||
288 | } | ||
289 | ASSERT_L(pushed == 1); | ||
290 | |||
291 | ret = lua_toboolean(L, -1) ? true : false; | ||
292 | lua_pop(L, 1); | ||
293 | |||
294 | if (ret) | ||
295 | { | ||
296 | // Wake up ALL waiting threads | ||
297 | linda->m_write_happened.notify_all(); | ||
298 | break; | ||
299 | } | ||
300 | |||
301 | // instant timeout to bypass the wait syscall | ||
302 | if (std::chrono::steady_clock::now() >= until) | ||
303 | { | ||
304 | break; /* no wait; instant timeout */ | ||
305 | } | ||
306 | |||
307 | // storage limit hit, wait until timeout or signalled that we should try again | ||
308 | { | ||
309 | Lane::Status prev_status{ Lane::Error }; // prevent 'might be used uninitialized' warnings | ||
310 | if (lane != nullptr) | ||
311 | { | ||
312 | // change status of lane to "waiting" | ||
313 | prev_status = lane->m_status; // Running, most likely | ||
314 | ASSERT_L(prev_status == Lane::Running); // but check, just in case | ||
315 | lane->m_status = Lane::Waiting; | ||
316 | ASSERT_L(lane->m_waiting_on == nullptr); | ||
317 | lane->m_waiting_on = &linda->m_read_happened; | ||
318 | } | ||
319 | // could not send because no room: wait until some data was read before trying again, or until timeout is reached | ||
320 | std::unique_lock<std::mutex> keeper_lock{ K->m_mutex, std::adopt_lock }; | ||
321 | std::cv_status const status{ linda->m_read_happened.wait_until(keeper_lock, until) }; | ||
322 | keeper_lock.release(); // we don't want the unique_lock to unlock the keeper mutex when it goes out of scope (keeper_release() does that) | ||
323 | try_again = (status == std::cv_status::no_timeout); // detect spurious wakeups | ||
324 | if (lane != nullptr) | ||
325 | { | ||
326 | lane->m_waiting_on = nullptr; | ||
327 | lane->m_status = prev_status; | ||
328 | } | ||
329 | } | ||
330 | } | ||
331 | STACK_CHECK(KL, 0); | ||
332 | } | ||
333 | |||
334 | if (pushed < 0) | ||
335 | { | ||
336 | return luaL_error(L, "tried to copy unsupported types"); | ||
337 | } | ||
338 | |||
339 | switch (cancel) | ||
340 | { | ||
341 | case CancelRequest::Soft: | ||
342 | // if user wants to soft-cancel, the call returns lanes.cancel_error | ||
343 | CANCEL_ERROR.pushKey(L); | ||
344 | return 1; | ||
345 | |||
346 | case CancelRequest::Hard: | ||
347 | // raise an error interrupting execution only in case of hard cancel | ||
348 | raise_cancel_error(L); // raises an error and doesn't return | ||
349 | |||
350 | default: | ||
351 | lua_pushboolean(L, ret); // true (success) or false (timeout) | ||
352 | return 1; | ||
353 | } | ||
354 | } | ||
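As an illustration of the send semantics documented above, here is a minimal Lua-side sketch (it assumes Lanes is installed and configured as usual; the slot names and values are made up):

    local lanes = require "lanes".configure()
    local linda = lanes.linda()

    -- plain send: queue two values under slot "jobs"; returns true on success
    assert(linda:send("jobs", "job1", "job2"))

    -- send with a 0.5 second timeout: returns false if a limit is set and no room frees up in time
    local ok = linda:send(0.5, "jobs", "job3")

    -- linda.null lets an explicit nil travel through the linda
    linda:send("jobs", linda.null)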
355 | |||
356 | // ################################################################################################# | ||
357 | |||
358 | /* | ||
359 | * 2 modes of operation | ||
360 | * [val, key]= linda_receive( linda_ud, [timeout_secs_num=-1], key_num|str|bool|lightuserdata [, ...] ) | ||
361 | * Consumes a single value from the Linda, in any key. | ||
362 | * Returns: received value (which is consumed from the slot), and the key which had it | ||
363 | |||
364 | * [val1, ... valCOUNT]= linda_receive( linda_ud, [timeout_secs_num=-1], linda.batched, key_num|str|bool|lightuserdata, min_COUNT[, max_COUNT]) | ||
365 | * Consumes between min_COUNT and max_COUNT values from the linda, from a single key. | ||
366 | * returns the actual consumed values, or nil if there weren't enough values to consume | ||
367 | * | ||
368 | */ | ||
369 | // xxh64 hash generated at https://www.pelock.com/products/hash-calculator | ||
370 | static constexpr UniqueKey BATCH_SENTINEL{ 0x2DDFEE0968C62AA7ull }; | ||
371 | LUAG_FUNC(linda_receive) | ||
372 | { | ||
373 | Linda* const linda{ lua_toLinda<false>(L, 1) }; | ||
374 | std::chrono::time_point<std::chrono::steady_clock> until{ std::chrono::time_point<std::chrono::steady_clock>::max() }; | ||
375 | int key_i{ 2 }; // index of first key, if timeout not there | ||
376 | |||
377 | if (lua_type(L, 2) == LUA_TNUMBER) // we don't want to use lua_isnumber() because of autocoercion | ||
378 | { | ||
379 | lua_Duration const duration{ lua_tonumber(L, 2) }; | ||
380 | if (duration.count() >= 0.0) | ||
381 | { | ||
382 | until = std::chrono::steady_clock::now() + std::chrono::duration_cast<std::chrono::steady_clock::duration>(duration); | ||
383 | } | ||
384 | ++key_i; | ||
385 | } | ||
386 | else if (lua_isnil(L, 2)) // alternate explicit "infinite timeout" by passing nil before the key | ||
387 | { | ||
388 | ++key_i; | ||
389 | } | ||
390 | |||
391 | keeper_api_t selected_keeper_receive{ nullptr }; | ||
392 | int expected_pushed_min{ 0 }, expected_pushed_max{ 0 }; | ||
393 | // are we in batched mode? | ||
394 | BATCH_SENTINEL.pushKey(L); | ||
395 | int const is_batched{ lua501_equal(L, key_i, -1) }; | ||
396 | lua_pop(L, 1); | ||
397 | if (is_batched) | ||
398 | { | ||
399 | // no need to pass linda.batched in the keeper state | ||
400 | ++key_i; | ||
401 | // make sure the keys are of a valid type | ||
402 | check_key_types(L, key_i, key_i); | ||
403 | // receive multiple values from a single slot | ||
404 | selected_keeper_receive = KEEPER_API(receive_batched); | ||
405 | // we expect a user-defined number of return values | ||
406 | expected_pushed_min = (int) luaL_checkinteger(L, key_i + 1); | ||
407 | expected_pushed_max = (int) luaL_optinteger(L, key_i + 2, expected_pushed_min); | ||
408 | // don't forget to count the key in addition to the values | ||
409 | ++expected_pushed_min; | ||
410 | ++expected_pushed_max; | ||
411 | if (expected_pushed_min > expected_pushed_max) | ||
412 | { | ||
413 | return luaL_error(L, "batched min/max error"); | ||
414 | } | ||
415 | } | ||
416 | else | ||
417 | { | ||
418 | // make sure the keys are of a valid type | ||
419 | check_key_types(L, key_i, lua_gettop(L)); | ||
420 | // receive a single value, checking multiple slots | ||
421 | selected_keeper_receive = KEEPER_API(receive); | ||
422 | // we expect a single (value, key) pair of returned values | ||
423 | expected_pushed_min = expected_pushed_max = 2; | ||
424 | } | ||
425 | |||
426 | Lane* const lane{ LANE_POINTER_REGKEY.readLightUserDataValue<Lane>(L) }; | ||
427 | Keeper* const K{ which_keeper(linda->U->keepers, linda->hashSeed()) }; | ||
428 | lua_State* const KL{ K ? K->L : nullptr }; | ||
429 | if (KL == nullptr) | ||
430 | return 0; | ||
431 | |||
432 | CancelRequest cancel{ CancelRequest::None }; | ||
433 | int pushed{ 0 }; | ||
434 | STACK_CHECK_START_REL(KL, 0); | ||
435 | for (bool try_again{ true };;) | ||
436 | { | ||
437 | if (lane != nullptr) | ||
438 | { | ||
439 | cancel = lane->cancel_request; | ||
440 | } | ||
441 | cancel = (cancel != CancelRequest::None) ? cancel : linda->simulate_cancel; | ||
442 | // if user wants to cancel, or looped because of a timeout, the call returns without reading anything | ||
443 | if (!try_again || cancel != CancelRequest::None) | ||
444 | { | ||
445 | pushed = 0; | ||
446 | break; | ||
447 | } | ||
448 | |||
449 | // all arguments of receive() but the first are passed to the keeper's receive function | ||
450 | pushed = keeper_call(linda->U, KL, selected_keeper_receive, L, linda, key_i); | ||
451 | if (pushed < 0) | ||
452 | { | ||
453 | break; | ||
454 | } | ||
455 | if (pushed > 0) | ||
456 | { | ||
457 | ASSERT_L(pushed >= expected_pushed_min && pushed <= expected_pushed_max); | ||
458 | // replace sentinels with real nils | ||
459 | keeper_toggle_nil_sentinels(L, lua_gettop(L) - pushed, LookupMode::FromKeeper); | ||
460 | // To be done from within the 'K' locking area | ||
461 | // | ||
462 | linda->m_read_happened.notify_all(); | ||
463 | break; | ||
464 | } | ||
465 | |||
466 | if (std::chrono::steady_clock::now() >= until) | ||
467 | { | ||
468 | break; /* instant timeout */ | ||
469 | } | ||
470 | |||
471 | // nothing received, wait until timeout or signalled that we should try again | ||
472 | { | ||
473 | Lane::Status prev_status{ Lane::Error }; // prevent 'might be used uninitialized' warnings | ||
474 | if (lane != nullptr) | ||
475 | { | ||
476 | // change status of lane to "waiting" | ||
477 | prev_status = lane->m_status; // Running, most likely | ||
478 | ASSERT_L(prev_status == Lane::Running); // but check, just in case | ||
479 | lane->m_status = Lane::Waiting; | ||
480 | ASSERT_L(lane->m_waiting_on == nullptr); | ||
481 | lane->m_waiting_on = &linda->m_write_happened; | ||
482 | } | ||
483 | // not enough data to read: wake up when data is sent, or when the timeout is reached | ||
484 | std::unique_lock<std::mutex> keeper_lock{ K->m_mutex, std::adopt_lock }; | ||
485 | std::cv_status const status{ linda->m_write_happened.wait_until(keeper_lock, until) }; | ||
486 | keeper_lock.release(); // we don't want the unique_lock to unlock the keeper mutex when it goes out of scope (keeper_release() does that) | ||
487 | try_again = (status == std::cv_status::no_timeout); // detect spurious wakeups | ||
488 | if (lane != nullptr) | ||
489 | { | ||
490 | lane->m_waiting_on = nullptr; | ||
491 | lane->m_status = prev_status; | ||
492 | } | ||
493 | } | ||
494 | } | ||
495 | STACK_CHECK(KL, 0); | ||
496 | |||
497 | if (pushed < 0) | ||
498 | { | ||
499 | return luaL_error(L, "tried to copy unsupported types"); | ||
500 | } | ||
501 | |||
502 | switch (cancel) | ||
503 | { | ||
504 | case CancelRequest::Soft: | ||
505 | // if user wants to soft-cancel, the call returns CANCEL_ERROR | ||
506 | CANCEL_ERROR.pushKey(L); | ||
507 | return 1; | ||
508 | |||
509 | case CancelRequest::Hard: | ||
510 | // raise an error interrupting execution only in case of hard cancel | ||
511 | raise_cancel_error(L); // raises an error and doesn't return | ||
512 | |||
513 | default: | ||
514 | return pushed; | ||
515 | } | ||
516 | } | ||
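The receiving side, sketched in Lua for the two modes documented above (reusing the linda from the send() sketch; the timeout and counts are arbitrary):

    -- single-value mode: wait up to 1 second on either slot, consuming one value;
    -- prints the consumed value together with the slot it came from, or nothing on timeout
    print(linda:receive(1.0, "jobs", "quit"))

    -- batched mode: consume between 2 and 4 values from the "jobs" slot in one call
    local batch = { linda:receive(linda.batched, "jobs", 2, 4) }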
517 | |||
518 | // ################################################################################################# | ||
519 | |||
520 | /* | ||
521 | * [true|lanes.cancel_error] = linda_set( linda_ud, key_num|str|bool|lightuserdata [, value [, ...]]) | ||
522 | * | ||
523 | * Set one or more values in the Linda. | ||
524 | * TODO: what do we do if we set to non-nil and limit is 0? | ||
525 | * | ||
526 | * The existing slot contents are replaced, and any queued entries are removed. | ||
527 | */ | ||
528 | LUAG_FUNC(linda_set) | ||
529 | { | ||
530 | Linda* const linda{ lua_toLinda<false>(L, 1) }; | ||
531 | bool const has_value{ lua_gettop(L) > 2 }; | ||
532 | // make sure the key is of a valid type (throws an error if not the case) | ||
533 | check_key_types(L, 2, 2); | ||
534 | |||
535 | Keeper* const K{ which_keeper(linda->U->keepers, linda->hashSeed()) }; | ||
536 | int pushed{ 0 }; | ||
537 | if (linda->simulate_cancel == CancelRequest::None) | ||
538 | { | ||
539 | if (has_value) | ||
540 | { | ||
541 | // convert nils to some special non-nil sentinel in sent values | ||
542 | keeper_toggle_nil_sentinels(L, 3, LookupMode::ToKeeper); | ||
543 | } | ||
544 | pushed = keeper_call(linda->U, K->L, KEEPER_API(set), L, linda, 2); | ||
545 | if (pushed >= 0) // no error? | ||
546 | { | ||
547 | ASSERT_L(pushed == 0 || pushed == 1); | ||
548 | |||
549 | if (has_value) | ||
550 | { | ||
551 | // we put some data in the slot, tell readers that they should wake | ||
552 | linda->m_write_happened.notify_all(); // To be done from within the 'K' locking area | ||
553 | } | ||
554 | if (pushed == 1) | ||
555 | { | ||
556 | // the key was full, but it is no longer the case, tell writers they should wake | ||
557 | ASSERT_L(lua_type(L, -1) == LUA_TBOOLEAN && lua_toboolean(L, -1) == 1); | ||
558 | linda->m_read_happened.notify_all(); // To be done from within the 'K' locking area | ||
559 | } | ||
560 | } | ||
561 | } | ||
562 | else // linda is cancelled | ||
563 | { | ||
564 | // do nothing and return lanes.cancel_error | ||
565 | CANCEL_ERROR.pushKey(L); | ||
566 | pushed = 1; | ||
567 | } | ||
568 | |||
569 | // must trigger any error after keeper state has been released | ||
570 | return (pushed < 0) ? luaL_error(L, "tried to copy unsupported types") : pushed; | ||
571 | } | ||
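A short Lua sketch of set() as described above (same illustrative linda); unlike send(), set() replaces whatever is queued instead of appending:

    linda:send("config", "a")
    linda:send("config", "b")   -- two queued values
    linda:set("config", "c")    -- the queue is dropped; the slot now holds only "c"
    linda:set("config")         -- no value at all: the slot is cleared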
572 | |||
573 | // ################################################################################################# | ||
574 | |||
575 | /* | ||
576 | * [val] = linda_count( linda_ud, [key [, ...]]) | ||
577 | * | ||
578 | * Get a count of the pending elements in the specified keys | ||
579 | */ | ||
580 | LUAG_FUNC(linda_count) | ||
581 | { | ||
582 | Linda* const linda{ lua_toLinda<false>(L, 1) }; | ||
583 | // make sure the keys are of a valid type | ||
584 | check_key_types(L, 2, lua_gettop(L)); | ||
585 | |||
586 | Keeper* const K{ which_keeper(linda->U->keepers, linda->hashSeed()) }; | ||
587 | int const pushed{ keeper_call(linda->U, K->L, KEEPER_API(count), L, linda, 2) }; | ||
588 | if (pushed < 0) | ||
589 | { | ||
590 | return luaL_error(L, "tried to count an invalid key"); | ||
591 | } | ||
592 | return pushed; | ||
593 | } | ||
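For completeness, a hedged count() sketch (same illustrative linda); the Lanes manual documents a single count for one key and a table of counts when no key is given:

    linda:send("jobs", 1, 2, 3)
    print(linda:count("jobs"))   -- number of values currently pending in "jobs"
    print(linda:count())         -- should be a table of pending counts, one entry per non-empty slot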
594 | |||
595 | // ################################################################################################# | ||
596 | |||
597 | /* | ||
598 | * [val [, ...]] = linda_get( linda_ud, key_num|str|bool|lightuserdata [, count = 1]) | ||
599 | * | ||
600 | * Get one or more values from Linda. | ||
601 | */ | ||
602 | LUAG_FUNC(linda_get) | ||
603 | { | ||
604 | Linda* const linda{ lua_toLinda<false>(L, 1) }; | ||
605 | lua_Integer const count{ luaL_optinteger(L, 3, 1) }; | ||
606 | luaL_argcheck(L, count >= 1, 3, "count should be >= 1"); | ||
607 | luaL_argcheck(L, lua_gettop(L) <= 3, 4, "too many arguments"); | ||
608 | // make sure the key is of a valid type (throws an error if not the case) | ||
609 | check_key_types(L, 2, 2); | ||
610 | |||
611 | int pushed{ 0 }; | ||
612 | if (linda->simulate_cancel == CancelRequest::None) | ||
613 | { | ||
614 | Keeper* const K{ which_keeper(linda->U->keepers, linda->hashSeed()) }; | ||
615 | pushed = keeper_call(linda->U, K->L, KEEPER_API(get), L, linda, 2); | ||
616 | if (pushed > 0) | ||
617 | { | ||
618 | keeper_toggle_nil_sentinels(L, lua_gettop(L) - pushed, LookupMode::FromKeeper); | ||
619 | } | ||
620 | } | ||
621 | else // linda is cancelled | ||
622 | { | ||
623 | // do nothing and return lanes.cancel_error | ||
624 | CANCEL_ERROR.pushKey(L); | ||
625 | pushed = 1; | ||
626 | } | ||
627 | // an error can be raised if we attempt to read an unregistered function | ||
628 | if (pushed < 0) | ||
629 | { | ||
630 | return luaL_error(L, "tried to copy unsupported types"); | ||
631 | } | ||
632 | |||
633 | return pushed; | ||
634 | } | ||
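get() reads without consuming, in contrast to receive(); a small sketch with the same illustrative linda (the count argument is optional):

    linda:set("status", "ready")
    local v = linda:get("status")         -- "ready"; the value stays in the linda
    local vals = { linda:get("jobs", 3) } -- up to 3 values from "jobs", still not consumed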
635 | |||
636 | // ################################################################################################# | ||
637 | |||
638 | /* | ||
639 | * [true] = linda_limit( linda_ud, key_num|str|bool|lightuserdata, int) | ||
640 | * | ||
641 | * Set a limit on one Linda key. | ||
642 | * Optionally wakes threads waiting to write on the linda, in case the new limit allows them to do so | ||
643 | */ | ||
644 | LUAG_FUNC( linda_limit) | ||
645 | { | ||
646 | Linda* const linda{ lua_toLinda<false>(L, 1) }; | ||
647 | // make sure we got 3 arguments: the linda, a key and a limit | ||
648 | luaL_argcheck( L, lua_gettop( L) == 3, 2, "wrong number of arguments"); | ||
649 | // make sure we got a numeric limit | ||
650 | luaL_checknumber( L, 3); | ||
651 | // make sure the key is of a valid type | ||
652 | check_key_types( L, 2, 2); | ||
653 | |||
654 | int pushed{ 0 }; | ||
655 | if (linda->simulate_cancel == CancelRequest::None) | ||
656 | { | ||
657 | Keeper* const K{ which_keeper(linda->U->keepers, linda->hashSeed()) }; | ||
658 | pushed = keeper_call(linda->U, K->L, KEEPER_API(limit), L, linda, 2); | ||
659 | ASSERT_L( pushed == 0 || pushed == 1); // no error, optional boolean value saying if we should wake blocked writer threads | ||
660 | if( pushed == 1) | ||
661 | { | ||
662 | ASSERT_L( lua_type( L, -1) == LUA_TBOOLEAN && lua_toboolean( L, -1) == 1); | ||
663 | linda->m_read_happened.notify_all(); // To be done from within the 'K' locking area | ||
664 | } | ||
665 | } | ||
666 | else // linda is cancelled | ||
667 | { | ||
668 | // do nothing and return lanes.cancel_error | ||
669 | CANCEL_ERROR.pushKey(L); | ||
670 | pushed = 1; | ||
671 | } | ||
672 | // propagate pushed boolean if any | ||
673 | return pushed; | ||
674 | } | ||
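limit() combines with send() into the usual bounded-queue pattern; a sketch with arbitrary sizes and timeout (same illustrative linda):

    linda:limit("work", 2)                -- allow at most 2 pending values in "work"
    linda:send("work", 1)
    linda:send("work", 2)
    local ok = linda:send(0.1, "work", 3) -- false: the slot is full and nothing is read within 0.1s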
675 | |||
676 | // ################################################################################################# | ||
677 | |||
678 | /* | ||
679 | * (void) = linda_cancel( linda_ud, "read"|"write"|"both"|"none") | ||
680 | * | ||
681 | * Signal linda so that waiting threads wake up as if their own lane was cancelled | ||
682 | */ | ||
683 | LUAG_FUNC(linda_cancel) | ||
684 | { | ||
685 | Linda* const linda{ lua_toLinda<false>(L, 1) }; | ||
686 | char const* who = luaL_optstring(L, 2, "both"); | ||
687 | // make sure we got at most 2 arguments: the linda and an optional wake mode | ||
688 | luaL_argcheck(L, lua_gettop(L) <= 2, 2, "wrong number of arguments"); | ||
689 | |||
690 | linda->simulate_cancel = CancelRequest::Soft; | ||
691 | if (strcmp(who, "both") == 0) // tell all blocked readers and writers to wake up | ||
692 | { | ||
693 | linda->m_write_happened.notify_all(); | ||
694 | linda->m_read_happened.notify_all(); | ||
695 | } | ||
696 | else if (strcmp(who, "none") == 0) // reset flag | ||
697 | { | ||
698 | linda->simulate_cancel = CancelRequest::None; | ||
699 | } | ||
700 | else if (strcmp(who, "read") == 0) // tell blocked readers to wake up | ||
701 | { | ||
702 | linda->m_write_happened.notify_all(); | ||
703 | } | ||
704 | else if (strcmp(who, "write") == 0) // tell blocked writers to wake up | ||
705 | { | ||
706 | linda->m_read_happened.notify_all(); | ||
707 | } | ||
708 | else | ||
709 | { | ||
710 | return luaL_error(L, "unknown wake hint '%s'", who); | ||
711 | } | ||
712 | return 0; | ||
713 | } | ||
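A sketch of cancel() as described above (lanes and linda from the first sketch); after a soft cancel, operations on the linda return lanes.cancel_error until the flag is reset:

    linda:cancel("both")                               -- wake every lane blocked on this linda
    print(linda:send("jobs", 1) == lanes.cancel_error) -- true while the linda is cancelled
    linda:cancel("none")                               -- back to normal operation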
714 | |||
715 | // ################################################################################################# | ||
716 | |||
717 | /* | ||
718 | * lightuserdata= linda_deep( linda_ud ) | ||
719 | * | ||
720 | * Return the 'deep' userdata pointer, identifying the Linda. | ||
721 | * | ||
722 | * This is needed for using Lindas as key indices (timer system needs it); | ||
723 | * separately created proxies of the same underlying deep object will have | ||
724 | * different userdata and won't be known to be essentially the same deep one | ||
725 | * without this. | ||
726 | */ | ||
727 | LUAG_FUNC(linda_deep) | ||
728 | { | ||
729 | Linda* const linda{ lua_toLinda<false>(L, 1) }; | ||
730 | lua_pushlightuserdata(L, linda); // just the address | ||
731 | return 1; | ||
732 | } | ||
733 | |||
734 | // ################################################################################################# | ||
735 | |||
736 | /* | ||
737 | * string = linda:__tostring( linda_ud) | ||
738 | * | ||
739 | * Return the stringification of a linda | ||
740 | * | ||
741 | * Useful for concatenation or debugging purposes | ||
742 | */ | ||
743 | |||
744 | template <bool OPT> | ||
745 | [[nodiscard]] static int linda_tostring(lua_State* L, int idx_) | ||
746 | { | ||
747 | Linda* const linda{ lua_toLinda<OPT>(L, idx_) }; | ||
748 | if (linda != nullptr) | ||
749 | { | ||
750 | char text[128]; | ||
751 | int len; | ||
752 | if (linda->getName()) | ||
753 | len = sprintf(text, "Linda: %.*s", (int) sizeof(text) - 8, linda->getName()); | ||
754 | else | ||
755 | len = sprintf(text, "Linda: %p", linda); | ||
756 | lua_pushlstring(L, text, len); | ||
757 | return 1; | ||
758 | } | ||
759 | return 0; | ||
760 | } | ||
761 | |||
762 | LUAG_FUNC(linda_tostring) | ||
763 | { | ||
764 | return linda_tostring<false>(L, 1); | ||
765 | } | ||
766 | |||
767 | // ################################################################################################# | ||
768 | |||
769 | /* | ||
770 | * string = linda:__concat( a, b) | ||
771 | * | ||
772 | * Return the concatenation of a pair of items, one of them being a linda | ||
773 | * | ||
774 | * Useful for debugging purposes | ||
775 | */ | ||
776 | LUAG_FUNC(linda_concat) | ||
777 | { // linda1? linda2? | ||
778 | bool atLeastOneLinda{ false }; | ||
779 | // Lua semantics enforce that one of the 2 arguments is a Linda, but not necessarily both. | ||
780 | if (linda_tostring<true>(L, 1)) | ||
781 | { | ||
782 | atLeastOneLinda = true; | ||
783 | lua_replace(L, 1); | ||
784 | } | ||
785 | if (linda_tostring<true>(L, 2)) | ||
786 | { | ||
787 | atLeastOneLinda = true; | ||
788 | lua_replace(L, 2); | ||
789 | } | ||
790 | if (!atLeastOneLinda) // should not be possible | ||
791 | { | ||
792 | return luaL_error(L, "internal error: linda_concat called on non-Linda"); | ||
793 | } | ||
794 | lua_concat(L, 2); | ||
795 | return 1; | ||
796 | } | ||
797 | |||
798 | // ################################################################################################# | ||
799 | |||
800 | /* | ||
801 | * table = linda:dump() | ||
802 | * return a table listing all pending data inside the linda | ||
803 | */ | ||
804 | LUAG_FUNC(linda_dump) | ||
805 | { | ||
806 | Linda* const linda{ lua_toLinda<false>(L, 1) }; | ||
807 | return keeper_push_linda_storage(linda->U, Dest{ L }, linda, linda->hashSeed()); | ||
808 | } | ||
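dump() is handy when debugging from Lua; a hedged sketch (the exact layout of each slot's sub-table is an implementation detail of the keeper):

    for slot, contents in pairs(linda:dump() or {}) do -- dump() may return nothing when the linda is empty
        print(slot, contents)                          -- one entry per non-empty slot
    end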
809 | |||
810 | // ################################################################################################# | ||
811 | |||
812 | /* | ||
813 | * table = linda:__towatch() | ||
814 | * Decoda support: return a table listing all pending data inside the linda, or its stringification when it is empty | ||
815 | */ | ||
816 | LUAG_FUNC(linda_towatch) | ||
817 | { | ||
818 | Linda* const linda{ lua_toLinda<false>(L, 1) }; | ||
819 | int pushed{ keeper_push_linda_storage(linda->U, Dest{ L }, linda, linda->hashSeed()) }; | ||
820 | if (pushed == 0) | ||
821 | { | ||
822 | // if the linda is empty, don't return nil | ||
823 | pushed = linda_tostring<false>(L, 1); | ||
824 | } | ||
825 | return pushed; | ||
826 | } | ||
827 | |||
828 | // ################################################################################################# | ||
829 | |||
830 | /* | ||
831 | * Identity function of a shared userdata object. | ||
832 | * | ||
833 | * lightuserdata= linda_id( "new" [, ...] ) | ||
834 | * = linda_id( "delete", lightuserdata ) | ||
835 | * | ||
836 | * Creation and cleanup of actual 'deep' objects. 'luaG_...' will wrap them into | ||
837 | * regular userdata proxies in each state using the deep data. | ||
838 | * | ||
839 | * tbl= linda_id( "metatable" ) | ||
840 | * | ||
841 | * Returns a metatable for the proxy objects ('__gc' method not needed; will | ||
842 | * be added by 'luaG_...') | ||
843 | * | ||
844 | * string= linda_id( "module") | ||
845 | * | ||
846 | * Returns the name of the module that a state should require | ||
847 | * in order to keep a handle on the shared library that exported the idfunc | ||
848 | * | ||
849 | * = linda_id( str, ... ) | ||
850 | * | ||
851 | * For any other strings, the ID function must not react at all. This allows | ||
852 | * future extensions of the system. | ||
853 | */ | ||
854 | [[nodiscard]] static void* linda_id(lua_State* L, DeepOp op_) | ||
855 | { | ||
856 | switch( op_) | ||
857 | { | ||
858 | case DeepOp::New: | ||
859 | { | ||
860 | size_t name_len = 0; | ||
861 | char const* linda_name = nullptr; | ||
862 | unsigned long linda_group = 0; | ||
863 | // should have a string and/or a number on the stack as parameters (name and group) | ||
864 | switch (lua_gettop(L)) | ||
865 | { | ||
866 | default: // 0 | ||
867 | break; | ||
868 | |||
869 | case 1: // 1 parameter, either a name or a group | ||
870 | if (lua_type(L, -1) == LUA_TSTRING) | ||
871 | { | ||
872 | linda_name = lua_tolstring(L, -1, &name_len); | ||
873 | } | ||
874 | else | ||
875 | { | ||
876 | linda_group = (unsigned long) lua_tointeger(L, -1); | ||
877 | } | ||
878 | break; | ||
879 | |||
880 | case 2: // 2 parameters, a name and group, in that order | ||
881 | linda_name = lua_tolstring(L, -2, &name_len); | ||
882 | linda_group = (unsigned long) lua_tointeger(L, -1); | ||
883 | break; | ||
884 | } | ||
885 | |||
886 | /* The deep data is allocated separately from the Lua stack; we might no | ||
887 | * longer be around when the last reference to it is released. | ||
888 | * One can use any memory allocation scheme. | ||
889 | * just don't use L's allocF because we don't know which state will get the honor of GCing the linda | ||
890 | */ | ||
891 | Universe* const U{ universe_get(L) }; | ||
892 | Linda* linda{ new (U) Linda{ U, linda_group, linda_name, name_len } }; | ||
893 | return linda; | ||
894 | } | ||
895 | |||
896 | case DeepOp::Delete: | ||
897 | { | ||
898 | Linda* const linda{ lua_tolightuserdata<Linda>(L, 1) }; | ||
899 | ASSERT_L(linda); | ||
900 | Keeper* const myK{ which_keeper(linda->U->keepers, linda->hashSeed()) }; | ||
901 | // if collected after the universe, keepers are already destroyed, and there is nothing to clear | ||
902 | if (myK) | ||
903 | { | ||
904 | // if collected from my own keeper, we can't acquire/release it | ||
905 | // because we are already inside a protected area, and trying to do so would deadlock! | ||
906 | bool const need_acquire_release{ myK->L != L }; | ||
907 | // Clean associated structures in the keeper state. | ||
908 | Keeper* const K{ need_acquire_release ? keeper_acquire(linda->U->keepers, linda->hashSeed()) : myK }; | ||
909 | // hopefully this won't ever raise an error as we would jump to the closest pcall site while forgetting to release the keeper mutex... | ||
910 | std::ignore = keeper_call(linda->U, K->L, KEEPER_API(clear), L, linda, 0); | ||
911 | if (need_acquire_release) | ||
912 | { | ||
913 | keeper_release(K); | ||
914 | } | ||
915 | } | ||
916 | |||
917 | delete linda; // operator delete overload ensures things go as expected | ||
918 | return nullptr; | ||
919 | } | ||
920 | |||
921 | case DeepOp::Metatable: | ||
922 | { | ||
923 | STACK_CHECK_START_REL(L, 0); | ||
924 | lua_newtable(L); | ||
925 | // metatable is its own index | ||
926 | lua_pushvalue(L, -1); | ||
927 | lua_setfield(L, -2, "__index"); | ||
928 | |||
929 | // protect metatable from external access | ||
930 | lua_pushliteral(L, "Linda"); | ||
931 | lua_setfield(L, -2, "__metatable"); | ||
932 | |||
933 | lua_pushcfunction(L, LG_linda_tostring); | ||
934 | lua_setfield(L, -2, "__tostring"); | ||
935 | |||
936 | // Decoda __towatch support | ||
937 | lua_pushcfunction(L, LG_linda_towatch); | ||
938 | lua_setfield(L, -2, "__towatch"); | ||
939 | |||
940 | lua_pushcfunction(L, LG_linda_concat); | ||
941 | lua_setfield(L, -2, "__concat"); | ||
942 | |||
943 | // protected calls, to ensure associated keeper is always released even in case of error | ||
944 | // all functions are the protected call wrapper, with the actual operation provided as an upvalue | ||
945 | // note that this kind of wrapping can break function lookup, as we use the function pointer here and there | ||
946 | |||
947 | lua_pushcfunction(L, LG_linda_send); | ||
948 | lua_pushcclosure(L, LG_linda_protected_call, 1); | ||
949 | lua_setfield(L, -2, "send"); | ||
950 | |||
951 | lua_pushcfunction(L, LG_linda_receive); | ||
952 | lua_pushcclosure(L, LG_linda_protected_call, 1); | ||
953 | lua_setfield(L, -2, "receive"); | ||
954 | |||
955 | lua_pushcfunction(L, LG_linda_limit); | ||
956 | lua_pushcclosure(L, LG_linda_protected_call, 1); | ||
957 | lua_setfield(L, -2, "limit"); | ||
958 | |||
959 | lua_pushcfunction(L, LG_linda_set); | ||
960 | lua_pushcclosure(L, LG_linda_protected_call, 1); | ||
961 | lua_setfield(L, -2, "set"); | ||
962 | |||
963 | lua_pushcfunction(L, LG_linda_count); | ||
964 | lua_pushcclosure(L, LG_linda_protected_call, 1); | ||
965 | lua_setfield(L, -2, "count"); | ||
966 | |||
967 | lua_pushcfunction(L, LG_linda_get); | ||
968 | lua_pushcclosure(L, LG_linda_protected_call, 1); | ||
969 | lua_setfield(L, -2, "get"); | ||
970 | |||
971 | lua_pushcfunction(L, LG_linda_cancel); | ||
972 | lua_setfield(L, -2, "cancel"); | ||
973 | |||
974 | lua_pushcfunction(L, LG_linda_deep); | ||
975 | lua_setfield(L, -2, "deep"); | ||
976 | |||
977 | lua_pushcfunction(L, LG_linda_dump); | ||
978 | lua_pushcclosure(L, LG_linda_protected_call, 1); | ||
979 | lua_setfield(L, -2, "dump"); | ||
980 | |||
981 | // some constants | ||
982 | BATCH_SENTINEL.pushKey(L); | ||
983 | lua_setfield(L, -2, "batched"); | ||
984 | |||
985 | NIL_SENTINEL.pushKey(L); | ||
986 | lua_setfield(L, -2, "null"); | ||
987 | |||
988 | STACK_CHECK(L, 1); | ||
989 | return nullptr; | ||
990 | } | ||
991 | |||
992 | case DeepOp::Module: | ||
993 | // linda is a special case because we know lanes must be loaded from the main lua state | ||
994 | // to be able to ever get here, so we know it will remain loaded as long as the main state is around | ||
995 | // in other words, forever. | ||
996 | default: | ||
997 | { | ||
998 | return nullptr; | ||
999 | } | ||
1000 | } | ||
1001 | } | ||
1002 | |||
1003 | // ################################################################################################# | ||
1004 | |||
1005 | /* | ||
1006 | * ud = lanes.linda( [name[,group]]) | ||
1007 | * | ||
1008 | * returns a linda object, or raises an error if creation failed | ||
1009 | */ | ||
1010 | LUAG_FUNC(linda) | ||
1011 | { | ||
1012 | int const top{ lua_gettop(L) }; | ||
1013 | luaL_argcheck(L, top <= 2, top, "too many arguments"); | ||
1014 | if (top == 1) | ||
1015 | { | ||
1016 | int const t{ lua_type(L, 1) }; | ||
1017 | luaL_argcheck(L, t == LUA_TSTRING || t == LUA_TNUMBER, 1, "wrong parameter (should be a string or a number)"); | ||
1018 | } | ||
1019 | else if (top == 2) | ||
1020 | { | ||
1021 | luaL_checktype(L, 1, LUA_TSTRING); | ||
1022 | luaL_checktype(L, 2, LUA_TNUMBER); | ||
1023 | } | ||
1024 | return luaG_newdeepuserdata(Dest{ L }, linda_id, 0); | ||
1025 | } | ||
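Finally, the argument checks above translate to the following constructor calls (names and group numbers are illustrative; the group is only meaningful when several keeper states are configured):

    local lanes = require "lanes".configure()
    local l1 = lanes.linda()             -- anonymous linda
    local l2 = lanes.linda("trace me")   -- named linda; the name shows up in __tostring
    local l3 = lanes.linda("grouped", 3) -- name plus keeper group
    local l4 = lanes.linda(3)            -- a single number is accepted as the group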
diff --git a/src/macros_and_utils.h b/src/macros_and_utils.h
index e184476..e8d5ab5 100644
--- a/src/macros_and_utils.h
+++ b/src/macros_and_utils.h
@@ -1,107 +1,193 @@ | |||
1 | /* | 1 | #pragma once |
2 | * MACROS_AND_UTILS.H | ||
3 | */ | ||
4 | #ifndef MACROS_AND_UTILS_H | ||
5 | #define MACROS_AND_UTILS_H | ||
6 | 2 | ||
3 | #ifdef __cplusplus | ||
4 | extern "C" { | ||
5 | #endif // __cplusplus | ||
7 | #include "lua.h" | 6 | #include "lua.h" |
8 | #include "lualib.h" | 7 | #include "lualib.h" |
9 | #include "lauxlib.h" | 8 | #include "lauxlib.h" |
9 | #ifdef __cplusplus | ||
10 | } | ||
11 | #endif // __cplusplus | ||
12 | |||
13 | #include <cassert> | ||
14 | #include <chrono> | ||
15 | #include <tuple> | ||
16 | #include <type_traits> | ||
10 | 17 | ||
11 | // M$ compiler doesn't support 'inline' keyword in C files... | 18 | using namespace std::chrono_literals; |
12 | #if defined( _MSC_VER) | ||
13 | #define inline __inline | ||
14 | #endif | ||
15 | 19 | ||
16 | #define USE_DEBUG_SPEW() 0 | 20 | #define USE_DEBUG_SPEW() 0 |
17 | #if USE_DEBUG_SPEW() | 21 | #if USE_DEBUG_SPEW() |
18 | extern char const* debugspew_indent; | 22 | extern char const* debugspew_indent; |
19 | #define INDENT_BEGIN "%.*s " | 23 | #define INDENT_BEGIN "%.*s " |
20 | #define INDENT_END , (U ? U->debugspew_indent_depth : 0), debugspew_indent | 24 | #define INDENT_END , (U ? U->debugspew_indent_depth.load(std::memory_order_relaxed) : 0), debugspew_indent |
21 | #define DEBUGSPEW_CODE(_code) _code | 25 | #define DEBUGSPEW_CODE(_code) _code |
22 | #define DEBUGSPEW_PARAM_COMMA( param_) param_, | 26 | #define DEBUGSPEW_OR_NOT(a_, b_) a_ |
27 | #define DEBUGSPEW_PARAM_COMMA(param_) param_, | ||
23 | #define DEBUGSPEW_COMMA_PARAM( param_) , param_ | 28 | #define DEBUGSPEW_COMMA_PARAM( param_) , param_ |
24 | #else // USE_DEBUG_SPEW() | 29 | #else // USE_DEBUG_SPEW() |
25 | #define DEBUGSPEW_CODE(_code) | 30 | #define DEBUGSPEW_CODE(_code) |
26 | #define DEBUGSPEW_PARAM_COMMA( param_) | 31 | #define DEBUGSPEW_OR_NOT(a_, b_) b_ |
32 | #define DEBUGSPEW_PARAM_COMMA(param_) | ||
27 | #define DEBUGSPEW_COMMA_PARAM( param_) | 33 | #define DEBUGSPEW_COMMA_PARAM( param_) |
28 | #endif // USE_DEBUG_SPEW() | 34 | #endif // USE_DEBUG_SPEW() |
29 | 35 | ||
30 | #ifdef NDEBUG | 36 | #ifdef NDEBUG |
31 | 37 | ||
32 | #define _ASSERT_L(lua,c) //nothing | 38 | #define _ASSERT_L(lua,c) //nothing |
33 | #define STACK_CHECK(L,o) //nothing | ||
34 | #define STACK_CHECK_ABS(L,o) //nothing | ||
35 | #define STACK_MID(L,c) //nothing | ||
36 | #define STACK_END(L,c) //nothing | ||
37 | #define STACK_DUMP(L) //nothing | 39 | #define STACK_DUMP(L) //nothing |
38 | 40 | ||
41 | #define STACK_CHECK_START_REL(L, offset_) | ||
42 | #define STACK_CHECK_START_ABS(L, offset_) | ||
43 | #define STACK_CHECK_RESET_REL(L, offset_) | ||
44 | #define STACK_CHECK_RESET_ABS(L, offset_) | ||
45 | #define STACK_CHECK(L, offset_) | ||
46 | |||
39 | #else // NDEBUG | 47 | #else // NDEBUG |
40 | 48 | ||
41 | #define _ASSERT_L( L, cond_) if( (cond_) == 0) { (void) luaL_error( L, "ASSERT failed: %s:%d '%s'", __FILE__, __LINE__, #cond_);} | 49 | #define _ASSERT_L(L, cond_) if( (cond_) == 0) { (void) luaL_error(L, "ASSERT failed: %s:%d '%s'", __FILE__, __LINE__, #cond_);} |
42 | 50 | #define STACK_DUMP(L) luaG_dump(L) | |
43 | #define STACK_CHECK( L, offset_) \ | 51 | |
44 | { \ | 52 | class StackChecker |
45 | int const L##_delta = offset_; \ | 53 | { |
46 | if( (L##_delta < 0) || (lua_gettop( L) < L##_delta)) \ | 54 | private: |
47 | { \ | 55 | lua_State* const m_L; |
48 | assert( FALSE); \ | 56 | int m_oldtop; |
49 | (void) luaL_error( L, "STACK INIT ASSERT failed (%d not %d): %s:%d", lua_gettop( L), L##_delta, __FILE__, __LINE__); \ | 57 | |
50 | } \ | 58 | public: |
51 | int const L##_oldtop = lua_gettop( L) - L##_delta | 59 | struct Relative |
52 | 60 | { | |
53 | #define STACK_CHECK_ABS( L, offset_) \ | 61 | int const m_offset; |
54 | { \ | 62 | |
55 | int const L##_pos = offset_; \ | 63 | operator int() const { return m_offset; } |
56 | if( lua_gettop( L) < L##_pos) \ | 64 | }; |
57 | { \ | 65 | |
58 | assert( FALSE); \ | 66 | struct Absolute |
59 | (void) luaL_error( L, "STACK INIT ASSERT failed (%d not %d): %s:%d", lua_gettop( L), L##_pos, __FILE__, __LINE__); \ | 67 | { |
60 | } \ | 68 | int const m_offset; |
61 | int const L##_oldtop = 0 | 69 | |
62 | 70 | operator int() const { return m_offset; } | |
63 | #define STACK_MID( L, change) \ | 71 | }; |
64 | do if( change != LUA_MULTRET) \ | 72 | |
65 | { \ | 73 | StackChecker(lua_State* const L_, Relative offset_, char const* file_, size_t const line_) |
66 | int stack_check_a = lua_gettop( L) - L##_oldtop; \ | 74 | : m_L{ L_ } |
67 | int stack_check_b = (change); \ | 75 | , m_oldtop{ lua_gettop(L_) - offset_ } |
68 | if( stack_check_a != stack_check_b) \ | 76 | { |
69 | { \ | 77 | if ((offset_ < 0) || (m_oldtop < 0)) |
70 | assert( FALSE); \ | 78 | { |
71 | luaL_error( L, "STACK ASSERT failed (%d not %d): %s:%d", stack_check_a, stack_check_b, __FILE__, __LINE__); \ | 79 | assert(false); |
72 | } \ | 80 | luaL_error(m_L, "STACK INIT ASSERT failed (%d not %d): %s:%d", lua_gettop(m_L), offset_, file_, line_); // doesn't return |
73 | } while( 0) | 81 | } |
74 | 82 | } | |
75 | #define STACK_END( L, change) \ | 83 | |
76 | STACK_MID( L, change); \ | 84 | StackChecker(lua_State* const L_, Absolute pos_, char const* file_, size_t const line_) |
85 | : m_L{ L_ } | ||
86 | , m_oldtop{ 0 } | ||
87 | { | ||
88 | if (lua_gettop(m_L) != pos_) | ||
89 | { | ||
90 | assert(false); | ||
91 | luaL_error(m_L, "STACK INIT ASSERT failed (%d not %d): %s:%d", lua_gettop(m_L), pos_, file_, line_); // doesn't return | ||
92 | } | ||
77 | } | 93 | } |
78 | 94 | ||
79 | #define STACK_DUMP( L) luaG_dump( L) | 95 | StackChecker& operator=(StackChecker const& rhs_) |
96 | { | ||
97 | assert(m_L == rhs_.m_L); | ||
98 | m_oldtop = rhs_.m_oldtop; | ||
99 | return *this; | ||
100 | } | ||
101 | |||
102 | // verify if the distance between the current top and the initial one is what we expect | ||
103 | void check(int expected_, char const* file_, size_t const line_) | ||
104 | { | ||
105 | if (expected_ != LUA_MULTRET) | ||
106 | { | ||
107 | int const actual{ lua_gettop(m_L) - m_oldtop }; | ||
108 | if (actual != expected_) | ||
109 | { | ||
110 | assert(false); | ||
111 | luaL_error(m_L, "STACK ASSERT failed (%d not %d): %s:%d", actual, expected_, file_, line_); // doesn't return | ||
112 | } | ||
113 | } | ||
114 | } | ||
115 | }; | ||
116 | |||
117 | #define STACK_CHECK_START_REL(L, offset_) StackChecker stackChecker_##L(L, StackChecker::Relative{ offset_ }, __FILE__, __LINE__) | ||
118 | #define STACK_CHECK_START_ABS(L, offset_) StackChecker stackChecker_##L(L, StackChecker::Absolute{ offset_ }, __FILE__, __LINE__) | ||
119 | #define STACK_CHECK_RESET_REL(L, offset_) stackChecker_##L = StackChecker{L, StackChecker::Relative{ offset_ }, __FILE__, __LINE__} | ||
120 | #define STACK_CHECK_RESET_ABS(L, offset_) stackChecker_##L = StackChecker{L, StackChecker::Absolute{ offset_ }, __FILE__, __LINE__} | ||
121 | #define STACK_CHECK(L, offset_) stackChecker_##L.check(offset_, __FILE__, __LINE__) | ||
80 | 122 | ||
81 | #endif // NDEBUG | 123 | #endif // NDEBUG |
82 | 124 | ||
83 | #define ASSERT_L(c) _ASSERT_L(L,c) | 125 | #define ASSERT_L(c) _ASSERT_L(L,c) |
84 | 126 | ||
85 | inline void STACK_GROW(lua_State * L, int n_) | 127 | inline void STACK_GROW(lua_State* L, int n_) |
86 | { | 128 | { |
87 | if (!lua_checkstack(L, n_)) | 129 | if (!lua_checkstack(L, n_)) |
88 | luaL_error(L, "Cannot grow stack!"); | 130 | { |
131 | luaL_error(L, "Cannot grow stack!"); // doesn't return | ||
132 | } | ||
89 | } | 133 | } |
90 | 134 | ||
91 | // non-string keyed registry access | 135 | #define LUAG_FUNC(func_name) [[nodiscard]] int LG_##func_name(lua_State* L) |
92 | #define REGISTRY_SET( L, key_, value_) \ | 136 | |
93 | { \ | 137 | // ################################################################################################# |
94 | push_unique_key( L, key_); \ | 138 | |
95 | value_; \ | 139 | // a small helper to extract a full userdata pointer from the stack in a safe way |
96 | lua_rawset( L, LUA_REGISTRYINDEX); \ | 140 | template<typename T> |
141 | [[nodiscard]] T* lua_tofulluserdata(lua_State* L, int index_) | ||
142 | { | ||
143 | ASSERT_L(lua_isnil(L, index_) || lua_type(L, index_) == LUA_TUSERDATA); | ||
144 | return static_cast<T*>(lua_touserdata(L, index_)); | ||
97 | } | 145 | } |
98 | 146 | ||
99 | #define REGISTRY_GET( L, key_) \ | 147 | template<typename T> |
100 | { \ | 148 | [[nodiscard]] auto lua_tolightuserdata(lua_State* L, int index_) |
101 | push_unique_key( L, key_); \ | 149 | { |
102 | lua_rawget( L, LUA_REGISTRYINDEX); \ | 150 | ASSERT_L(lua_isnil(L, index_) || lua_islightuserdata(L, index_)); |
151 | if constexpr (std::is_pointer_v<T>) | ||
152 | { | ||
153 | return static_cast<T>(lua_touserdata(L, index_)); | ||
154 | } | ||
155 | else | ||
156 | { | ||
157 | return static_cast<T*>(lua_touserdata(L, index_)); | ||
158 | } | ||
159 | } | ||
160 | |||
161 | template <typename T> | ||
162 | [[nodiscard]] T* lua_newuserdatauv(lua_State* L, int nuvalue_) | ||
163 | { | ||
164 | return static_cast<T*>(lua_newuserdatauv(L, sizeof(T), nuvalue_)); | ||
165 | } | ||
166 | |||
167 | // ################################################################################################# | ||
168 | |||
169 | // use this instead of Lua's lua_error if possible | ||
170 | [[noreturn]] static inline void raise_lua_error(lua_State* L) | ||
171 | { | ||
172 | std::ignore = lua_error(L); // doesn't return | ||
173 | assert(false); // we should never get here, but i'm paranoid | ||
103 | } | 174 | } |
104 | 175 | ||
105 | #define LUAG_FUNC( func_name) int LG_##func_name( lua_State* L) | 176 | using lua_Duration = std::chrono::template duration<lua_Number>; |
177 | |||
178 | // ################################################################################################# | ||
179 | |||
180 | // A unique type generator | ||
181 | template <typename T, auto = []{}> | ||
182 | struct Unique | ||
183 | { | ||
184 | T m_val; | ||
185 | constexpr Unique() = default; | ||
186 | constexpr operator T() const { return m_val; } | ||
187 | constexpr explicit Unique(T b_) : m_val{ b_ } {} | ||
188 | }; | ||
189 | |||
190 | // ################################################################################################# | ||
106 | 191 | ||
107 | #endif // MACROS_AND_UTILS_H | 192 | using Source = Unique<lua_State*>; |
193 | using Dest = Unique<lua_State*>; \ No newline at end of file | ||
diff --git a/src/platform.h b/src/platform.h
index 2f71c07..b10f0ad 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -1,5 +1,4 @@ | |||
1 | #ifndef __LANES_PLATFORM_H__ | 1 | #pragma once |
2 | #define __LANES_PLATFORM_H__ 1 | ||
3 | 2 | ||
4 | #ifdef _WIN32_WCE | 3 | #ifdef _WIN32_WCE |
5 | #define PLATFORM_POCKETPC | 4 | #define PLATFORM_POCKETPC |
@@ -21,5 +20,3 @@ | |||
21 | #else | 20 | #else |
22 | #error "Unknown platform!" | 21 | #error "Unknown platform!" |
23 | #endif | 22 | #endif |
24 | |||
25 | #endif // __LANES_PLATFORM_H__ | ||
diff --git a/src/state.c b/src/state.cpp
index 32e5b47..4a5f995 100644
--- a/src/state.c
+++ b/src/state.cpp
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * STATE.C | 2 | * STATE.CPP |
3 | * | 3 | * |
4 | * Lua tools to support Lanes. | 4 | * Lua tools to support Lanes. |
5 | */ | 5 | */ |
@@ -8,7 +8,7 @@ | |||
8 | =============================================================================== | 8 | =============================================================================== |
9 | 9 | ||
10 | Copyright (C) 2002-10 Asko Kauppi <akauppi@gmail.com> | 10 | Copyright (C) 2002-10 Asko Kauppi <akauppi@gmail.com> |
11 | 2011-21 benoit Germain <bnt.germain@gmail.com> | 11 | 2011-24 benoit Germain <bnt.germain@gmail.com> |
12 | 12 | ||
13 | Permission is hereby granted, free of charge, to any person obtaining a copy | 13 | Permission is hereby granted, free of charge, to any person obtaining a copy |
14 | of this software and associated documentation files (the "Software"), to deal | 14 | of this software and associated documentation files (the "Software"), to deal |
@@ -31,20 +31,11 @@ THE SOFTWARE. | |||
31 | =============================================================================== | 31 | =============================================================================== |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #include <stdio.h> | 34 | #include "state.h" |
35 | #include <assert.h> | 35 | |
36 | #include <string.h> | ||
37 | #include <ctype.h> | ||
38 | #include <stdlib.h> | ||
39 | #if !defined(__APPLE__) | ||
40 | #include <malloc.h> | ||
41 | #endif // __APPLE__ | ||
42 | |||
43 | #include "compat.h" | ||
44 | #include "macros_and_utils.h" | ||
45 | #include "universe.h" | ||
46 | #include "tools.h" | ||
47 | #include "lanes.h" | 36 | #include "lanes.h" |
37 | #include "tools.h" | ||
38 | #include "universe.h" | ||
48 | 39 | ||
49 | // ################################################################################################ | 40 | // ################################################################################################ |
50 | 41 | ||
@@ -58,7 +49,7 @@ THE SOFTWARE. | |||
58 | // | 49 | // |
59 | // Upvalues: [1]: original 'require' function | 50 | // Upvalues: [1]: original 'require' function |
60 | // | 51 | // |
61 | static int luaG_new_require( lua_State* L) | 52 | [[nodiscard]] static int luaG_new_require(lua_State* L) |
62 | { | 53 | { |
63 | int rc; | 54 | int rc; |
64 | int const args = lua_gettop( L); // args | 55 | int const args = lua_gettop( L); // args |
@@ -72,62 +63,65 @@ static int luaG_new_require( lua_State* L) | |||
72 | 63 | ||
73 | // Using 'lua_pcall()' to catch errors; otherwise a failing 'require' would | 64 | // Using 'lua_pcall()' to catch errors; otherwise a failing 'require' would |
74 | // leave us locked, blocking any future 'require' calls from other lanes. | 65 | // leave us locked, blocking any future 'require' calls from other lanes. |
75 | 66 | ||
76 | MUTEX_LOCK( &U->require_cs); | 67 | U->require_cs.lock(); |
77 | // starting with Lua 5.4, require may return a second optional value, so we need LUA_MULTRET | 68 | // starting with Lua 5.4, require may return a second optional value, so we need LUA_MULTRET |
78 | rc = lua_pcall( L, args, LUA_MULTRET, 0 /*errfunc*/ ); // err|result(s) | 69 | rc = lua_pcall( L, args, LUA_MULTRET, 0 /*errfunc*/ ); // err|result(s) |
79 | MUTEX_UNLOCK( &U->require_cs); | 70 | U->require_cs.unlock(); |
80 | 71 | ||
81 | // the required module (or an error message) is left on the stack as returned value by original require function | 72 | // the required module (or an error message) is left on the stack as returned value by original require function |
82 | 73 | ||
83 | if( rc != LUA_OK) // LUA_ERRRUN / LUA_ERRMEM ? | 74 | if( rc != LUA_OK) // LUA_ERRRUN / LUA_ERRMEM ? |
84 | { | 75 | { |
85 | return lua_error( L); | 76 | raise_lua_error(L); |
86 | } | 77 | } |
87 | // should be 1 for Lua <= 5.3, 1 or 2 starting with Lua 5.4 | 78 | // should be 1 for Lua <= 5.3, 1 or 2 starting with Lua 5.4 |
88 | return lua_gettop(L); // result(s) | 79 | return lua_gettop(L); // result(s) |
89 | } | 80 | } |
90 | 81 | ||
82 | // ################################################################################################# | ||
83 | |||
91 | /* | 84 | /* |
92 | * Serialize calls to 'require', if it exists | 85 | * Serialize calls to 'require', if it exists |
93 | */ | 86 | */ |
94 | void serialize_require( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* L) | 87 | void serialize_require(DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* L) |
95 | { | 88 | { |
96 | STACK_GROW( L, 1); | 89 | STACK_GROW(L, 1); |
97 | STACK_CHECK( L, 0); | 90 | STACK_CHECK_START_REL(L, 0); |
98 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "serializing require()\n" INDENT_END)); | 91 | DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "serializing require()\n" INDENT_END)); |
99 | 92 | ||
100 | // Check 'require' is there and not already wrapped; if not, do nothing | 93 | // Check 'require' is there and not already wrapped; if not, do nothing |
101 | // | 94 | // |
102 | lua_getglobal( L, "require"); | 95 | lua_getglobal(L, "require"); |
103 | if( lua_isfunction( L, -1) && lua_tocfunction( L, -1) != luaG_new_require) | 96 | if (lua_isfunction(L, -1) && lua_tocfunction(L, -1) != luaG_new_require) |
104 | { | 97 | { |
105 | // [-1]: original 'require' function | 98 | // [-1]: original 'require' function |
106 | lua_pushcclosure( L, luaG_new_require, 1 /*upvalues*/); | 99 | lua_pushcclosure(L, luaG_new_require, 1 /*upvalues*/); |
107 | lua_setglobal( L, "require"); | 100 | lua_setglobal(L, "require"); |
108 | } | 101 | } |
109 | else | 102 | else |
110 | { | 103 | { |
111 | // [-1]: nil | 104 | // [-1]: nil |
112 | lua_pop( L, 1); | 105 | lua_pop(L, 1); |
113 | } | 106 | } |
114 | 107 | ||
115 | STACK_END( L, 0); | 108 | STACK_CHECK(L, 0); |
116 | } | 109 | } |
117 | 110 | ||
118 | // ################################################################################################ | 111 | // ################################################################################################ |
119 | 112 | ||
120 | /*---=== luaG_newstate ===---*/ | 113 | /*---=== luaG_newstate ===---*/ |
121 | 114 | ||
122 | static int require_lanes_core( lua_State* L) | 115 | [[nodiscard]] static int require_lanes_core(lua_State* L) |
123 | { | 116 | { |
124 | // leaves a copy of 'lanes.core' module table on the stack | 117 | // leaves a copy of 'lanes.core' module table on the stack |
125 | luaL_requiref( L, "lanes.core", luaopen_lanes_core, 0); | 118 | luaL_requiref( L, "lanes.core", luaopen_lanes_core, 0); |
126 | return 1; | 119 | return 1; |
127 | } | 120 | } |
128 | 121 | ||
122 | // ################################################################################################# | ||
129 | 123 | ||
130 | static const luaL_Reg libs[] = | 124 | static luaL_Reg const libs[] = |
131 | { | 125 | { |
132 | { LUA_LOADLIBNAME, luaopen_package}, | 126 | { LUA_LOADLIBNAME, luaopen_package}, |
133 | { LUA_TABLIBNAME, luaopen_table}, | 127 | { LUA_TABLIBNAME, luaopen_table}, |
@@ -146,7 +140,7 @@ static const luaL_Reg libs[] = | |||
146 | #endif | 140 | #endif |
147 | { LUA_COLIBNAME, luaopen_coroutine}, // Lua 5.2: coroutine is no longer a part of base! | 141 | { LUA_COLIBNAME, luaopen_coroutine}, // Lua 5.2: coroutine is no longer a part of base! |
148 | #else // LUA_VERSION_NUM | 142 | #else // LUA_VERSION_NUM |
149 | { LUA_COLIBNAME, NULL}, // Lua 5.1: part of base package | 143 | { LUA_COLIBNAME, nullptr }, // Lua 5.1: part of base package |
150 | #endif // LUA_VERSION_NUM | 144 | #endif // LUA_VERSION_NUM |
151 | { LUA_DBLIBNAME, luaopen_debug}, | 145 | { LUA_DBLIBNAME, luaopen_debug}, |
152 | #if LUAJIT_FLAVOR() != 0 // building against LuaJIT headers, add some LuaJIT-specific libs | 146 | #if LUAJIT_FLAVOR() != 0 // building against LuaJIT headers, add some LuaJIT-specific libs |
@@ -156,14 +150,16 @@ static const luaL_Reg libs[] = | |||
156 | { LUA_FFILIBNAME, luaopen_ffi}, | 150 | { LUA_FFILIBNAME, luaopen_ffi}, |
157 | #endif // LUAJIT_FLAVOR() | 151 | #endif // LUAJIT_FLAVOR() |
158 | 152 | ||
159 | { LUA_DBLIBNAME, luaopen_debug}, | 153 | { LUA_DBLIBNAME, luaopen_debug}, |
160 | { "lanes.core", require_lanes_core}, // So that we can open it like any base library (possible since we have access to the init function) | 154 | { "lanes.core", require_lanes_core}, // So that we can open it like any base library (possible since we have access to the init function) |
161 | // | 155 | // |
162 | { "base", NULL}, // ignore "base" (already acquired it) | 156 | { "base", nullptr }, // ignore "base" (already acquired it) |
163 | { NULL, NULL } | 157 | { nullptr, nullptr } |
164 | }; | 158 | }; |
165 | 159 | ||
166 | static void open1lib( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* L, char const* name_, size_t len_) | 160 | // ################################################################################################# |
161 | |||
162 | static void open1lib(DEBUGSPEW_PARAM_COMMA(Universe* U) lua_State* L, char const* name_, size_t len_) | ||
167 | { | 163 | { |
168 | int i; | 164 | int i; |
169 | for( i = 0; libs[i].name; ++ i) | 165 | for( i = 0; libs[i].name; ++ i) |
@@ -172,70 +168,73 @@ static void open1lib( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* L, char con | |||
172 | { | 168 | { |
173 | lua_CFunction libfunc = libs[i].func; | 169 | lua_CFunction libfunc = libs[i].func; |
174 | name_ = libs[i].name; // note that the provided name_ doesn't necessarily ends with '\0', hence len_ | 170 | name_ = libs[i].name; // note that the provided name_ doesn't necessarily ends with '\0', hence len_ |
175 | if( libfunc != NULL) | 171 | if (libfunc != nullptr) |
176 | { | 172 | { |
177 | bool_t const isLanesCore = (libfunc == require_lanes_core) ? TRUE : FALSE; // don't want to create a global for "lanes.core" | 173 | bool const isLanesCore{ libfunc == require_lanes_core }; // don't want to create a global for "lanes.core" |
178 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "opening %.*s library\n" INDENT_END, (int) len_, name_)); | 174 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "opening %.*s library\n" INDENT_END, (int) len_, name_)); |
179 | STACK_CHECK( L, 0); | 175 | STACK_CHECK_START_REL(L, 0); |
180 | // open the library as if through require(), and create a global as well if necessary (the library table is left on the stack) | 176 | // open the library as if through require(), and create a global as well if necessary (the library table is left on the stack) |
181 | luaL_requiref( L, name_, libfunc, !isLanesCore); | 177 | luaL_requiref( L, name_, libfunc, !isLanesCore); |
182 | // lanes.core doesn't declare a global, so scan it here and now | 178 | // lanes.core doesn't declare a global, so scan it here and now |
183 | if( isLanesCore == TRUE) | 179 | if( isLanesCore == true) |
184 | { | 180 | { |
185 | populate_func_lookup_table( L, -1, name_); | 181 | populate_func_lookup_table( L, -1, name_); |
186 | } | 182 | } |
187 | lua_pop( L, 1); | 183 | lua_pop( L, 1); |
188 | STACK_END( L, 0); | 184 | STACK_CHECK( L, 0); |
189 | } | 185 | } |
190 | break; | 186 | break; |
191 | } | 187 | } |
192 | } | 188 | } |
193 | } | 189 | } |
194 | 190 | ||
191 | // ################################################################################################# | ||
195 | 192 | ||
196 | // just like lua_xmove, args are (from, to) | 193 | // just like lua_xmove, args are (from, to) |
197 | static void copy_one_time_settings( Universe* U, lua_State* L, lua_State* L2) | 194 | static void copy_one_time_settings(Universe* U, Source L, Dest L2) |
198 | { | 195 | { |
199 | STACK_GROW( L, 2); | 196 | STACK_GROW(L, 2); |
200 | STACK_CHECK( L, 0); | 197 | STACK_CHECK_START_REL(L, 0); |
201 | STACK_CHECK( L2, 0); | 198 | STACK_CHECK_START_REL(L2, 0); |
202 | 199 | ||
203 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "copy_one_time_settings()\n" INDENT_END)); | 200 | DEBUGSPEW_CODE(fprintf( stderr, INDENT_BEGIN "copy_one_time_settings()\n" INDENT_END)); |
204 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); | 201 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed)); |
205 | 202 | ||
206 | REGISTRY_GET( L, CONFIG_REGKEY); // config | 203 | CONFIG_REGKEY.pushValue(L); // config |
207 | // copy settings from from source to destination registry | 204 | // copy settings from from source to destination registry |
208 | if( luaG_inter_move( U, L, L2, 1, eLM_LaneBody) != eICR_Success) // // config | 205 | if (luaG_inter_move(U, L, L2, 1, LookupMode::LaneBody) != InterCopyResult::Success) // // config |
209 | { | 206 | { |
210 | (void) luaL_error( L, "failed to copy settings when loading lanes.core"); | 207 | luaL_error( L, "failed to copy settings when loading lanes.core"); // doesn't return |
211 | } | 208 | } |
212 | // set L2:_R[CONFIG_REGKEY] = settings | 209 | // set L2:_R[CONFIG_REGKEY] = settings |
213 | REGISTRY_SET( L2, CONFIG_REGKEY, lua_insert( L2, -2)); // | 210 | CONFIG_REGKEY.setValue(L2, [](lua_State* L) { lua_insert(L, -2); }); // config |
214 | STACK_END( L2, 0); | 211 | STACK_CHECK(L2, 0); |
215 | STACK_END( L, 0); | 212 | STACK_CHECK(L, 0); |
216 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); | 213 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed)); |
217 | } | 214 | } |
218 | 215 | ||
216 | // ################################################################################################# | ||
217 | |||
219 | void initialize_on_state_create( Universe* U, lua_State* L) | 218 | void initialize_on_state_create( Universe* U, lua_State* L) |
220 | { | 219 | { |
221 | STACK_CHECK( L, 0); | 220 | STACK_CHECK_START_REL(L, 1); // settings |
222 | lua_getfield( L, -1, "on_state_create"); // settings on_state_create|nil | 221 | lua_getfield(L, -1, "on_state_create"); // settings on_state_create|nil |
223 | if( !lua_isnil( L, -1)) | 222 | if( !lua_isnil(L, -1)) |
224 | { | 223 | { |
225 | // store C function pointer in an internal variable | 224 | // store C function pointer in an internal variable |
226 | U->on_state_create_func = lua_tocfunction( L, -1); // settings on_state_create | 225 | U->on_state_create_func = lua_tocfunction(L, -1); // settings on_state_create |
227 | if( U->on_state_create_func != NULL) | 226 | if (U->on_state_create_func != nullptr) |
228 | { | 227 | { |
229 | // make sure the function doesn't have upvalues | 228 | // make sure the function doesn't have upvalues |
230 | char const* upname = lua_getupvalue( L, -1, 1); // settings on_state_create upval? | 229 | char const* upname = lua_getupvalue(L, -1, 1); // settings on_state_create upval? |
231 | if( upname != NULL) // should be "" for C functions with upvalues if any | 230 | if (upname != nullptr) // should be "" for C functions with upvalues if any |
232 | { | 231 | { |
233 | (void) luaL_error( L, "on_state_create shouldn't have upvalues"); | 232 | (void) luaL_error(L, "on_state_create shouldn't have upvalues"); |
234 | } | 233 | } |
235 | // remove this C function from the config table so that it doesn't cause problems | 234 | // remove this C function from the config table so that it doesn't cause problems |
236 | // when we transfer the config table in newly created Lua states | 235 | // when we transfer the config table in newly created Lua states |
237 | lua_pushnil( L); // settings on_state_create nil | 236 | lua_pushnil(L); // settings on_state_create nil |
238 | lua_setfield( L, -3, "on_state_create"); // settings on_state_create | 237 | lua_setfield(L, -3, "on_state_create"); // settings on_state_create |
239 | } | 238 | } |
240 | else | 239 | else |
241 | { | 240 | { |
@@ -243,113 +242,120 @@ void initialize_on_state_create( Universe* U, lua_State* L) | |||
243 | U->on_state_create_func = (lua_CFunction) initialize_on_state_create; | 242 | U->on_state_create_func = (lua_CFunction) initialize_on_state_create; |
244 | } | 243 | } |
245 | } | 244 | } |
246 | lua_pop( L, 1); // settings | 245 | lua_pop(L, 1); // settings |
247 | STACK_END( L, 0); | 246 | STACK_CHECK(L, 1); |
248 | } | 247 | } |
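As a minimal sketch of what the check above expects (the function name my_on_state_create is hypothetical, not part of this changeset): an on_state_create callback supplied as a C function must be a plain lua_CFunction without upvalues, for example:

    // hypothetical example of a C on_state_create callback (no upvalues allowed)
    static int my_on_state_create(lua_State* L)
    {
        // perform whatever per-lane-state initialization the host application needs
        luaL_openlibs(L);
        return 0;
    }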
249 | 248 | ||
250 | lua_State* create_state( Universe* U, lua_State* from_) | 249 | // ################################################################################################# |
250 | |||
251 | lua_State* create_state(Universe* U, lua_State* from_) | ||
251 | { | 252 | { |
252 | lua_State* L; | 253 | lua_State* L; |
253 | #if LUAJIT_FLAVOR() == 64 | 254 | #if LUAJIT_FLAVOR() == 64 |
254 | // for some reason, LuaJIT 64 bits does not support creating a state with lua_newstate... | 255 | // for some reason, LuaJIT 64 bits does not support creating a state with lua_newstate... |
255 | L = luaL_newstate(); | 256 | L = luaL_newstate(); |
256 | #else // LUAJIT_FLAVOR() == 64 | 257 | #else // LUAJIT_FLAVOR() == 64 |
257 | if( U->provide_allocator != NULL) // we have a function we can call to obtain an allocator | 258 | if (U->provide_allocator != nullptr) // we have a function we can call to obtain an allocator |
258 | { | 259 | { |
259 | lua_pushcclosure( from_, U->provide_allocator, 0); | 260 | lua_pushcclosure( from_, U->provide_allocator, 0); |
260 | lua_call( from_, 0, 1); | 261 | lua_call( from_, 0, 1); |
261 | { | 262 | { |
262 | AllocatorDefinition* const def = lua_touserdata( from_, -1); | 263 | AllocatorDefinition* const def{ lua_tofulluserdata<AllocatorDefinition>(from_, -1) }; |
263 | L = lua_newstate( def->allocF, def->allocUD); | 264 | L = lua_newstate( def->m_allocF, def->m_allocUD); |
264 | } | 265 | } |
265 | lua_pop( from_, 1); | 266 | lua_pop( from_, 1); |
266 | } | 267 | } |
267 | else | 268 | else |
268 | { | 269 | { |
269 | // reuse the allocator provided when the master state was created | 270 | // reuse the allocator provided when the master state was created |
270 | L = lua_newstate( U->protected_allocator.definition.allocF, U->protected_allocator.definition.allocUD); | 271 | L = lua_newstate(U->protected_allocator.m_allocF, U->protected_allocator.m_allocUD); |
271 | } | 272 | } |
272 | #endif // LUAJIT_FLAVOR() == 64 | 273 | #endif // LUAJIT_FLAVOR() == 64 |
273 | 274 | ||
274 | if( L == NULL) | 275 | if (L == nullptr) |
275 | { | 276 | { |
276 | (void) luaL_error( from_, "luaG_newstate() failed while creating state; out of memory"); | 277 | luaL_error(from_, "luaG_newstate() failed while creating state; out of memory"); // doesn't return |
277 | } | 278 | } |
278 | return L; | 279 | return L; |
279 | } | 280 | } |
280 | 281 | ||
281 | void call_on_state_create( Universe* U, lua_State* L, lua_State* from_, LookupMode mode_) | 282 | // ################################################################################################# |
283 | |||
284 | void call_on_state_create(Universe* U, lua_State* L, lua_State* from_, LookupMode mode_) | ||
282 | { | 285 | { |
283 | if( U->on_state_create_func != NULL) | 286 | if (U->on_state_create_func != nullptr) |
284 | { | 287 | { |
285 | STACK_CHECK( L, 0); | 288 | STACK_CHECK_START_REL(L, 0); |
286 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "calling on_state_create()\n" INDENT_END)); | 289 | DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "calling on_state_create()\n" INDENT_END)); |
287 | if( U->on_state_create_func != (lua_CFunction) initialize_on_state_create) | 290 | if (U->on_state_create_func != (lua_CFunction) initialize_on_state_create) |
288 | { | 291 | { |
289 | // C function: recreate a closure in the new state, bypassing the lookup scheme | 292 | // C function: recreate a closure in the new state, bypassing the lookup scheme |
290 | lua_pushcfunction( L, U->on_state_create_func); // on_state_create() | 293 | lua_pushcfunction(L, U->on_state_create_func); // on_state_create() |
291 | } | 294 | } |
292 | else // Lua function located in the config table, copied when we opened "lanes.core" | 295 | else // Lua function located in the config table, copied when we opened "lanes.core" |
293 | { | 296 | { |
294 | if( mode_ != eLM_LaneBody) | 297 | if (mode_ != LookupMode::LaneBody) |
295 | { | 298 | { |
296 | // if attempting to call in a keeper state, do nothing because the function doesn't exist there | 299 | // if attempting to call in a keeper state, do nothing because the function doesn't exist there |
297 | // this doesn't count as an error though | 300 | // this doesn't count as an error though |
301 | STACK_CHECK(L, 0); | ||
298 | return; | 302 | return; |
299 | } | 303 | } |
300 | REGISTRY_GET( L, CONFIG_REGKEY); // {} | 304 | CONFIG_REGKEY.pushValue(L); // {} |
301 | STACK_MID( L, 1); | 305 | STACK_CHECK(L, 1); |
302 | lua_getfield( L, -1, "on_state_create"); // {} on_state_create() | 306 | lua_getfield(L, -1, "on_state_create"); // {} on_state_create() |
303 | lua_remove( L, -2); // on_state_create() | 307 | lua_remove(L, -2); // on_state_create() |
304 | } | 308 | } |
305 | STACK_MID( L, 1); | 309 | STACK_CHECK(L, 1); |
306 | // capture error and raise it in caller state | 310 | // capture error and raise it in caller state |
307 | if( lua_pcall( L, 0, 0, 0) != LUA_OK) | 311 | if (lua_pcall(L, 0, 0, 0) != LUA_OK) |
308 | { | 312 | { |
309 | luaL_error( from_, "on_state_create failed: \"%s\"", lua_isstring( L, -1) ? lua_tostring( L, -1) : lua_typename( L, lua_type( L, -1))); | 313 | luaL_error(from_, "on_state_create failed: \"%s\"", lua_isstring(L, -1) ? lua_tostring(L, -1) : lua_typename(L, lua_type(L, -1))); |
310 | } | 314 | } |
311 | STACK_END( L, 0); | 315 | STACK_CHECK(L, 0); |
312 | } | 316 | } |
313 | } | 317 | } |
314 | 318 | ||
319 | // ################################################################################################# | ||
320 | |||
315 | /* | 321 | /* |
316 | * Like 'luaL_openlibs()' but allows the set of libraries to be selected | 322 | * Like 'luaL_openlibs()' but allows the set of libraries to be selected |
317 | * | 323 | * |
318 | * NULL no libraries, not even base | 324 | * nullptr no libraries, not even base |
319 | * "" base library only | 325 | * "" base library only |
320 | * "io,string" named libraries | 326 | * "io,string" named libraries |
321 | * "*" all libraries | 327 | * "*" all libraries |
322 | * | 328 | * |
323 | * Base ("unpack", "print" etc.) is always added, unless 'libs' is NULL. | 329 | * Base ("unpack", "print" etc.) is always added, unless 'libs' is nullptr. |
324 | * | 330 | * |
325 | * *NOT* called for keeper states! | 331 | * *NOT* called for keeper states! |
326 | * | 332 | * |
327 | */ | 333 | */ |
328 | lua_State* luaG_newstate( Universe* U, lua_State* from_, char const* libs_) | 334 | lua_State* luaG_newstate(Universe* U, Source from_, char const* libs_) |
329 | { | 335 | { |
330 | lua_State* L = create_state( U, from_); | 336 | Dest const L{ create_state(U, from_) }; |
331 | 337 | ||
332 | STACK_GROW( L, 2); | 338 | STACK_GROW(L, 2); |
333 | STACK_CHECK_ABS( L, 0); | 339 | STACK_CHECK_START_ABS(L, 0); |
334 | 340 | ||
335 | // copy the universe as a light userdata (only the master state holds the full userdata) | 341 | // copy the universe as a light userdata (only the master state holds the full userdata) |
336 | // that way, if Lanes is required in this new state, we'll know we are part of this universe | 342 | // that way, if Lanes is required in this new state, we'll know we are part of this universe |
337 | universe_store( L, U); | 343 | universe_store( L, U); |
338 | STACK_MID( L, 0); | 344 | STACK_CHECK(L, 0); |
339 | 345 | ||
340 | // we'll need this every time we transfer some C function from/to this state | 346 | // we'll need this every time we transfer some C function from/to this state |
341 | REGISTRY_SET( L, LOOKUP_REGKEY, lua_newtable( L)); | 347 | LOOKUP_REGKEY.setValue(L, [](lua_State* L) { lua_newtable(L); }); |
342 | STACK_MID( L, 0); | 348 | STACK_CHECK(L, 0); |
343 | 349 | ||
344 | // neither libs (not even 'base') nor special init func: we are done | 350 | // neither libs (not even 'base') nor special init func: we are done |
345 | if( libs_ == NULL && U->on_state_create_func == NULL) | 351 | if (libs_ == nullptr && U->on_state_create_func == nullptr) |
346 | { | 352 | { |
347 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_newstate(NULL)\n" INDENT_END)); | 353 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_newstate(nullptr)\n" INDENT_END)); |
348 | return L; | 354 | return L; |
349 | } | 355 | } |
350 | 356 | ||
351 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_newstate()\n" INDENT_END)); | 357 | DEBUGSPEW_CODE(fprintf( stderr, INDENT_BEGIN "luaG_newstate()\n" INDENT_END)); |
352 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); | 358 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed)); |
353 | 359 | ||
354 | // copy settings (for example because it may contain a Lua on_state_create function) | 360 | // copy settings (for example because it may contain a Lua on_state_create function) |
355 | copy_one_time_settings( U, from_, L); | 361 | copy_one_time_settings( U, from_, L); |
@@ -360,7 +366,7 @@ lua_State* luaG_newstate( Universe* U, lua_State* from_, char const* libs_) | |||
360 | 366 | ||
361 | // Anything causes 'base' to be taken in | 367 | // Anything causes 'base' to be taken in |
362 | // | 368 | // |
363 | if( libs_ != NULL) | 369 | if (libs_ != nullptr) |
364 | { | 370 | { |
365 | // special "*" case (mainly to help with LuaJIT compatibility) | 371 | // special "*" case (mainly to help with LuaJIT compatibility) |
366 | // as we are called from luaopen_lanes_core() already, and that would deadlock | 372 | // as we are called from luaopen_lanes_core() already, and that would deadlock |
@@ -370,7 +376,7 @@ lua_State* luaG_newstate( Universe* U, lua_State* from_, char const* libs_) | |||
370 | luaL_openlibs( L); | 376 | luaL_openlibs( L); |
371 | // don't forget lanes.core for regular lane states | 377 | // don't forget lanes.core for regular lane states |
372 | open1lib( DEBUGSPEW_PARAM_COMMA( U) L, "lanes.core", 10); | 378 | open1lib( DEBUGSPEW_PARAM_COMMA( U) L, "lanes.core", 10); |
373 | libs_ = NULL; // done with libs | 379 | libs_ = nullptr; // done with libs |
374 | } | 380 | } |
375 | else | 381 | else |
376 | { | 382 | { |
@@ -386,7 +392,7 @@ lua_State* luaG_newstate( Universe* U, lua_State* from_, char const* libs_) | |||
386 | #endif // LUA_VERSION_NUM | 392 | #endif // LUA_VERSION_NUM |
387 | } | 393 | } |
388 | } | 394 | } |
389 | STACK_END( L, 0); | 395 | STACK_CHECK(L, 0); |
390 | 396 | ||
391 | // scan all libraries, open them one by one | 397 | // scan all libraries, open them one by one |
392 | if( libs_) | 398 | if( libs_) |
@@ -412,31 +418,32 @@ lua_State* luaG_newstate( Universe* U, lua_State* from_, char const* libs_) | |||
412 | 418 | ||
413 | // call this after the base libraries are loaded and GC is restarted | 419 | // call this after the base libraries are loaded and GC is restarted |
414 | // will raise an error in from_ in case of problem | 420 | // will raise an error in from_ in case of problem |
415 | call_on_state_create( U, L, from_, eLM_LaneBody); | 421 | call_on_state_create(U, L, from_, LookupMode::LaneBody); |
416 | 422 | ||
417 | STACK_CHECK( L, 0); | 423 | STACK_CHECK(L, 0); |
418 | // after all this, register everything we find in our name<->function database | 424 | // after all this, register everything we find in our name<->function database |
419 | lua_pushglobaltable( L); // Lua 5.2 no longer has LUA_GLOBALSINDEX: we must push globals table on the stack | 425 | lua_pushglobaltable( L); // Lua 5.2 no longer has LUA_GLOBALSINDEX: we must push globals table on the stack |
420 | populate_func_lookup_table( L, -1, NULL); | 426 | STACK_CHECK(L, 1); |
427 | populate_func_lookup_table(L, -1, nullptr); | ||
421 | 428 | ||
422 | #if 0 && USE_DEBUG_SPEW() | 429 | #if 0 && USE_DEBUG_SPEW() |
423 | // dump the lookup database contents | 430 | // dump the lookup database contents |
424 | lua_getfield( L, LUA_REGISTRYINDEX, LOOKUP_REGKEY); // {} | 431 | lua_getfield(L, LUA_REGISTRYINDEX, LOOKUP_REGKEY); // {} |
425 | lua_pushnil( L); // {} nil | 432 | lua_pushnil(L); // {} nil |
426 | while( lua_next( L, -2)) // {} k v | 433 | while (lua_next(L, -2)) // {} k v |
427 | { | 434 | { |
428 | lua_getglobal( L, "print"); // {} k v print | 435 | lua_getglobal(L, "print"); // {} k v print |
429 | lua_pushlstring( L, debugspew_indent, U->debugspew_indent_depth); // {} k v print " " | 436 | lua_pushlstring(L, debugspew_indent, U->debugspew_indent_depth.load(std::memory_order_relaxed)); // {} k v print " " |
430 | lua_pushvalue( L, -4); // {} k v print " " k | 437 | lua_pushvalue(L, -4); // {} k v print " " k |
431 | lua_pushvalue( L, -4); // {} k v print " " k v | 438 | lua_pushvalue(L, -4); // {} k v print " " k v |
432 | lua_call( L, 3, 0); // {} k v | 439 | lua_call(L, 3, 0); // {} k v |
433 | lua_pop( L, 1); // {} k | 440 | lua_pop(L, 1); // {} k |
434 | } | 441 | } |
435 | lua_pop( L, 1); // {} | 442 | lua_pop(L, 1); // {} |
436 | #endif // USE_DEBUG_SPEW() | 443 | #endif // USE_DEBUG_SPEW() |
437 | 444 | ||
438 | lua_pop( L, 1); | 445 | lua_pop(L, 1); |
439 | STACK_END( L, 0); | 446 | STACK_CHECK(L, 0); |
440 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); | 447 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed)); |
441 | return L; | 448 | return L; |
442 | } | 449 | } |
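A rough usage sketch of the libs_ parameter handled above (illustrative only; it assumes a valid Universe* U and source state L, and that the Source wrapper is constructible from a lua_State* as elsewhere in this changeset):

    lua_State* const base_only = luaG_newstate(U, Source{ L }, "");          // base library only
    lua_State* const some_libs = luaG_newstate(U, Source{ L }, "io,string"); // base + io + string
    lua_State* const all_libs  = luaG_newstate(U, Source{ L }, "*");         // everything, including lanes.core
    lua_State* const bare      = luaG_newstate(U, Source{ L }, nullptr);     // no libraries at all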
diff --git a/src/state.h b/src/state.h index e844405..e1c311a 100644 --- a/src/state.h +++ b/src/state.h | |||
@@ -1,22 +1,19 @@ | |||
1 | #ifndef __LANES_STATE_H__ | 1 | #pragma once |
2 | #define __LANES_STATE_H__ | ||
3 | |||
4 | //#include "lauxlib.h" | ||
5 | #include "threading.h" | ||
6 | #include "deep.h" | ||
7 | 2 | ||
8 | #include "macros_and_utils.h" | 3 | #include "macros_and_utils.h" |
9 | 4 | ||
10 | void serialize_require( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State *L); | 5 | // forwards |
6 | enum class LookupMode; | ||
7 | class Universe; | ||
8 | |||
9 | void serialize_require(DEBUGSPEW_PARAM_COMMA(Universe* U) lua_State* L); | ||
11 | 10 | ||
12 | // ################################################################################################ | 11 | // ################################################################################################ |
13 | 12 | ||
14 | lua_State* create_state( Universe* U, lua_State* from_); | 13 | [[nodiscard]] lua_State* create_state(Universe* U, lua_State* from_); |
15 | lua_State* luaG_newstate( Universe* U, lua_State* _from, char const* libs); | 14 | [[nodiscard]] lua_State* luaG_newstate(Universe* U, Source _from, char const* libs); |
16 | 15 | ||
17 | // ################################################################################################ | 16 | // ################################################################################################ |
18 | 17 | ||
19 | void initialize_on_state_create( Universe* U, lua_State* L); | 18 | void initialize_on_state_create(Universe* U, lua_State* L); |
20 | void call_on_state_create( Universe* U, lua_State* L, lua_State* from_, LookupMode mode_); | 19 | void call_on_state_create(Universe* U, lua_State* L, lua_State* from_, LookupMode mode_); |
21 | |||
22 | #endif // __LANES_STATE_H__ | ||
diff --git a/src/threading.c b/src/threading.c deleted file mode 100644 index 2464d03..0000000 --- a/src/threading.c +++ /dev/null | |||
@@ -1,1041 +0,0 @@ | |||
1 | /* | ||
2 | * THREADING.C Copyright (c) 2007-08, Asko Kauppi | ||
3 | * Copyright (C) 2009-19, Benoit Germain | ||
4 | * | ||
5 | * Lua Lanes OS threading specific code. | ||
6 | * | ||
7 | * References: | ||
8 | * <http://www.cse.wustl.edu/~schmidt/win32-cv-1.html> | ||
9 | */ | ||
10 | |||
11 | /* | ||
12 | =============================================================================== | ||
13 | |||
14 | Copyright (C) 2007-10 Asko Kauppi <akauppi@gmail.com> | ||
15 | Copyright (C) 2009-14, Benoit Germain <bnt.germain@gmail.com> | ||
16 | |||
17 | Permission is hereby granted, free of charge, to any person obtaining a copy | ||
18 | of this software and associated documentation files (the "Software"), to deal | ||
19 | in the Software without restriction, including without limitation the rights | ||
20 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||
21 | copies of the Software, and to permit persons to whom the Software is | ||
22 | furnished to do so, subject to the following conditions: | ||
23 | |||
24 | The above copyright notice and this permission notice shall be included in | ||
25 | all copies or substantial portions of the Software. | ||
26 | |||
27 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
28 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
29 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
30 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
31 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
32 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||
33 | THE SOFTWARE. | ||
34 | |||
35 | =============================================================================== | ||
36 | */ | ||
37 | #if defined(__linux__) | ||
38 | |||
39 | # ifndef _GNU_SOURCE // definition by the makefile can cause a redefinition error | ||
40 | # define _GNU_SOURCE // must be defined before any include | ||
41 | # endif // _GNU_SOURCE | ||
42 | |||
43 | # ifdef __ANDROID__ | ||
44 | # include <android/log.h> | ||
45 | # define LOG_TAG "LuaLanes" | ||
46 | # endif // __ANDROID__ | ||
47 | |||
48 | #endif // __linux__ | ||
49 | |||
50 | #include <stdio.h> | ||
51 | #include <stdlib.h> | ||
52 | #include <assert.h> | ||
53 | #include <errno.h> | ||
54 | #include <math.h> | ||
55 | |||
56 | #include "threading.h" | ||
57 | |||
58 | #if !defined( PLATFORM_XBOX) && !defined( PLATFORM_WIN32) && !defined( PLATFORM_POCKETPC) | ||
59 | # include <sys/time.h> | ||
60 | #endif // non-WIN32 timing | ||
61 | |||
62 | |||
63 | #if defined(PLATFORM_LINUX) || defined(PLATFORM_CYGWIN) | ||
64 | # include <sys/types.h> | ||
65 | # include <unistd.h> | ||
66 | #endif | ||
67 | |||
68 | /* Linux needs to check whether it's been run as root | ||
69 | */ | ||
70 | #ifdef PLATFORM_LINUX | ||
71 | volatile bool_t sudo; | ||
72 | #endif | ||
73 | |||
74 | #ifdef PLATFORM_OSX | ||
75 | # include "threading_osx.h" | ||
76 | #endif | ||
77 | |||
78 | /* Linux with older glibc (such as Debian) doesn't have pthread_setname_np, but has prctl | ||
79 | */ | ||
80 | #if defined PLATFORM_LINUX | ||
81 | #if defined __GNU_LIBRARY__ && __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 12 | ||
82 | #define LINUX_USE_PTHREAD_SETNAME_NP 1 | ||
83 | #else // glibc without pthread_setname_np | ||
84 | #include <sys/prctl.h> | ||
85 | #define LINUX_USE_PTHREAD_SETNAME_NP 0 | ||
86 | #endif // glibc without pthread_setname_np | ||
87 | #endif // PLATFORM_LINUX | ||
88 | |||
89 | #ifdef _MSC_VER | ||
90 | // ".. selected for automatic inline expansion" (/O2 option) | ||
91 | # pragma warning( disable : 4711 ) | ||
92 | // ".. type cast from function pointer ... to data pointer" | ||
93 | # pragma warning( disable : 4054 ) | ||
94 | #endif | ||
95 | |||
96 | //#define THREAD_CREATE_RETRIES_MAX 20 | ||
97 | // loops (maybe retry forever?) | ||
98 | |||
99 | /* | ||
100 | * FAIL is for unexpected API return values - essentially programming | ||
101 | * error in _this_ code. | ||
102 | */ | ||
103 | #if defined( PLATFORM_XBOX) || defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC) | ||
104 | static void FAIL( char const* funcname, int rc) | ||
105 | { | ||
106 | #if defined( PLATFORM_XBOX) | ||
107 | fprintf( stderr, "%s() failed! (%d)\n", funcname, rc ); | ||
108 | #else // PLATFORM_XBOX | ||
109 | char buf[256]; | ||
110 | FormatMessageA( FORMAT_MESSAGE_FROM_SYSTEM, NULL, rc, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), buf, 256, NULL); | ||
111 | fprintf( stderr, "%s() failed! [GetLastError() -> %d] '%s'", funcname, rc, buf); | ||
112 | #endif // PLATFORM_XBOX | ||
113 | #ifdef _MSC_VER | ||
114 | __debugbreak(); // give a chance to the debugger! | ||
115 | #endif // _MSC_VER | ||
116 | abort(); | ||
117 | } | ||
118 | #endif // win32 build | ||
119 | |||
120 | |||
121 | /* | ||
122 | * Returns millisecond timing (in seconds) for the current time. | ||
123 | * | ||
124 | * Note: This function should be called once in single-threaded mode in Win32, | ||
125 | * to get it initialized. | ||
126 | */ | ||
127 | time_d now_secs(void) { | ||
128 | |||
129 | #if defined( PLATFORM_XBOX) || defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC) | ||
130 | /* | ||
131 | * Windows FILETIME values are "100-nanosecond intervals since | ||
132 | * January 1, 1601 (UTC)" (MSDN). Well, we'd want Unix Epoch as | ||
133 | * the offset and it seems, so would they: | ||
134 | * | ||
135 | * <http://msdn.microsoft.com/en-us/library/ms724928(VS.85).aspx> | ||
136 | */ | ||
137 | SYSTEMTIME st; | ||
138 | FILETIME ft; | ||
139 | ULARGE_INTEGER uli; | ||
140 | static ULARGE_INTEGER uli_epoch; // Jan 1st 1970 0:0:0 | ||
141 | |||
142 | if (uli_epoch.HighPart==0) { | ||
143 | st.wYear= 1970; | ||
144 | st.wMonth= 1; // Jan | ||
145 | st.wDay= 1; | ||
146 | st.wHour= st.wMinute= st.wSecond= st.wMilliseconds= 0; | ||
147 | |||
148 | if (!SystemTimeToFileTime( &st, &ft )) | ||
149 | FAIL( "SystemTimeToFileTime", GetLastError() ); | ||
150 | |||
151 | uli_epoch.LowPart= ft.dwLowDateTime; | ||
152 | uli_epoch.HighPart= ft.dwHighDateTime; | ||
153 | } | ||
154 | |||
155 | GetSystemTime( &st ); // current system date/time in UTC | ||
156 | if (!SystemTimeToFileTime( &st, &ft )) | ||
157 | FAIL( "SystemTimeToFileTime", GetLastError() ); | ||
158 | |||
159 | uli.LowPart= ft.dwLowDateTime; | ||
160 | uli.HighPart= ft.dwHighDateTime; | ||
161 | |||
162 | /* 'double' has less accuracy than 64-bit int, but if it were to degrade, | ||
163 | * it would do so gracefully. In practice, the integer accuracy is not | ||
164 | * of the 100ns class but just 1ms (Windows XP). | ||
165 | */ | ||
166 | # if 1 | ||
167 | // >= 2.0.3 code | ||
168 | return (double) ((uli.QuadPart - uli_epoch.QuadPart)/10000) / 1000.0; | ||
169 | # elif 0 | ||
170 | // fix from Kriss Daniels, see: | ||
171 | // <http://luaforge.net/forum/forum.php?thread_id=22704&forum_id=1781> | ||
172 | // | ||
173 | // "seem to be getting negative numbers from the old version, probably number | ||
174 | // conversion clipping, this fixes it and maintains ms resolution" | ||
175 | // | ||
176 | // This was a bad fix, and caused timer test 5 sec timers to disappear. | ||
177 | // --AKa 25-Jan-2009 | ||
178 | // | ||
179 | return ((double)((signed)((uli.QuadPart/10000) - (uli_epoch.QuadPart/10000)))) / 1000.0; | ||
180 | # else | ||
181 | // <= 2.0.2 code | ||
182 | return (double)(uli.QuadPart - uli_epoch.QuadPart) / 10000000.0; | ||
183 | # endif | ||
184 | #else // !(defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC)) | ||
185 | struct timeval tv; | ||
186 | // { | ||
187 | // time_t tv_sec; /* seconds since Jan. 1, 1970 */ | ||
188 | // suseconds_t tv_usec; /* and microseconds */ | ||
189 | // }; | ||
190 | |||
191 | int rc= gettimeofday( &tv, NULL /*time zone not used any more (in Linux)*/ ); | ||
192 | assert( rc==0 ); | ||
193 | |||
194 | return ((double)tv.tv_sec) + ((tv.tv_usec)/1000) / 1000.0; | ||
195 | #endif // !(defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC)) | ||
196 | } | ||
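A short worked example of the conversion above (the tick value is made up): FILETIME counts 100-ns intervals, so after subtracting the cached 1970 epoch value the quotient by 10000 is whole milliseconds, and dividing by 1000.0 yields seconds as a double:

    // illustrative arithmetic only, mirroring the ">= 2.0.3" branch above
    ULONGLONG const delta_100ns = 17131968000000000ULL;          // made-up 100-ns tick delta since the Unix epoch
    double const secs = (double) (delta_100ns / 10000) / 1000.0; // ticks -> whole ms -> 1713196800.0 seconds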
197 | |||
198 | |||
199 | /* | ||
200 | */ | ||
201 | time_d SIGNAL_TIMEOUT_PREPARE( double secs ) { | ||
202 | if (secs<=0.0) return secs; | ||
203 | else return now_secs() + secs; | ||
204 | } | ||
205 | |||
206 | |||
207 | #if THREADAPI == THREADAPI_PTHREAD | ||
208 | /* | ||
209 | * Prepare 'abs_secs' kind of timeout to 'timespec' format | ||
210 | */ | ||
211 | static void prepare_timeout( struct timespec *ts, time_d abs_secs ) { | ||
212 | assert(ts); | ||
213 | assert( abs_secs >= 0.0 ); | ||
214 | |||
215 | if (abs_secs==0.0) | ||
216 | abs_secs= now_secs(); | ||
217 | |||
218 | ts->tv_sec= (time_t) floor( abs_secs ); | ||
219 | ts->tv_nsec= ((long)((abs_secs - ts->tv_sec) * 1000.0 +0.5)) * 1000000UL; // 1ms = 1000000ns | ||
220 | if (ts->tv_nsec == 1000000000UL) | ||
221 | { | ||
222 | ts->tv_nsec = 0; | ||
223 | ts->tv_sec = ts->tv_sec + 1; | ||
224 | } | ||
225 | } | ||
226 | #endif // THREADAPI == THREADAPI_PTHREAD | ||
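For example (made-up value), an absolute deadline of 5.25 seconds splits as follows under the conversion above:

    // illustrative only: same arithmetic as prepare_timeout()
    double const abs_secs = 5.25;
    struct timespec ts;
    ts.tv_sec  = (time_t) floor(abs_secs);                                     // 5
    ts.tv_nsec = ((long) ((abs_secs - ts.tv_sec) * 1000.0 + 0.5)) * 1000000UL; // 250 ms -> 250000000 ns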
227 | |||
228 | |||
229 | /*---=== Threading ===---*/ | ||
230 | |||
231 | //--- | ||
232 | // It may be meaningful to explicitly limit the new threads' C stack size. | ||
233 | // We should know how much Lua needs in the C stack; all Lua-side allocations | ||
234 | // are done on the heap, so they don't count. | ||
235 | // | ||
236 | // Consequence of _not_ limiting the stack is running out of virtual memory | ||
237 | // with 1000-5000 threads on 32-bit systems. | ||
238 | // | ||
239 | // Note: using external C modules may be affected by the stack size check. | ||
240 | // if you run into problems, set it back to '0' (the system's default stack size). | ||
241 | // | ||
242 | // Win32: 64K (?) | ||
243 | // Win64: xxx | ||
244 | // | ||
245 | // Linux x86: 2MB Ubuntu 7.04 via 'pthread_getstacksize()' | ||
246 | // Linux x64: xxx | ||
247 | // Linux ARM: xxx | ||
248 | // | ||
249 | // OS X 10.4.9: 512K <http://developer.apple.com/qa/qa2005/qa1419.html> | ||
250 | // valid values N * 4KB | ||
251 | // | ||
252 | #ifndef _THREAD_STACK_SIZE | ||
253 | # if defined( PLATFORM_XBOX) || defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC) || defined( PLATFORM_CYGWIN) | ||
254 | # define _THREAD_STACK_SIZE 0 | ||
255 | // Win32: does it work with less? | ||
256 | # elif (defined PLATFORM_OSX) | ||
257 | # define _THREAD_STACK_SIZE (524288/2) // 262144 | ||
258 | // OS X: "make test" works on 65536 and even below | ||
259 | // "make perftest" works on >= 4*65536 == 262144 (not 3*65536) | ||
260 | # elif (defined PLATFORM_LINUX) && (defined __i386) | ||
261 | # define _THREAD_STACK_SIZE (2097152/16) // 131072 | ||
262 | // Linux x86 (Ubuntu 7.04): "make perftest" works on /16 (not on /32) | ||
263 | # elif (defined PLATFORM_BSD) && (defined __i386) | ||
264 | # define _THREAD_STACK_SIZE (1048576/8) // 131072 | ||
265 | // FreeBSD 6.2 SMP i386: ("gmake perftest" works on /8 (not on /16) | ||
266 | # endif | ||
267 | #endif | ||
268 | |||
269 | #if THREADAPI == THREADAPI_WINDOWS | ||
270 | |||
271 | #if _WIN32_WINNT < 0x0600 // CONDITION_VARIABLE aren't available | ||
272 | // | ||
273 | void MUTEX_INIT( MUTEX_T *ref ) { | ||
274 | *ref= CreateMutex( NULL /*security attr*/, FALSE /*not locked*/, NULL ); | ||
275 | if (!ref) FAIL( "CreateMutex", GetLastError() ); | ||
276 | } | ||
277 | void MUTEX_FREE( MUTEX_T *ref ) { | ||
278 | if (!CloseHandle(*ref)) FAIL( "CloseHandle (mutex)", GetLastError() ); | ||
279 | *ref= NULL; | ||
280 | } | ||
281 | void MUTEX_LOCK( MUTEX_T *ref ) | ||
282 | { | ||
283 | DWORD rc = WaitForSingleObject( *ref, INFINITE); | ||
284 | // ERROR_WAIT_NO_CHILDREN means a thread was killed (a lane terminated because of an error raised during a linda transfer, for example) while holding this mutex | ||
285 | // this is not a big problem as we will grab it just the same, so ignore this particular error | ||
286 | if( rc != 0 && rc != ERROR_WAIT_NO_CHILDREN) | ||
287 | FAIL( "WaitForSingleObject", (rc == WAIT_FAILED) ? GetLastError() : rc); | ||
288 | } | ||
289 | void MUTEX_UNLOCK( MUTEX_T *ref ) { | ||
290 | if (!ReleaseMutex(*ref)) | ||
291 | FAIL( "ReleaseMutex", GetLastError() ); | ||
292 | } | ||
293 | #endif // CONDITION_VARIABLE aren't available | ||
294 | |||
295 | static int const gs_prio_remap[] = | ||
296 | { | ||
297 | THREAD_PRIORITY_IDLE, | ||
298 | THREAD_PRIORITY_LOWEST, | ||
299 | THREAD_PRIORITY_BELOW_NORMAL, | ||
300 | THREAD_PRIORITY_NORMAL, | ||
301 | THREAD_PRIORITY_ABOVE_NORMAL, | ||
302 | THREAD_PRIORITY_HIGHEST, | ||
303 | THREAD_PRIORITY_TIME_CRITICAL | ||
304 | }; | ||
305 | |||
306 | /* MSDN: "If you would like to use the CRT in ThreadProc, use the | ||
307 | _beginthreadex function instead (of CreateThread)." | ||
308 | MSDN: "you can create at most 2028 threads" | ||
309 | */ | ||
310 | // Note: Visual C++ requires '__stdcall' where it is | ||
311 | void THREAD_CREATE( THREAD_T* ref, THREAD_RETURN_T (__stdcall *func)( void*), void* data, int prio /* -3..+3 */) | ||
312 | { | ||
313 | HANDLE h = (HANDLE) _beginthreadex( NULL, // security | ||
314 | _THREAD_STACK_SIZE, | ||
315 | func, | ||
316 | data, | ||
317 | 0, // flags (0/CREATE_SUSPENDED) | ||
318 | NULL // thread id (not used) | ||
319 | ); | ||
320 | |||
321 | if( h == NULL) // _beginthreadex returns 0L on failure instead of -1L (like _beginthread) | ||
322 | { | ||
323 | FAIL( "CreateThread", GetLastError()); | ||
324 | } | ||
325 | |||
326 | if (prio != THREAD_PRIO_DEFAULT) | ||
327 | { | ||
328 | if (!SetThreadPriority( h, gs_prio_remap[prio + 3])) | ||
329 | { | ||
330 | FAIL( "SetThreadPriority", GetLastError()); | ||
331 | } | ||
332 | } | ||
333 | |||
334 | *ref = h; | ||
335 | } | ||
336 | |||
337 | |||
338 | void THREAD_SET_PRIORITY( int prio) | ||
339 | { | ||
340 | // prio range [-3,+3] was checked by the caller | ||
341 | if (!SetThreadPriority( GetCurrentThread(), gs_prio_remap[prio + 3])) | ||
342 | { | ||
343 | FAIL( "THREAD_SET_PRIORITY", GetLastError()); | ||
344 | } | ||
345 | } | ||
346 | |||
347 | void THREAD_SET_AFFINITY( unsigned int aff) | ||
348 | { | ||
349 | if( !SetThreadAffinityMask( GetCurrentThread(), aff)) | ||
350 | { | ||
351 | FAIL( "THREAD_SET_AFFINITY", GetLastError()); | ||
352 | } | ||
353 | } | ||
354 | |||
355 | bool_t THREAD_WAIT_IMPL( THREAD_T *ref, double secs) | ||
356 | { | ||
357 | DWORD ms = (secs<0.0) ? INFINITE : (DWORD)((secs*1000.0)+0.5); | ||
358 | |||
359 | DWORD rc= WaitForSingleObject( *ref, ms /*timeout*/ ); | ||
360 | // | ||
361 | // (WAIT_ABANDONED) | ||
362 | // WAIT_OBJECT_0 success (0) | ||
363 | // WAIT_TIMEOUT | ||
364 | // WAIT_FAILED more info via GetLastError() | ||
365 | |||
366 | if (rc == WAIT_TIMEOUT) return FALSE; | ||
367 | if( rc !=0) FAIL( "WaitForSingleObject", rc==WAIT_FAILED ? GetLastError() : rc); | ||
368 | *ref= NULL; // thread no longer usable | ||
369 | return TRUE; | ||
370 | } | ||
371 | // | ||
372 | void THREAD_KILL( THREAD_T *ref ) | ||
373 | { | ||
374 | // nonexistent on Xbox360, simply disable until a better solution is found | ||
375 | #if !defined( PLATFORM_XBOX) | ||
376 | // in theory no-one should call this as it is very dangerous (memory and mutex leaks, no notification of DLLs, etc.) | ||
377 | if (!TerminateThread( *ref, 0 )) FAIL("TerminateThread", GetLastError()); | ||
378 | #endif // PLATFORM_XBOX | ||
379 | *ref= NULL; | ||
380 | } | ||
381 | |||
382 | void THREAD_MAKE_ASYNCH_CANCELLABLE() {} // nothing to do for Windows threads; we can cancel them anytime we want | ||
383 | |||
384 | #if !defined __GNUC__ | ||
385 | //see http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx | ||
386 | #define MS_VC_EXCEPTION 0x406D1388 | ||
387 | #pragma pack(push,8) | ||
388 | typedef struct tagTHREADNAME_INFO | ||
389 | { | ||
390 | DWORD dwType; // Must be 0x1000. | ||
391 | LPCSTR szName; // Pointer to name (in user addr space). | ||
392 | DWORD dwThreadID; // Thread ID (-1=caller thread). | ||
393 | DWORD dwFlags; // Reserved for future use, must be zero. | ||
394 | } THREADNAME_INFO; | ||
395 | #pragma pack(pop) | ||
396 | #endif // !__GNUC__ | ||
397 | |||
398 | void THREAD_SETNAME( char const* _name) | ||
399 | { | ||
400 | #if !defined __GNUC__ | ||
401 | THREADNAME_INFO info; | ||
402 | info.dwType = 0x1000; | ||
403 | info.szName = _name; | ||
404 | info.dwThreadID = GetCurrentThreadId(); | ||
405 | info.dwFlags = 0; | ||
406 | |||
407 | __try | ||
408 | { | ||
409 | RaiseException( MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(ULONG_PTR), (ULONG_PTR*)&info ); | ||
410 | } | ||
411 | __except(EXCEPTION_EXECUTE_HANDLER) | ||
412 | { | ||
413 | } | ||
414 | #endif // !__GNUC__ | ||
415 | } | ||
416 | |||
417 | #if _WIN32_WINNT < 0x0600 // CONDITION_VARIABLE aren't available | ||
418 | |||
419 | void SIGNAL_INIT( SIGNAL_T* ref) | ||
420 | { | ||
421 | InitializeCriticalSection( &ref->signalCS); | ||
422 | InitializeCriticalSection( &ref->countCS); | ||
423 | if( 0 == (ref->waitEvent = CreateEvent( 0, TRUE, FALSE, 0))) // manual-reset | ||
424 | FAIL( "CreateEvent", GetLastError()); | ||
425 | if( 0 == (ref->waitDoneEvent = CreateEvent( 0, FALSE, FALSE, 0))) // auto-reset | ||
426 | FAIL( "CreateEvent", GetLastError()); | ||
427 | ref->waitersCount = 0; | ||
428 | } | ||
429 | |||
430 | void SIGNAL_FREE( SIGNAL_T* ref) | ||
431 | { | ||
432 | CloseHandle( ref->waitDoneEvent); | ||
433 | CloseHandle( ref->waitEvent); | ||
434 | DeleteCriticalSection( &ref->countCS); | ||
435 | DeleteCriticalSection( &ref->signalCS); | ||
436 | } | ||
437 | |||
438 | bool_t SIGNAL_WAIT( SIGNAL_T* ref, MUTEX_T* mu_ref, time_d abs_secs) | ||
439 | { | ||
440 | DWORD errc; | ||
441 | DWORD ms; | ||
442 | |||
443 | if( abs_secs < 0.0) | ||
444 | ms = INFINITE; | ||
445 | else if( abs_secs == 0.0) | ||
446 | ms = 0; | ||
447 | else | ||
448 | { | ||
449 | time_d msd = (abs_secs - now_secs()) * 1000.0 + 0.5; | ||
450 | // If the time already passed, still try once (ms==0). A short timeout | ||
451 | // may have turned negative or 0 because of the two time samples done. | ||
452 | ms = msd <= 0.0 ? 0 : (DWORD)msd; | ||
453 | } | ||
454 | |||
455 | EnterCriticalSection( &ref->signalCS); | ||
456 | EnterCriticalSection( &ref->countCS); | ||
457 | ++ ref->waitersCount; | ||
458 | LeaveCriticalSection( &ref->countCS); | ||
459 | LeaveCriticalSection( &ref->signalCS); | ||
460 | |||
461 | errc = SignalObjectAndWait( *mu_ref, ref->waitEvent, ms, FALSE); | ||
462 | |||
463 | EnterCriticalSection( &ref->countCS); | ||
464 | if( 0 == -- ref->waitersCount) | ||
465 | { | ||
466 | // we're the last one leaving... | ||
467 | ResetEvent( ref->waitEvent); | ||
468 | SetEvent( ref->waitDoneEvent); | ||
469 | } | ||
470 | LeaveCriticalSection( &ref->countCS); | ||
471 | MUTEX_LOCK( mu_ref); | ||
472 | |||
473 | switch( errc) | ||
474 | { | ||
475 | case WAIT_TIMEOUT: | ||
476 | return FALSE; | ||
477 | case WAIT_OBJECT_0: | ||
478 | return TRUE; | ||
479 | } | ||
480 | |||
481 | FAIL( "SignalObjectAndWait", GetLastError()); | ||
482 | return FALSE; | ||
483 | } | ||
484 | |||
485 | void SIGNAL_ALL( SIGNAL_T* ref) | ||
486 | { | ||
487 | DWORD errc = WAIT_OBJECT_0; | ||
488 | |||
489 | EnterCriticalSection( &ref->signalCS); | ||
490 | EnterCriticalSection( &ref->countCS); | ||
491 | |||
492 | if( ref->waitersCount > 0) | ||
493 | { | ||
494 | ResetEvent( ref->waitDoneEvent); | ||
495 | SetEvent( ref->waitEvent); | ||
496 | LeaveCriticalSection( &ref->countCS); | ||
497 | errc = WaitForSingleObject( ref->waitDoneEvent, INFINITE); | ||
498 | } | ||
499 | else | ||
500 | { | ||
501 | LeaveCriticalSection( &ref->countCS); | ||
502 | } | ||
503 | |||
504 | LeaveCriticalSection( &ref->signalCS); | ||
505 | |||
506 | if( WAIT_OBJECT_0 != errc) | ||
507 | FAIL( "WaitForSingleObject", GetLastError()); | ||
508 | } | ||
509 | |||
510 | #else // CONDITION_VARIABLE are available, use them | ||
511 | |||
512 | // | ||
513 | void SIGNAL_INIT( SIGNAL_T *ref ) | ||
514 | { | ||
515 | InitializeConditionVariable( ref); | ||
516 | } | ||
517 | |||
518 | void SIGNAL_FREE( SIGNAL_T *ref ) | ||
519 | { | ||
520 | // nothing to do | ||
521 | (void)ref; | ||
522 | } | ||
523 | |||
524 | bool_t SIGNAL_WAIT( SIGNAL_T *ref, MUTEX_T *mu_ref, time_d abs_secs) | ||
525 | { | ||
526 | long ms; | ||
527 | |||
528 | if( abs_secs < 0.0) | ||
529 | ms = INFINITE; | ||
530 | else if( abs_secs == 0.0) | ||
531 | ms = 0; | ||
532 | else | ||
533 | { | ||
534 | ms = (long) ((abs_secs - now_secs())*1000.0 + 0.5); | ||
535 | |||
536 | // If the time already passed, still try once (ms==0). A short timeout | ||
537 | // may have turned negative or 0 because of the two time samples done. | ||
538 | // | ||
539 | if( ms < 0) | ||
540 | ms = 0; | ||
541 | } | ||
542 | |||
543 | if( !SleepConditionVariableCS( ref, mu_ref, ms)) | ||
544 | { | ||
545 | if( GetLastError() == ERROR_TIMEOUT) | ||
546 | { | ||
547 | return FALSE; | ||
548 | } | ||
549 | else | ||
550 | { | ||
551 | FAIL( "SleepConditionVariableCS", GetLastError()); | ||
552 | } | ||
553 | } | ||
554 | return TRUE; | ||
555 | } | ||
556 | |||
557 | void SIGNAL_ONE( SIGNAL_T *ref ) | ||
558 | { | ||
559 | WakeConditionVariable( ref); | ||
560 | } | ||
561 | |||
562 | void SIGNAL_ALL( SIGNAL_T *ref ) | ||
563 | { | ||
564 | WakeAllConditionVariable( ref); | ||
565 | } | ||
566 | |||
567 | #endif // CONDITION_VARIABLE are available | ||
568 | |||
569 | #else // THREADAPI == THREADAPI_PTHREAD | ||
570 | // PThread (Linux, OS X, ...) | ||
571 | // | ||
572 | // On OS X, user processes seem to be able to change priorities. | ||
573 | // On Linux, SCHED_RR and su privileges are required.. !-( | ||
574 | // | ||
575 | #include <errno.h> | ||
576 | #include <sched.h> | ||
577 | |||
578 | # if (defined(__MINGW32__) || defined(__MINGW64__)) && defined pthread_attr_setschedpolicy | ||
579 | # if pthread_attr_setschedpolicy( A, S) == ENOTSUP | ||
580 | // from the mingw-w64 team: | ||
581 | // Well, we support pthread_setschedparam by which you can specify | ||
582 | // threading-policy. Nevertheless, yes we lack this function. In | ||
583 | // general its implementation is pretty much trivial, as on Win32 target | ||
584 | // just SCHED_OTHER can be supported. | ||
585 | #undef pthread_attr_setschedpolicy | ||
586 | static int pthread_attr_setschedpolicy( pthread_attr_t* attr, int policy) | ||
587 | { | ||
588 | if( policy != SCHED_OTHER) | ||
589 | { | ||
590 | return ENOTSUP; | ||
591 | } | ||
592 | return 0; | ||
593 | } | ||
594 | # endif // pthread_attr_setschedpolicy() | ||
595 | # endif // defined(__MINGW32__) || defined(__MINGW64__) | ||
596 | |||
597 | static void _PT_FAIL( int rc, const char *name, const char *file, uint_t line ) { | ||
598 | const char *why= (rc==EINVAL) ? "EINVAL" : | ||
599 | (rc==EBUSY) ? "EBUSY" : | ||
600 | (rc==EPERM) ? "EPERM" : | ||
601 | (rc==ENOMEM) ? "ENOMEM" : | ||
602 | (rc==ESRCH) ? "ESRCH" : | ||
603 | (rc==ENOTSUP) ? "ENOTSUP": | ||
604 | //... | ||
605 | "<UNKNOWN>"; | ||
606 | fprintf( stderr, "%s %d: %s failed, %d %s\n", file, line, name, rc, why ); | ||
607 | abort(); | ||
608 | } | ||
609 | #define PT_CALL( call ) { int rc= call; if (rc!=0) _PT_FAIL( rc, #call, __FILE__, __LINE__ ); } | ||
610 | // | ||
611 | void SIGNAL_INIT( SIGNAL_T *ref ) { | ||
612 | PT_CALL( pthread_cond_init(ref,NULL /*attr*/) ); | ||
613 | } | ||
614 | void SIGNAL_FREE( SIGNAL_T *ref ) { | ||
615 | PT_CALL( pthread_cond_destroy(ref) ); | ||
616 | } | ||
617 | // | ||
618 | /* | ||
619 | * Timeout is given as an absolute time since we may get spurious wakeups during | ||
620 | * a timed-out sleep: a Linda read on some other key, or simply because | ||
621 | * pthread condition variables can wake up spuriously. | ||
622 | */ | ||
623 | bool_t SIGNAL_WAIT( SIGNAL_T *ref, pthread_mutex_t *mu, time_d abs_secs ) { | ||
624 | if (abs_secs<0.0) { | ||
625 | PT_CALL( pthread_cond_wait( ref, mu ) ); // infinite | ||
626 | } else { | ||
627 | int rc; | ||
628 | struct timespec ts; | ||
629 | |||
630 | assert( abs_secs != 0.0 ); | ||
631 | prepare_timeout( &ts, abs_secs ); | ||
632 | |||
633 | rc= pthread_cond_timedwait( ref, mu, &ts ); | ||
634 | |||
635 | if (rc==ETIMEDOUT) return FALSE; | ||
636 | if (rc) { _PT_FAIL( rc, "pthread_cond_timedwait()", __FILE__, __LINE__ ); } | ||
637 | } | ||
638 | return TRUE; | ||
639 | } | ||
640 | // | ||
641 | void SIGNAL_ONE( SIGNAL_T *ref ) { | ||
642 | PT_CALL( pthread_cond_signal(ref) ); // wake up ONE (or no) waiting thread | ||
643 | } | ||
644 | // | ||
645 | void SIGNAL_ALL( SIGNAL_T *ref ) { | ||
646 | PT_CALL( pthread_cond_broadcast(ref) ); // wake up ALL waiting threads | ||
647 | } | ||
648 | |||
649 | // array of 7 thread priority values, hand-tuned by platform so that we offer a uniform [-3,+3] public priority range | ||
650 | static int const gs_prio_remap[] = | ||
651 | { | ||
652 | // NB: PThreads priority handling is about as twisty as one can get it | ||
653 | // (and then some). DON'T TRUST ANYTHING YOU READ ON THE NET!!! | ||
654 | |||
655 | //--- | ||
656 | // "Select the scheduling policy for the thread: one of SCHED_OTHER | ||
657 | // (regular, non-real-time scheduling), SCHED_RR (real-time, | ||
658 | // round-robin) or SCHED_FIFO (real-time, first-in first-out)." | ||
659 | // | ||
660 | // "Using the RR policy ensures that all threads having the same | ||
661 | // priority level will be scheduled equally, regardless of their activity." | ||
662 | // | ||
663 | // "For SCHED_FIFO and SCHED_RR, the only required member of the | ||
664 | // sched_param structure is the priority sched_priority. For SCHED_OTHER, | ||
665 | // the affected scheduling parameters are implementation-defined." | ||
666 | // | ||
667 | // "The priority of a thread is specified as a delta which is added to | ||
668 | // the priority of the process." | ||
669 | // | ||
670 | // ".. priority is an integer value, in the range from 1 to 127. | ||
671 | // 1 is the least-favored priority, 127 is the most-favored." | ||
672 | // | ||
673 | // "Priority level 0 cannot be used: it is reserved for the system." | ||
674 | // | ||
675 | // "When you use specify a priority of -99 in a call to | ||
676 | // pthread_setschedparam(), the priority of the target thread is | ||
677 | // lowered to the lowest possible value." | ||
678 | // | ||
679 | // ... | ||
680 | |||
681 | // ** CONCLUSION ** | ||
682 | // | ||
683 | // PThread priorities are _hugely_ system specific, and we need at | ||
684 | // least OS specific settings. Hopefully, Linuxes and OS X versions | ||
685 | // are uniform enough, among each other... | ||
686 | // | ||
687 | # if defined PLATFORM_OSX | ||
688 | // AK 10-Apr-07 (OS X PowerPC 10.4.9): | ||
689 | // | ||
690 | // With SCHED_RR, 26 seems to be the "normal" priority, where setting | ||
691 | // it does not seem to affect the order of threads processed. | ||
692 | // | ||
693 | // With SCHED_OTHER, the range 25..32 is normal (maybe the same 26, | ||
694 | // but the difference is not so clear with OTHER). | ||
695 | // | ||
696 | // 'sched_get_priority_min()' and '..max()' give 15, 47 as the | ||
697 | // priority limits. This could imply that user-mode applications won't | ||
698 | // be able to use values outside that range. | ||
699 | // | ||
700 | # define _PRIO_MODE SCHED_OTHER | ||
701 | |||
702 | // OS X 10.4.9 (PowerPC) gives ENOTSUP for process scope | ||
703 | //#define _PRIO_SCOPE PTHREAD_SCOPE_PROCESS | ||
704 | |||
705 | # define _PRIO_HI 32 // seems to work (_carefully_ picked!) | ||
706 | # define _PRIO_0 26 // detected | ||
707 | # define _PRIO_LO 1 // seems to work (tested) | ||
708 | |||
709 | # elif defined PLATFORM_LINUX | ||
710 | // (based on Ubuntu Linux 2.6.15 kernel) | ||
711 | // | ||
712 | // SCHED_OTHER is the default policy, but does not allow for priorities. | ||
713 | // SCHED_RR allows priorities, all of which (1..99) are higher than | ||
714 | // a thread with SCHED_OTHER policy. | ||
715 | // | ||
716 | // <http://kerneltrap.org/node/6080> | ||
717 | // <http://en.wikipedia.org/wiki/Native_POSIX_Thread_Library> | ||
718 | // <http://www.net.in.tum.de/~gregor/docs/pthread-scheduling.html> | ||
719 | // | ||
720 | // Manuals suggest checking #ifdef _POSIX_THREAD_PRIORITY_SCHEDULING, | ||
721 | // but even Ubuntu does not seem to define it. | ||
722 | // | ||
723 | # define _PRIO_MODE SCHED_RR | ||
724 | |||
725 | // NTLP 2.5: only system scope allowed (being the basic reason why | ||
726 | // root privileges are required..) | ||
727 | //#define _PRIO_SCOPE PTHREAD_SCOPE_PROCESS | ||
728 | |||
729 | # define _PRIO_HI 99 | ||
730 | # define _PRIO_0 50 | ||
731 | # define _PRIO_LO 1 | ||
732 | |||
733 | # elif defined(PLATFORM_BSD) | ||
734 | // | ||
735 | // <http://www.net.in.tum.de/~gregor/docs/pthread-scheduling.html> | ||
736 | // | ||
737 | // "When control over the thread scheduling is desired, then FreeBSD | ||
738 | // with the libpthread implementation is by far the best choice .." | ||
739 | // | ||
740 | # define _PRIO_MODE SCHED_OTHER | ||
741 | # define _PRIO_SCOPE PTHREAD_SCOPE_PROCESS | ||
742 | # define _PRIO_HI 31 | ||
743 | # define _PRIO_0 15 | ||
744 | # define _PRIO_LO 1 | ||
745 | |||
746 | # elif defined(PLATFORM_CYGWIN) | ||
747 | // | ||
748 | // TBD: Find right values for Cygwin | ||
749 | // | ||
750 | # elif defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC) | ||
751 | // any other value not supported by win32-pthread as of version 2.9.1 | ||
752 | # define _PRIO_MODE SCHED_OTHER | ||
753 | |||
754 | // PTHREAD_SCOPE_PROCESS not supported by win32-pthread as of version 2.9.1 | ||
755 | //#define _PRIO_SCOPE PTHREAD_SCOPE_SYSTEM // but do we need this at all to start with? | ||
756 | THREAD_PRIORITY_IDLE, THREAD_PRIORITY_LOWEST, THREAD_PRIORITY_BELOW_NORMAL, THREAD_PRIORITY_NORMAL, THREAD_PRIORITY_ABOVE_NORMAL, THREAD_PRIORITY_HIGHEST, THREAD_PRIORITY_TIME_CRITICAL | ||
757 | |||
758 | # else | ||
759 | # error "Unknown OS: not implemented!" | ||
760 | # endif | ||
761 | |||
762 | #if defined _PRIO_0 | ||
763 | # define _PRIO_AN (_PRIO_0 + ((_PRIO_HI-_PRIO_0)/2)) | ||
764 | # define _PRIO_BN (_PRIO_LO + ((_PRIO_0-_PRIO_LO)/2)) | ||
765 | |||
766 | _PRIO_LO, _PRIO_LO, _PRIO_BN, _PRIO_0, _PRIO_AN, _PRIO_HI, _PRIO_HI | ||
767 | #endif // _PRIO_0 | ||
768 | }; | ||
769 | |||
770 | static int select_prio(int prio /* -3..+3 */) | ||
771 | { | ||
772 | if (prio == THREAD_PRIO_DEFAULT) | ||
773 | prio = 0; | ||
774 | // prio range [-3,+3] was checked by the caller | ||
775 | return gs_prio_remap[prio + 3]; | ||
776 | } | ||
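Concretely (using the Linux constants defined just above: _PRIO_LO == 1, _PRIO_0 == 50, _PRIO_HI == 99, hence _PRIO_BN == 25 and _PRIO_AN == 74), the public [-3,+3] range maps as follows (illustrative only):

    int const lowest   = select_prio(-3);                  // gs_prio_remap[0] == _PRIO_LO == 1
    int const deflt    = select_prio(THREAD_PRIO_DEFAULT); // treated as 0 -> gs_prio_remap[3] == _PRIO_0 == 50
    int const elevated = select_prio(+1);                  // gs_prio_remap[4] == _PRIO_AN == 74
    int const highest  = select_prio(+3);                  // gs_prio_remap[6] == _PRIO_HI == 99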
777 | |||
778 | void THREAD_CREATE( THREAD_T* ref, THREAD_RETURN_T (*func)( void*), void* data, int prio /* -3..+3 */) | ||
779 | { | ||
780 | pthread_attr_t a; | ||
781 | bool_t const change_priority = | ||
782 | #ifdef PLATFORM_LINUX | ||
783 | sudo && // only root-privileged process can change priorities | ||
784 | #endif | ||
785 | (prio != THREAD_PRIO_DEFAULT); | ||
786 | |||
787 | PT_CALL( pthread_attr_init( &a)); | ||
788 | |||
789 | #ifndef PTHREAD_TIMEDJOIN | ||
790 | // We create a NON-JOINABLE thread. This is mainly due to the lack of | ||
791 | // 'pthread_timedjoin()', but does offer other benefits (such as earlier | ||
792 | // freeing of the thread's resources). | ||
793 | // | ||
794 | PT_CALL( pthread_attr_setdetachstate( &a, PTHREAD_CREATE_DETACHED)); | ||
795 | #endif // PTHREAD_TIMEDJOIN | ||
796 | |||
797 | // Use this to find a system's default stack size (DEBUG) | ||
798 | #if 0 | ||
799 | { | ||
800 | size_t n; | ||
801 | pthread_attr_getstacksize( &a, &n); | ||
802 | fprintf( stderr, "Getstack: %u\n", (unsigned int)n); | ||
803 | } | ||
804 | // 524288 on OS X | ||
805 | // 2097152 on Linux x86 (Ubuntu 7.04) | ||
806 | // 1048576 on FreeBSD 6.2 SMP i386 | ||
807 | #endif // 0 | ||
808 | |||
809 | #if defined _THREAD_STACK_SIZE && _THREAD_STACK_SIZE > 0 | ||
810 | PT_CALL( pthread_attr_setstacksize( &a, _THREAD_STACK_SIZE)); | ||
811 | #endif | ||
812 | |||
813 | if (change_priority) | ||
814 | { | ||
815 | struct sched_param sp; | ||
816 | // "The specified scheduling parameters are only used if the scheduling | ||
817 | // parameter inheritance attribute is PTHREAD_EXPLICIT_SCHED." | ||
818 | // | ||
819 | #if !defined __ANDROID__ || ( defined __ANDROID__ && __ANDROID_API__ >= 28 ) | ||
820 | PT_CALL( pthread_attr_setinheritsched( &a, PTHREAD_EXPLICIT_SCHED)); | ||
821 | #endif | ||
822 | |||
823 | #ifdef _PRIO_SCOPE | ||
824 | PT_CALL( pthread_attr_setscope( &a, _PRIO_SCOPE)); | ||
825 | #endif // _PRIO_SCOPE | ||
826 | |||
827 | PT_CALL( pthread_attr_setschedpolicy( &a, _PRIO_MODE)); | ||
828 | |||
829 | sp.sched_priority = select_prio(prio); | ||
830 | PT_CALL( pthread_attr_setschedparam( &a, &sp)); | ||
831 | } | ||
832 | |||
833 | //--- | ||
834 | // It seems that on OS X, _POSIX_THREAD_THREADS_MAX is some kind of system | ||
835 | // thread limit (not a userland thread limit). The actual limit for us is way higher. | ||
836 | // PTHREAD_THREADS_MAX is not defined (even though man page refers to it!) | ||
837 | // | ||
838 | # ifndef THREAD_CREATE_RETRIES_MAX | ||
839 | // Don't bother with retries; a failure is a failure | ||
840 | // | ||
841 | { | ||
842 | int rc = pthread_create( ref, &a, func, data); | ||
843 | if( rc) _PT_FAIL( rc, "pthread_create()", __FILE__, __LINE__ - 1); | ||
844 | } | ||
845 | # else | ||
846 | # error "This code deprecated" | ||
847 | /* | ||
848 | // Wait slightly if thread creation has exhausted the system | ||
849 | // | ||
850 | { uint_t retries; | ||
851 | for( retries=0; retries<THREAD_CREATE_RETRIES_MAX; retries++ ) { | ||
852 | |||
853 | int rc= pthread_create( ref, &a, func, data ); | ||
854 | // | ||
855 | // OS X / Linux: | ||
856 | // EAGAIN: ".. lacked the necessary resources to create | ||
857 | // another thread, or the system-imposed limit on the | ||
858 | // total number of threads in a process | ||
859 | // [PTHREAD_THREADS_MAX] would be exceeded." | ||
860 | // EINVAL: attr is invalid | ||
861 | // Linux: | ||
862 | // EPERM: no rights for given parameters or scheduling (no sudo) | ||
863 | // ENOMEM: (known to fail with this code, too - not listed in man) | ||
864 | |||
865 | if (rc==0) break; // ok! | ||
866 | |||
867 | // In practice, exhaustion seems to be coming from memory, not a | ||
868 | // maximum number of threads. Keep tuning... ;) | ||
869 | // | ||
870 | if (rc==EAGAIN) { | ||
871 | //fprintf( stderr, "Looping (retries=%d) ", retries ); // DEBUG | ||
872 | |||
873 | // Try again, later. | ||
874 | |||
875 | Yield(); | ||
876 | } else { | ||
877 | _PT_FAIL( rc, "pthread_create()", __FILE__, __LINE__ ); | ||
878 | } | ||
879 | } | ||
880 | } | ||
881 | */ | ||
882 | # endif | ||
883 | |||
884 | PT_CALL( pthread_attr_destroy( &a)); | ||
885 | } | ||
886 | |||
887 | |||
888 | void THREAD_SET_PRIORITY( int prio) | ||
889 | { | ||
890 | #ifdef PLATFORM_LINUX | ||
891 | if( sudo) // only root-privileged process can change priorities | ||
892 | #endif // PLATFORM_LINUX | ||
893 | { | ||
894 | struct sched_param sp; | ||
895 | // prio range [-3,+3] was checked by the caller | ||
896 | sp.sched_priority = gs_prio_remap[ prio + 3]; | ||
897 | PT_CALL( pthread_setschedparam( pthread_self(), _PRIO_MODE, &sp)); | ||
898 | } | ||
899 | } | ||
900 | |||
901 | void THREAD_SET_AFFINITY( unsigned int aff) | ||
902 | { | ||
903 | int bit = 0; | ||
904 | #ifdef __NetBSD__ | ||
905 | cpuset_t *cpuset = cpuset_create(); | ||
906 | if( cpuset == NULL) | ||
907 | _PT_FAIL( errno, "cpuset_create", __FILE__, __LINE__-2 ); | ||
908 | #define CPU_SET(b, s) cpuset_set(b, *(s)) | ||
909 | #else | ||
910 | cpu_set_t cpuset; | ||
911 | CPU_ZERO( &cpuset); | ||
912 | #endif | ||
913 | while( aff != 0) | ||
914 | { | ||
915 | if( aff & 1) | ||
916 | { | ||
917 | CPU_SET( bit, &cpuset); | ||
918 | } | ||
919 | ++ bit; | ||
920 | aff >>= 1; | ||
921 | } | ||
922 | #ifdef __ANDROID__ | ||
923 | PT_CALL( sched_setaffinity( pthread_self(), sizeof(cpu_set_t), &cpuset)); | ||
924 | #elif defined(__NetBSD__) | ||
925 | PT_CALL( pthread_setaffinity_np( pthread_self(), cpuset_size(cpuset), cpuset)); | ||
926 | cpuset_destroy( cpuset); | ||
927 | #else | ||
928 | PT_CALL( pthread_setaffinity_np( pthread_self(), sizeof(cpu_set_t), &cpuset)); | ||
929 | #endif | ||
930 | } | ||
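The aff argument is a plain bitmask where bit n selects CPU n; for example (illustrative only):

    // pins the calling thread to CPUs 0 and 2 (binary 101)
    THREAD_SET_AFFINITY(0x5u);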
931 | |||
932 | /* | ||
933 | * Wait for a thread to finish. | ||
934 | * | ||
935 | * 'mu_ref' is a lock we should use for the waiting; initially unlocked. | ||
936 | * Same lock as passed to THREAD_EXIT. | ||
937 | * | ||
938 | * Returns TRUE for a successful wait, FALSE if it timed out | ||
939 | */ | ||
940 | bool_t THREAD_WAIT( THREAD_T *ref, double secs , SIGNAL_T *signal_ref, MUTEX_T *mu_ref, volatile enum e_status *st_ref) | ||
941 | { | ||
942 | struct timespec ts_store; | ||
943 | const struct timespec *timeout= NULL; | ||
944 | bool_t done; | ||
945 | |||
946 | // Do timeout counting before the locks | ||
947 | // | ||
948 | #if THREADWAIT_METHOD == THREADWAIT_TIMEOUT | ||
949 | if (secs>=0.0) | ||
950 | #else // THREADWAIT_METHOD == THREADWAIT_CONDVAR | ||
951 | if (secs>0.0) | ||
952 | #endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR | ||
953 | { | ||
954 | prepare_timeout( &ts_store, now_secs()+secs ); | ||
955 | timeout= &ts_store; | ||
956 | } | ||
957 | |||
958 | #if THREADWAIT_METHOD == THREADWAIT_TIMEOUT | ||
959 | /* Thread is joinable | ||
960 | */ | ||
961 | if (!timeout) { | ||
962 | PT_CALL( pthread_join( *ref, NULL /*ignore exit value*/ )); | ||
963 | done= TRUE; | ||
964 | } else { | ||
965 | int rc= PTHREAD_TIMEDJOIN( *ref, NULL, timeout ); | ||
966 | if ((rc!=0) && (rc!=ETIMEDOUT)) { | ||
967 | _PT_FAIL( rc, "PTHREAD_TIMEDJOIN", __FILE__, __LINE__-2 ); | ||
968 | } | ||
969 | done= rc==0; | ||
970 | } | ||
971 | #else // THREADWAIT_METHOD == THREADWAIT_CONDVAR | ||
972 | /* Since we've set the thread up as PTHREAD_CREATE_DETACHED, we cannot | ||
973 | * join with it. Use the condition variable. | ||
974 | */ | ||
975 | (void) ref; // unused | ||
976 | MUTEX_LOCK( mu_ref ); | ||
977 | |||
978 | // 'secs'==0.0 does not need to wait, just take the current status | ||
979 | // within the 'mu_ref' locks | ||
980 | // | ||
981 | if (secs != 0.0) { | ||
982 | while( *st_ref < DONE ) { | ||
983 | if (!timeout) { | ||
984 | PT_CALL( pthread_cond_wait( signal_ref, mu_ref )); | ||
985 | } else { | ||
986 | int rc= pthread_cond_timedwait( signal_ref, mu_ref, timeout ); | ||
987 | if (rc==ETIMEDOUT) break; | ||
988 | if (rc!=0) _PT_FAIL( rc, "pthread_cond_timedwait", __FILE__, __LINE__-2 ); | ||
989 | } | ||
990 | } | ||
991 | } | ||
992 | done= *st_ref >= DONE; // DONE|ERROR_ST|CANCELLED | ||
993 | |||
994 | MUTEX_UNLOCK( mu_ref ); | ||
995 | #endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR | ||
996 | return done; | ||
997 | } | ||
998 | // | ||
999 | void THREAD_KILL( THREAD_T *ref ) { | ||
1000 | #ifdef __ANDROID__ | ||
1001 | __android_log_print(ANDROID_LOG_WARN, LOG_TAG, "Cannot kill thread!"); | ||
1002 | #else | ||
1003 | pthread_cancel( *ref ); | ||
1004 | #endif | ||
1005 | } | ||
1006 | |||
1007 | void THREAD_MAKE_ASYNCH_CANCELLABLE() | ||
1008 | { | ||
1009 | #ifdef __ANDROID__ | ||
1010 | __android_log_print(ANDROID_LOG_WARN, LOG_TAG, "Cannot make thread async cancellable!"); | ||
1011 | #else | ||
1012 | // that's the default, but just in case... | ||
1013 | pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); | ||
1014 | // we want cancellation to take effect immediately if possible, instead of waiting for a cancellation point (which is the default) | ||
1015 | pthread_setcanceltype( PTHREAD_CANCEL_ASYNCHRONOUS, NULL); | ||
1016 | #endif | ||
1017 | } | ||
1018 | |||
1019 | void THREAD_SETNAME( char const* _name) | ||
1020 | { | ||
1021 | // exact API to set the thread name is platform-dependent | ||
1022 | // if you need to fix the build, or if you know how to fill a hole, tell me (bnt.germain@gmail.com) so that I can submit the fix on GitHub. | ||
1023 | #if defined PLATFORM_BSD && !defined __NetBSD__ | ||
1024 | pthread_set_name_np( pthread_self(), _name); | ||
1025 | #elif defined PLATFORM_BSD && defined __NetBSD__ | ||
1026 | pthread_setname_np( pthread_self(), "%s", (void *)_name); | ||
1027 | #elif defined PLATFORM_LINUX | ||
1028 | #if LINUX_USE_PTHREAD_SETNAME_NP | ||
1029 | pthread_setname_np( pthread_self(), _name); | ||
1030 | #else // LINUX_USE_PTHREAD_SETNAME_NP | ||
1031 | prctl(PR_SET_NAME, _name, 0, 0, 0); | ||
1032 | #endif // LINUX_USE_PTHREAD_SETNAME_NP | ||
1033 | #elif defined PLATFORM_QNX || defined PLATFORM_CYGWIN | ||
1034 | pthread_setname_np( pthread_self(), _name); | ||
1035 | #elif defined PLATFORM_OSX | ||
1036 | pthread_setname_np(_name); | ||
1037 | #elif defined PLATFORM_WIN32 || defined PLATFORM_POCKETPC | ||
1038 | PT_CALL( pthread_setname_np( pthread_self(), _name)); | ||
1039 | #endif | ||
1040 | } | ||
1041 | #endif // THREADAPI == THREADAPI_PTHREAD | ||
diff --git a/src/threading.cpp b/src/threading.cpp
new file mode 100644
index 0000000..259693a
--- /dev/null
+++ b/src/threading.cpp
@@ -0,0 +1,448 @@ | |||
1 | /* | ||
2 | * THREADING.CPP Copyright (c) 2007-08, Asko Kauppi | ||
3 | * Copyright (C) 2009-24, Benoit Germain | ||
4 | * | ||
5 | * Lua Lanes OS threading specific code. | ||
6 | * | ||
7 | * References: | ||
8 | * <http://www.cse.wustl.edu/~schmidt/win32-cv-1.html> | ||
9 | */ | ||
10 | |||
11 | /* | ||
12 | =============================================================================== | ||
13 | |||
14 | Copyright (C) 2007-10 Asko Kauppi <akauppi@gmail.com> | ||
15 | Copyright (C) 2009-24, Benoit Germain <bnt.germain@gmail.com> | ||
16 | |||
17 | Permission is hereby granted, free of charge, to any person obtaining a copy | ||
18 | of this software and associated documentation files (the "Software"), to deal | ||
19 | in the Software without restriction, including without limitation the rights | ||
20 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||
21 | copies of the Software, and to permit persons to whom the Software is | ||
22 | furnished to do so, subject to the following conditions: | ||
23 | |||
24 | The above copyright notice and this permission notice shall be included in | ||
25 | all copies or substantial portions of the Software. | ||
26 | |||
27 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
28 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
29 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
30 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
31 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
32 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||
33 | THE SOFTWARE. | ||
34 | |||
35 | =============================================================================== | ||
36 | */ | ||
37 | #if defined(__linux__) | ||
38 | |||
39 | # ifndef _GNU_SOURCE // definition by the makefile can cause a redefinition error | ||
40 | # define _GNU_SOURCE // must be defined before any include | ||
41 | # endif // _GNU_SOURCE | ||
42 | |||
43 | # ifdef __ANDROID__ | ||
44 | # include <android/log.h> | ||
45 | # define LOG_TAG "LuaLanes" | ||
46 | # endif // __ANDROID__ | ||
47 | |||
48 | #endif // __linux__ | ||
49 | |||
50 | #include "threading.h" | ||
51 | |||
52 | #if !defined( PLATFORM_XBOX) && !defined( PLATFORM_WIN32) && !defined( PLATFORM_POCKETPC) | ||
53 | # include <sys/time.h> | ||
54 | #endif // non-WIN32 timing | ||
55 | |||
56 | |||
57 | #if defined(PLATFORM_LINUX) || defined(PLATFORM_CYGWIN) | ||
58 | # include <sys/types.h> | ||
59 | # include <unistd.h> | ||
60 | #endif | ||
61 | |||
62 | #ifdef PLATFORM_OSX | ||
63 | # include "threading_osx.h" | ||
64 | #endif | ||
65 | |||
66 | /* Linux with an older glibc (such as Debian) doesn't have pthread_setname_np, but has prctl | ||
67 | */ | ||
68 | #if defined PLATFORM_LINUX | ||
69 | #if defined __GNU_LIBRARY__ && __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 12 | ||
70 | #define LINUX_USE_PTHREAD_SETNAME_NP 1 | ||
71 | #else // glibc without pthread_setname_np | ||
72 | #include <sys/prctl.h> | ||
73 | #define LINUX_USE_PTHREAD_SETNAME_NP 0 | ||
74 | #endif // glibc without pthread_setname_np | ||
75 | #endif // PLATFORM_LINUX | ||
76 | |||
77 | #ifdef _MSC_VER | ||
78 | // ".. selected for automatic inline expansion" (/O2 option) | ||
79 | # pragma warning( disable : 4711 ) | ||
80 | // ".. type cast from function pointer ... to data pointer" | ||
81 | # pragma warning( disable : 4054 ) | ||
82 | #endif | ||
83 | |||
84 | /* | ||
85 | * FAIL is for unexpected API return values - essentially programming | ||
86 | * error in _this_ code. | ||
87 | */ | ||
88 | #if defined(PLATFORM_XBOX) || defined(PLATFORM_WIN32) || defined(PLATFORM_POCKETPC) | ||
89 | static void FAIL(char const* funcname, int rc) | ||
90 | { | ||
91 | #if defined(PLATFORM_XBOX) | ||
92 | fprintf(stderr, "%s() failed! (%d)\n", funcname, rc); | ||
93 | #else // PLATFORM_XBOX | ||
94 | char buf[256]; | ||
95 | FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, nullptr, rc, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), buf, 256, nullptr); | ||
96 | fprintf(stderr, "%s() failed! [GetLastError() -> %d] '%s'", funcname, rc, buf); | ||
97 | #endif // PLATFORM_XBOX | ||
98 | #ifdef _MSC_VER | ||
99 | __debugbreak(); // give a chance to the debugger! | ||
100 | #endif // _MSC_VER | ||
101 | abort(); | ||
102 | } | ||
103 | #endif // win32 build | ||
104 | |||
105 | |||
106 | /*---=== Threading ===---*/ | ||
107 | |||
108 | // ################################################################################################## | ||
109 | // ################################################################################################## | ||
110 | #if THREADAPI == THREADAPI_WINDOWS | ||
111 | |||
112 | static int const gs_prio_remap[] = | ||
113 | { | ||
114 | THREAD_PRIORITY_IDLE, | ||
115 | THREAD_PRIORITY_LOWEST, | ||
116 | THREAD_PRIORITY_BELOW_NORMAL, | ||
117 | THREAD_PRIORITY_NORMAL, | ||
118 | THREAD_PRIORITY_ABOVE_NORMAL, | ||
119 | THREAD_PRIORITY_HIGHEST, | ||
120 | THREAD_PRIORITY_TIME_CRITICAL | ||
121 | }; | ||
122 | |||
123 | // ############################################################################################### | ||
124 | |||
125 | void THREAD_SET_PRIORITY(int prio_, [[maybe_unused]] bool sudo_) | ||
126 | { | ||
127 | // prio range [-3,+3] was checked by the caller | ||
128 | if (!SetThreadPriority(GetCurrentThread(), gs_prio_remap[prio_ + 3])) | ||
129 | { | ||
130 | FAIL("THREAD_SET_PRIORITY", GetLastError()); | ||
131 | } | ||
132 | } | ||
133 | |||
134 | // ############################################################################################### | ||
135 | |||
136 | void JTHREAD_SET_PRIORITY(std::jthread& thread_, int prio_, [[maybe_unused]] bool sudo_) | ||
137 | { | ||
138 | // prio range [-3,+3] was checked by the caller | ||
139 | if (!SetThreadPriority(thread_.native_handle(), gs_prio_remap[prio_ + 3])) | ||
140 | { | ||
141 | FAIL("JTHREAD_SET_PRIORITY", GetLastError()); | ||
142 | } | ||
143 | } | ||
144 | |||
145 | // ############################################################################################### | ||
146 | |||
147 | void THREAD_SET_AFFINITY(unsigned int aff) | ||
148 | { | ||
149 | if (!SetThreadAffinityMask(GetCurrentThread(), aff)) | ||
150 | { | ||
151 | FAIL("THREAD_SET_AFFINITY", GetLastError()); | ||
152 | } | ||
153 | } | ||
154 | |||
155 | // ############################################################################################### | ||
156 | |||
157 | #if !defined __GNUC__ | ||
158 | //see http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx | ||
159 | #define MS_VC_EXCEPTION 0x406D1388 | ||
160 | #pragma pack(push,8) | ||
161 | typedef struct tagTHREADNAME_INFO | ||
162 | { | ||
163 | DWORD dwType; // Must be 0x1000. | ||
164 | LPCSTR szName; // Pointer to name (in user addr space). | ||
165 | DWORD dwThreadID; // Thread ID (-1=caller thread). | ||
166 | DWORD dwFlags; // Reserved for future use, must be zero. | ||
167 | } THREADNAME_INFO; | ||
168 | #pragma pack(pop) | ||
169 | #endif // !__GNUC__ | ||
170 | |||
171 | void THREAD_SETNAME(char const* _name) | ||
172 | { | ||
173 | #if !defined __GNUC__ | ||
174 | THREADNAME_INFO info; | ||
175 | info.dwType = 0x1000; | ||
176 | info.szName = _name; | ||
177 | info.dwThreadID = GetCurrentThreadId(); | ||
178 | info.dwFlags = 0; | ||
179 | |||
180 | __try | ||
181 | { | ||
182 | RaiseException( MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(ULONG_PTR), (ULONG_PTR*)&info ); | ||
183 | } | ||
184 | __except(EXCEPTION_EXECUTE_HANDLER) | ||
185 | { | ||
186 | } | ||
187 | #endif // !__GNUC__ | ||
188 | } | ||
189 | |||
190 | // ################################################################################################## | ||
191 | // ################################################################################################## | ||
192 | #else // THREADAPI == THREADAPI_PTHREAD | ||
193 | // ################################################################################################## | ||
194 | // ################################################################################################## | ||
195 | |||
196 | // PThread (Linux, OS X, ...) | ||
197 | // | ||
198 | // On OS X, user processes seem to be able to change priorities. | ||
199 | // On Linux, SCHED_RR and su privileges are required.. !-( | ||
200 | // | ||
201 | #include <errno.h> | ||
202 | #include <sched.h> | ||
203 | |||
204 | #if (defined(__MINGW32__) || defined(__MINGW64__)) && defined pthread_attr_setschedpolicy | ||
205 | #if pthread_attr_setschedpolicy(A, S) == ENOTSUP | ||
206 | // from the mingw-w64 team: | ||
207 | // Well, we support pthread_setschedparam by which you can specify | ||
208 | // threading-policy. Nevertheless, yes we lack this function. In | ||
209 | // general its implementation is pretty much trivial, as on Win32 target | ||
210 | // just SCHED_OTHER can be supported. | ||
211 | #undef pthread_attr_setschedpolicy | ||
212 | [[nodiscard]] static int pthread_attr_setschedpolicy(pthread_attr_t* attr, int policy) | ||
213 | { | ||
214 | if (policy != SCHED_OTHER) | ||
215 | { | ||
216 | return ENOTSUP; | ||
217 | } | ||
218 | return 0; | ||
219 | } | ||
220 | #endif // pthread_attr_setschedpolicy() | ||
221 | #endif // defined(__MINGW32__) || defined(__MINGW64__) | ||
222 | |||
223 | static void _PT_FAIL( int rc, const char *name, const char *file, int line ) | ||
224 | { | ||
225 | const char *why= (rc==EINVAL) ? "EINVAL" : | ||
226 | (rc==EBUSY) ? "EBUSY" : | ||
227 | (rc==EPERM) ? "EPERM" : | ||
228 | (rc==ENOMEM) ? "ENOMEM" : | ||
229 | (rc==ESRCH) ? "ESRCH" : | ||
230 | (rc==ENOTSUP) ? "ENOTSUP": | ||
231 | //... | ||
232 | "<UNKNOWN>"; | ||
233 | fprintf( stderr, "%s %d: %s failed, %d %s\n", file, line, name, rc, why ); | ||
234 | abort(); | ||
235 | } | ||
236 | #define PT_CALL( call ) { int rc= call; if (rc!=0) _PT_FAIL( rc, #call, __FILE__, __LINE__ ); } | ||
237 | |||
238 | // array of 7 thread priority values, hand-tuned by platform so that we offer a uniform [-3,+3] public priority range | ||
239 | static int const gs_prio_remap[] = | ||
240 | { | ||
241 | // NB: PThreads priority handling is about as twisty as one can get it | ||
242 | // (and then some). DON'T TRUST ANYTHING YOU READ ON THE NET!!! | ||
243 | |||
244 | //--- | ||
245 | // "Select the scheduling policy for the thread: one of SCHED_OTHER | ||
246 | // (regular, non-real-time scheduling), SCHED_RR (real-time, | ||
247 | // round-robin) or SCHED_FIFO (real-time, first-in first-out)." | ||
248 | // | ||
249 | // "Using the RR policy ensures that all threads having the same | ||
250 | // priority level will be scheduled equally, regardless of their activity." | ||
251 | // | ||
252 | // "For SCHED_FIFO and SCHED_RR, the only required member of the | ||
253 | // sched_param structure is the priority sched_priority. For SCHED_OTHER, | ||
254 | // the affected scheduling parameters are implementation-defined." | ||
255 | // | ||
256 | // "The priority of a thread is specified as a delta which is added to | ||
257 | // the priority of the process." | ||
258 | // | ||
259 | // ".. priority is an integer value, in the range from 1 to 127. | ||
260 | // 1 is the least-favored priority, 127 is the most-favored." | ||
261 | // | ||
262 | // "Priority level 0 cannot be used: it is reserved for the system." | ||
263 | // | ||
264 | // "When you use specify a priority of -99 in a call to | ||
265 | // pthread_setschedparam(), the priority of the target thread is | ||
266 | // lowered to the lowest possible value." | ||
267 | // | ||
268 | // ... | ||
269 | |||
270 | // ** CONCLUSION ** | ||
271 | // | ||
272 | // PThread priorities are _hugely_ system specific, and we need at | ||
273 | // least OS-specific settings. Hopefully, Linux and OS X versions | ||
274 | // are uniform enough among themselves... | ||
275 | // | ||
276 | # if defined PLATFORM_OSX | ||
277 | // AK 10-Apr-07 (OS X PowerPC 10.4.9): | ||
278 | // | ||
279 | // With SCHED_RR, 26 seems to be the "normal" priority, where setting | ||
280 | // it does not seem to affect the order of threads processed. | ||
281 | // | ||
282 | // With SCHED_OTHER, the range 25..32 is normal (maybe the same 26, | ||
283 | // but the difference is not so clear with OTHER). | ||
284 | // | ||
285 | // 'sched_get_priority_min()' and '..max()' give 15, 47 as the | ||
286 | // priority limits. This could imply user-mode applications won't | ||
287 | // be able to use values outside of that range. | ||
288 | // | ||
289 | # define _PRIO_MODE SCHED_OTHER | ||
290 | |||
291 | // OS X 10.4.9 (PowerPC) gives ENOTSUP for process scope | ||
292 | //#define _PRIO_SCOPE PTHREAD_SCOPE_PROCESS | ||
293 | |||
294 | # define _PRIO_HI 32 // seems to work (_carefully_ picked!) | ||
295 | # define _PRIO_0 26 // detected | ||
296 | # define _PRIO_LO 1 // seems to work (tested) | ||
297 | |||
298 | # elif defined PLATFORM_LINUX | ||
299 | // (based on Ubuntu Linux 2.6.15 kernel) | ||
300 | // | ||
301 | // SCHED_OTHER is the default policy, but does not allow for priorities. | ||
302 | // SCHED_RR allows priorities, all of which (1..99) are higher than | ||
303 | // a thread with SCHED_OTHER policy. | ||
304 | // | ||
305 | // <http://kerneltrap.org/node/6080> | ||
306 | // <http://en.wikipedia.org/wiki/Native_POSIX_Thread_Library> | ||
307 | // <http://www.net.in.tum.de/~gregor/docs/pthread-scheduling.html> | ||
308 | // | ||
309 | // Manuals suggest checking #ifdef _POSIX_THREAD_PRIORITY_SCHEDULING, | ||
310 | // but even Ubuntu does not seem to define it. | ||
311 | // | ||
312 | # define _PRIO_MODE SCHED_RR | ||
313 | |||
314 | // NTLP 2.5: only system scope allowed (being the basic reason why | ||
315 | // root privileges are required..) | ||
316 | //#define _PRIO_SCOPE PTHREAD_SCOPE_PROCESS | ||
317 | |||
318 | # define _PRIO_HI 99 | ||
319 | # define _PRIO_0 50 | ||
320 | # define _PRIO_LO 1 | ||
321 | |||
322 | # elif defined(PLATFORM_BSD) | ||
323 | // | ||
324 | // <http://www.net.in.tum.de/~gregor/docs/pthread-scheduling.html> | ||
325 | // | ||
326 | // "When control over the thread scheduling is desired, then FreeBSD | ||
327 | // with the libpthread implementation is by far the best choice .." | ||
328 | // | ||
329 | # define _PRIO_MODE SCHED_OTHER | ||
330 | # define _PRIO_SCOPE PTHREAD_SCOPE_PROCESS | ||
331 | # define _PRIO_HI 31 | ||
332 | # define _PRIO_0 15 | ||
333 | # define _PRIO_LO 1 | ||
334 | |||
335 | # elif defined(PLATFORM_CYGWIN) | ||
336 | // | ||
337 | // TBD: Find right values for Cygwin | ||
338 | // | ||
339 | # else | ||
340 | # error "Unknown OS: not implemented!" | ||
341 | # endif | ||
342 | |||
343 | #if defined _PRIO_0 | ||
344 | # define _PRIO_AN (_PRIO_0 + ((_PRIO_HI-_PRIO_0)/2)) | ||
345 | # define _PRIO_BN (_PRIO_LO + ((_PRIO_0-_PRIO_LO)/2)) | ||
346 | |||
347 | _PRIO_LO, _PRIO_LO, _PRIO_BN, _PRIO_0, _PRIO_AN, _PRIO_HI, _PRIO_HI | ||
348 | #endif // _PRIO_0 | ||
349 | }; | ||
350 | |||
351 | [[nodiscard]] static int select_prio(int prio /* -3..+3 */) | ||
352 | { | ||
353 | if (prio == THREAD_PRIO_DEFAULT) | ||
354 | prio = 0; | ||
355 | // prio range [-3,+3] was checked by the caller | ||
356 | return gs_prio_remap[prio + 3]; | ||
357 | } | ||
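As a worked example of the remap above, using the Linux constants defined in this file (descriptive, not normative): _PRIO_AN = 50 + (99 - 50) / 2 = 74 and _PRIO_BN = 1 + (50 - 1) / 2 = 25, so the public range -3..+3 maps to {1, 1, 25, 50, 74, 99, 99}:

    int const p1 = select_prio(+1);                  // gs_prio_remap[4] == _PRIO_AN == 74 on Linux
    int const p0 = select_prio(THREAD_PRIO_DEFAULT); // treated as 0 -> _PRIO_0 == 50 on Linux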
358 | |||
359 | void THREAD_SET_PRIORITY(int prio_, [[maybe_unused]] bool sudo_) | ||
360 | { | ||
361 | #ifdef PLATFORM_LINUX | ||
362 | if (!sudo_) // only root-privileged process can change priorities | ||
363 | return; | ||
364 | #endif // PLATFORM_LINUX | ||
365 | |||
366 | struct sched_param sp; | ||
367 | // prio range [-3,+3] was checked by the caller | ||
368 | sp.sched_priority = gs_prio_remap[prio_ + 3]; | ||
369 | PT_CALL(pthread_setschedparam(pthread_self(), _PRIO_MODE, &sp)); | ||
370 | } | ||
371 | |||
372 | // ################################################################################################# | ||
373 | |||
374 | void JTHREAD_SET_PRIORITY(std::jthread& thread_, int prio_, [[maybe_unused]] bool sudo_) | ||
375 | { | ||
376 | #ifdef PLATFORM_LINUX | ||
377 | if (!sudo_) // only root-privileged process can change priorities | ||
378 | return; | ||
379 | #endif // PLATFORM_LINUX | ||
380 | |||
381 | struct sched_param sp; | ||
382 | // prio range [-3,+3] was checked by the caller | ||
383 | sp.sched_priority = gs_prio_remap[prio_ + 3]; | ||
384 | PT_CALL(pthread_setschedparam(static_cast<pthread_t>(thread_.native_handle()), _PRIO_MODE, &sp)); | ||
385 | } | ||
386 | |||
387 | // ################################################################################################# | ||
388 | |||
389 | void THREAD_SET_AFFINITY(unsigned int aff) | ||
390 | { | ||
391 | int bit = 0; | ||
392 | #ifdef __NetBSD__ | ||
393 | cpuset_t* cpuset = cpuset_create(); | ||
394 | if (cpuset == nullptr) | ||
395 | _PT_FAIL(errno, "cpuset_create", __FILE__, __LINE__ - 2); | ||
396 | #define CPU_SET(b, s) cpuset_set(b, *(s)) | ||
397 | #else | ||
398 | cpu_set_t cpuset; | ||
399 | CPU_ZERO(&cpuset); | ||
400 | #endif | ||
401 | while (aff != 0) | ||
402 | { | ||
403 | if (aff & 1) | ||
404 | { | ||
405 | CPU_SET(bit, &cpuset); | ||
406 | } | ||
407 | ++bit; | ||
408 | aff >>= 1; | ||
409 | } | ||
410 | #ifdef __ANDROID__ | ||
411 | PT_CALL(sched_setaffinity(pthread_self(), sizeof(cpu_set_t), &cpuset)); | ||
412 | #elif defined(__NetBSD__) | ||
413 | PT_CALL(pthread_setaffinity_np(pthread_self(), cpuset_size(cpuset), cpuset)); | ||
414 | cpuset_destroy(cpuset); | ||
415 | #else | ||
416 | PT_CALL(pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset)); | ||
417 | #endif | ||
418 | } | ||
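Hedged usage sketch: the affinity argument is a plain bitmask where bit N selects CPU core N, for example:

    THREAD_SET_AFFINITY(0x5); // binary 0101: pin the calling thread to cores 0 and 2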
419 | |||
420 | // ################################################################################################# | ||
421 | |||
422 | void THREAD_SETNAME(char const* _name) | ||
423 | { | ||
424 | // exact API to set the thread name is platform-dependent | ||
425 | // if you need to fix the build, or if you know how to fill a hole, tell me (bnt.germain@gmail.com) so that I can submit the fix on GitHub. | ||
426 | #if defined PLATFORM_BSD && !defined __NetBSD__ | ||
427 | pthread_set_name_np(pthread_self(), _name); | ||
428 | #elif defined PLATFORM_BSD && defined __NetBSD__ | ||
429 | pthread_setname_np(pthread_self(), "%s", (void*) _name); | ||
430 | #elif defined PLATFORM_LINUX | ||
431 | #if LINUX_USE_PTHREAD_SETNAME_NP | ||
432 | pthread_setname_np(pthread_self(), _name); | ||
433 | #else // LINUX_USE_PTHREAD_SETNAME_NP | ||
434 | prctl(PR_SET_NAME, _name, 0, 0, 0); | ||
435 | #endif // LINUX_USE_PTHREAD_SETNAME_NP | ||
436 | #elif defined PLATFORM_QNX || defined PLATFORM_CYGWIN | ||
437 | pthread_setname_np(pthread_self(), _name); | ||
438 | #elif defined PLATFORM_OSX | ||
439 | pthread_setname_np(_name); | ||
440 | #else | ||
441 | fprintf(stderr, "THREAD_SETNAME: unsupported platform\n"); | ||
442 | abort(); | ||
443 | #endif | ||
444 | } | ||
445 | |||
446 | #endif // THREADAPI == THREADAPI_PTHREAD | ||
447 | // ################################################################################################# | ||
448 | // ################################################################################################# | ||
diff --git a/src/threading.h b/src/threading.h
index b1706ac..fc35730 100644
--- a/src/threading.h
+++ b/src/threading.h
@@ -1,37 +1,13 @@ | |||
1 | /* | 1 | #pragma once |
2 | * THREADING.H | ||
3 | */ | ||
4 | #ifndef __threading_h__ | ||
5 | #define __threading_h__ 1 | ||
6 | 2 | ||
7 | /* | ||
8 | * win32-pthread: | ||
9 | * define HAVE_WIN32_PTHREAD and PTW32_INCLUDE_WINDOWS_H in your project configuration when building for win32-pthread. | ||
10 | * link against pthreadVC2.lib, and of course have pthreadVC2.dll somewhere in your path. | ||
11 | */ | ||
12 | #include "platform.h" | 3 | #include "platform.h" |
13 | 4 | ||
14 | typedef int bool_t; | 5 | #include <thread> |
15 | #ifndef FALSE | ||
16 | # define FALSE 0 | ||
17 | # define TRUE 1 | ||
18 | #endif | ||
19 | |||
20 | typedef unsigned int uint_t; | ||
21 | |||
22 | #include <time.h> | ||
23 | |||
24 | /* Note: ERROR is a defined entity on Win32 | ||
25 | PENDING: The Lua VM hasn't done anything yet. | ||
26 | RUNNING, WAITING: Thread is inside the Lua VM. If the thread is forcefully stopped, we can't lua_close() the Lua State. | ||
27 | DONE, ERROR_ST, CANCELLED: Thread execution is outside the Lua VM. It can be lua_close()d. | ||
28 | */ | ||
29 | enum e_status { PENDING, RUNNING, WAITING, DONE, ERROR_ST, CANCELLED }; | ||
30 | 6 | ||
31 | #define THREADAPI_WINDOWS 1 | 7 | #define THREADAPI_WINDOWS 1 |
32 | #define THREADAPI_PTHREAD 2 | 8 | #define THREADAPI_PTHREAD 2 |
33 | 9 | ||
34 | #if( defined( PLATFORM_XBOX) || defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC)) && !defined( HAVE_WIN32_PTHREAD) | 10 | #if( defined( PLATFORM_XBOX) || defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC)) |
35 | //#pragma message ( "THREADAPI_WINDOWS" ) | 11 | //#pragma message ( "THREADAPI_WINDOWS" ) |
36 | #define THREADAPI THREADAPI_WINDOWS | 12 | #define THREADAPI THREADAPI_WINDOWS |
37 | #else // (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) | 13 | #else // (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) |
@@ -39,22 +15,24 @@ enum e_status { PENDING, RUNNING, WAITING, DONE, ERROR_ST, CANCELLED }; | |||
39 | #define THREADAPI THREADAPI_PTHREAD | 15 | #define THREADAPI THREADAPI_PTHREAD |
40 | #endif // (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) | 16 | #endif // (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) |
41 | 17 | ||
42 | /*---=== Locks & Signals ===--- | 18 | static constexpr int THREAD_PRIO_DEFAULT{ -999 }; |
43 | */ | ||
44 | 19 | ||
20 | // ################################################################################################## | ||
21 | // ################################################################################################## | ||
45 | #if THREADAPI == THREADAPI_WINDOWS | 22 | #if THREADAPI == THREADAPI_WINDOWS |
46 | #if defined( PLATFORM_XBOX) | 23 | |
47 | #include <xtl.h> | 24 | #if defined(PLATFORM_XBOX) |
48 | #else // !PLATFORM_XBOX | 25 | #include <xtl.h> |
49 | #define WIN32_LEAN_AND_MEAN | 26 | #else // !PLATFORM_XBOX |
50 | // CONDITION_VARIABLE needs version 0x0600+ | 27 | #define WIN32_LEAN_AND_MEAN |
51 | // _WIN32_WINNT value is already defined by MinGW, but not by MSVC | 28 | // CONDITION_VARIABLE needs version 0x0600+ |
52 | #ifndef _WIN32_WINNT | 29 | // _WIN32_WINNT value is already defined by MinGW, but not by MSVC |
53 | #define _WIN32_WINNT 0x0600 | 30 | #ifndef _WIN32_WINNT |
54 | #endif // _WIN32_WINNT | 31 | #define _WIN32_WINNT 0x0600 |
55 | #include <windows.h> | 32 | #endif // _WIN32_WINNT |
56 | #endif // !PLATFORM_XBOX | 33 | #include <windows.h> |
57 | #include <process.h> | 34 | #endif // !PLATFORM_XBOX |
35 | #include <process.h> | ||
58 | 36 | ||
59 | /* | 37 | /* |
60 | #define XSTR(x) STR(x) | 38 | #define XSTR(x) STR(x) |
@@ -62,204 +40,36 @@ enum e_status { PENDING, RUNNING, WAITING, DONE, ERROR_ST, CANCELLED }; | |||
62 | #pragma message( "The value of _WIN32_WINNT: " XSTR(_WIN32_WINNT)) | 40 | #pragma message( "The value of _WIN32_WINNT: " XSTR(_WIN32_WINNT)) |
63 | */ | 41 | */ |
64 | 42 | ||
65 | // MSDN: http://msdn2.microsoft.com/en-us/library/ms684254.aspx | 43 | static constexpr int THREAD_PRIO_MIN{ -3 }; |
66 | // | 44 | static constexpr int THREAD_PRIO_MAX{ +3 }; |
67 | // CRITICAL_SECTION can be used for simple code protection. Mutexes are | ||
68 | // needed for use with the SIGNAL system. | ||
69 | // | ||
70 | |||
71 | #if _WIN32_WINNT < 0x0600 // CONDITION_VARIABLE aren't available, use a signal | ||
72 | |||
73 | typedef struct | ||
74 | { | ||
75 | CRITICAL_SECTION signalCS; | ||
76 | CRITICAL_SECTION countCS; | ||
77 | HANDLE waitEvent; | ||
78 | HANDLE waitDoneEvent; | ||
79 | LONG waitersCount; | ||
80 | } SIGNAL_T; | ||
81 | |||
82 | |||
83 | #define MUTEX_T HANDLE | ||
84 | void MUTEX_INIT( MUTEX_T* ref); | ||
85 | void MUTEX_FREE( MUTEX_T* ref); | ||
86 | void MUTEX_LOCK( MUTEX_T* ref); | ||
87 | void MUTEX_UNLOCK( MUTEX_T* ref); | ||
88 | |||
89 | #else // CONDITION_VARIABLE are available, use them | ||
90 | |||
91 | #define SIGNAL_T CONDITION_VARIABLE | ||
92 | #define MUTEX_T CRITICAL_SECTION | ||
93 | #define MUTEX_INIT( ref) InitializeCriticalSection( ref) | ||
94 | #define MUTEX_FREE( ref) DeleteCriticalSection( ref) | ||
95 | #define MUTEX_LOCK( ref) EnterCriticalSection( ref) | ||
96 | #define MUTEX_UNLOCK( ref) LeaveCriticalSection( ref) | ||
97 | |||
98 | #endif // CONDITION_VARIABLE are available | ||
99 | |||
100 | #define MUTEX_RECURSIVE_INIT(ref) MUTEX_INIT(ref) /* always recursive in Win32 */ | ||
101 | |||
102 | typedef unsigned int THREAD_RETURN_T; | ||
103 | |||
104 | #define YIELD() Sleep(0) | ||
105 | #define THREAD_CALLCONV __stdcall | ||
106 | #else // THREADAPI == THREADAPI_PTHREAD | ||
107 | // PThread (Linux, OS X, ...) | ||
108 | |||
109 | // looks like some MinGW installations don't support PTW32_INCLUDE_WINDOWS_H, so let's include it ourselves, just in case | ||
110 | #if defined(PLATFORM_WIN32) | ||
111 | #include <windows.h> | ||
112 | #endif // PLATFORM_WIN32 | ||
113 | #include <pthread.h> | ||
114 | |||
115 | #ifdef PLATFORM_LINUX | ||
116 | #if defined(__GLIBC__) | ||
117 | # define _MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP | ||
118 | #else | ||
119 | # define _MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE | ||
120 | #endif | ||
121 | #else | ||
122 | /* OS X, ... */ | ||
123 | # define _MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE | ||
124 | #endif | ||
125 | |||
126 | #define MUTEX_T pthread_mutex_t | ||
127 | #define MUTEX_INIT(ref) pthread_mutex_init(ref,NULL) | ||
128 | #define MUTEX_RECURSIVE_INIT(ref) \ | ||
129 | { pthread_mutexattr_t a; pthread_mutexattr_init( &a ); \ | ||
130 | pthread_mutexattr_settype( &a, _MUTEX_RECURSIVE ); \ | ||
131 | pthread_mutex_init(ref,&a); pthread_mutexattr_destroy( &a ); \ | ||
132 | } | ||
133 | #define MUTEX_FREE(ref) pthread_mutex_destroy(ref) | ||
134 | #define MUTEX_LOCK(ref) pthread_mutex_lock(ref) | ||
135 | #define MUTEX_UNLOCK(ref) pthread_mutex_unlock(ref) | ||
136 | |||
137 | typedef void * THREAD_RETURN_T; | ||
138 | |||
139 | typedef pthread_cond_t SIGNAL_T; | ||
140 | |||
141 | void SIGNAL_ONE( SIGNAL_T *ref ); | ||
142 | |||
143 | // Yield is non-portable: | ||
144 | // | ||
145 | // OS X 10.4.8/9 has pthread_yield_np() | ||
146 | // Linux 2.4 has pthread_yield() if _GNU_SOURCE is #defined | ||
147 | // FreeBSD 6.2 has pthread_yield() | ||
148 | // ... | ||
149 | // | ||
150 | #if defined( PLATFORM_OSX) | ||
151 | #define YIELD() pthread_yield_np() | ||
152 | #else | ||
153 | #define YIELD() sched_yield() | ||
154 | #endif | ||
155 | #define THREAD_CALLCONV | ||
156 | #endif //THREADAPI == THREADAPI_PTHREAD | ||
157 | |||
158 | void SIGNAL_INIT( SIGNAL_T *ref ); | ||
159 | void SIGNAL_FREE( SIGNAL_T *ref ); | ||
160 | void SIGNAL_ALL( SIGNAL_T *ref ); | ||
161 | |||
162 | /* | ||
163 | * 'time_d': <0.0 for no timeout | ||
164 | * 0.0 for instant check | ||
165 | * >0.0 absolute timeout in secs + ms | ||
166 | */ | ||
167 | typedef double time_d; | ||
168 | time_d now_secs(void); | ||
169 | |||
170 | time_d SIGNAL_TIMEOUT_PREPARE( double rel_secs ); | ||
171 | |||
172 | bool_t SIGNAL_WAIT( SIGNAL_T *ref, MUTEX_T *mu, time_d timeout ); | ||
173 | |||
174 | |||
175 | /*---=== Threading ===--- | ||
176 | */ | ||
177 | |||
178 | #define THREAD_PRIO_DEFAULT (-999) | ||
179 | |||
180 | #if THREADAPI == THREADAPI_WINDOWS | ||
181 | |||
182 | typedef HANDLE THREAD_T; | ||
183 | # define THREAD_ISNULL( _h) (_h == 0) | ||
184 | void THREAD_CREATE( THREAD_T* ref, THREAD_RETURN_T (__stdcall *func)( void*), void* data, int prio /* -3..+3 */); | ||
185 | |||
186 | # define THREAD_PRIO_MIN (-3) | ||
187 | # define THREAD_PRIO_MAX (+3) | ||
188 | |||
189 | # define THREAD_CLEANUP_PUSH( cb_, val_) | ||
190 | # define THREAD_CLEANUP_POP( execute_) | ||
191 | 45 | ||
46 | // ################################################################################################## | ||
47 | // ################################################################################################## | ||
192 | #else // THREADAPI == THREADAPI_PTHREAD | 48 | #else // THREADAPI == THREADAPI_PTHREAD |
49 | // ################################################################################################## | ||
50 | // ################################################################################################## | ||
193 | 51 | ||
194 | /* Platforms that have a timed 'pthread_join()' can get away with a simpler | 52 | // PThread (Linux, OS X, ...) |
195 | * implementation. Others will use a condition variable. | ||
196 | */ | ||
197 | # if defined __WINPTHREADS_VERSION | ||
198 | //# define USE_PTHREAD_TIMEDJOIN | ||
199 | # endif // __WINPTHREADS_VERSION | ||
200 | |||
201 | # ifdef USE_PTHREAD_TIMEDJOIN | ||
202 | # ifdef PLATFORM_OSX | ||
203 | # error "No 'pthread_timedjoin()' on this system" | ||
204 | # else | ||
205 | /* Linux, ... */ | ||
206 | # define PTHREAD_TIMEDJOIN pthread_timedjoin_np | ||
207 | # endif | ||
208 | # endif | ||
209 | |||
210 | typedef pthread_t THREAD_T; | ||
211 | # define THREAD_ISNULL( _h) 0 // pthread_t may be a structure: never 'null' by itself | ||
212 | |||
213 | void THREAD_CREATE( THREAD_T* ref, THREAD_RETURN_T (*func)( void*), void* data, int prio /* -3..+3 */); | ||
214 | 53 | ||
215 | # if defined(PLATFORM_LINUX) | 54 | // looks like some MinGW installations don't support PTW32_INCLUDE_WINDOWS_H, so let's include it ourselves, just in case |
216 | extern volatile bool_t sudo; | 55 | #if defined(PLATFORM_WIN32) |
217 | # ifdef LINUX_SCHED_RR | 56 | #include <windows.h> |
218 | # define THREAD_PRIO_MIN (sudo ? -3 : 0) | 57 | #endif // PLATFORM_WIN32 |
219 | # else | 58 | #include <pthread.h> |
220 | # define THREAD_PRIO_MIN (0) | ||
221 | # endif | ||
222 | # define THREAD_PRIO_MAX (sudo ? +3 : 0) | ||
223 | # else | ||
224 | # define THREAD_PRIO_MIN (-3) | ||
225 | # define THREAD_PRIO_MAX (+3) | ||
226 | # endif | ||
227 | |||
228 | # if THREADWAIT_METHOD == THREADWAIT_CONDVAR | ||
229 | # define THREAD_CLEANUP_PUSH( cb_, val_) pthread_cleanup_push( cb_, val_) | ||
230 | # define THREAD_CLEANUP_POP( execute_) pthread_cleanup_pop( execute_) | ||
231 | # else | ||
232 | # define THREAD_CLEANUP_PUSH( cb_, val_) { | ||
233 | # define THREAD_CLEANUP_POP( execute_) } | ||
234 | # endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR | ||
235 | #endif // THREADAPI == THREADAPI_WINDOWS | ||
236 | |||
237 | /* | ||
238 | * Win32 and PTHREAD_TIMEDJOIN allow waiting for a thread with a timeout. | ||
239 | * Posix without PTHREAD_TIMEDJOIN needs to use a condition variable approach. | ||
240 | */ | ||
241 | #define THREADWAIT_TIMEOUT 1 | ||
242 | #define THREADWAIT_CONDVAR 2 | ||
243 | |||
244 | #if THREADAPI == THREADAPI_WINDOWS || (defined PTHREAD_TIMEDJOIN) | ||
245 | #define THREADWAIT_METHOD THREADWAIT_TIMEOUT | ||
246 | #else // THREADAPI == THREADAPI_WINDOWS || (defined PTHREAD_TIMEDJOIN) | ||
247 | #define THREADWAIT_METHOD THREADWAIT_CONDVAR | ||
248 | #endif // THREADAPI == THREADAPI_WINDOWS || (defined PTHREAD_TIMEDJOIN) | ||
249 | 59 | ||
60 | #if defined(PLATFORM_LINUX) && !defined(LINUX_SCHED_RR) | ||
61 | static constexpr int THREAD_PRIO_MIN{ 0 }; | ||
62 | #else | ||
63 | static constexpr int THREAD_PRIO_MIN{ -3 }; | ||
64 | #endif | ||
65 | static constexpr int THREAD_PRIO_MAX{ +3 }; | ||
250 | 66 | ||
251 | #if THREADWAIT_METHOD == THREADWAIT_TIMEOUT | 67 | #endif // THREADAPI == THREADAPI_PTHREAD |
252 | bool_t THREAD_WAIT_IMPL( THREAD_T *ref, double secs); | 68 | // ################################################################################################## |
253 | #define THREAD_WAIT( a, b, c, d, e) THREAD_WAIT_IMPL( a, b) | 69 | // ################################################################################################## |
254 | #else // THREADWAIT_METHOD == THREADWAIT_CONDVAR | ||
255 | bool_t THREAD_WAIT_IMPL( THREAD_T *ref, double secs, SIGNAL_T *signal_ref, MUTEX_T *mu_ref, volatile enum e_status *st_ref); | ||
256 | #define THREAD_WAIT THREAD_WAIT_IMPL | ||
257 | #endif // // THREADWAIT_METHOD == THREADWAIT_CONDVAR | ||
258 | 70 | ||
259 | void THREAD_KILL( THREAD_T* ref); | 71 | void THREAD_SETNAME(char const* _name); |
260 | void THREAD_SETNAME( char const* _name); | 72 | void THREAD_SET_PRIORITY(int prio_, bool sudo_); |
261 | void THREAD_MAKE_ASYNCH_CANCELLABLE(); | 73 | void THREAD_SET_AFFINITY(unsigned int aff); |
262 | void THREAD_SET_PRIORITY( int prio); | ||
263 | void THREAD_SET_AFFINITY( unsigned int aff); | ||
264 | 74 | ||
265 | #endif // __threading_h__ | 75 | void JTHREAD_SET_PRIORITY(std::jthread& thread_, int prio_, bool sudo_); |
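A short sketch of the slimmed-down C++ surface declared above; the lambda body, thread name, and priority value are illustrative placeholders:

    std::jthread worker{ []() { THREAD_SETNAME("worker"); /* lane body */ } };
    JTHREAD_SET_PRIORITY(worker, +2, /*sudo_*/ false); // no-op on Linux unless running as root
    worker.join();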
diff --git a/src/threading_osx.h b/src/threading_osx.h
index 93da8c3..f4d41e0 100644
--- a/src/threading_osx.h
+++ b/src/threading_osx.h
@@ -2,8 +2,7 @@ | |||
2 | * THREADING_OSX.H | 2 | * THREADING_OSX.H |
3 | * http://yyshen.github.io/2015/01/18/binding_threads_to_cores_osx.html | 3 | * http://yyshen.github.io/2015/01/18/binding_threads_to_cores_osx.html |
4 | */ | 4 | */ |
5 | #ifndef __threading_osx_h__ | 5 | #pragma once |
6 | #define __threading_osx_h__ 1 | ||
7 | 6 | ||
8 | #include <mach/mach_types.h> | 7 | #include <mach/mach_types.h> |
9 | #include <mach/thread_act.h> | 8 | #include <mach/thread_act.h> |
@@ -11,15 +10,16 @@ | |||
11 | 10 | ||
12 | #define SYSCTL_CORE_COUNT "machdep.cpu.core_count" | 11 | #define SYSCTL_CORE_COUNT "machdep.cpu.core_count" |
13 | 12 | ||
14 | typedef struct cpu_set { | 13 | struct cpu_set_t |
14 | { | ||
15 | uint32_t count; | 15 | uint32_t count; |
16 | } cpu_set_t; | 16 | } ; |
17 | 17 | ||
18 | static inline void CPU_ZERO(cpu_set_t *cs) { cs->count = 0; } | 18 | static inline void CPU_ZERO(cpu_set_t *cs) { cs->count = 0; } |
19 | static inline void CPU_SET(int num, cpu_set_t *cs) { cs->count |= (1 << num); } | 19 | static inline void CPU_SET(int num, cpu_set_t *cs) { cs->count |= (1 << num); } |
20 | static inline int CPU_ISSET(int num, cpu_set_t *cs) { return (cs->count & (1 << num)); } | 20 | [[nodiscard]] static inline int CPU_ISSET(int num, cpu_set_t *cs) { return (cs->count & (1 << num)); } |
21 | 21 | ||
22 | int sched_getaffinity(pid_t pid, size_t cpu_size, cpu_set_t *cpu_set) | 22 | [[nodiscard]] int sched_getaffinity(pid_t pid, size_t cpu_size, cpu_set_t *cpu_set) |
23 | { | 23 | { |
24 | int32_t core_count = 0; | 24 | int32_t core_count = 0; |
25 | size_t len = sizeof(core_count); | 25 | size_t len = sizeof(core_count); |
@@ -38,7 +38,7 @@ int sched_getaffinity(pid_t pid, size_t cpu_size, cpu_set_t *cpu_set) | |||
38 | return 0; | 38 | return 0; |
39 | } | 39 | } |
40 | 40 | ||
41 | int pthread_setaffinity_np(pthread_t thread, size_t cpu_size, cpu_set_t *cpu_set) | 41 | [[nodiscard]] int pthread_setaffinity_np(pthread_t thread, size_t cpu_size, cpu_set_t *cpu_set) |
42 | { | 42 | { |
43 | thread_port_t mach_thread; | 43 | thread_port_t mach_thread; |
44 | int core = 0; | 44 | int core = 0; |
@@ -56,4 +56,3 @@ int pthread_setaffinity_np(pthread_t thread, size_t cpu_size, cpu_set_t *cpu_set | |||
56 | return 0; | 56 | return 0; |
57 | } | 57 | } |
58 | 58 | ||
59 | #endif | ||
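Hedged usage sketch of the OS X shim above, which mirrors the Linux affinity API it emulates; error handling is a placeholder:

    cpu_set_t cs;
    CPU_ZERO(&cs);
    CPU_SET(1, &cs); // request core 1
    if (pthread_setaffinity_np(pthread_self(), sizeof(cs), &cs) != 0)
    {
        // the underlying mach call failed; handle or ignore depending on how critical pinning is
    }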
diff --git a/src/tools.c b/src/tools.cpp
index c43d8a2..a0a3018 100644
--- a/src/tools.c
+++ b/src/tools.cpp
@@ -31,24 +31,12 @@ THE SOFTWARE. | |||
31 | =============================================================================== | 31 | =============================================================================== |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #include <stdio.h> | ||
35 | #include <assert.h> | ||
36 | #include <string.h> | ||
37 | #include <ctype.h> | ||
38 | #include <stdlib.h> | ||
39 | #if !defined(__APPLE__) | ||
40 | #include <malloc.h> | ||
41 | #endif // __APPLE__ | ||
42 | |||
43 | #include "tools.h" | 34 | #include "tools.h" |
44 | #include "compat.h" | 35 | |
45 | #include "universe.h" | 36 | #include "universe.h" |
46 | #include "keeper.h" | ||
47 | #include "lanes.h" | ||
48 | #include "uniquekey.h" | ||
49 | 37 | ||
50 | // functions implemented in deep.c | 38 | // functions implemented in deep.c |
51 | extern bool_t copydeep( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, LookupMode mode_, char const* upName_); | 39 | extern bool copydeep(Universe* U, Dest L2, int L2_cache_i, Source L, int i, LookupMode mode_, char const* upName_); |
52 | extern void push_registry_subtable( lua_State* L, UniqueKey key_); | 40 | extern void push_registry_subtable( lua_State* L, UniqueKey key_); |
53 | 41 | ||
54 | DEBUGSPEW_CODE( char const* debugspew_indent = "----+----!----+----!----+----!----+----!----+----!----+----!----+----!----+"); | 42 | DEBUGSPEW_CODE( char const* debugspew_indent = "----+----!----+----!----+----!----+----!----+----!----+----!----+----!----+"); |
@@ -61,32 +49,32 @@ DEBUGSPEW_CODE( char const* debugspew_indent = "----+----!----+----!----+----!-- | |||
61 | */ | 49 | */ |
62 | void push_registry_subtable_mode( lua_State* L, UniqueKey key_, const char* mode_) | 50 | void push_registry_subtable_mode( lua_State* L, UniqueKey key_, const char* mode_) |
63 | { | 51 | { |
64 | STACK_GROW( L, 3); | 52 | STACK_GROW(L, 3); |
65 | STACK_CHECK( L, 0); | 53 | STACK_CHECK_START_REL(L, 0); |
66 | 54 | ||
67 | REGISTRY_GET( L, key_); // {}|nil | 55 | key_.pushValue(L); // {}|nil |
68 | STACK_MID( L, 1); | 56 | STACK_CHECK(L, 1); |
69 | 57 | ||
70 | if( lua_isnil( L, -1)) | 58 | if (lua_isnil(L, -1)) |
71 | { | 59 | { |
72 | lua_pop( L, 1); // | 60 | lua_pop(L, 1); // |
73 | lua_newtable( L); // {} | 61 | lua_newtable(L); // {} |
74 | // _R[key_] = {} | 62 | // _R[key_] = {} |
75 | REGISTRY_SET( L, key_, lua_pushvalue( L, -2)); // {} | 63 | key_.setValue(L, [](lua_State* L) { lua_pushvalue(L, -2); }); // {} |
76 | STACK_MID( L, 1); | 64 | STACK_CHECK(L, 1); |
77 | 65 | ||
78 | // Set its metatable if requested | 66 | // Set its metatable if requested |
79 | if( mode_) | 67 | if (mode_) |
80 | { | 68 | { |
81 | lua_newtable( L); // {} mt | 69 | lua_newtable(L); // {} mt |
82 | lua_pushliteral( L, "__mode"); // {} mt "__mode" | 70 | lua_pushliteral(L, "__mode"); // {} mt "__mode" |
83 | lua_pushstring( L, mode_); // {} mt "__mode" mode | 71 | lua_pushstring(L, mode_); // {} mt "__mode" mode |
84 | lua_rawset( L, -3); // {} mt | 72 | lua_rawset(L, -3); // {} mt |
85 | lua_setmetatable( L, -2); // {} | 73 | lua_setmetatable(L, -2); // {} |
86 | } | 74 | } |
87 | } | 75 | } |
88 | STACK_END( L, 1); | 76 | STACK_CHECK(L, 1); |
89 | ASSERT_L( lua_istable( L, -1)); | 77 | ASSERT_L(lua_istable(L, -1)); |
90 | } | 78 | } |
91 | 79 | ||
92 | // ################################################################################################ | 80 | // ################################################################################################ |
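Hedged usage sketch for push_registry_subtable_mode() above: it fetches or lazily creates a registry subtable, optionally giving it a __mode metafield. MY_REGKEY and object are placeholders, not names defined in this change:

    push_registry_subtable_mode(L, MY_REGKEY, "v"); // ... {} with __mode = "v" (weak values)
    lua_pushlightuserdata(L, object);               // ... {} object
    lua_pushstring(L, "some name");                 // ... {} object "some name"
    lua_rawset(L, -3);                              // ... {}
    lua_pop(L, 1);                                  // ...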
@@ -97,7 +85,7 @@ void push_registry_subtable_mode( lua_State* L, UniqueKey key_, const char* mode | |||
97 | */ | 85 | */ |
98 | void push_registry_subtable( lua_State* L, UniqueKey key_) | 86 | void push_registry_subtable( lua_State* L, UniqueKey key_) |
99 | { | 87 | { |
100 | push_registry_subtable_mode( L, key_, NULL); | 88 | push_registry_subtable_mode(L, key_, nullptr); |
101 | } | 89 | } |
102 | 90 | ||
103 | // ################################################################################################ | 91 | // ################################################################################################ |
@@ -116,7 +104,7 @@ void luaG_dump( lua_State* L) | |||
116 | 104 | ||
117 | for( i = 1; i <= top; ++ i) | 105 | for( i = 1; i <= top; ++ i) |
118 | { | 106 | { |
119 | int type = lua_type( L, i); | 107 | LuaType type{ lua_type_as_enum(L, i) }; |
120 | 108 | ||
121 | fprintf( stderr, "\t[%d]= (%s) ", i, lua_typename( L, type)); | 109 | fprintf( stderr, "\t[%d]= (%s) ", i, lua_typename( L, type)); |
122 | 110 | ||
@@ -125,7 +113,7 @@ void luaG_dump( lua_State* L) | |||
125 | // Note: this requires 'tostring()' to be defined. If it is NOT, | 113 | // Note: this requires 'tostring()' to be defined. If it is NOT, |
126 | // enable it for more debugging. | 114 | // enable it for more debugging. |
127 | // | 115 | // |
128 | STACK_CHECK( L, 0); | 116 | STACK_CHECK_START_REL(L, 0); |
129 | STACK_GROW( L, 2); | 117 | STACK_GROW( L, 2); |
130 | 118 | ||
131 | lua_getglobal( L, "tostring"); | 119 | lua_getglobal( L, "tostring"); |
@@ -146,7 +134,7 @@ void luaG_dump( lua_State* L) | |||
146 | fprintf( stderr, "%s", lua_tostring( L, -1)); | 134 | fprintf( stderr, "%s", lua_tostring( L, -1)); |
147 | } | 135 | } |
148 | lua_pop( L, 1); | 136 | lua_pop( L, 1); |
149 | STACK_END( L, 0); | 137 | STACK_CHECK( L, 0); |
150 | fprintf( stderr, "\n"); | 138 | fprintf( stderr, "\n"); |
151 | } | 139 | } |
152 | fprintf( stderr, "\n"); | 140 | fprintf( stderr, "\n"); |
@@ -156,122 +144,97 @@ void luaG_dump( lua_State* L) | |||
156 | // ################################################################################################ | 144 | // ################################################################################################ |
157 | 145 | ||
158 | // same as PUC-Lua l_alloc | 146 | // same as PUC-Lua l_alloc |
159 | static void* libc_lua_Alloc(void* ud, void* ptr, size_t osize, size_t nsize) | 147 | extern "C" [[nodiscard]] static void* libc_lua_Alloc([[maybe_unused]] void* ud, [[maybe_unused]] void* ptr_, [[maybe_unused]] size_t osize_, size_t nsize_) |
160 | { | 148 | { |
161 | (void)ud; (void)osize; /* not used */ | 149 | if (nsize_ == 0) |
162 | if (nsize == 0) | ||
163 | { | 150 | { |
164 | free(ptr); | 151 | free(ptr_); |
165 | return NULL; | 152 | return nullptr; |
166 | } | 153 | } |
167 | else | 154 | else |
168 | { | 155 | { |
169 | return realloc(ptr, nsize); | 156 | return realloc(ptr_, nsize_); |
170 | } | 157 | } |
171 | } | 158 | } |
172 | 159 | ||
173 | static void* protected_lua_Alloc( void *ud, void *ptr, size_t osize, size_t nsize) | 160 | // ################################################################################################# |
174 | { | ||
175 | void* p; | ||
176 | ProtectedAllocator* s = (ProtectedAllocator*) ud; | ||
177 | MUTEX_LOCK( &s->lock); | ||
178 | p = s->definition.allocF( s->definition.allocUD, ptr, osize, nsize); | ||
179 | MUTEX_UNLOCK( &s->lock); | ||
180 | return p; | ||
181 | } | ||
182 | 161 | ||
183 | static int luaG_provide_protected_allocator( lua_State* L) | 162 | [[nodiscard]] static int luaG_provide_protected_allocator(lua_State* L) |
184 | { | 163 | { |
185 | Universe* U = universe_get( L); | 164 | Universe* const U{ universe_get(L) }; |
186 | AllocatorDefinition* const def = lua_newuserdatauv( L, sizeof(AllocatorDefinition), 0); | 165 | // push a new full userdata on the stack, giving access to the universe's protected allocator |
187 | def->allocF = protected_lua_Alloc; | 166 | [[maybe_unused]] AllocatorDefinition* const def{ new (L) AllocatorDefinition{ U->protected_allocator.makeDefinition() } }; |
188 | def->allocUD = &U->protected_allocator; | ||
189 | return 1; | 167 | return 1; |
190 | } | 168 | } |
191 | 169 | ||
170 | // ################################################################################################# | ||
171 | |||
192 | // called once at the creation of the universe (therefore L is the master Lua state everything originates from) | 172 | // called once at the creation of the universe (therefore L is the master Lua state everything originates from) |
193 | // Do I need to disable this when compiling for LuaJIT to prevent issues? | 173 | // Do I need to disable this when compiling for LuaJIT to prevent issues? |
194 | void initialize_allocator_function( Universe* U, lua_State* L) | 174 | void initialize_allocator_function(Universe* U, lua_State* L) |
195 | { | 175 | { |
196 | STACK_CHECK( L, 0); | 176 | STACK_CHECK_START_REL(L, 1); // settings |
197 | lua_getfield( L, -1, "allocator"); // settings allocator|nil|"protected" | 177 | lua_getfield(L, -1, "allocator"); // settings allocator|nil|"protected" |
198 | if( !lua_isnil( L, -1)) | 178 | if (!lua_isnil(L, -1)) |
199 | { | 179 | { |
200 | // store C function pointer in an internal variable | 180 | // store C function pointer in an internal variable |
201 | U->provide_allocator = lua_tocfunction( L, -1); // settings allocator | 181 | U->provide_allocator = lua_tocfunction(L, -1); // settings allocator |
202 | if( U->provide_allocator != NULL) | 182 | if (U->provide_allocator != nullptr) |
203 | { | 183 | { |
204 | // make sure the function doesn't have upvalues | 184 | // make sure the function doesn't have upvalues |
205 | char const* upname = lua_getupvalue( L, -1, 1); // settings allocator upval? | 185 | char const* upname = lua_getupvalue(L, -1, 1); // settings allocator upval? |
206 | if( upname != NULL) // should be "" for C functions with upvalues if any | 186 | if (upname != nullptr) // should be "" for C functions with upvalues if any |
207 | { | 187 | { |
208 | (void) luaL_error( L, "config.allocator() shouldn't have upvalues"); | 188 | (void) luaL_error(L, "config.allocator() shouldn't have upvalues"); |
209 | } | 189 | } |
210 | // remove this C function from the config table so that it doesn't cause problems | 190 | // remove this C function from the config table so that it doesn't cause problems |
211 | // when we transfer the config table in newly created Lua states | 191 | // when we transfer the config table in newly created Lua states |
212 | lua_pushnil( L); // settings allocator nil | 192 | lua_pushnil(L); // settings allocator nil |
213 | lua_setfield( L, -3, "allocator"); // settings allocator | 193 | lua_setfield(L, -3, "allocator"); // settings allocator |
214 | } | 194 | } |
215 | else if( lua_type( L, -1) == LUA_TSTRING) // should be "protected" | 195 | else if (lua_type(L, -1) == LUA_TSTRING) // should be "protected" |
216 | { | 196 | { |
217 | // initialize all we need for the protected allocator | 197 | ASSERT_L(strcmp(lua_tostring(L, -1), "protected") == 0); |
218 | MUTEX_INIT( &U->protected_allocator.lock); // the mutex | 198 | // set the original allocator to call from inside protection by the mutex |
219 | // and the original allocator to call from inside protection by the mutex | 199 | U->protected_allocator.initFrom(L); |
220 | U->protected_allocator.definition.allocF = lua_getallocf( L, &U->protected_allocator.definition.allocUD); | 200 | U->protected_allocator.installIn(L); |
221 | // before a state is created, this function will be called to obtain the allocator | 201 | // before a state is created, this function will be called to obtain the allocator |
222 | U->provide_allocator = luaG_provide_protected_allocator; | 202 | U->provide_allocator = luaG_provide_protected_allocator; |
223 | |||
224 | lua_setallocf( L, protected_lua_Alloc, &U->protected_allocator); | ||
225 | } | 203 | } |
226 | } | 204 | } |
227 | else | 205 | else |
228 | { | 206 | { |
229 | // initialize the mutex even if we are not going to use it, because cleanup_allocator_function will deinitialize it | ||
230 | MUTEX_INIT( &U->protected_allocator.lock); | ||
231 | // just grab whatever allocator was provided to lua_newstate | 207 | // just grab whatever allocator was provided to lua_newstate |
232 | U->protected_allocator.definition.allocF = lua_getallocf( L, &U->protected_allocator.definition.allocUD); | 208 | U->protected_allocator.initFrom(L); |
233 | } | 209 | } |
234 | lua_pop( L, 1); // settings | 210 | lua_pop(L, 1); // settings |
235 | STACK_MID(L, 0); | 211 | STACK_CHECK(L, 1); |
236 | 212 | ||
237 | lua_getfield( L, -1, "internal_allocator"); // settings "libc"|"allocator" | 213 | lua_getfield(L, -1, "internal_allocator"); // settings "libc"|"allocator" |
238 | { | 214 | { |
239 | char const* allocator = lua_tostring( L, -1); | 215 | char const* allocator = lua_tostring(L, -1); |
240 | if (strcmp(allocator, "libc") == 0) | 216 | if (strcmp(allocator, "libc") == 0) |
241 | { | 217 | { |
242 | U->internal_allocator.allocF = libc_lua_Alloc; | 218 | U->internal_allocator = AllocatorDefinition{ libc_lua_Alloc, nullptr }; |
243 | U->internal_allocator.allocUD = NULL; | ||
244 | } | 219 | } |
245 | else if (U->provide_allocator == luaG_provide_protected_allocator) | 220 | else if (U->provide_allocator == luaG_provide_protected_allocator) |
246 | { | 221 | { |
247 | // user wants mutex protection on the state's allocator. Use protection for our own allocations too, just in case. | 222 | // user wants mutex protection on the state's allocator. Use protection for our own allocations too, just in case. |
248 | U->internal_allocator.allocF = lua_getallocf(L, &U->internal_allocator.allocUD); | 223 | U->internal_allocator = U->protected_allocator.makeDefinition(); |
249 | } | 224 | } |
250 | else | 225 | else |
251 | { | 226 | { |
252 | // no protection required, just use whatever we have as-is. | 227 | // no protection required, just use whatever we have as-is. |
253 | U->internal_allocator = U->protected_allocator.definition; | 228 | U->internal_allocator = U->protected_allocator; |
254 | } | 229 | } |
255 | } | 230 | } |
256 | lua_pop( L, 1); // settings | 231 | lua_pop(L, 1); // settings |
257 | STACK_END( L, 0); | 232 | STACK_CHECK(L, 1); |
258 | } | ||
259 | |||
260 | void cleanup_allocator_function( Universe* U, lua_State* L) | ||
261 | { | ||
262 | // remove the protected allocator, if any | ||
263 | if( U->protected_allocator.definition.allocF != NULL) | ||
264 | { | ||
265 | // install the non-protected allocator | ||
266 | lua_setallocf( L, U->protected_allocator.definition.allocF, U->protected_allocator.definition.allocUD); | ||
267 | // release the mutex | ||
268 | MUTEX_FREE( &U->protected_allocator.lock); | ||
269 | } | ||
270 | } | 233 | } |
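To make the 'allocator' setting concrete, here is a hedged sketch of a user-supplied provider: a C function without upvalues that returns a full userdata holding an AllocatorDefinition, the same pattern as luaG_provide_protected_allocator() above. It assumes the placement operator new over a lua_State* used in this codebase; the chosen allocF/allocUD values are placeholders, and the function would be passed in through the configure settings' 'allocator' entry:

    [[nodiscard]] static int provide_my_allocator(lua_State* L)
    {
        // placement-new into a Lua full userdata so the caller can lua_touserdata() it back
        new (L) AllocatorDefinition{ libc_lua_Alloc, nullptr };
        return 1;
    }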
271 | 234 | ||
272 | // ################################################################################################ | 235 | // ################################################################################################ |
273 | 236 | ||
274 | static int dummy_writer( lua_State* L, void const* p, size_t sz, void* ud) | 237 | [[nodiscard]] static int dummy_writer(lua_State* L, void const* p, size_t sz, void* ud) |
275 | { | 238 | { |
276 | (void)L; (void)p; (void)sz; (void) ud; // unused | 239 | (void)L; (void)p; (void)sz; (void) ud; // unused |
277 | return 666; | 240 | return 666; |
@@ -287,18 +250,18 @@ static int dummy_writer( lua_State* L, void const* p, size_t sz, void* ud) | |||
287 | * +-----------------+----------+------------+----------+ | 250 | * +-----------------+----------+------------+----------+ |
288 | * | lua_topointer | | | | | 251 | * | lua_topointer | | | | |
289 | * +-----------------+----------+------------+----------+ | 252 | * +-----------------+----------+------------+----------+ |
290 | * | lua_tocfunction | NULL | | NULL | | 253 | * | lua_tocfunction | nullptr | | nullptr | |
291 | * +-----------------+----------+------------+----------+ | 254 | * +-----------------+----------+------------+----------+ |
292 | * | lua_dump | 666 | 1 | 1 | | 255 | * | lua_dump | 666 | 1 | 1 | |
293 | * +-----------------+----------+------------+----------+ | 256 | * +-----------------+----------+------------+----------+ |
294 | */ | 257 | */ |
295 | 258 | ||
296 | typedef enum | 259 | enum FuncSubType |
297 | { | 260 | { |
298 | FST_Bytecode, | 261 | FST_Bytecode, |
299 | FST_Native, | 262 | FST_Native, |
300 | FST_FastJIT | 263 | FST_FastJIT |
301 | } FuncSubType; | 264 | } ; |
302 | 265 | ||
303 | FuncSubType luaG_getfuncsubtype( lua_State *L, int _i) | 266 | FuncSubType luaG_getfuncsubtype( lua_State *L, int _i) |
304 | { | 267 | { |
@@ -316,7 +279,7 @@ FuncSubType luaG_getfuncsubtype( lua_State *L, int _i) | |||
316 | // the provided writer fails with code 666 | 279 | // the provided writer fails with code 666 |
317 | // therefore, anytime we get 666, this means that lua_dump() attempted a dump | 280 | // therefore, anytime we get 666, this means that lua_dump() attempted a dump |
318 | // all other cases mean this is either a C or LuaJIT-fast function | 281 | // all other cases mean this is either a C or LuaJIT-fast function |
319 | dumpres = lua504_dump( L, dummy_writer, NULL, 0); | 282 | dumpres = lua504_dump(L, dummy_writer, nullptr, 0); |
320 | lua_pop( L, mustpush); | 283 | lua_pop( L, mustpush); |
321 | if( dumpres == 666) | 284 | if( dumpres == 666) |
322 | { | 285 | { |
@@ -326,7 +289,9 @@ FuncSubType luaG_getfuncsubtype( lua_State *L, int _i) | |||
326 | return FST_FastJIT; | 289 | return FST_FastJIT; |
327 | } | 290 | } |
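A hedged sketch of how the subtype probe above is typically consulted before copying a function between states; the branch bodies are placeholders:

    switch (luaG_getfuncsubtype(L, -1))
    {
        case FST_Bytecode: /* dump the bytecode and load it in the destination state */ break;
        case FST_Native:   /* a C function: transfer it by name through the lookup databases */ break;
        case FST_FastJIT:  /* a LuaJIT fast function: cannot be dumped, transfer by name only */ break;
    }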
328 | 291 | ||
329 | static lua_CFunction luaG_tocfunction( lua_State *L, int _i, FuncSubType *_out) | 292 | // ################################################################################################# |
293 | |||
294 | [[nodiscard]] static lua_CFunction luaG_tocfunction(lua_State* L, int _i, FuncSubType* _out) | ||
330 | { | 295 | { |
331 | lua_CFunction p = lua_tocfunction( L, _i); | 296 | lua_CFunction p = lua_tocfunction( L, _i); |
332 | *_out = luaG_getfuncsubtype( L, _i); | 297 | *_out = luaG_getfuncsubtype( L, _i); |
@@ -334,14 +299,16 @@ static lua_CFunction luaG_tocfunction( lua_State *L, int _i, FuncSubType *_out) | |||
334 | } | 299 | } |
335 | 300 | ||
336 | // crc64/we of string "LOOKUPCACHE_REGKEY" generated at http://www.nitrxgen.net/hashgen/ | 301 | // crc64/we of string "LOOKUPCACHE_REGKEY" generated at http://www.nitrxgen.net/hashgen/ |
337 | static DECLARE_CONST_UNIQUE_KEY( LOOKUPCACHE_REGKEY, 0x837a68dfc6fcb716); | 302 | static constexpr UniqueKey LOOKUPCACHE_REGKEY{ 0x837a68dfc6fcb716ull }; |
303 | |||
304 | // ################################################################################################# | ||
338 | 305 | ||
339 | // inspired by tconcat() in ltablib.c | 306 | // inspired by tconcat() in ltablib.c |
340 | static char const* luaG_pushFQN( lua_State* L, int t, int last, size_t* length) | 307 | [[nodiscard]] static char const* luaG_pushFQN(lua_State* L, int t, int last, size_t* length) |
341 | { | 308 | { |
342 | int i = 1; | 309 | int i = 1; |
343 | luaL_Buffer b; | 310 | luaL_Buffer b; |
344 | STACK_CHECK( L, 0); | 311 | STACK_CHECK_START_REL(L, 0); |
345 | // Lua 5.4 pushes &b as light userdata on the stack. be aware of it... | 312 | // Lua 5.4 pushes &b as light userdata on the stack. be aware of it... |
346 | luaL_buffinit( L, &b); // ... {} ... &b? | 313 | luaL_buffinit( L, &b); // ... {} ... &b? |
347 | for( ; i < last; ++ i) | 314 | for( ; i < last; ++ i) |
@@ -357,10 +324,12 @@ static char const* luaG_pushFQN( lua_State* L, int t, int last, size_t* length) | |||
357 | } | 324 | } |
358 | // &b is popped at that point (-> replaced by the result) | 325 | // &b is popped at that point (-> replaced by the result) |
359 | luaL_pushresult( &b); // ... {} ... "<result>" | 326 | luaL_pushresult( &b); // ... {} ... "<result>" |
360 | STACK_END( L, 1); | 327 | STACK_CHECK( L, 1); |
361 | return lua_tolstring( L, -1, length); | 328 | return lua_tolstring( L, -1, length); |
362 | } | 329 | } |
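A worked example of the FQN builder above (values are illustrative): if the fqn table at stack index t holds { "lanes", "gen" } and last == 2, the call pushes the dot-separated name and reports its length:

    size_t len;
    char const* fqn_name = luaG_pushFQN(L, t, 2, &len); // pushes "lanes.gen", len == 9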
363 | 330 | ||
331 | // ################################################################################################# | ||
332 | |||
364 | /* | 333 | /* |
365 | * receives 2 arguments: a name k and an object o | 334 | * receives 2 arguments: a name k and an object o |
366 | * add two entries ["fully.qualified.name"] = o | 335 | * add two entries ["fully.qualified.name"] = o |
@@ -369,7 +338,7 @@ static char const* luaG_pushFQN( lua_State* L, int t, int last, size_t* length) | |||
369 | * if we already had an entry of type [o] = ..., replace the name if the new one is shorter | 338 | * if we already had an entry of type [o] = ..., replace the name if the new one is shorter |
370 | * pops the processed object from the stack | 339 | * pops the processed object from the stack |
371 | */ | 340 | */ |
372 | static void update_lookup_entry( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* L, int _ctx_base, int _depth) | 341 | static void update_lookup_entry(DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* L, int _ctx_base, int _depth) |
373 | { | 342 | { |
374 | // slot 1 in the stack contains the table that receives everything we found | 343 | // slot 1 in the stack contains the table that receives everything we found |
375 | int const dest = _ctx_base; | 344 | int const dest = _ctx_base; |
@@ -378,22 +347,22 @@ static void update_lookup_entry( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* | |||
378 | 347 | ||
379 | size_t prevNameLength, newNameLength; | 348 | size_t prevNameLength, newNameLength; |
380 | char const* prevName; | 349 | char const* prevName; |
381 | DEBUGSPEW_CODE( char const *newName); | 350 | DEBUGSPEW_CODE(char const *newName); |
382 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "update_lookup_entry()\n" INDENT_END)); | 351 | DEBUGSPEW_CODE(fprintf( stderr, INDENT_BEGIN "update_lookup_entry()\n" INDENT_END)); |
383 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); | 352 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed)); |
384 | 353 | ||
385 | STACK_CHECK( L, 0); | 354 | STACK_CHECK_START_REL(L, 0); |
386 | // first, raise an error if the function is already known | 355 | // first, raise an error if the function is already known |
387 | lua_pushvalue( L, -1); // ... {bfc} k o o | 356 | lua_pushvalue( L, -1); // ... {bfc} k o o |
388 | lua_rawget( L, dest); // ... {bfc} k o name? | 357 | lua_rawget( L, dest); // ... {bfc} k o name? |
389 | prevName = lua_tolstring( L, -1, &prevNameLength); // NULL if we got nil (first encounter of this object) | 358 | prevName = lua_tolstring( L, -1, &prevNameLength); // nullptr if we got nil (first encounter of this object) |
390 | // push name in fqn stack (note that concatenation will crash if name is not a string or a number) | 359 | // push name in fqn stack (note that concatenation will crash if name is not a string or a number) |
391 | lua_pushvalue( L, -3); // ... {bfc} k o name? k | 360 | lua_pushvalue( L, -3); // ... {bfc} k o name? k |
392 | ASSERT_L( lua_type( L, -1) == LUA_TNUMBER || lua_type( L, -1) == LUA_TSTRING); | 361 | ASSERT_L( lua_type( L, -1) == LUA_TNUMBER || lua_type( L, -1) == LUA_TSTRING); |
393 | ++ _depth; | 362 | ++ _depth; |
394 | lua_rawseti( L, fqn, _depth); // ... {bfc} k o name? | 363 | lua_rawseti( L, fqn, _depth); // ... {bfc} k o name? |
395 | // generate name | 364 | // generate name |
396 | DEBUGSPEW_CODE( newName =) luaG_pushFQN( L, fqn, _depth, &newNameLength); // ... {bfc} k o name? "f.q.n" | 365 | DEBUGSPEW_OR_NOT(newName, std::ignore) = luaG_pushFQN(L, fqn, _depth, &newNameLength);// ... {bfc} k o name? "f.q.n" |
397 | // Lua 5.2 introduced a hash randomizer seed which causes table iteration to yield a different key order | 366 | // Lua 5.2 introduced a hash randomizer seed which causes table iteration to yield a different key order |
398 | // on different VMs even when the tables are populated the exact same way. | 367 | // on different VMs even when the tables are populated the exact same way. |
399 | // When Lua is built with compatibility options (such as LUA_COMPAT_ALL), | 368 | // When Lua is built with compatibility options (such as LUA_COMPAT_ALL), |
@@ -403,7 +372,7 @@ static void update_lookup_entry( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* | |||
403 | // Also, nothing prevents any external module from exposing a given object under several names, so... | 372 | // Also, nothing prevents any external module from exposing a given object under several names, so... |
404 | // Therefore, when we encounter an object for which a name was previously registered, we need to select the names | 373 | // Therefore, when we encounter an object for which a name was previously registered, we need to select the names |
405 | // based on some sorting order so that we end up with the same name in all databases whatever order the table walk yielded | 374 | // based on some sorting order so that we end up with the same name in all databases whatever order the table walk yielded |
406 | if( prevName != NULL && (prevNameLength < newNameLength || lua_lessthan( L, -2, -1))) | 375 | if (prevName != nullptr && (prevNameLength < newNameLength || lua_lessthan(L, -2, -1))) |
407 | { | 376 | { |
408 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%s '%s' remained named '%s'\n" INDENT_END, lua_typename( L, lua_type( L, -3)), newName, prevName)); | 377 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%s '%s' remained named '%s'\n" INDENT_END, lua_typename( L, lua_type( L, -3)), newName, prevName)); |
409 | // the previous name is 'smaller' than the one we just generated: keep it! | 378 | // the previous name is 'smaller' than the one we just generated: keep it! |
@@ -439,11 +408,13 @@ static void update_lookup_entry( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* | |||
439 | lua_rawseti( L, fqn, _depth); // ... {bfc} k | 408 | lua_rawseti( L, fqn, _depth); // ... {bfc} k |
440 | } | 409 | } |
441 | -- _depth; | 410 | -- _depth; |
442 | STACK_END( L, -1); | 411 | STACK_CHECK(L, -1); |
443 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); | 412 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed)); |
444 | } | 413 | } |
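The selection rule applied above, restated as a tiny hypothetical helper (same condition, with strcmp() standing in for lua_lessthan() on plain string names): the previously registered name is kept when it is shorter than the freshly generated one, or when it sorts before it.

    #include <cstring>

    static bool keep_previous_name(char const* prev, size_t prevLen, char const* fresh, size_t freshLen)
    {
        if (prev == nullptr)
            return false;                     // first encounter of this object: register the new name
        return prevLen < freshLen             // previous name is shorter...
            || std::strcmp(prev, fresh) < 0;  // ...or sorts before the new one
    }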
445 | 414 | ||
446 | static void populate_func_lookup_table_recur( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State* L, int _ctx_base, int _i, int _depth) | 415 | // ################################################################################################# |
416 | |||
417 | static void populate_func_lookup_table_recur(DEBUGSPEW_PARAM_COMMA(Universe* U) lua_State* L, int _ctx_base, int _i, int _depth) | ||
447 | { | 418 | { |
448 | lua_Integer visit_count; | 419 | lua_Integer visit_count; |
449 | // slot 2 contains a table that, when concatenated, produces the fully qualified name of scanned elements in the table provided at slot _i | 420 | // slot 2 contains a table that, when concatenated, produces the fully qualified name of scanned elements in the table provided at slot _i |
@@ -452,18 +423,18 @@ static void populate_func_lookup_table_recur( DEBUGSPEW_PARAM_COMMA( Universe* U | |||
452 | int const cache = _ctx_base + 2; | 423 | int const cache = _ctx_base + 2; |
453 | // we need to remember subtables to process them after functions encountered at the current depth (breadth-first search) | 424 | // we need to remember subtables to process them after functions encountered at the current depth (breadth-first search) |
454 | int const breadth_first_cache = lua_gettop( L) + 1; | 425 | int const breadth_first_cache = lua_gettop( L) + 1; |
455 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "populate_func_lookup_table_recur()\n" INDENT_END)); | 426 | DEBUGSPEW_CODE(fprintf( stderr, INDENT_BEGIN "populate_func_lookup_table_recur()\n" INDENT_END)); |
456 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); | 427 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed)); |
457 | 428 | ||
458 | STACK_GROW( L, 6); | 429 | STACK_GROW( L, 6); |
459 | // slot _i contains a table where we search for functions (or a full userdata with a metatable) | 430 | // slot _i contains a table where we search for functions (or a full userdata with a metatable) |
460 | STACK_CHECK( L, 0); // ... {_i} | 431 | STACK_CHECK_START_REL(L, 0); // ... {_i} |
461 | 432 | ||
462 | // if object is a userdata, replace it by its metatable | 433 | // if object is a userdata, replace it by its metatable |
463 | if( lua_type( L, _i) == LUA_TUSERDATA) | 434 | if( lua_type( L, _i) == LUA_TUSERDATA) |
464 | { | 435 | { |
465 | lua_getmetatable( L, _i); // ... {_i} mt | 436 | lua_getmetatable( L, _i); // ... {_i} mt |
466 | lua_replace( L, _i); // ... {_i} | 437 | lua_replace( L, _i); // ... {_i} |
467 | } | 438 | } |
468 | 439 | ||
469 | // if table is already visited, we are done | 440 | // if table is already visited, we are done |
@@ -471,11 +442,11 @@ static void populate_func_lookup_table_recur( DEBUGSPEW_PARAM_COMMA( Universe* U | |||
471 | lua_rawget( L, cache); // ... {_i} nil|n | 442 | lua_rawget( L, cache); // ... {_i} nil|n |
472 | visit_count = lua_tointeger( L, -1); // 0 if nil, else n | 443 | visit_count = lua_tointeger( L, -1); // 0 if nil, else n |
473 | lua_pop( L, 1); // ... {_i} | 444 | lua_pop( L, 1); // ... {_i} |
474 | STACK_MID( L, 0); | 445 | STACK_CHECK( L, 0); |
475 | if( visit_count > 0) | 446 | if( visit_count > 0) |
476 | { | 447 | { |
477 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "already visited\n" INDENT_END)); | 448 | DEBUGSPEW_CODE(fprintf( stderr, INDENT_BEGIN "already visited\n" INDENT_END)); |
478 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); | 449 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed)); |
479 | return; | 450 | return; |
480 | } | 451 | } |
481 | 452 | ||
@@ -483,7 +454,7 @@ static void populate_func_lookup_table_recur( DEBUGSPEW_PARAM_COMMA( Universe* U | |||
483 | lua_pushvalue( L, _i); // ... {_i} {} | 454 | lua_pushvalue( L, _i); // ... {_i} {} |
484 | lua_pushinteger( L, visit_count + 1); // ... {_i} {} 1 | 455 | lua_pushinteger( L, visit_count + 1); // ... {_i} {} 1 |
485 | lua_rawset( L, cache); // ... {_i} | 456 | lua_rawset( L, cache); // ... {_i} |
486 | STACK_MID( L, 0); | 457 | STACK_CHECK( L, 0); |
487 | 458 | ||
488 | // this table is at breadth_first_cache index | 459 | // this table is at breadth_first_cache index |
489 | lua_newtable( L); // ... {_i} {bfc} | 460 | lua_newtable( L); // ... {_i} {bfc} |
@@ -521,7 +492,7 @@ static void populate_func_lookup_table_recur( DEBUGSPEW_PARAM_COMMA( Universe* U | |||
521 | { | 492 | { |
522 | lua_pop( L, 1); // ... {_i} {bfc} k | 493 | lua_pop( L, 1); // ... {_i} {bfc} k |
523 | } | 494 | } |
524 | STACK_MID( L, 2); | 495 | STACK_CHECK( L, 2); |
525 | } | 496 | } |
526 | // now process the tables we encountered at that depth | 497 | // now process the tables we encountered at that depth |
527 | ++ _depth; | 498 | ++ _depth; |
@@ -530,7 +501,7 @@ static void populate_func_lookup_table_recur( DEBUGSPEW_PARAM_COMMA( Universe* U | |||
530 | { | 501 | { |
531 | DEBUGSPEW_CODE( char const* key = (lua_type( L, -2) == LUA_TSTRING) ? lua_tostring( L, -2) : "not a string"); | 502 | DEBUGSPEW_CODE( char const* key = (lua_type( L, -2) == LUA_TSTRING) ? lua_tostring( L, -2) : "not a string"); |
532 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "table '%s'\n" INDENT_END, key)); | 503 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "table '%s'\n" INDENT_END, key)); |
533 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); | 504 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed)); |
534 | // un-visit this table in case we do need to process it | 505 | // un-visit this table in case we do need to process it |
535 | lua_pushvalue( L, -1); // ... {_i} {bfc} k {} {} | 506 | lua_pushvalue( L, -1); // ... {_i} {bfc} k {} {} |
536 | lua_rawget( L, cache); // ... {_i} {bfc} k {} n | 507 | lua_rawget( L, cache); // ... {_i} {bfc} k {} n |
@@ -552,8 +523,8 @@ static void populate_func_lookup_table_recur( DEBUGSPEW_PARAM_COMMA( Universe* U | |||
552 | lua_rawseti( L, fqn, _depth); // ... {_i} {bfc} k {} | 523 | lua_rawseti( L, fqn, _depth); // ... {_i} {bfc} k {} |
553 | populate_func_lookup_table_recur( DEBUGSPEW_PARAM_COMMA( U) L, _ctx_base, lua_gettop( L), _depth); | 524 | populate_func_lookup_table_recur( DEBUGSPEW_PARAM_COMMA( U) L, _ctx_base, lua_gettop( L), _depth); |
554 | lua_pop( L, 1); // ... {_i} {bfc} k | 525 | lua_pop( L, 1); // ... {_i} {bfc} k |
555 | STACK_MID( L, 2); | 526 | STACK_CHECK( L, 2); |
556 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); | 527 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed)); |
557 | } | 528 | } |
558 | // remove table name from fqn stack | 529 | // remove table name from fqn stack |
559 | lua_pushnil( L); // ... {_i} {bfc} nil | 530 | lua_pushnil( L); // ... {_i} {bfc} nil |
@@ -561,30 +532,32 @@ static void populate_func_lookup_table_recur( DEBUGSPEW_PARAM_COMMA( Universe* U | |||
561 | -- _depth; | 532 | -- _depth; |
562 | // we are done with our cache | 533 | // we are done with our cache |
563 | lua_pop( L, 1); // ... {_i} | 534 | lua_pop( L, 1); // ... {_i} |
564 | STACK_END( L, 0); | 535 | STACK_CHECK( L, 0); |
565 | // we are done // ... {_i} {bfc} | 536 | // we are done // ... {_i} {bfc} |
566 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); | 537 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed)); |
567 | } | 538 | } |
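The visit cache that protects the recursion above, shown in isolation (a simplified depth-first variant with illustrative names, assuming Lua 5.2+ for lua_absindex()): keying visited tables by lua_topointer() is what keeps the walk from looping on tables that reference each other or share subtables.

    #include <unordered_set>

    static void walk_tables_sketch(lua_State* L, int idx, std::unordered_set<void const*>& visited)
    {
        idx = lua_absindex(L, idx);
        if (!visited.insert(lua_topointer(L, idx)).second)
            return;                              // already visited: nothing more to do
        lua_pushnil(L);                          // ... {t} nil
        while (lua_next(L, idx) != 0)            // ... {t} k v
        {
            if (lua_istable(L, -1))
                walk_tables_sketch(L, lua_gettop(L), visited);
            lua_pop(L, 1);                       // ... {t} k    (keep the key for lua_next)
        }
    }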
568 | 539 | ||
540 | // ################################################################################################# | ||
541 | |||
569 | /* | 542 | /* |
570 | * create a "fully.qualified.name" <-> function equivalence database | 543 | * create a "fully.qualified.name" <-> function equivalence database |
571 | */ | 544 | */ |
572 | void populate_func_lookup_table( lua_State* L, int _i, char const* name_) | 545 | void populate_func_lookup_table(lua_State* L, int i_, char const* name_) |
573 | { | 546 | { |
574 | int const ctx_base = lua_gettop( L) + 1; | 547 | int const ctx_base = lua_gettop(L) + 1; |
575 | int const in_base = lua_absindex( L, _i); | 548 | int const in_base = lua_absindex(L, i_); |
576 | int start_depth = 0; | 549 | int start_depth = 0; |
577 | DEBUGSPEW_CODE( Universe* U = universe_get( L)); | 550 | DEBUGSPEW_CODE( Universe* U = universe_get( L)); |
578 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: populate_func_lookup_table('%s')\n" INDENT_END, L, name_ ? name_ : "NULL")); | 551 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: populate_func_lookup_table('%s')\n" INDENT_END, L, name_ ? name_ : "nullptr")); |
579 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); | 552 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed)); |
580 | STACK_GROW( L, 3); | 553 | STACK_GROW( L, 3); |
581 | STACK_CHECK( L, 0); | 554 | STACK_CHECK_START_REL(L, 0); |
582 | REGISTRY_GET( L, LOOKUP_REGKEY); // {} | 555 | LOOKUP_REGKEY.pushValue(L); // {} |
583 | STACK_MID( L, 1); | 556 | STACK_CHECK( L, 1); |
584 | ASSERT_L( lua_istable( L, -1)); | 557 | ASSERT_L( lua_istable( L, -1)); |
585 | if( lua_type( L, in_base) == LUA_TFUNCTION) // for example when a module is a simple function | 558 | if( lua_type( L, in_base) == LUA_TFUNCTION) // for example when a module is a simple function |
586 | { | 559 | { |
587 | name_ = name_ ? name_ : "NULL"; | 560 | name_ = name_ ? name_ : "nullptr"; |
588 | lua_pushvalue( L, in_base); // {} f | 561 | lua_pushvalue( L, in_base); // {} f |
589 | lua_pushstring( L, name_); // {} f _name | 562 | lua_pushstring( L, name_); // {} f _name |
590 | lua_rawset( L, -3); // {} | 563 | lua_rawset( L, -3); // {} |
@@ -595,10 +568,10 @@ void populate_func_lookup_table( lua_State* L, int _i, char const* name_) | |||
595 | } | 568 | } |
596 | else if( lua_type( L, in_base) == LUA_TTABLE) | 569 | else if( lua_type( L, in_base) == LUA_TTABLE) |
597 | { | 570 | { |
598 | lua_newtable( L); // {} {fqn} | 571 | lua_newtable(L); // {} {fqn} |
599 | if( name_) | 572 | if( name_) |
600 | { | 573 | { |
601 | STACK_MID( L, 2); | 574 | STACK_CHECK( L, 2); |
602 | lua_pushstring( L, name_); // {} {fqn} "name" | 575 | lua_pushstring( L, name_); // {} {fqn} "name" |
603 | // generate a name, and if we already had one name, keep whichever is the shorter | 576 | // generate a name, and if we already had one name, keep whichever is the shorter |
604 | lua_pushvalue( L, in_base); // {} {fqn} "name" t | 577 | lua_pushvalue( L, in_base); // {} {fqn} "name" t |
@@ -606,16 +579,16 @@ void populate_func_lookup_table( lua_State* L, int _i, char const* name_) | |||
606 | // don't forget to store the name at the bottom of the fqn stack | 579 | // don't forget to store the name at the bottom of the fqn stack |
607 | ++ start_depth; | 580 | ++ start_depth; |
608 | lua_rawseti( L, -2, start_depth); // {} {fqn} | 581 | lua_rawseti( L, -2, start_depth); // {} {fqn} |
609 | STACK_MID( L, 2); | 582 | STACK_CHECK( L, 2); |
610 | } | 583 | } |
611 | // retrieve the cache, create it if we haven't done it yet | 584 | // retrieve the cache, create it if we haven't done it yet |
612 | REGISTRY_GET( L, LOOKUPCACHE_REGKEY); // {} {fqn} {cache}? | 585 | LOOKUPCACHE_REGKEY.pushValue(L); // {} {fqn} {cache}? |
613 | if( lua_isnil( L, -1)) | 586 | if( lua_isnil( L, -1)) |
614 | { | 587 | { |
615 | lua_pop( L, 1); // {} {fqn} | 588 | lua_pop( L, 1); // {} {fqn} |
616 | lua_newtable( L); // {} {fqn} {cache} | 589 | lua_newtable( L); // {} {fqn} {cache} |
617 | REGISTRY_SET( L, LOOKUPCACHE_REGKEY, lua_pushvalue( L, -2)); | 590 | LOOKUPCACHE_REGKEY.setValue(L, [](lua_State* L) { lua_pushvalue(L, -2); }); |
618 | STACK_MID( L, 3); | 591 | STACK_CHECK( L, 3); |
619 | } | 592 | } |
620 | // process everything we find in that table, filling in lookup data for all functions and tables we see there | 593 | // process everything we find in that table, filling in lookup data for all functions and tables we see there |
621 | populate_func_lookup_table_recur( DEBUGSPEW_PARAM_COMMA( U) L, ctx_base, in_base, start_depth); | 594 | populate_func_lookup_table_recur( DEBUGSPEW_PARAM_COMMA( U) L, ctx_base, in_base, start_depth); |
@@ -626,19 +599,21 @@ void populate_func_lookup_table( lua_State* L, int _i, char const* name_) | |||
626 | lua_pop( L, 1); // | 599 | lua_pop( L, 1); // |
627 | (void) luaL_error( L, "unsupported module type %s", lua_typename( L, lua_type( L, in_base))); | 600 | (void) luaL_error( L, "unsupported module type %s", lua_typename( L, lua_type( L, in_base))); |
628 | } | 601 | } |
629 | STACK_END( L, 0); | 602 | STACK_CHECK( L, 0); |
630 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); | 603 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed)); |
631 | } | 604 | } |
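A hedged usage sketch (module name and opener are placeholders, not part of Lanes; the real call sites live elsewhere in the sources): a freshly required module table is handed to populate_func_lookup_table() so its functions become transferable by name. The function is stack-balanced, so the caller still owns the table afterwards.

    int luaopen_mymodule(lua_State* L);                  // placeholder module opener
    // ...
    luaL_requiref(L, "mymodule", luaopen_mymodule, 0);   // ... {mymodule}
    populate_func_lookup_table(L, -1, "mymodule");       // scans the table at -1
    lua_pop(L, 1);                                       // ...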
632 | 605 | ||
606 | // ################################################################################################# | ||
607 | |||
633 | /*---=== Inter-state copying ===---*/ | 608 | /*---=== Inter-state copying ===---*/ |
634 | 609 | ||
635 | // crc64/we of string "REG_MTID" generated at http://www.nitrxgen.net/hashgen/ | 610 | // crc64/we of string "REG_MTID" generated at http://www.nitrxgen.net/hashgen/ |
636 | static DECLARE_CONST_UNIQUE_KEY( REG_MTID, 0x2e68f9b4751584dc); | 611 | static constexpr UniqueKey REG_MTID{ 0x2e68f9b4751584dcull }; |
637 | 612 | ||
638 | /* | 613 | /* |
639 | * Get a unique ID for metatable at [i]. | 614 | * Get a unique ID for metatable at [i]. |
640 | */ | 615 | */ |
641 | static lua_Integer get_mt_id( Universe* U, lua_State* L, int i) | 616 | [[nodiscard]] static lua_Integer get_mt_id(Universe* U, lua_State* L, int i) |
642 | { | 617 | { |
643 | lua_Integer id; | 618 | lua_Integer id; |
644 | 619 | ||
@@ -646,20 +621,18 @@ static lua_Integer get_mt_id( Universe* U, lua_State* L, int i) | |||
646 | 621 | ||
647 | STACK_GROW( L, 3); | 622 | STACK_GROW( L, 3); |
648 | 623 | ||
649 | STACK_CHECK( L, 0); | 624 | STACK_CHECK_START_REL(L, 0); |
650 | push_registry_subtable( L, REG_MTID); // ... _R[REG_MTID] | 625 | push_registry_subtable( L, REG_MTID); // ... _R[REG_MTID] |
651 | lua_pushvalue( L, i); // ... _R[REG_MTID] {mt} | 626 | lua_pushvalue( L, i); // ... _R[REG_MTID] {mt} |
652 | lua_rawget( L, -2); // ... _R[REG_MTID] mtk? | 627 | lua_rawget( L, -2); // ... _R[REG_MTID] mtk? |
653 | 628 | ||
654 | id = lua_tointeger( L, -1); // 0 for nil | 629 | id = lua_tointeger( L, -1); // 0 for nil |
655 | lua_pop( L, 1); // ... _R[REG_MTID] | 630 | lua_pop( L, 1); // ... _R[REG_MTID] |
656 | STACK_MID( L, 1); | 631 | STACK_CHECK( L, 1); |
657 | 632 | ||
658 | if( id == 0) | 633 | if( id == 0) |
659 | { | 634 | { |
660 | MUTEX_LOCK( &U->mtid_lock); | 635 | id = U->next_mt_id.fetch_add(1, std::memory_order_relaxed); |
661 | id = ++ U->last_mt_id; | ||
662 | MUTEX_UNLOCK( &U->mtid_lock); | ||
663 | 636 | ||
664 | /* Create two-way references: id_uint <-> table | 637 | /* Create two-way references: id_uint <-> table |
665 | */ | 638 | */ |
@@ -673,41 +646,48 @@ static lua_Integer get_mt_id( Universe* U, lua_State* L, int i) | |||
673 | } | 646 | } |
674 | lua_pop( L, 1); // ... | 647 | lua_pop( L, 1); // ... |
675 | 648 | ||
676 | STACK_END( L, 0); | 649 | STACK_CHECK( L, 0); |
677 | 650 | ||
678 | return id; | 651 | return id; |
679 | } | 652 | } |
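The "two-way references" mentioned in the comment amount to two raw stores inside the _R[REG_MTID] subtable; a sketch of that step (stack layout illustrative, with _R[REG_MTID] at -1 and the metatable at absolute index i):

    lua_pushvalue(L, i);        // ... _R[REG_MTID] {mt}
    lua_pushinteger(L, id);     // ... _R[REG_MTID] {mt} id
    lua_rawset(L, -3);          // ... _R[REG_MTID]           _R[REG_MTID][{mt}] = id
    lua_pushinteger(L, id);     // ... _R[REG_MTID] id
    lua_pushvalue(L, i);        // ... _R[REG_MTID] id {mt}
    lua_rawset(L, -3);          // ... _R[REG_MTID]           _R[REG_MTID][id] = {mt}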
680 | 653 | ||
654 | // ################################################################################################# | ||
655 | |||
681 | // function sentinel used to transfer native functions from/to keeper states | 656 | // function sentinel used to transfer native functions from/to keeper states |
682 | static int func_lookup_sentinel( lua_State* L) | 657 | [[nodiscard]] static int func_lookup_sentinel(lua_State* L) |
683 | { | 658 | { |
684 | return luaL_error( L, "function lookup sentinel for %s, should never be called", lua_tostring( L, lua_upvalueindex( 1))); | 659 | return luaL_error(L, "function lookup sentinel for %s, should never be called", lua_tostring(L, lua_upvalueindex(1))); |
685 | } | 660 | } |
686 | 661 | ||
662 | // ################################################################################################# | ||
687 | 663 | ||
688 | // function sentinel used to transfer native table from/to keeper states | 664 | // function sentinel used to transfer native table from/to keeper states |
689 | static int table_lookup_sentinel( lua_State* L) | 665 | [[nodiscard]] static int table_lookup_sentinel(lua_State* L) |
690 | { | 666 | { |
691 | return luaL_error( L, "table lookup sentinel for %s, should never be called", lua_tostring( L, lua_upvalueindex( 1))); | 667 | return luaL_error(L, "table lookup sentinel for %s, should never be called", lua_tostring(L, lua_upvalueindex(1))); |
692 | } | 668 | } |
693 | 669 | ||
670 | // ################################################################################################# | ||
671 | |||
694 | // function sentinel used to transfer cloned full userdata from/to keeper states | 672 | // function sentinel used to transfer cloned full userdata from/to keeper states |
695 | static int userdata_clone_sentinel( lua_State* L) | 673 | [[nodiscard]] static int userdata_clone_sentinel(lua_State* L) |
696 | { | 674 | { |
697 | return luaL_error( L, "userdata clone sentinel for %s, should never be called", lua_tostring( L, lua_upvalueindex( 1))); | 675 | return luaL_error(L, "userdata clone sentinel for %s, should never be called", lua_tostring(L, lua_upvalueindex(1))); |
698 | } | 676 | } |
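A hypothetical helper showing the other half of the sentinel trick (the real logic lives in find_lookup_name() below): when a value comes back out of a keeper, the C function pointer identifies it as a sentinel and the fully qualified name travels as its first upvalue.

    static char const* sentinel_fqn(lua_State* L, int i)
    {
        lua_CFunction const f = lua_tocfunction(L, i);
        if (f != func_lookup_sentinel && f != table_lookup_sentinel && f != userdata_clone_sentinel)
            return nullptr;                       // not a sentinel: some other value
        if (lua_getupvalue(L, i, 1) == nullptr)   // ... v ... "f.q.n"
            return nullptr;
        return lua_tostring(L, -1);               // caller pops the pushed upvalue when done
    }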
699 | 677 | ||
678 | // ################################################################################################# | ||
679 | |||
700 | /* | 680 | /* |
701 | * retrieve the name of a function/table in the lookup database | 681 | * retrieve the name of a function/table in the lookup database |
702 | */ | 682 | */ |
703 | static char const* find_lookup_name( lua_State* L, uint_t i, LookupMode mode_, char const* upName_, size_t* len_) | 683 | [[nodiscard]] static char const* find_lookup_name(lua_State* L, int i, LookupMode mode_, char const* upName_, size_t* len_) |
704 | { | 684 | { |
705 | DEBUGSPEW_CODE( Universe* const U = universe_get( L)); | 685 | DEBUGSPEW_CODE( Universe* const U = universe_get( L)); |
706 | char const* fqn; | 686 | char const* fqn; |
707 | ASSERT_L( lua_isfunction( L, i) || lua_istable( L, i)); // ... v ... | 687 | ASSERT_L( lua_isfunction( L, i) || lua_istable( L, i)); // ... v ... |
708 | STACK_CHECK( L, 0); | 688 | STACK_CHECK_START_REL(L, 0); |
709 | STACK_GROW( L, 3); // up to 3 slots are necessary on error | 689 | STACK_GROW( L, 3); // up to 3 slots are necessary on error |
710 | if( mode_ == eLM_FromKeeper) | 690 | if (mode_ == LookupMode::FromKeeper) |
711 | { | 691 | { |
712 | lua_CFunction f = lua_tocfunction( L, i); // should *always* be func_lookup_sentinel or table_lookup_sentinel! | 692 | lua_CFunction f = lua_tocfunction( L, i); // should *always* be func_lookup_sentinel or table_lookup_sentinel! |
713 | if( f == func_lookup_sentinel || f == table_lookup_sentinel || f == userdata_clone_sentinel) | 693 | if( f == func_lookup_sentinel || f == table_lookup_sentinel || f == userdata_clone_sentinel) |
@@ -717,16 +697,16 @@ static char const* find_lookup_name( lua_State* L, uint_t i, LookupMode mode_, c | |||
717 | else | 697 | else |
718 | { | 698 | { |
719 | // if this is not a sentinel, this is some user-created table we wanted to lookup | 699 | // if this is not a sentinel, this is some user-created table we wanted to lookup |
720 | ASSERT_L( NULL == f && lua_istable( L, i)); | 700 | ASSERT_L(nullptr == f && lua_istable(L, i)); |
721 | // push anything that will convert to NULL string | 701 | // push anything that will convert to nullptr string |
722 | lua_pushnil( L); // ... v ... nil | 702 | lua_pushnil( L); // ... v ... nil |
723 | } | 703 | } |
724 | } | 704 | } |
725 | else | 705 | else |
726 | { | 706 | { |
727 | // fetch the name from the source state's lookup table | 707 | // fetch the name from the source state's lookup table |
728 | REGISTRY_GET( L, LOOKUP_REGKEY); // ... v ... {} | 708 | LOOKUP_REGKEY.pushValue(L); // ... v ... {} |
729 | STACK_MID( L, 1); | 709 | STACK_CHECK( L, 1); |
730 | ASSERT_L( lua_istable( L, -1)); | 710 | ASSERT_L( lua_istable( L, -1)); |
731 | lua_pushvalue( L, i); // ... v ... {} v | 711 | lua_pushvalue( L, i); // ... v ... {} v |
732 | lua_rawget( L, -2); // ... v ... {} "f.q.n" | 712 | lua_rawget( L, -2); // ... v ... {} "f.q.n" |
@@ -734,9 +714,9 @@ static char const* find_lookup_name( lua_State* L, uint_t i, LookupMode mode_, c | |||
734 | fqn = lua_tolstring( L, -1, len_); | 714 | fqn = lua_tolstring( L, -1, len_); |
735 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "function [C] %s \n" INDENT_END, fqn)); | 715 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "function [C] %s \n" INDENT_END, fqn)); |
736 | // popping doesn't invalidate the pointer since this is an interned string gotten from the lookup database | 716 | // popping doesn't invalidate the pointer since this is an interned string gotten from the lookup database |
737 | lua_pop( L, (mode_ == eLM_FromKeeper) ? 1 : 2); // ... v ... | 717 | lua_pop( L, (mode_ == LookupMode::FromKeeper) ? 1 : 2); // ... v ... |
738 | STACK_MID( L, 0); | 718 | STACK_CHECK( L, 0); |
739 | if( NULL == fqn && !lua_istable( L, i)) // raise an error if we try to send an unknown function (but not for tables) | 719 | if (nullptr == fqn && !lua_istable(L, i)) // raise an error if we try to send an unknown function (but not for tables) |
740 | { | 720 | { |
741 | char const *from, *typewhat, *what, *gotchaA, *gotchaB; | 721 | char const *from, *typewhat, *what, *gotchaA, *gotchaB; |
742 | // try to discover the name of the function we want to send | 722 | // try to discover the name of the function we want to send |
@@ -762,81 +742,83 @@ static char const* find_lookup_name( lua_State* L, uint_t i, LookupMode mode_, c | |||
762 | } | 742 | } |
763 | (void) luaL_error( L, "%s%s '%s' not found in %s origin transfer database.%s", typewhat, gotchaA, what, from ? from : "main", gotchaB); | 743 | (void) luaL_error( L, "%s%s '%s' not found in %s origin transfer database.%s", typewhat, gotchaA, what, from ? from : "main", gotchaB); |
764 | *len_ = 0; | 744 | *len_ = 0; |
765 | return NULL; | 745 | return nullptr; |
766 | } | 746 | } |
767 | STACK_END( L, 0); | 747 | STACK_CHECK( L, 0); |
768 | return fqn; | 748 | return fqn; |
769 | } | 749 | } |
770 | 750 | ||
751 | // ################################################################################################# | ||
771 | 752 | ||
772 | /* | 753 | /* |
773 | * Push a looked-up table, or nothing if we found nothing | 754 | * Push a looked-up table, or nothing if we found nothing |
774 | */ | 755 | */ |
775 | static bool_t lookup_table( lua_State* L2, lua_State* L, uint_t i, LookupMode mode_, char const* upName_) | 756 | [[nodiscard]] static bool lookup_table(Dest L2, Source L, int i, LookupMode mode_, char const* upName_) |
776 | { | 757 | { |
777 | // get the name of the table we want to send | 758 | // get the name of the table we want to send |
778 | size_t len; | 759 | size_t len; |
779 | char const* fqn = find_lookup_name( L, i, mode_, upName_, &len); | 760 | char const* fqn = find_lookup_name( L, i, mode_, upName_, &len); |
780 | if( NULL == fqn) // name not found, it is some user-created table | 761 | if (nullptr == fqn) // name not found, it is some user-created table |
781 | { | 762 | { |
782 | return FALSE; | 763 | return false; |
783 | } | 764 | } |
784 | // push the equivalent table in the destination's stack, retrieved from the lookup table | 765 | // push the equivalent table in the destination's stack, retrieved from the lookup table |
785 | STACK_CHECK( L2, 0); // L // L2 | 766 | STACK_CHECK_START_REL(L2, 0); // L // L2 |
786 | STACK_GROW( L2, 3); // up to 3 slots are necessary on error | 767 | STACK_GROW( L2, 3); // up to 3 slots are necessary on error |
787 | switch( mode_) | 768 | switch( mode_) |
788 | { | 769 | { |
789 | default: // shouldn't happen, in theory... | 770 | default: // shouldn't happen, in theory... |
790 | (void) luaL_error( L, "internal error: unknown lookup mode"); | 771 | (void) luaL_error( L, "internal error: unknown lookup mode"); |
791 | return FALSE; | 772 | return false; |
792 | 773 | ||
793 | case eLM_ToKeeper: | 774 | case LookupMode::ToKeeper: |
794 | // push a sentinel closure that holds the lookup name as upvalue | 775 | // push a sentinel closure that holds the lookup name as upvalue |
795 | lua_pushlstring( L2, fqn, len); // "f.q.n" | 776 | lua_pushlstring(L2, fqn, len); // "f.q.n" |
796 | lua_pushcclosure( L2, table_lookup_sentinel, 1); // f | 777 | lua_pushcclosure(L2, table_lookup_sentinel, 1); // f |
797 | break; | 778 | break; |
798 | 779 | ||
799 | case eLM_LaneBody: | 780 | case LookupMode::LaneBody: |
800 | case eLM_FromKeeper: | 781 | case LookupMode::FromKeeper: |
801 | REGISTRY_GET( L2, LOOKUP_REGKEY); // {} | 782 | LOOKUP_REGKEY.pushValue(L2); // {} |
802 | STACK_MID( L2, 1); | 783 | STACK_CHECK(L2, 1); |
803 | ASSERT_L( lua_istable( L2, -1)); | 784 | ASSERT_L(lua_istable(L2, -1)); |
804 | lua_pushlstring( L2, fqn, len); // {} "f.q.n" | 785 | lua_pushlstring(L2, fqn, len); // {} "f.q.n" |
805 | lua_rawget( L2, -2); // {} t | 786 | lua_rawget(L2, -2); // {} t |
806 | // we accept destination lookup failures in the case of transferring the Lanes body function (this will result in the source table being cloned instead) | 787 | // we accept destination lookup failures in the case of transferring the Lanes body function (this will result in the source table being cloned instead) |
807 | // but not when we extract something out of a keeper, as there is nothing to clone! | 788 | // but not when we extract something out of a keeper, as there is nothing to clone! |
808 | if( lua_isnil( L2, -1) && mode_ == eLM_LaneBody) | 789 | if (lua_isnil(L2, -1) && mode_ == LookupMode::LaneBody) |
809 | { | 790 | { |
810 | lua_pop( L2, 2); // | 791 | lua_pop(L2, 2); // |
811 | STACK_MID( L2, 0); | 792 | STACK_CHECK(L2, 0); |
812 | return FALSE; | 793 | return false; |
813 | } | 794 | } |
814 | else if( !lua_istable( L2, -1)) | 795 | else if( !lua_istable(L2, -1)) |
815 | { | 796 | { |
816 | char const* from, *to; | 797 | char const* from, *to; |
817 | lua_getglobal( L, "decoda_name"); // ... t ... decoda_name | 798 | lua_getglobal(L, "decoda_name"); // ... t ... decoda_name |
818 | from = lua_tostring( L, -1); | 799 | from = lua_tostring(L, -1); |
819 | lua_pop( L, 1); // ... t ... | 800 | lua_pop(L, 1); // ... t ... |
820 | lua_getglobal( L2, "decoda_name"); // {} t decoda_name | 801 | lua_getglobal(L2, "decoda_name"); // {} t decoda_name |
821 | to = lua_tostring( L2, -1); | 802 | to = lua_tostring( L2, -1); |
822 | lua_pop( L2, 1); // {} t | 803 | lua_pop(L2, 1); // {} t |
823 | // when mode_ == eLM_FromKeeper, L is a keeper state and L2 is not, therefore L2 is the state where we want to raise the error | 804 | // when mode_ == LookupMode::FromKeeper, L is a keeper state and L2 is not, therefore L2 is the state where we want to raise the error |
824 | (void) luaL_error( | 805 | (void) luaL_error( |
825 | (mode_ == eLM_FromKeeper) ? L2 : L | 806 | (mode_ == LookupMode::FromKeeper) ? L2 : L |
826 | , "INTERNAL ERROR IN %s: table '%s' not found in %s destination transfer database." | 807 | , "INTERNAL ERROR IN %s: table '%s' not found in %s destination transfer database." |
827 | , from ? from : "main" | 808 | , from ? from : "main" |
828 | , fqn | 809 | , fqn |
829 | , to ? to : "main" | 810 | , to ? to : "main" |
830 | ); | 811 | ); |
831 | return FALSE; | 812 | return false; |
832 | } | 813 | } |
833 | lua_remove( L2, -2); // t | 814 | lua_remove(L2, -2); // t |
834 | break; | 815 | break; |
835 | } | 816 | } |
836 | STACK_END( L2, 1); | 817 | STACK_CHECK( L2, 1); |
837 | return TRUE; | 818 | return true; |
838 | } | 819 | } |
839 | 820 | ||
821 | // ################################################################################################# | ||
840 | 822 | ||
841 | /* | 823 | /* |
842 | * Check if we've already copied the same table from 'L', and | 824 | * Check if we've already copied the same table from 'L', and |
@@ -845,121 +827,121 @@ static bool_t lookup_table( lua_State* L2, lua_State* L, uint_t i, LookupMode mo | |||
845 | * | 827 | * |
846 | * Always pushes a table to 'L2'. | 828 | * Always pushes a table to 'L2'. |
847 | * | 829 | * |
848 | * Returns TRUE if the table was cached (no need to fill it!); FALSE if | 830 | * Returns true if the table was cached (no need to fill it!); false if |
849 | * it's a virgin. | 831 | * it's a virgin. |
850 | */ | 832 | */ |
851 | static bool_t push_cached_table( lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i) | 833 | [[nodiscard]] static bool push_cached_table(Dest L2, int L2_cache_i, Source L, int i) |
852 | { | 834 | { |
853 | bool_t not_found_in_cache; // L2 | 835 | void const* p{ lua_topointer(L, i) }; |
854 | void const* p = lua_topointer( L, i); | ||
855 | 836 | ||
856 | ASSERT_L( L2_cache_i != 0); | 837 | ASSERT_L( L2_cache_i != 0); |
857 | STACK_GROW( L2, 3); | 838 | STACK_GROW( L2, 3); // L2 |
858 | STACK_CHECK( L2, 0); | 839 | STACK_CHECK_START_REL(L2, 0); |
859 | 840 | ||
860 | // We don't need to use the from state ('L') in ID since the life span | 841 | // We don't need to use the from state ('L') in ID since the life span |
861 | // is only for the duration of a copy (both states are locked). | 842 | // is only for the duration of a copy (both states are locked). |
862 | // push a light userdata uniquely representing the table | 843 | // push a light userdata uniquely representing the table |
863 | lua_pushlightuserdata( L2, (void*) p); // ... p | 844 | lua_pushlightuserdata(L2, const_cast<void*>(p)); // ... p |
864 | 845 | ||
865 | //fprintf( stderr, "<< ID: %s >>\n", lua_tostring( L2, -1)); | 846 | //fprintf( stderr, "<< ID: %s >>\n", lua_tostring( L2, -1)); |
866 | 847 | ||
867 | lua_rawget( L2, L2_cache_i); // ... {cached|nil} | 848 | lua_rawget( L2, L2_cache_i); // ... {cached|nil} |
868 | not_found_in_cache = lua_isnil( L2, -1); | 849 | bool const not_found_in_cache{ lua_isnil(L2, -1) }; |
869 | if( not_found_in_cache) | 850 | if( not_found_in_cache) |
870 | { | 851 | { |
871 | lua_pop( L2, 1); // ... | 852 | lua_pop( L2, 1); // ... |
872 | lua_newtable( L2); // ... {} | 853 | lua_newtable( L2); // ... {} |
873 | lua_pushlightuserdata( L2, (void*) p); // ... {} p | 854 | lua_pushlightuserdata(L2, const_cast<void*>(p)); // ... {} p |
874 | lua_pushvalue( L2, -2); // ... {} p {} | 855 | lua_pushvalue( L2, -2); // ... {} p {} |
875 | lua_rawset( L2, L2_cache_i); // ... {} | 856 | lua_rawset( L2, L2_cache_i); // ... {} |
876 | } | 857 | } |
877 | STACK_END( L2, 1); | 858 | STACK_CHECK( L2, 1); |
878 | ASSERT_L( lua_istable( L2, -1)); | 859 | ASSERT_L( lua_istable( L2, -1)); |
879 | return !not_found_in_cache; | 860 | return !not_found_in_cache; |
880 | } | 861 | } |
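How a caller typically consumes that return value (sketch only; parameter types simplified to plain lua_State* instead of the Source/Dest wrappers):

    if (!push_cached_table(L2, L2_cache_i, L, i))    // L2: ... {}
    {
        // virgin copy: walk the source table at L[i] and copy its keys, values
        // and metatable into the empty table now sitting at the top of L2
    }
    // otherwise the table pushed on L2 was already registered in the cache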
881 | 862 | ||
863 | // ################################################################################################# | ||
882 | 864 | ||
883 | /* | 865 | /* |
884 | * Return some name helping to identify an object | 866 | * Return some name helping to identify an object |
885 | */ | 867 | */ |
886 | static int discover_object_name_recur( lua_State* L, int shortest_, int depth_) | 868 | [[nodiscard]] static int discover_object_name_recur(lua_State* L, int shortest_, int depth_) |
887 | { | 869 | { |
888 | int const what = 1; // o "r" {c} {fqn} ... {?} | 870 | int const what = 1; // o "r" {c} {fqn} ... {?} |
889 | int const result = 2; | 871 | int const result = 2; |
890 | int const cache = 3; | 872 | int const cache = 3; |
891 | int const fqn = 4; | 873 | int const fqn = 4; |
892 | // no need to scan this table if the name we will discover is longer than one we already know | 874 | // no need to scan this table if the name we will discover is longer than one we already know |
893 | if( shortest_ <= depth_ + 1) | 875 | if (shortest_ <= depth_ + 1) |
894 | { | 876 | { |
895 | return shortest_; | 877 | return shortest_; |
896 | } | 878 | } |
897 | STACK_GROW( L, 3); | 879 | STACK_GROW(L, 3); |
898 | STACK_CHECK( L, 0); | 880 | STACK_CHECK_START_REL(L, 0); |
899 | // stack top contains the table to search in | 881 | // stack top contains the table to search in |
900 | lua_pushvalue( L, -1); // o "r" {c} {fqn} ... {?} {?} | 882 | lua_pushvalue(L, -1); // o "r" {c} {fqn} ... {?} {?} |
901 | lua_rawget( L, cache); // o "r" {c} {fqn} ... {?} nil/1 | 883 | lua_rawget(L, cache); // o "r" {c} {fqn} ... {?} nil/1 |
902 | // if table is already visited, we are done | 884 | // if table is already visited, we are done |
903 | if( !lua_isnil( L, -1)) | 885 | if( !lua_isnil(L, -1)) |
904 | { | 886 | { |
905 | lua_pop( L, 1); // o "r" {c} {fqn} ... {?} | 887 | lua_pop(L, 1); // o "r" {c} {fqn} ... {?} |
906 | return shortest_; | 888 | return shortest_; |
907 | } | 889 | } |
908 | // examined table is not in the cache, add it now | 890 | // examined table is not in the cache, add it now |
909 | lua_pop( L, 1); // o "r" {c} {fqn} ... {?} | 891 | lua_pop(L, 1); // o "r" {c} {fqn} ... {?} |
910 | lua_pushvalue( L, -1); // o "r" {c} {fqn} ... {?} {?} | 892 | lua_pushvalue(L, -1); // o "r" {c} {fqn} ... {?} {?} |
911 | lua_pushinteger( L, 1); // o "r" {c} {fqn} ... {?} {?} 1 | 893 | lua_pushinteger(L, 1); // o "r" {c} {fqn} ... {?} {?} 1 |
912 | lua_rawset( L, cache); // o "r" {c} {fqn} ... {?} | 894 | lua_rawset(L, cache); // o "r" {c} {fqn} ... {?} |
913 | // scan table contents | 895 | // scan table contents |
914 | lua_pushnil( L); // o "r" {c} {fqn} ... {?} nil | 896 | lua_pushnil(L); // o "r" {c} {fqn} ... {?} nil |
915 | while( lua_next( L, -2)) // o "r" {c} {fqn} ... {?} k v | 897 | while (lua_next(L, -2)) // o "r" {c} {fqn} ... {?} k v |
916 | { | 898 | { |
917 | //char const *const strKey = (lua_type( L, -2) == LUA_TSTRING) ? lua_tostring( L, -2) : NULL; // only for debugging | 899 | //char const *const strKey = (lua_type(L, -2) == LUA_TSTRING) ? lua_tostring(L, -2) : nullptr; // only for debugging |
918 | //lua_Number const numKey = (lua_type( L, -2) == LUA_TNUMBER) ? lua_tonumber( L, -2) : -6666; // only for debugging | 900 | //lua_Number const numKey = (lua_type(L, -2) == LUA_TNUMBER) ? lua_tonumber(L, -2) : -6666; // only for debugging |
919 | STACK_MID( L, 2); | 901 | STACK_CHECK(L, 2); |
920 | // append key name to fqn stack | 902 | // append key name to fqn stack |
921 | ++ depth_; | 903 | ++ depth_; |
922 | lua_pushvalue( L, -2); // o "r" {c} {fqn} ... {?} k v k | 904 | lua_pushvalue(L, -2); // o "r" {c} {fqn} ... {?} k v k |
923 | lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k v | 905 | lua_rawseti(L, fqn, depth_); // o "r" {c} {fqn} ... {?} k v |
924 | if( lua_rawequal( L, -1, what)) // is it what we are looking for? | 906 | if (lua_rawequal(L, -1, what)) // is it what we are looking for? |
925 | { | 907 | { |
926 | STACK_MID( L, 2); | 908 | STACK_CHECK(L, 2); |
927 | // update shortest name | 909 | // update shortest name |
928 | if( depth_ < shortest_) | 910 | if( depth_ < shortest_) |
929 | { | 911 | { |
930 | shortest_ = depth_; | 912 | shortest_ = depth_; |
931 | luaG_pushFQN( L, fqn, depth_, NULL); // o "r" {c} {fqn} ... {?} k v "fqn" | 913 | std::ignore = luaG_pushFQN(L, fqn, depth_, nullptr); // o "r" {c} {fqn} ... {?} k v "fqn" |
932 | lua_replace( L, result); // o "r" {c} {fqn} ... {?} k v | 914 | lua_replace(L, result); // o "r" {c} {fqn} ... {?} k v |
933 | } | 915 | } |
934 | // no need to search further at this level | 916 | // no need to search further at this level |
935 | lua_pop( L, 2); // o "r" {c} {fqn} ... {?} | 917 | lua_pop(L, 2); // o "r" {c} {fqn} ... {?} |
936 | STACK_MID( L, 0); | 918 | STACK_CHECK(L, 0); |
937 | break; | 919 | break; |
938 | } | 920 | } |
939 | switch( lua_type( L, -1)) // o "r" {c} {fqn} ... {?} k v | 921 | switch (lua_type(L, -1)) // o "r" {c} {fqn} ... {?} k v |
940 | { | 922 | { |
941 | default: // nil, boolean, light userdata, number and string aren't identifiable | 923 | default: // nil, boolean, light userdata, number and string aren't identifiable |
942 | break; | 924 | break; |
943 | 925 | ||
944 | case LUA_TTABLE: // o "r" {c} {fqn} ... {?} k {} | 926 | case LUA_TTABLE: // o "r" {c} {fqn} ... {?} k {} |
945 | STACK_MID( L, 2); | 927 | STACK_CHECK(L, 2); |
946 | shortest_ = discover_object_name_recur( L, shortest_, depth_); | 928 | shortest_ = discover_object_name_recur(L, shortest_, depth_); |
947 | // search in the table's metatable too | 929 | // search in the table's metatable too |
948 | if( lua_getmetatable( L, -1)) // o "r" {c} {fqn} ... {?} k {} {mt} | 930 | if (lua_getmetatable(L, -1)) // o "r" {c} {fqn} ... {?} k {} {mt} |
949 | { | 931 | { |
950 | if( lua_istable( L, -1)) | 932 | if( lua_istable(L, -1)) |
951 | { | 933 | { |
952 | ++ depth_; | 934 | ++ depth_; |
953 | lua_pushliteral( L, "__metatable"); // o "r" {c} {fqn} ... {?} k {} {mt} "__metatable" | 935 | lua_pushliteral(L, "__metatable"); // o "r" {c} {fqn} ... {?} k {} {mt} "__metatable" |
954 | lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k {} {mt} | 936 | lua_rawseti(L, fqn, depth_); // o "r" {c} {fqn} ... {?} k {} {mt} |
955 | shortest_ = discover_object_name_recur( L, shortest_, depth_); | 937 | shortest_ = discover_object_name_recur(L, shortest_, depth_); |
956 | lua_pushnil( L); // o "r" {c} {fqn} ... {?} k {} {mt} nil | 938 | lua_pushnil(L); // o "r" {c} {fqn} ... {?} k {} {mt} nil |
957 | lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k {} {mt} | 939 | lua_rawseti(L, fqn, depth_); // o "r" {c} {fqn} ... {?} k {} {mt} |
958 | -- depth_; | 940 | -- depth_; |
959 | } | 941 | } |
960 | lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k {} | 942 | lua_pop(L, 1); // o "r" {c} {fqn} ... {?} k {} |
961 | } | 943 | } |
962 | STACK_MID( L, 2); | 944 | STACK_CHECK(L, 2); |
963 | break; | 945 | break; |
964 | 946 | ||
965 | case LUA_TTHREAD: // o "r" {c} {fqn} ... {?} k T | 947 | case LUA_TTHREAD: // o "r" {c} {fqn} ... {?} k T |
@@ -967,64 +949,65 @@ static int discover_object_name_recur( lua_State* L, int shortest_, int depth_) | |||
967 | break; | 949 | break; |
968 | 950 | ||
969 | case LUA_TUSERDATA: // o "r" {c} {fqn} ... {?} k U | 951 | case LUA_TUSERDATA: // o "r" {c} {fqn} ... {?} k U |
970 | STACK_MID( L, 2); | 952 | STACK_CHECK(L, 2); |
971 | // search in the object's metatable (some modules are built that way) | 953 | // search in the object's metatable (some modules are built that way) |
972 | if( lua_getmetatable( L, -1)) // o "r" {c} {fqn} ... {?} k U {mt} | 954 | if (lua_getmetatable(L, -1)) // o "r" {c} {fqn} ... {?} k U {mt} |
973 | { | 955 | { |
974 | if( lua_istable( L, -1)) | 956 | if (lua_istable(L, -1)) |
975 | { | 957 | { |
976 | ++ depth_; | 958 | ++ depth_; |
977 | lua_pushliteral( L, "__metatable"); // o "r" {c} {fqn} ... {?} k U {mt} "__metatable" | 959 | lua_pushliteral(L, "__metatable"); // o "r" {c} {fqn} ... {?} k U {mt} "__metatable" |
978 | lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k U {mt} | 960 | lua_rawseti(L, fqn, depth_); // o "r" {c} {fqn} ... {?} k U {mt} |
979 | shortest_ = discover_object_name_recur( L, shortest_, depth_); | 961 | shortest_ = discover_object_name_recur(L, shortest_, depth_); |
980 | lua_pushnil( L); // o "r" {c} {fqn} ... {?} k U {mt} nil | 962 | lua_pushnil(L); // o "r" {c} {fqn} ... {?} k U {mt} nil |
981 | lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k U {mt} | 963 | lua_rawseti(L, fqn, depth_); // o "r" {c} {fqn} ... {?} k U {mt} |
982 | -- depth_; | 964 | -- depth_; |
983 | } | 965 | } |
984 | lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k U | 966 | lua_pop(L, 1); // o "r" {c} {fqn} ... {?} k U |
985 | } | 967 | } |
986 | STACK_MID( L, 2); | 968 | STACK_CHECK(L, 2); |
987 | // search in the object's uservalues | 969 | // search in the object's uservalues |
988 | { | 970 | { |
989 | int uvi = 1; | 971 | int uvi = 1; |
990 | while( lua_getiuservalue( L, -1, uvi) != LUA_TNONE) // o "r" {c} {fqn} ... {?} k U {u} | 972 | while (lua_getiuservalue(L, -1, uvi) != LUA_TNONE) // o "r" {c} {fqn} ... {?} k U {u} |
991 | { | 973 | { |
992 | if( lua_istable( L, -1)) // if it is a table, look inside | 974 | if( lua_istable(L, -1)) // if it is a table, look inside |
993 | { | 975 | { |
994 | ++ depth_; | 976 | ++ depth_; |
995 | lua_pushliteral( L, "uservalue"); // o "r" {c} {fqn} ... {?} k v {u} "uservalue" | 977 | lua_pushliteral(L, "uservalue"); // o "r" {c} {fqn} ... {?} k v {u} "uservalue" |
996 | lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k v {u} | 978 | lua_rawseti(L, fqn, depth_); // o "r" {c} {fqn} ... {?} k v {u} |
997 | shortest_ = discover_object_name_recur( L, shortest_, depth_); | 979 | shortest_ = discover_object_name_recur(L, shortest_, depth_); |
998 | lua_pushnil( L); // o "r" {c} {fqn} ... {?} k v {u} nil | 980 | lua_pushnil(L); // o "r" {c} {fqn} ... {?} k v {u} nil |
999 | lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k v {u} | 981 | lua_rawseti(L, fqn, depth_); // o "r" {c} {fqn} ... {?} k v {u} |
1000 | -- depth_; | 982 | -- depth_; |
1001 | } | 983 | } |
1002 | lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k U | 984 | lua_pop(L, 1); // o "r" {c} {fqn} ... {?} k U |
1003 | ++ uvi; | 985 | ++ uvi; |
1004 | } | 986 | } |
1005 | // when lua_getiuservalue() returned LUA_TNONE, it pushed a nil. pop it now | 987 | // when lua_getiuservalue() returned LUA_TNONE, it pushed a nil. pop it now |
1006 | lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k U | 988 | lua_pop(L, 1); // o "r" {c} {fqn} ... {?} k U |
1007 | } | 989 | } |
1008 | STACK_MID( L, 2); | 990 | STACK_CHECK(L, 2); |
1009 | break; | 991 | break; |
1010 | } | 992 | } |
1011 | // make ready for next iteration | 993 | // make ready for next iteration |
1012 | lua_pop( L, 1); // o "r" {c} {fqn} ... {?} k | 994 | lua_pop(L, 1); // o "r" {c} {fqn} ... {?} k |
1013 | // remove name from fqn stack | 995 | // remove name from fqn stack |
1014 | lua_pushnil( L); // o "r" {c} {fqn} ... {?} k nil | 996 | lua_pushnil(L); // o "r" {c} {fqn} ... {?} k nil |
1015 | lua_rawseti( L, fqn, depth_); // o "r" {c} {fqn} ... {?} k | 997 | lua_rawseti(L, fqn, depth_); // o "r" {c} {fqn} ... {?} k |
1016 | STACK_MID( L, 1); | 998 | STACK_CHECK(L, 1); |
1017 | -- depth_; | 999 | -- depth_; |
1018 | } // o "r" {c} {fqn} ... {?} | 1000 | } // o "r" {c} {fqn} ... {?} |
1019 | STACK_MID( L, 0); | 1001 | STACK_CHECK(L, 0); |
1020 | // remove the visited table from the cache, in case a shorter path to the searched object exists | 1002 | // remove the visited table from the cache, in case a shorter path to the searched object exists |
1021 | lua_pushvalue( L, -1); // o "r" {c} {fqn} ... {?} {?} | 1003 | lua_pushvalue(L, -1); // o "r" {c} {fqn} ... {?} {?} |
1022 | lua_pushnil( L); // o "r" {c} {fqn} ... {?} {?} nil | 1004 | lua_pushnil(L); // o "r" {c} {fqn} ... {?} {?} nil |
1023 | lua_rawset( L, cache); // o "r" {c} {fqn} ... {?} | 1005 | lua_rawset(L, cache); // o "r" {c} {fqn} ... {?} |
1024 | STACK_END( L, 0); | 1006 | STACK_CHECK(L, 0); |
1025 | return shortest_; | 1007 | return shortest_; |
1026 | } | 1008 | } |
1027 | 1009 | ||
1010 | // ################################################################################################# | ||
1028 | 1011 | ||
1029 | /* | 1012 | /* |
1030 | * "type", "name" = lanes.nameof( o) | 1013 | * "type", "name" = lanes.nameof( o) |
@@ -1046,7 +1029,7 @@ int luaG_nameof( lua_State* L) | |||
1046 | } | 1029 | } |
1047 | 1030 | ||
1048 | STACK_GROW( L, 4); | 1031 | STACK_GROW( L, 4); |
1049 | STACK_CHECK( L, 0); | 1032 | STACK_CHECK_START_REL(L, 0); |
1050 | // this slot will contain the shortest name we found when we are done | 1033 | // this slot will contain the shortest name we found when we are done |
1051 | lua_pushnil( L); // o nil | 1034 | lua_pushnil( L); // o nil |
1052 | // push a cache that will contain all already visited tables | 1035 | // push a cache that will contain all already visited tables |
@@ -1067,23 +1050,24 @@ int luaG_nameof( lua_State* L) | |||
1067 | (void) discover_object_name_recur( L, 6666, 1); | 1050 | (void) discover_object_name_recur( L, 6666, 1); |
1068 | } | 1051 | } |
1069 | lua_pop( L, 3); // o "result" | 1052 | lua_pop( L, 3); // o "result" |
1070 | STACK_END( L, 1); | 1053 | STACK_CHECK( L, 1); |
1071 | lua_pushstring( L, luaL_typename( L, 1)); // o "result" "type" | 1054 | lua_pushstring( L, luaL_typename( L, 1)); // o "result" "type" |
1072 | lua_replace( L, -3); // "type" "result" | 1055 | lua_replace( L, -3); // "type" "result" |
1073 | return 2; | 1056 | return 2; |
1074 | } | 1057 | } |
1075 | 1058 | ||
1059 | // ################################################################################################# | ||
1076 | 1060 | ||
1077 | /* | 1061 | /* |
1078 | * Push a looked-up native/LuaJIT function. | 1062 | * Push a looked-up native/LuaJIT function. |
1079 | */ | 1063 | */ |
1080 | static void lookup_native_func( lua_State* L2, lua_State* L, uint_t i, LookupMode mode_, char const* upName_) | 1064 | static void lookup_native_func(lua_State* L2, lua_State* L, int i, LookupMode mode_, char const* upName_) |
1081 | { | 1065 | { |
1082 | // get the name of the function we want to send | 1066 | // get the name of the function we want to send |
1083 | size_t len; | 1067 | size_t len; |
1084 | char const* fqn = find_lookup_name( L, i, mode_, upName_, &len); | 1068 | char const* fqn = find_lookup_name( L, i, mode_, upName_, &len); |
1085 | // push the equivalent function in the destination's stack, retrieved from the lookup table | 1069 | // push the equivalent function in the destination's stack, retrieved from the lookup table |
1086 | STACK_CHECK( L2, 0); // L // L2 | 1070 | STACK_CHECK_START_REL(L2, 0); // L // L2 |
1087 | STACK_GROW( L2, 3); // up to 3 slots are necessary on error | 1071 | STACK_GROW( L2, 3); // up to 3 slots are necessary on error |
1088 | switch( mode_) | 1072 | switch( mode_) |
1089 | { | 1073 | { |
@@ -1091,16 +1075,16 @@ static void lookup_native_func( lua_State* L2, lua_State* L, uint_t i, LookupMod | |||
1091 | (void) luaL_error( L, "internal error: unknown lookup mode"); | 1075 | (void) luaL_error( L, "internal error: unknown lookup mode"); |
1092 | return; | 1076 | return; |
1093 | 1077 | ||
1094 | case eLM_ToKeeper: | 1078 | case LookupMode::ToKeeper: |
1095 | // push a sentinel closure that holds the lookup name as upvalue | 1079 | // push a sentinel closure that holds the lookup name as upvalue |
1096 | lua_pushlstring( L2, fqn, len); // "f.q.n" | 1080 | lua_pushlstring( L2, fqn, len); // "f.q.n" |
1097 | lua_pushcclosure( L2, func_lookup_sentinel, 1); // f | 1081 | lua_pushcclosure( L2, func_lookup_sentinel, 1); // f |
1098 | break; | 1082 | break; |
1099 | 1083 | ||
1100 | case eLM_LaneBody: | 1084 | case LookupMode::LaneBody: |
1101 | case eLM_FromKeeper: | 1085 | case LookupMode::FromKeeper: |
1102 | REGISTRY_GET( L2, LOOKUP_REGKEY); // {} | 1086 | LOOKUP_REGKEY.pushValue(L2); // {} |
1103 | STACK_MID( L2, 1); | 1087 | STACK_CHECK( L2, 1); |
1104 | ASSERT_L( lua_istable( L2, -1)); | 1088 | ASSERT_L( lua_istable( L2, -1)); |
1105 | lua_pushlstring( L2, fqn, len); // {} "f.q.n" | 1089 | lua_pushlstring( L2, fqn, len); // {} "f.q.n" |
1106 | lua_rawget( L2, -2); // {} f | 1090 | lua_rawget( L2, -2); // {} f |
@@ -1115,9 +1099,9 @@ static void lookup_native_func( lua_State* L2, lua_State* L, uint_t i, LookupMod | |||
1115 | lua_getglobal( L2, "decoda_name"); // {} f decoda_name | 1099 | lua_getglobal( L2, "decoda_name"); // {} f decoda_name |
1116 | to = lua_tostring( L2, -1); | 1100 | to = lua_tostring( L2, -1); |
1117 | lua_pop( L2, 1); // {} f | 1101 | lua_pop( L2, 1); // {} f |
1118 | // when mode_ == eLM_FromKeeper, L is a keeper state and L2 is not, therefore L2 is the state where we want to raise the error | 1102 | // when mode_ == LookupMode::FromKeeper, L is a keeper state and L2 is not, therefore L2 is the state where we want to raise the error |
1119 | (void) luaL_error( | 1103 | (void) luaL_error( |
1120 | (mode_ == eLM_FromKeeper) ? L2 : L | 1104 | (mode_ == LookupMode::FromKeeper) ? L2 : L |
1121 | , "%s%s: function '%s' not found in %s destination transfer database." | 1105 | , "%s%s: function '%s' not found in %s destination transfer database." |
1122 | , lua_isnil( L2, -1) ? "" : "INTERNAL ERROR IN " | 1106 | , lua_isnil( L2, -1) ? "" : "INTERNAL ERROR IN " |
1123 | , from ? from : "main" | 1107 | , from ? from : "main" |
@@ -1130,13 +1114,13 @@ static void lookup_native_func( lua_State* L2, lua_State* L, uint_t i, LookupMod | |||
1130 | break; | 1114 | break; |
1131 | 1115 | ||
1132 | /* keep it in case I need it someday, who knows... | 1116 | /* keep it in case I need it someday, who knows... |
1133 | case eLM_RawFunctions: | 1117 | case LookupMode::RawFunctions: |
1134 | { | 1118 | { |
1135 | int n; | 1119 | int n; |
1136 | char const* upname; | 1120 | char const* upname; |
1137 | lua_CFunction f = lua_tocfunction( L, i); | 1121 | lua_CFunction f = lua_tocfunction( L, i); |
1138 | // copy upvalues | 1122 | // copy upvalues |
1139 | for( n = 0; (upname = lua_getupvalue( L, i, 1 + n)) != NULL; ++ n) | 1123 | for( n = 0; (upname = lua_getupvalue( L, i, 1 + n)) != nullptr; ++ n) |
1140 | { | 1124 | { |
1141 | luaG_inter_move( U, L, L2, 1, mode_); // [up[,up ...]] | 1125 | luaG_inter_move( U, L, L2, 1, mode_); // [up[,up ...]] |
1142 | } | 1126 | } |
@@ -1145,9 +1129,10 @@ static void lookup_native_func( lua_State* L2, lua_State* L, uint_t i, LookupMod | |||
1145 | break; | 1129 | break; |
1146 | */ | 1130 | */ |
1147 | } | 1131 | } |
1148 | STACK_END( L2, 1); | 1132 | STACK_CHECK( L2, 1); |
1149 | } | 1133 | } |
1150 | 1134 | ||
1135 | // ################################################################################################# | ||
1151 | 1136 | ||
1152 | /* | 1137 | /* |
1153 | * Copy a function over, which has not been found in the cache. | 1138 | * Copy a function over, which has not been found in the cache. |
@@ -1171,17 +1156,19 @@ static char const* lua_type_names[] = | |||
1171 | }; | 1156 | }; |
1172 | static char const* vt_names[] = | 1157 | static char const* vt_names[] = |
1173 | { | 1158 | { |
1174 | "VT_NORMAL" | 1159 | "VT::NORMAL" |
1175 | , "VT_KEY" | 1160 | , "VT::KEY" |
1176 | , "VT_METATABLE" | 1161 | , "VT::METATABLE" |
1177 | }; | 1162 | }; |
1178 | #endif // USE_DEBUG_SPEW() | 1163 | #endif // USE_DEBUG_SPEW() |
1179 | 1164 | ||
1165 | // ################################################################################################# | ||
1166 | |||
1180 | // Lua 5.4.3 style of dumping (see lstrlib.c) | 1167 | // Lua 5.4.3 style of dumping (see lstrlib.c) |
1181 | // we have to do it that way because we can't unbalance the stack between buffer operations | 1168 | // we have to do it that way because we can't unbalance the stack between buffer operations |
1182 | // namely, this means we can't push a function on top of the stack *after* we initialize the buffer! | 1169 | // namely, this means we can't push a function on top of the stack *after* we initialize the buffer! |
1183 | // luckily, this also works with earlier Lua versions | 1170 | // luckily, this also works with earlier Lua versions |
1184 | static int buf_writer( lua_State* L, void const* b, size_t size, void* ud) | 1171 | [[nodiscard]] static int buf_writer(lua_State* L, void const* b, size_t size, void* ud) |
1185 | { | 1172 | { |
1186 | luaL_Buffer* B = (luaL_Buffer*) ud; | 1173 | luaL_Buffer* B = (luaL_Buffer*) ud; |
1187 | if( !B->L) | 1174 | if( !B->L) |
@@ -1192,20 +1179,22 @@ static int buf_writer( lua_State* L, void const* b, size_t size, void* ud) | |||
1192 | return 0; | 1179 | return 0; |
1193 | } | 1180 | } |
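The buf_writer() above only appends whatever chunk lua_dump() hands it into a luaL_Buffer, initializing the buffer lazily on the first call so that the function being dumped is still on top of the stack when luaL_buffinit() runs. A minimal sketch of how such a writer is driven (illustrative only, not the project's copy_func(); the four-argument lua_dump() of Lua 5.3+ is assumed, older versions omit the strip parameter):

    static int sketch_writer(lua_State* L, void const* b, size_t size_, void* ud_)
    {
        luaL_Buffer* const B = static_cast<luaL_Buffer*>(ud_);
        if (!B->L) // first chunk: only now is it safe to initialize the buffer
        {
            luaL_buffinit(L, B);
        }
        luaL_addlstring(B, static_cast<char const*>(b), size_);
        return 0;
    }

    // with the function to serialize on top of the stack of L:
    static void sketch_dump_top(lua_State* L)
    {
        luaL_Buffer B;
        B.L = nullptr; // buf_writer()/sketch_writer() initializes it on the first chunk
        if (lua_dump(L, sketch_writer, &B, 0) != 0)
        {
            luaL_error(L, "internal error: function dump failed"); // doesn't return
        }
        luaL_pushresult(&B); // the accumulated bytecode string ends up on top, above the function
    }
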
1194 | 1181 | ||
1195 | static void copy_func( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, LookupMode mode_, char const* upName_) | 1182 | // ################################################################################################# |
1183 | |||
1184 | static void copy_func(Universe* U, Dest L2, int L2_cache_i, Source L, int i, LookupMode mode_, char const* upName_) | ||
1196 | { | 1185 | { |
1197 | int n, needToPush; | 1186 | int n, needToPush; |
1198 | luaL_Buffer B; | 1187 | luaL_Buffer B; |
1199 | B.L = NULL; | 1188 | B.L = nullptr; |
1200 | 1189 | ||
1201 | ASSERT_L( L2_cache_i != 0); // ... {cache} ... p | 1190 | ASSERT_L( L2_cache_i != 0); // ... {cache} ... p |
1202 | STACK_GROW( L, 2); | 1191 | STACK_GROW( L, 2); |
1203 | STACK_CHECK( L, 0); | 1192 | STACK_CHECK_START_REL(L, 0); |
1204 | 1193 | ||
1205 | 1194 | ||
1206 | // 'lua_dump()' needs the function at top of stack | 1195 | // 'lua_dump()' needs the function at top of stack |
1207 | // if already on top of the stack, no need to push again | 1196 | // if already on top of the stack, no need to push again |
1208 | needToPush = (i != (uint_t)lua_gettop( L)); | 1197 | needToPush = (i != lua_gettop( L)); |
1209 | if( needToPush) | 1198 | if( needToPush) |
1210 | { | 1199 | { |
1211 | lua_pushvalue( L, i); // ... f | 1200 | lua_pushvalue( L, i); // ... f |
@@ -1232,7 +1221,7 @@ static void copy_func( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* | |||
1232 | 1221 | ||
1233 | // transfer the bytecode, then the upvalues, to create a similar closure | 1222 | // transfer the bytecode, then the upvalues, to create a similar closure |
1234 | { | 1223 | { |
1235 | char const* name = NULL; | 1224 | char const* name = nullptr; |
1236 | 1225 | ||
1237 | #if LOG_FUNC_INFO | 1226 | #if LOG_FUNC_INFO |
1238 | // "To get information about a function you push it onto the | 1227 | // "To get information about a function you push it onto the |
@@ -1244,7 +1233,7 @@ static void copy_func( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* | |||
1244 | // fills 'name' 'namewhat' and 'linedefined', pops function | 1233 | // fills 'name' 'namewhat' and 'linedefined', pops function |
1245 | lua_getinfo( L, ">nS", &ar); // ... b | 1234 | lua_getinfo( L, ">nS", &ar); // ... b |
1246 | name = ar.namewhat; | 1235 | name = ar.namewhat; |
1247 | fprintf( stderr, INDENT_BEGIN "FNAME: %s @ %d\n", i, s_indent, ar.short_src, ar.linedefined); // just gives NULL | 1236 | fprintf( stderr, INDENT_BEGIN "FNAME: %s @ %d\n", i, s_indent, ar.short_src, ar.linedefined); // just gives nullptr |
1248 | } | 1237 | } |
1249 | #endif // LOG_FUNC_INFO | 1238 | #endif // LOG_FUNC_INFO |
1250 | { | 1239 | { |
@@ -1276,7 +1265,7 @@ static void copy_func( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* | |||
1276 | // cache[p] = function | 1265 | // cache[p] = function |
1277 | lua_rawset( L2, L2_cache_i); // ... {cache} ... function | 1266 | lua_rawset( L2, L2_cache_i); // ... {cache} ... function |
1278 | } | 1267 | } |
1279 | STACK_MID( L, 0); | 1268 | STACK_CHECK( L, 0); |
1280 | 1269 | ||
1281 | /* push over any upvalues; references to this function will come from the | 1270 | /* push over any upvalues; references to this function will come from the |
1282 | * cache so we don't end up in an eternal loop. | 1271 | * cache so we don't end up in an eternal loop. |
@@ -1291,7 +1280,7 @@ static void copy_func( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* | |||
1291 | // -> if we encounter an upvalue equal to the global table in the source, bind it to the destination's global table | 1280 | // -> if we encounter an upvalue equal to the global table in the source, bind it to the destination's global table |
1292 | lua_pushglobaltable( L); // ... _G | 1281 | lua_pushglobaltable( L); // ... _G |
1293 | #endif // LUA_VERSION_NUM | 1282 | #endif // LUA_VERSION_NUM |
1294 | for( n = 0; (upname = lua_getupvalue( L, i, 1 + n)) != NULL; ++ n) | 1283 | for (n = 0; (upname = lua_getupvalue(L, i, 1 + n)) != nullptr; ++n) |
1295 | { // ... _G up[n] | 1284 | { // ... _G up[n] |
1296 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "UPNAME[%d]: %s -> " INDENT_END, n, upname)); | 1285 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "UPNAME[%d]: %s -> " INDENT_END, n, upname)); |
1297 | #if LUA_VERSION_NUM >= 502 | 1286 | #if LUA_VERSION_NUM >= 502 |
@@ -1304,7 +1293,7 @@ static void copy_func( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* | |||
1304 | #endif // LUA_VERSION_NUM | 1293 | #endif // LUA_VERSION_NUM |
1305 | { | 1294 | { |
1306 | DEBUGSPEW_CODE( fprintf( stderr, "copying value\n")); | 1295 | DEBUGSPEW_CODE( fprintf( stderr, "copying value\n")); |
1307 | if( !inter_copy_one( U, L2, L2_cache_i, L, lua_gettop( L), VT_NORMAL, mode_, upname)) // ... {cache} ... function <upvalues> | 1296 | if( !inter_copy_one( U, L2, L2_cache_i, L, lua_gettop( L), VT::NORMAL, mode_, upname)) // ... {cache} ... function <upvalues> |
1308 | { | 1297 | { |
1309 | luaL_error( L, "Cannot copy upvalue type '%s'", luaL_typename( L, -1)); | 1298 | luaL_error( L, "Cannot copy upvalue type '%s'", luaL_typename( L, -1)); |
1310 | } | 1299 | } |
@@ -1317,7 +1306,7 @@ static void copy_func( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* | |||
1317 | } | 1306 | } |
1318 | // L2: function + 'n' upvalues (>=0) | 1307 | // L2: function + 'n' upvalues (>=0) |
1319 | 1308 | ||
1320 | STACK_MID( L, 0); | 1309 | STACK_CHECK( L, 0); |
1321 | 1310 | ||
1322 | // Set upvalues (originally set to 'nil' by 'lua_load') | 1311 | // Set upvalues (originally set to 'nil' by 'lua_load') |
1323 | { | 1312 | { |
@@ -1335,19 +1324,21 @@ static void copy_func( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* | |||
1335 | // with the function at the top of the stack // ... {cache} ... function | 1324 | // with the function at the top of the stack // ... {cache} ... function |
1336 | } | 1325 | } |
1337 | } | 1326 | } |
1338 | STACK_END( L, 0); | 1327 | STACK_CHECK( L, 0); |
1339 | } | 1328 | } |
1340 | 1329 | ||
1330 | // ################################################################################################# | ||
1331 | |||
1341 | /* | 1332 | /* |
1342 | * Check if we've already copied the same function from 'L', and reuse the old | 1333 | * Check if we've already copied the same function from 'L', and reuse the old |
1343 | * copy. | 1334 | * copy. |
1344 | * | 1335 | * |
1345 | * Always pushes a function to 'L2'. | 1336 | * Always pushes a function to 'L2'. |
1346 | */ | 1337 | */ |
1347 | static void copy_cached_func( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, LookupMode mode_, char const* upName_) | 1338 | static void copy_cached_func(Universe* U, Dest L2, int L2_cache_i, Source L, int i, LookupMode mode_, char const* upName_) |
1348 | { | 1339 | { |
1349 | FuncSubType funcSubType; | 1340 | FuncSubType funcSubType; |
1350 | /*lua_CFunction cfunc =*/ luaG_tocfunction( L, i, &funcSubType); // NULL for LuaJIT-fast && bytecode functions | 1341 | std::ignore = luaG_tocfunction(L, i, &funcSubType); // nullptr for LuaJIT-fast && bytecode functions |
1351 | if( funcSubType == FST_Bytecode) | 1342 | if( funcSubType == FST_Bytecode) |
1352 | { | 1343 | { |
1353 | void* const aspointer = (void*)lua_topointer( L, i); | 1344 | void* const aspointer = (void*)lua_topointer( L, i); |
@@ -1358,7 +1349,7 @@ static void copy_cached_func( Universe* U, lua_State* L2, uint_t L2_cache_i, lua | |||
1358 | 1349 | ||
1359 | // L2_cache[id_str]= function | 1350 | // L2_cache[id_str]= function |
1360 | // | 1351 | // |
1361 | STACK_CHECK( L2, 0); | 1352 | STACK_CHECK_START_REL(L2, 0); |
1362 | 1353 | ||
1363 | // We don't need to use the from state ('L') in ID since the life span | 1354 | // We don't need to use the from state ('L') in ID since the life span |
1364 | // is only for the duration of a copy (both states are locked). | 1355 | // is only for the duration of a copy (both states are locked). |
@@ -1386,7 +1377,7 @@ static void copy_cached_func( Universe* U, lua_State* L2, uint_t L2_cache_i, lua | |||
1386 | { | 1377 | { |
1387 | lua_remove( L2, -2); // ... {cache} ... function | 1378 | lua_remove( L2, -2); // ... {cache} ... function |
1388 | } | 1379 | } |
1389 | STACK_END( L2, 1); | 1380 | STACK_CHECK( L2, 1); |
1390 | ASSERT_L( lua_isfunction( L2, -1)); | 1381 | ASSERT_L( lua_isfunction( L2, -1)); |
1391 | } | 1382 | } |
1392 | else // function is native/LuaJIT: no need to cache | 1383 | else // function is native/LuaJIT: no need to cache |
@@ -1397,62 +1388,65 @@ static void copy_cached_func( Universe* U, lua_State* L2, uint_t L2_cache_i, lua | |||
1397 | } | 1388 | } |
1398 | } | 1389 | } |
1399 | 1390 | ||
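As the comment above says, a bytecode closure is copied at most once per transfer: the cache table sitting at 'L2_cache_i' in the destination state is keyed by an id derived from lua_topointer() of the source function (the id_str mentioned in the comment), so later occurrences of the same function reuse the copy already made. A rough sketch of the lookup half of that contract, with the pointer used directly as a light userdata key for simplicity (illustrative names, not the project's helpers):

    // returns true and leaves the cached copy on top of L2 if the function identified by
    // 'aspointer' was already transferred during this copy operation
    static bool sketch_push_cached_function(lua_State* L2, int L2_cache_i, void const* aspointer)
    {
        lua_pushlightuserdata(L2, const_cast<void*>(aspointer)); // ... {cache} ... key
        lua_rawget(L2, L2_cache_i);                              // ... {cache} ... func|nil
        if (lua_isnil(L2, -1))
        {
            lua_pop(L2, 1);                                      // ... {cache} ...
            return false; // caller copies the function, then stores it under the same key
        }
        return true;
    }
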
1400 | static bool_t push_cached_metatable( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, enum eLookupMode mode_, char const* upName_) | 1391 | // ################################################################################################# |
1392 | |||
1393 | [[nodiscard]] static bool push_cached_metatable(Universe* U, Dest L2, int L2_cache_i, Source L, int i, LookupMode mode_, char const* upName_) | ||
1401 | { | 1394 | { |
1402 | STACK_CHECK( L, 0); | 1395 | STACK_CHECK_START_REL(L, 0); |
1403 | if( lua_getmetatable( L, i)) // ... mt | 1396 | if (!lua_getmetatable(L, i)) // ... mt |
1404 | { | 1397 | { |
1405 | lua_Integer const mt_id = get_mt_id( U, L, -1); // Unique id for the metatable | 1398 | STACK_CHECK( L, 0); |
1399 | return false; | ||
1400 | } | ||
1401 | STACK_CHECK(L, 1); | ||
1406 | 1402 | ||
1407 | STACK_CHECK( L2, 0); | 1403 | lua_Integer const mt_id{ get_mt_id(U, L, -1) }; // Unique id for the metatable |
1408 | STACK_GROW( L2, 4); | ||
1409 | // do we already know this metatable? | ||
1410 | push_registry_subtable( L2, REG_MTID); // _R[REG_MTID] | ||
1411 | lua_pushinteger( L2, mt_id); // _R[REG_MTID] id | ||
1412 | lua_rawget( L2, -2); // _R[REG_MTID] mt? | ||
1413 | 1404 | ||
1414 | STACK_MID( L2, 2); | 1405 | STACK_CHECK_START_REL(L2, 0); |
1406 | STACK_GROW(L2, 4); | ||
1407 | // do we already know this metatable? | ||
1408 | push_registry_subtable(L2, REG_MTID); // _R[REG_MTID] | ||
1409 | lua_pushinteger(L2, mt_id); // _R[REG_MTID] id | ||
1410 | lua_rawget(L2, -2); // _R[REG_MTID] mt|nil | ||
1411 | STACK_CHECK(L2, 2); | ||
1415 | 1412 | ||
1416 | if( lua_isnil( L2, -1)) | 1413 | if (lua_isnil(L2, -1)) |
1417 | { // L2 did not know the metatable | 1414 | { // L2 did not know the metatable |
1418 | lua_pop( L2, 1); // _R[REG_MTID] | 1415 | lua_pop(L2, 1); // _R[REG_MTID] |
1419 | if( inter_copy_one( U, L2, L2_cache_i, L, lua_gettop( L), VT_METATABLE, mode_, upName_)) // _R[REG_MTID] mt | 1416 | if (!inter_copy_one(U, L2, L2_cache_i, L, lua_gettop(L), VT::METATABLE, mode_, upName_)) // _R[REG_MTID] mt? |
1420 | { | 1417 | { |
1421 | STACK_MID( L2, 2); | 1418 | luaL_error(L, "Error copying a metatable"); // doesn't return |
1422 | // mt_id -> metatable | ||
1423 | lua_pushinteger( L2, mt_id); // _R[REG_MTID] mt id | ||
1424 | lua_pushvalue( L2, -2); // _R[REG_MTID] mt id mt | ||
1425 | lua_rawset( L2, -4); // _R[REG_MTID] mt | ||
1426 | |||
1427 | // metatable -> mt_id | ||
1428 | lua_pushvalue( L2, -1); // _R[REG_MTID] mt mt | ||
1429 | lua_pushinteger( L2, mt_id); // _R[REG_MTID] mt mt id | ||
1430 | lua_rawset( L2, -4); // _R[REG_MTID] mt | ||
1431 | } | ||
1432 | else | ||
1433 | { | ||
1434 | (void) luaL_error( L, "Error copying a metatable"); | ||
1435 | } | ||
1436 | STACK_MID( L2, 2); | ||
1437 | } | 1419 | } |
1438 | lua_remove( L2, -2); // mt | ||
1439 | 1420 | ||
1440 | lua_pop( L, 1); // ... | 1421 | STACK_CHECK(L2, 2); // _R[REG_MTID] mt |
1441 | STACK_END( L2, 1); | 1422 | // mt_id -> metatable |
1442 | STACK_MID( L, 0); | 1423 | lua_pushinteger(L2, mt_id); // _R[REG_MTID] mt id |
1443 | return TRUE; | 1424 | lua_pushvalue(L2, -2); // _R[REG_MTID] mt id mt |
1425 | lua_rawset(L2, -4); // _R[REG_MTID] mt | ||
1426 | |||
1427 | // metatable -> mt_id | ||
1428 | lua_pushvalue(L2, -1); // _R[REG_MTID] mt mt | ||
1429 | lua_pushinteger(L2, mt_id); // _R[REG_MTID] mt mt id | ||
1430 | lua_rawset(L2, -4); // _R[REG_MTID] mt | ||
1431 | STACK_CHECK(L2, 2); | ||
1444 | } | 1432 | } |
1445 | STACK_END( L, 0); | 1433 | lua_remove(L2, -2); // mt |
1446 | return FALSE; | 1434 | |
1435 | lua_pop(L, 1); // ... | ||
1436 | STACK_CHECK(L2, 1); | ||
1437 | STACK_CHECK(L, 0); | ||
1438 | return true; | ||
1447 | } | 1439 | } |
1448 | 1440 | ||
1449 | static void inter_copy_keyvaluepair( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, enum e_vt vt, LookupMode mode_, char const* upName_) | 1441 | // ################################################################################################# |
1442 | |||
1443 | [[nodiscard]] static void inter_copy_keyvaluepair(Universe* U, Dest L2, int L2_cache_i, Source L, VT vt_, LookupMode mode_, char const* upName_) | ||
1450 | { | 1444 | { |
1451 | uint_t val_i = lua_gettop( L); | 1445 | int val_i = lua_gettop(L); |
1452 | uint_t key_i = val_i - 1; | 1446 | int key_i = val_i - 1; |
1453 | 1447 | ||
1454 | // Only basic key types are copied over; others ignored | 1448 | // Only basic key types are copied over; others ignored |
1455 | if( inter_copy_one( U, L2, L2_cache_i, L, key_i, VT_KEY, mode_, upName_)) | 1449 | if (inter_copy_one(U, L2, L2_cache_i, L, key_i, VT::KEY, mode_, upName_)) |
1456 | { | 1450 | { |
1457 | char* valPath = (char*) upName_; | 1451 | char* valPath = (char*) upName_; |
1458 | if( U->verboseErrors) | 1452 | if( U->verboseErrors) |
@@ -1465,7 +1459,7 @@ static void inter_copy_keyvaluepair( Universe* U, lua_State* L2, uint_t L2_cache | |||
1465 | size_t const bufLen = strlen( upName_) + keyRawLen + 2; | 1459 | size_t const bufLen = strlen( upName_) + keyRawLen + 2; |
1466 | valPath = (char*) alloca( bufLen); | 1460 | valPath = (char*) alloca( bufLen); |
1467 | sprintf( valPath, "%s.%*s", upName_, (int) keyRawLen, key); | 1461 | sprintf( valPath, "%s.%*s", upName_, (int) keyRawLen, key); |
1468 | key = NULL; | 1462 | key = nullptr; |
1469 | } | 1463 | } |
1470 | #if defined LUA_LNUM || LUA_VERSION_NUM >= 503 | 1464 | #if defined LUA_LNUM || LUA_VERSION_NUM >= 503 |
1471 | else if( lua_isinteger( L, key_i)) | 1465 | else if( lua_isinteger( L, key_i)) |
@@ -1498,51 +1492,53 @@ static void inter_copy_keyvaluepair( Universe* U, lua_State* L2, uint_t L2_cache | |||
1498 | * Contents of metatables are copied with cache checking; | 1492 | * Contents of metatables are copied with cache checking; |
1499 | * important to detect loops. | 1493 | * important to detect loops. |
1500 | */ | 1494 | */ |
1501 | if( inter_copy_one( U, L2, L2_cache_i, L, val_i, VT_NORMAL, mode_, valPath)) | 1495 | if (inter_copy_one(U, L2, L2_cache_i, L, val_i, VT::NORMAL, mode_, valPath)) |
1502 | { | 1496 | { |
1503 | ASSERT_L( lua_istable( L2, -3)); | 1497 | ASSERT_L( lua_istable( L2, -3)); |
1504 | lua_rawset( L2, -3); // add to table (pops key & val) | 1498 | lua_rawset( L2, -3); // add to table (pops key & val) |
1505 | } | 1499 | } |
1506 | else | 1500 | else |
1507 | { | 1501 | { |
1508 | luaL_error( L, "Unable to copy %s entry '%s' because of value is of type '%s'", (vt == VT_NORMAL) ? "table" : "metatable", valPath, luaL_typename( L, val_i)); | 1502 | luaL_error(L, "Unable to copy %s entry '%s' because of value is of type '%s'", (vt_ == VT::NORMAL) ? "table" : "metatable", valPath, luaL_typename(L, val_i)); |
1509 | } | 1503 | } |
1510 | } | 1504 | } |
1511 | } | 1505 | } |
1512 | 1506 | ||
1507 | // ################################################################################################# | ||
1508 | |||
1513 | /* | 1509 | /* |
1514 | * The clone cache is a weak valued table listing all clones, indexed by their userdatapointer | 1510 | * The clone cache is a weak valued table listing all clones, indexed by their userdatapointer |
1515 | * fnv164 of string "CLONABLES_CACHE_KEY" generated at https://www.pelock.com/products/hash-calculator | 1511 | * fnv164 of string "CLONABLES_CACHE_KEY" generated at https://www.pelock.com/products/hash-calculator |
1516 | */ | 1512 | */ |
1517 | static DECLARE_CONST_UNIQUE_KEY( CLONABLES_CACHE_KEY, 0xD04EE018B3DEE8F5); | 1513 | static constexpr UniqueKey CLONABLES_CACHE_KEY{ 0xD04EE018B3DEE8F5ull }; |
1518 | 1514 | ||
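The constant above is documented as the fnv164 hash of the string "CLONABLES_CACHE_KEY", produced with an external calculator. For reference, a hedged sketch of the FNV-1 64-bit function such a generator implements (standard offset basis and prime; not verified here against the exact value above, the project generated its keys offline):

    #include <cstdint>

    // FNV-1 64-bit: start from the offset basis, then for each byte multiply by the prime
    // and xor in the byte (FNV-1a swaps those two steps)
    [[nodiscard]] constexpr std::uint64_t fnv1_64(char const* str_)
    {
        std::uint64_t hash{ 14695981039346656037ull }; // 0xCBF29CE484222325
        while (*str_)
        {
            hash *= 1099511628211ull;                  // 0x00000100000001B3
            hash ^= static_cast<unsigned char>(*str_++);
        }
        return hash;
    }
    // e.g. static constexpr UniqueKey SOME_KEY{ fnv1_64("SOME_KEY_STRING") };
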
1519 | static bool_t copyclone( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t source_i_, LookupMode mode_, char const* upName_) | 1515 | [[nodiscard]] static bool copyclone(Universe* U, Dest L2, int L2_cache_i, Source L, int source_i_, LookupMode mode_, char const* upName_) |
1520 | { | 1516 | { |
1521 | void* const source = lua_touserdata( L, source_i_); | 1517 | void* const source = lua_touserdata( L, source_i_); |
1522 | source_i_ = lua_absindex( L, source_i_); | 1518 | source_i_ = lua_absindex( L, source_i_); |
1523 | 1519 | ||
1524 | STACK_CHECK( L, 0); // L (source) // L2 (destination) | 1520 | STACK_CHECK_START_REL(L, 0); // L (source) // L2 (destination) |
1525 | STACK_CHECK( L2, 0); | 1521 | STACK_CHECK_START_REL(L2, 0); |
1526 | 1522 | ||
1527 | // Check if the source was already cloned during this copy | 1523 | // Check if the source was already cloned during this copy |
1528 | lua_pushlightuserdata( L2, source); // ... source | 1524 | lua_pushlightuserdata( L2, source); // ... source |
1529 | lua_rawget( L2, L2_cache_i); // ... clone? | 1525 | lua_rawget( L2, L2_cache_i); // ... clone? |
1530 | if ( !lua_isnil( L2, -1)) | 1526 | if ( !lua_isnil( L2, -1)) |
1531 | { | 1527 | { |
1532 | STACK_MID( L2, 1); | 1528 | STACK_CHECK( L2, 1); |
1533 | return TRUE; | 1529 | return true; |
1534 | } | 1530 | } |
1535 | else | 1531 | else |
1536 | { | 1532 | { |
1537 | lua_pop( L2, 1); // ... | 1533 | lua_pop( L2, 1); // ... |
1538 | } | 1534 | } |
1539 | STACK_MID( L2, 0); | 1535 | STACK_CHECK( L2, 0); |
1540 | 1536 | ||
1541 | // no metatable? -> not clonable | 1537 | // no metatable? -> not clonable |
1542 | if( !lua_getmetatable( L, source_i_)) // ... mt? | 1538 | if( !lua_getmetatable( L, source_i_)) // ... mt? |
1543 | { | 1539 | { |
1544 | STACK_MID( L, 0); | 1540 | STACK_CHECK( L, 0); |
1545 | return FALSE; | 1541 | return false; |
1546 | } | 1542 | } |
1547 | 1543 | ||
1548 | // no __lanesclone? -> not clonable | 1544 | // no __lanesclone? -> not clonable |
@@ -1550,15 +1546,15 @@ static bool_t copyclone( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_Stat | |||
1550 | if( lua_isnil( L, -1)) | 1546 | if( lua_isnil( L, -1)) |
1551 | { | 1547 | { |
1552 | lua_pop( L, 2); // ... | 1548 | lua_pop( L, 2); // ... |
1553 | STACK_MID( L, 0); | 1549 | STACK_CHECK( L, 0); |
1554 | return FALSE; | 1550 | return false; |
1555 | } | 1551 | } |
1556 | 1552 | ||
1557 | // we need to copy over the uservalues of the userdata as well | 1553 | // we need to copy over the uservalues of the userdata as well |
1558 | { | 1554 | { |
1559 | int const mt = lua_absindex( L, -2); // ... mt __lanesclone | 1555 | int const mt = lua_absindex( L, -2); // ... mt __lanesclone |
1560 | size_t const userdata_size = (size_t) lua_rawlen( L, source_i_); | 1556 | size_t const userdata_size = (size_t) lua_rawlen( L, source_i_); |
1561 | void* clone = NULL; | 1557 | void* clone = nullptr; |
1562 | // extract all the uservalues, but don't transfer them yet | 1558 | // extract all the uservalues, but don't transfer them yet |
1563 | int uvi = 0; | 1559 | int uvi = 0; |
1564 | while( lua_getiuservalue( L, source_i_, ++ uvi) != LUA_TNONE) {} // ... mt __lanesclone [uv]+ nil | 1560 | while( lua_getiuservalue( L, source_i_, ++ uvi) != LUA_TNONE) {} // ... mt __lanesclone [uv]+ nil |
@@ -1568,9 +1564,9 @@ static bool_t copyclone( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_Stat | |||
1568 | // create the clone userdata with the required number of uservalue slots | 1564 | // create the clone userdata with the required number of uservalue slots |
1569 | clone = lua_newuserdatauv( L2, userdata_size, uvi); // ... u | 1565 | clone = lua_newuserdatauv( L2, userdata_size, uvi); // ... u |
1570 | // copy the metatable in the target state, and give it to the clone we put there | 1566 | // copy the metatable in the target state, and give it to the clone we put there |
1571 | if( inter_copy_one( U, L2, L2_cache_i, L, mt, VT_NORMAL, mode_, upName_)) // ... u mt|sentinel | 1567 | if (inter_copy_one(U, L2, L2_cache_i, L, mt, VT::NORMAL, mode_, upName_)) // ... u mt|sentinel |
1572 | { | 1568 | { |
1573 | if( eLM_ToKeeper == mode_) // ... u sentinel | 1569 | if( LookupMode::ToKeeper == mode_) // ... u sentinel |
1574 | { | 1570 | { |
1575 | ASSERT_L( lua_tocfunction( L2, -1) == table_lookup_sentinel); | 1571 | ASSERT_L( lua_tocfunction( L2, -1) == table_lookup_sentinel); |
1576 | // we want to create a new closure with a 'clone sentinel' function, where the upvalues are the userdata and the metatable fqn | 1572 | // we want to create a new closure with a 'clone sentinel' function, where the upvalues are the userdata and the metatable fqn |
@@ -1584,7 +1580,7 @@ static bool_t copyclone( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_Stat | |||
1584 | ASSERT_L( lua_istable( L2, -1)); | 1580 | ASSERT_L( lua_istable( L2, -1)); |
1585 | lua_setmetatable( L2, -2); // ... u | 1581 | lua_setmetatable( L2, -2); // ... u |
1586 | } | 1582 | } |
1587 | STACK_MID( L2, 1); | 1583 | STACK_CHECK( L2, 1); |
1588 | } | 1584 | } |
1589 | else | 1585 | else |
1590 | { | 1586 | { |
@@ -1595,206 +1591,210 @@ static bool_t copyclone( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_Stat | |||
1595 | lua_pushvalue( L2, -2); // ... u source u | 1591 | lua_pushvalue( L2, -2); // ... u source u |
1596 | lua_rawset( L2, L2_cache_i); // ... u | 1592 | lua_rawset( L2, L2_cache_i); // ... u |
1597 | // make sure we have the userdata now | 1593 | // make sure we have the userdata now |
1598 | if( eLM_ToKeeper == mode_) // ... userdata_clone_sentinel | 1594 | if( LookupMode::ToKeeper == mode_) // ... userdata_clone_sentinel |
1599 | { | 1595 | { |
1600 | lua_getupvalue( L2, -1, 2); // ... userdata_clone_sentinel u | 1596 | lua_getupvalue( L2, -1, 2); // ... userdata_clone_sentinel u |
1601 | } | 1597 | } |
1602 | // assign uservalues | 1598 | // assign uservalues |
1603 | while( uvi > 0) | 1599 | while( uvi > 0) |
1604 | { | 1600 | { |
1605 | if(!inter_copy_one( U, L2, L2_cache_i, L, lua_absindex( L, -1), VT_NORMAL, mode_, upName_)) // ... u uv | 1601 | if (!inter_copy_one(U, L2, L2_cache_i, L, lua_absindex(L, -1), VT::NORMAL, mode_, upName_)) // ... u uv |
1606 | { | 1602 | { |
1607 | (void) luaL_error(L, "Cannot copy upvalue type '%s'", luaL_typename(L, -1)); | 1603 | luaL_error(L, "Cannot copy upvalue type '%s'", luaL_typename(L, -1)); // doesn't return |
1608 | } | 1604 | } |
1609 | lua_pop( L, 1); // ... mt __lanesclone [uv]* | 1605 | lua_pop( L, 1); // ... mt __lanesclone [uv]* |
1610 | // this pops the value from the stack | 1606 | // this pops the value from the stack |
1611 | lua_setiuservalue( L2, -2, uvi); // ... u | 1607 | lua_setiuservalue( L2, -2, uvi); // ... u |
1612 | -- uvi; | 1608 | -- uvi; |
1613 | } | 1609 | } |
1614 | // when we are done, all uservalues are popped from the source stack, and we want only the single transferred value in the destination | 1610 | // when we are done, all uservalues are popped from the source stack, and we want only the single transferred value in the destination |
1615 | if( eLM_ToKeeper == mode_) // ... userdata_clone_sentinel u | 1611 | if( LookupMode::ToKeeper == mode_) // ... userdata_clone_sentinel u |
1616 | { | 1612 | { |
1617 | lua_pop( L2, 1); // ... userdata_clone_sentinel | 1613 | lua_pop( L2, 1); // ... userdata_clone_sentinel |
1618 | } | 1614 | } |
1619 | STACK_MID( L2, 1); | 1615 | STACK_CHECK( L2, 1); |
1620 | STACK_MID( L, 2); | 1616 | STACK_CHECK( L, 2); |
1621 | // call cloning function in source state to perform the actual memory cloning | 1617 | // call cloning function in source state to perform the actual memory cloning |
1622 | lua_pushlightuserdata( L, clone); // ... mt __lanesclone clone | 1618 | lua_pushlightuserdata( L, clone); // ... mt __lanesclone clone |
1623 | lua_pushlightuserdata( L, source); // ... mt __lanesclone clone source | 1619 | lua_pushlightuserdata( L, source); // ... mt __lanesclone clone source |
1624 | lua_pushinteger( L, userdata_size); // ... mt __lanesclone clone source size | 1620 | lua_pushinteger( L, userdata_size); // ... mt __lanesclone clone source size |
1625 | lua_call( L, 3, 0); // ... mt | 1621 | lua_call( L, 3, 0); // ... mt |
1626 | STACK_MID( L, 1); | 1622 | STACK_CHECK( L, 1); |
1627 | } | 1623 | } |
1628 | 1624 | ||
1629 | STACK_END( L2, 1); | 1625 | STACK_CHECK( L2, 1); |
1630 | lua_pop( L, 1); // ... | 1626 | lua_pop( L, 1); // ... |
1631 | STACK_END( L, 0); | 1627 | STACK_CHECK( L, 0); |
1632 | return TRUE; | 1628 | return true; |
1633 | } | 1629 | } |
1634 | 1630 | ||
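Both here and in the keeper-restoration path further down, the clone's __lanesclone metamethod is invoked as __lanesclone(dest, source, size) with two light userdata and a byte count, and is expected to make 'dest' a functional copy of 'source'. A minimal sketch of such a metamethod on the host side (illustrative only; a plain byte copy is valid only for trivially-copyable payloads):

    #include <cstring>

    static int my_lanesclone(lua_State* L)
    {
        void* const dest = lua_touserdata(L, 1);
        void const* const source = lua_touserdata(L, 2);
        size_t const size = static_cast<size_t>(lua_tointeger(L, 3));
        std::memcpy(dest, source, size); // deep-copy any owned pointers/handles here instead
        return 0;
    }
    // registered in the userdata's metatable under the "__lanesclone" field
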
1635 | static bool_t inter_copy_userdata( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, enum e_vt vt, LookupMode mode_, char const* upName_) | 1631 | // ################################################################################################# |
1632 | |||
1633 | [[nodiscard]] static bool inter_copy_userdata(Universe* U, Dest L2, int L2_cache_i, Source L, int i, VT vt_, LookupMode mode_, char const* upName_) | ||
1636 | { | 1634 | { |
1637 | STACK_CHECK( L, 0); | 1635 | STACK_CHECK_START_REL(L, 0); |
1638 | STACK_CHECK( L2, 0); | 1636 | STACK_CHECK_START_REL(L2, 0); |
1639 | if( vt == VT_KEY) | 1637 | if (vt_ == VT::KEY) |
1640 | { | 1638 | { |
1641 | return FALSE; | 1639 | return false; |
1642 | } | 1640 | } |
1643 | 1641 | ||
1644 | // try clonable userdata first | 1642 | // try clonable userdata first |
1645 | if( copyclone( U, L2, L2_cache_i, L, i, mode_, upName_)) | 1643 | if( copyclone( U, L2, L2_cache_i, L, i, mode_, upName_)) |
1646 | { | 1644 | { |
1647 | STACK_MID( L, 0); | 1645 | STACK_CHECK(L, 0); |
1648 | STACK_MID( L2, 1); | 1646 | STACK_CHECK(L2, 1); |
1649 | return TRUE; | 1647 | return true; |
1650 | } | 1648 | } |
1651 | 1649 | ||
1652 | STACK_MID( L, 0); | 1650 | STACK_CHECK(L, 0); |
1653 | STACK_MID( L2, 0); | 1651 | STACK_CHECK(L2, 0); |
1654 | 1652 | ||
1655 | // Allow only deep userdata entities to be copied across | 1653 | // Allow only deep userdata entities to be copied across |
1656 | DEBUGSPEW_CODE( fprintf( stderr, "USERDATA\n")); | 1654 | DEBUGSPEW_CODE(fprintf(stderr, "USERDATA\n")); |
1657 | if( copydeep( U, L2, L2_cache_i, L, i, mode_, upName_)) | 1655 | if (copydeep(U, L2, L2_cache_i, L, i, mode_, upName_)) |
1658 | { | 1656 | { |
1659 | STACK_MID( L, 0); | 1657 | STACK_CHECK(L, 0); |
1660 | STACK_MID( L2, 1); | 1658 | STACK_CHECK(L2, 1); |
1661 | return TRUE; | 1659 | return true; |
1662 | } | 1660 | } |
1663 | 1661 | ||
1664 | STACK_MID( L, 0); | 1662 | STACK_CHECK(L, 0); |
1665 | STACK_MID( L2, 0); | 1663 | STACK_CHECK(L2, 0); |
1666 | 1664 | ||
1667 | // Not a deep or clonable full userdata | 1665 | // Not a deep or clonable full userdata |
1668 | if( U->demoteFullUserdata) // attempt demotion to light userdata | 1666 | if (U->demoteFullUserdata) // attempt demotion to light userdata |
1669 | { | 1667 | { |
1670 | void* lud = lua_touserdata( L, i); | 1668 | void* lud = lua_touserdata(L, i); |
1671 | lua_pushlightuserdata( L2, lud); | 1669 | lua_pushlightuserdata(L2, lud); |
1672 | } | 1670 | } |
1673 | else // raise an error | 1671 | else // raise an error |
1674 | { | 1672 | { |
1675 | (void) luaL_error( L, "can't copy non-deep full userdata across lanes"); | 1673 | luaL_error(L, "can't copy non-deep full userdata across lanes"); // doesn't return |
1676 | } | 1674 | } |
1677 | 1675 | ||
1678 | STACK_END( L2, 1); | 1676 | STACK_CHECK(L2, 1); |
1679 | STACK_END( L, 0); | 1677 | STACK_CHECK(L, 0); |
1680 | return TRUE; | 1678 | return true; |
1681 | } | 1679 | } |
1682 | 1680 | ||
1683 | static bool_t inter_copy_function( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t source_i_, enum e_vt vt, LookupMode mode_, char const* upName_) | 1681 | // ################################################################################################# |
1682 | |||
1683 | [[nodiscard]] static bool inter_copy_function(Universe* U, Dest L2, int L2_cache_i, Source L, int source_i_, VT vt_, LookupMode mode_, char const* upName_) | ||
1684 | { | 1684 | { |
1685 | if( vt == VT_KEY) | 1685 | if (vt_ == VT::KEY) |
1686 | { | 1686 | { |
1687 | return FALSE; | 1687 | return false; |
1688 | } | 1688 | } |
1689 | 1689 | ||
1690 | STACK_CHECK( L, 0); // L (source) // L2 (destination) | 1690 | STACK_CHECK_START_REL(L, 0); // L (source) // L2 (destination) |
1691 | STACK_CHECK( L2, 0); | 1691 | STACK_CHECK_START_REL(L2, 0); |
1692 | DEBUGSPEW_CODE( fprintf( stderr, "FUNCTION %s\n", upName_)); | 1692 | DEBUGSPEW_CODE(fprintf(stderr, "FUNCTION %s\n", upName_)); |
1693 | 1693 | ||
1694 | if( lua_tocfunction( L, source_i_) == userdata_clone_sentinel) // we are actually copying a clonable full userdata from a keeper | 1694 | if (lua_tocfunction(L, source_i_) == userdata_clone_sentinel) // we are actually copying a clonable full userdata from a keeper |
1695 | { | 1695 | { |
1696 | // clone the full userdata again | 1696 | // clone the full userdata again |
1697 | size_t userdata_size = 0; | ||
1698 | void* source; | ||
1699 | void* clone; | ||
1700 | 1697 | ||
1701 | // let's see if we already restored this userdata | 1698 | // let's see if we already restored this userdata |
1702 | lua_getupvalue( L, source_i_, 2); // ... u | 1699 | lua_getupvalue(L, source_i_, 2); // ... u |
1703 | source = lua_touserdata( L, -1); | 1700 | void* source = lua_touserdata(L, -1); |
1704 | lua_pushlightuserdata( L2, source); // ... source | 1701 | lua_pushlightuserdata(L2, source); // ... source |
1705 | lua_rawget( L2, L2_cache_i); // ... u? | 1702 | lua_rawget(L2, L2_cache_i); // ... u? |
1706 | if( !lua_isnil( L2, -1)) | 1703 | if (!lua_isnil(L2, -1)) |
1707 | { | 1704 | { |
1708 | lua_pop( L, 1); // ... | 1705 | lua_pop(L, 1); // ... |
1709 | STACK_MID( L, 0); | 1706 | STACK_CHECK(L, 0); |
1710 | STACK_MID( L2, 1); | 1707 | STACK_CHECK(L2, 1); |
1711 | return TRUE; | 1708 | return true; |
1712 | } | 1709 | } |
1713 | lua_pop( L2, 1); // ... | 1710 | lua_pop(L2, 1); // ... |
1714 | 1711 | ||
1715 | // this function has 2 upvalues: the fqn of its metatable, and the userdata itself | 1712 | // this function has 2 upvalues: the fqn of its metatable, and the userdata itself |
1716 | lookup_table( L2, L, source_i_, mode_, upName_); // ... mt | 1713 | std::ignore = lookup_table(L2, L, source_i_, mode_, upName_); // ... mt |
1717 | // originally 'source_i_' slot was the proxy closure, but from now on it indexes the actual userdata we extracted from it | 1714 | // originally 'source_i_' slot was the proxy closure, but from now on it indexes the actual userdata we extracted from it |
1718 | source_i_ = lua_gettop( L); | 1715 | source_i_ = lua_gettop(L); |
1719 | source = lua_touserdata( L, -1); | 1716 | source = lua_touserdata(L, -1); |
1717 | void* clone{ nullptr }; | ||
1720 | // get the number of bytes to allocate for the clone | 1718 | // get the number of bytes to allocate for the clone |
1721 | userdata_size = (size_t) lua_rawlen( L, -1); | 1719 | size_t const userdata_size{ lua_rawlen(L, -1) }; |
1722 | { | 1720 | { |
1723 | // extract uservalues (don't transfer them yet) | 1721 | // extract uservalues (don't transfer them yet) |
1724 | int uvi = 0; | 1722 | int uvi = 0; |
1725 | while( lua_getiuservalue( L, source_i_, ++ uvi) != LUA_TNONE) {} // ... u uv | 1723 | while (lua_getiuservalue(L, source_i_, ++uvi) != LUA_TNONE) {} // ... u uv |
1726 | // when lua_getiuservalue() returned LUA_TNONE, it pushed a nil. pop it now | 1724 | // when lua_getiuservalue() returned LUA_TNONE, it pushed a nil. pop it now |
1727 | lua_pop( L, 1); // ... u [uv]* | 1725 | lua_pop(L, 1); // ... u [uv]* |
1728 | -- uvi; | 1726 | --uvi; |
1729 | STACK_MID( L, uvi + 1); | 1727 | STACK_CHECK(L, uvi + 1); |
1730 | // create the clone userdata with the required number of uservalue slots | 1728 | // create the clone userdata with the required number of uservalue slots |
1731 | clone = lua_newuserdatauv( L2, userdata_size, uvi); // ... mt u | 1729 | clone = lua_newuserdatauv(L2, userdata_size, uvi); // ... mt u |
1732 | // add it in the cache | 1730 | // add it in the cache |
1733 | lua_pushlightuserdata( L2, source); // ... mt u source | 1731 | lua_pushlightuserdata(L2, source); // ... mt u source |
1734 | lua_pushvalue( L2, -2); // ... mt u source u | 1732 | lua_pushvalue(L2, -2); // ... mt u source u |
1735 | lua_rawset( L2, L2_cache_i); // ... mt u | 1733 | lua_rawset(L2, L2_cache_i); // ... mt u |
1736 | // set metatable | 1734 | // set metatable |
1737 | lua_pushvalue( L2, -2); // ... mt u mt | 1735 | lua_pushvalue(L2, -2); // ... mt u mt |
1738 | lua_setmetatable( L2, -2); // ... mt u | 1736 | lua_setmetatable(L2, -2); // ... mt u |
1739 | // transfer and assign uservalues | 1737 | // transfer and assign uservalues |
1740 | while( uvi > 0) | 1738 | while (uvi > 0) |
1741 | { | 1739 | { |
1742 | if(!inter_copy_one( U, L2, L2_cache_i, L, lua_absindex( L, -1), vt, mode_, upName_)) // ... mt u uv | 1740 | if (!inter_copy_one(U, L2, L2_cache_i, L, lua_absindex(L, -1), vt_, mode_, upName_)) // ... mt u uv |
1743 | { | 1741 | { |
1744 | (void) luaL_error(L, "Cannot copy upvalue type '%s'", luaL_typename(L, -1)); | 1742 | luaL_error(L, "Cannot copy upvalue type '%s'", luaL_typename(L, -1)); // doesn't return |
1745 | } | 1743 | } |
1746 | lua_pop( L, 1); // ... u [uv]* | 1744 | lua_pop(L, 1); // ... u [uv]* |
1747 | // this pops the value from the stack | 1745 | // this pops the value from the stack |
1748 | lua_setiuservalue( L2, -2, uvi); // ... mt u | 1746 | lua_setiuservalue(L2, -2, uvi); // ... mt u |
1749 | -- uvi; | 1747 | -- uvi; |
1750 | } | 1748 | } |
1751 | // when we are done, all uservalues are popped from the stack, we can pop the source as well | 1749 | // when we are done, all uservalues are popped from the stack, we can pop the source as well |
1752 | lua_pop( L, 1); // ... | 1750 | lua_pop(L, 1); // ... |
1753 | STACK_MID( L, 0); | 1751 | STACK_CHECK(L, 0); |
1754 | STACK_MID( L2, 2); // ... mt u | 1752 | STACK_CHECK(L2, 2); // ... mt u |
1755 | } | 1753 | } |
1756 | // perform the custom cloning part | 1754 | // perform the custom cloning part |
1757 | lua_insert( L2, -2); // ... u mt | 1755 | lua_insert(L2, -2); // ... u mt |
1758 | // __lanesclone should always exist because we wouldn't be restoring data from a userdata_clone_sentinel closure to begin with | 1756 | // __lanesclone should always exist because we wouldn't be restoring data from a userdata_clone_sentinel closure to begin with |
1759 | lua_getfield(L2, -1, "__lanesclone"); // ... u mt __lanesclone | 1757 | lua_getfield(L2, -1, "__lanesclone"); // ... u mt __lanesclone |
1760 | lua_remove( L2, -2); // ... u __lanesclone | 1758 | lua_remove(L2, -2); // ... u __lanesclone |
1761 | lua_pushlightuserdata( L2, clone); // ... u __lanesclone clone | 1759 | lua_pushlightuserdata(L2, clone); // ... u __lanesclone clone |
1762 | lua_pushlightuserdata( L2, source); // ... u __lanesclone clone source | 1760 | lua_pushlightuserdata(L2, source); // ... u __lanesclone clone source |
1763 | lua_pushinteger( L2, userdata_size); // ... u __lanesclone clone source size | 1761 | lua_pushinteger(L2, userdata_size); // ... u __lanesclone clone source size |
1764 | // clone:__lanesclone(dest, source, size) | 1762 | // clone:__lanesclone(dest, source, size) |
1765 | lua_call( L2, 3, 0); // ... u | 1763 | lua_call(L2, 3, 0); // ... u |
1766 | } | 1764 | } |
1767 | else // regular function | 1765 | else // regular function |
1768 | { | 1766 | { |
1769 | DEBUGSPEW_CODE( fprintf( stderr, "FUNCTION %s\n", upName_)); | 1767 | DEBUGSPEW_CODE(fprintf( stderr, "FUNCTION %s\n", upName_)); |
1770 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); | 1768 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed)); |
1771 | copy_cached_func( U, L2, L2_cache_i, L, source_i_, mode_, upName_); // ... f | 1769 | copy_cached_func(U, L2, L2_cache_i, L, source_i_, mode_, upName_); // ... f |
1772 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); | 1770 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed)); |
1773 | } | 1771 | } |
1774 | STACK_END( L2, 1); | 1772 | STACK_CHECK(L2, 1); |
1775 | STACK_END( L, 0); | 1773 | STACK_CHECK(L, 0); |
1776 | return TRUE; | 1774 | return true; |
1777 | } | 1775 | } |
1778 | 1776 | ||
1779 | static bool_t inter_copy_table( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, enum e_vt vt, LookupMode mode_, char const* upName_) | 1777 | // ################################################################################################# |
1778 | |||
1779 | [[nodiscard]] static bool inter_copy_table(Universe* U, Dest L2, int L2_cache_i, Source L, int i, VT vt_, LookupMode mode_, char const* upName_) | ||
1780 | { | 1780 | { |
1781 | if( vt == VT_KEY) | 1781 | if (vt_ == VT::KEY) |
1782 | { | 1782 | { |
1783 | return FALSE; | 1783 | return false; |
1784 | } | 1784 | } |
1785 | 1785 | ||
1786 | STACK_CHECK( L, 0); | 1786 | STACK_CHECK_START_REL(L, 0); |
1787 | STACK_CHECK( L2, 0); | 1787 | STACK_CHECK_START_REL(L2, 0); |
1788 | DEBUGSPEW_CODE( fprintf( stderr, "TABLE %s\n", upName_)); | 1788 | DEBUGSPEW_CODE(fprintf(stderr, "TABLE %s\n", upName_)); |
1789 | 1789 | ||
1790 | /* | 1790 | /* |
1791 | * First, let's try to see if this table is special (aka is it some table that we registered in our lookup databases during module registration?) | 1791 | * First, let's try to see if this table is special (aka is it some table that we registered in our lookup databases during module registration?) |
1792 | * Note that this table CAN be a module table, but we just didn't register it, in which case we'll send it through the table cloning mechanism | 1792 | * Note that this table CAN be a module table, but we just didn't register it, in which case we'll send it through the table cloning mechanism |
1793 | */ | 1793 | */ |
1794 | if( lookup_table( L2, L, i, mode_, upName_)) | 1794 | if (lookup_table(L2, L, i, mode_, upName_)) |
1795 | { | 1795 | { |
1796 | ASSERT_L( lua_istable( L2, -1) || (lua_tocfunction( L2, -1) == table_lookup_sentinel)); // from lookup datables // can also be table_lookup_sentinel if this is a table we know | 1796 | ASSERT_L(lua_istable(L2, -1) || (lua_tocfunction(L2, -1) == table_lookup_sentinel)); // from lookup data. can also be table_lookup_sentinel if this is a table we know |
1797 | return TRUE; | 1797 | return true; |
1798 | } | 1798 | } |
1799 | 1799 | ||
1800 | /* Check if we've already copied the same table from 'L' (during this transmission), and | 1800 | /* Check if we've already copied the same table from 'L' (during this transmission), and |
@@ -1806,36 +1806,38 @@ static bool_t inter_copy_table( Universe* U, lua_State* L2, uint_t L2_cache_i, l | |||
1806 | * Note: Even metatables need to go through this test; to detect | 1806 | * Note: Even metatables need to go through this test; to detect |
1807 | * loops such as those in required module tables (getmetatable(lanes).lanes == lanes) | 1807 | * loops such as those in required module tables (getmetatable(lanes).lanes == lanes) |
1808 | */ | 1808 | */ |
1809 | if( push_cached_table( L2, L2_cache_i, L, i)) | 1809 | if (push_cached_table(L2, L2_cache_i, L, i)) |
1810 | { | 1810 | { |
1811 | ASSERT_L( lua_istable( L2, -1)); // from cache | 1811 | ASSERT_L(lua_istable(L2, -1)); // from cache |
1812 | return TRUE; | 1812 | return true; |
1813 | } | 1813 | } |
1814 | ASSERT_L( lua_istable( L2, -1)); | 1814 | ASSERT_L(lua_istable(L2, -1)); |
1815 | 1815 | ||
1816 | STACK_GROW( L, 2); | 1816 | STACK_GROW(L, 2); |
1817 | STACK_GROW( L2, 2); | 1817 | STACK_GROW(L2, 2); |
1818 | 1818 | ||
1819 | lua_pushnil( L); // start iteration | 1819 | lua_pushnil(L); // start iteration |
1820 | while( lua_next( L, i)) | 1820 | while (lua_next(L, i)) |
1821 | { | 1821 | { |
1822 | // need a function to prevent overflowing the stack with verboseErrors-induced alloca() | 1822 | // need a function to prevent overflowing the stack with verboseErrors-induced alloca() |
1823 | inter_copy_keyvaluepair( U, L2, L2_cache_i, L, vt, mode_, upName_); | 1823 | inter_copy_keyvaluepair(U, L2, L2_cache_i, L, vt_, mode_, upName_); |
1824 | lua_pop( L, 1); // pop value (next round) | 1824 | lua_pop(L, 1); // pop value (next round) |
1825 | } | 1825 | } |
1826 | STACK_MID( L, 0); | 1826 | STACK_CHECK(L, 0); |
1827 | STACK_MID( L2, 1); | 1827 | STACK_CHECK(L2, 1); |
1828 | 1828 | ||
1829 | // Metatables are expected to be immutable, and copied only once. | 1829 | // Metatables are expected to be immutable, and copied only once. |
1830 | if( push_cached_metatable( U, L2, L2_cache_i, L, i, mode_, upName_)) // ... t mt? | 1830 | if (push_cached_metatable(U, L2, L2_cache_i, L, i, mode_, upName_)) // ... t mt? |
1831 | { | 1831 | { |
1832 | lua_setmetatable( L2, -2); // ... t | 1832 | lua_setmetatable(L2, -2); // ... t |
1833 | } | 1833 | } |
1834 | STACK_END( L2, 1); | 1834 | STACK_CHECK(L2, 1); |
1835 | STACK_END( L, 0); | 1835 | STACK_CHECK(L, 0); |
1836 | return TRUE; | 1836 | return true; |
1837 | } | 1837 | } |
1838 | 1838 | ||
1839 | // ################################################################################################# | ||
1840 | |||
1839 | /* | 1841 | /* |
1840 | * Copies a value from 'L' state (at index 'i') to 'L2' state. Does not remove | 1842 | * Copies a value from 'L' state (at index 'i') to 'L2' state. Does not remove |
1841 | * the original value. | 1843 | * the original value. |
@@ -1844,23 +1846,23 @@ static bool_t inter_copy_table( Universe* U, lua_State* L2, uint_t L2_cache_i, l | |||
1844 | * | 1846 | * |
1845 | * 'i' is an absolute index (no -1, ...) | 1847 | * 'i' is an absolute index (no -1, ...) |
1846 | * | 1848 | * |
1847 | * Returns TRUE if value was pushed, FALSE if its type is non-supported. | 1849 | * Returns true if value was pushed, false if its type is non-supported. |
1848 | */ | 1850 | */ |
1849 | bool_t inter_copy_one( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, enum e_vt vt, LookupMode mode_, char const* upName_) | 1851 | [[nodiscard]] bool inter_copy_one(Universe* U, Dest L2, int L2_cache_i, Source L, int i, VT vt_, LookupMode mode_, char const* upName_) |
1850 | { | 1852 | { |
1851 | bool_t ret = TRUE; | 1853 | bool ret{ true }; |
1852 | int val_type = lua_type( L, i); | 1854 | LuaType val_type{ lua_type_as_enum(L, i) }; |
1853 | static int const pod_mask = (1 << LUA_TNIL) | (1 << LUA_TBOOLEAN) | (1 << LUA_TLIGHTUSERDATA) | (1 << LUA_TNUMBER) | (1 << LUA_TSTRING); | 1855 | static constexpr int pod_mask = (1 << LUA_TNIL) | (1 << LUA_TBOOLEAN) | (1 << LUA_TLIGHTUSERDATA) | (1 << LUA_TNUMBER) | (1 << LUA_TSTRING); |
1854 | STACK_GROW( L2, 1); | 1856 | STACK_GROW( L2, 1); |
1855 | STACK_CHECK( L, 0); // L // L2 | 1857 | STACK_CHECK_START_REL(L, 0); // L // L2 |
1856 | STACK_CHECK( L2, 0); // L // L2 | 1858 | STACK_CHECK_START_REL(L2, 0); // L // L2 |
1857 | 1859 | ||
1858 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "inter_copy_one()\n" INDENT_END)); | 1860 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "inter_copy_one()\n" INDENT_END)); |
1859 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); | 1861 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed)); |
1860 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%s %s: " INDENT_END, lua_type_names[val_type], vt_names[vt])); | 1862 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%s %s: " INDENT_END, lua_type_names[val_type], vt_names[static_cast<int>(vt_)])); |
1861 | 1863 | ||
1862 | // Non-POD can be skipped if its metatable contains { __lanesignore = true } | 1864 | // Non-POD can be skipped if its metatable contains { __lanesignore = true } |
1863 | if( ((1 << val_type) & pod_mask) == 0) | 1865 | if( ((1 << static_cast<int>(val_type)) & pod_mask) == 0) |
1864 | { | 1866 | { |
1865 | if( lua_getmetatable( L, i)) // ... mt | 1867 | if( lua_getmetatable( L, i)) // ... mt |
1866 | { | 1868 | { |
@@ -1868,27 +1870,27 @@ bool_t inter_copy_one( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* | |||
1868 | if( lua_isboolean( L, -1) && lua_toboolean( L, -1)) | 1870 | if( lua_isboolean( L, -1) && lua_toboolean( L, -1)) |
1869 | { | 1871 | { |
1870 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "__lanesignore -> LUA_TNIL\n" INDENT_END)); | 1872 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "__lanesignore -> LUA_TNIL\n" INDENT_END)); |
1871 | val_type = LUA_TNIL; | 1873 | val_type = LuaType::NIL; |
1872 | } | 1874 | } |
1873 | lua_pop( L, 2); // ... | 1875 | lua_pop( L, 2); // ... |
1874 | } | 1876 | } |
1875 | } | 1877 | } |
1876 | STACK_MID( L, 0); | 1878 | STACK_CHECK( L, 0); |
1877 | 1879 | ||
1878 | /* Lets push nil to L2 if the object should be ignored */ | 1880 | /* Lets push nil to L2 if the object should be ignored */ |
1879 | switch( val_type) | 1881 | switch( val_type) |
1880 | { | 1882 | { |
1881 | /* Basic types allowed both as values, and as table keys */ | 1883 | /* Basic types allowed both as values, and as table keys */ |
1882 | 1884 | ||
1883 | case LUA_TBOOLEAN: | 1885 | case LuaType::BOOLEAN: |
1884 | { | 1886 | { |
1885 | bool_t v = lua_toboolean( L, i); | 1887 | int const v{ lua_toboolean(L, i) }; |
1886 | DEBUGSPEW_CODE( fprintf( stderr, "%s\n", v ? "true" : "false")); | 1888 | DEBUGSPEW_CODE( fprintf( stderr, "%s\n", v ? "true" : "false")); |
1887 | lua_pushboolean( L2, v); | 1889 | lua_pushboolean( L2, v); |
1888 | } | 1890 | } |
1889 | break; | 1891 | break; |
1890 | 1892 | ||
1891 | case LUA_TNUMBER: | 1893 | case LuaType::NUMBER: |
1892 | /* LNUM patch support (keeping integer accuracy) */ | 1894 | /* LNUM patch support (keeping integer accuracy) */ |
1893 | #if defined LUA_LNUM || LUA_VERSION_NUM >= 503 | 1895 | #if defined LUA_LNUM || LUA_VERSION_NUM >= 503 |
1894 | if( lua_isinteger( L, i)) | 1896 | if( lua_isinteger( L, i)) |
@@ -1907,7 +1909,7 @@ bool_t inter_copy_one( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* | |||
1907 | } | 1909 | } |
1908 | break; | 1910 | break; |
1909 | 1911 | ||
1910 | case LUA_TSTRING: | 1912 | case LuaType::STRING: |
1911 | { | 1913 | { |
1912 | size_t len; | 1914 | size_t len; |
1913 | char const* s = lua_tolstring( L, i, &len); | 1915 | char const* s = lua_tolstring( L, i, &len); |
@@ -1916,7 +1918,7 @@ bool_t inter_copy_one( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* | |||
1916 | } | 1918 | } |
1917 | break; | 1919 | break; |
1918 | 1920 | ||
1919 | case LUA_TLIGHTUSERDATA: | 1921 | case LuaType::LIGHTUSERDATA: |
1920 | { | 1922 | { |
1921 | void* p = lua_touserdata( L, i); | 1923 | void* p = lua_touserdata( L, i); |
1922 | DEBUGSPEW_CODE( fprintf( stderr, "%p\n", p)); | 1924 | DEBUGSPEW_CODE( fprintf( stderr, "%p\n", p)); |
@@ -1926,42 +1928,44 @@ bool_t inter_copy_one( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* | |||
1926 | 1928 | ||
1927 | /* The following types are not allowed as table keys */ | 1929 | /* The following types are not allowed as table keys */ |
1928 | 1930 | ||
1929 | case LUA_TUSERDATA: | 1931 | case LuaType::USERDATA: |
1930 | ret = inter_copy_userdata( U, L2, L2_cache_i, L, i, vt, mode_, upName_); | 1932 | ret = inter_copy_userdata(U, L2, L2_cache_i, L, i, vt_, mode_, upName_); |
1931 | break; | 1933 | break; |
1932 | 1934 | ||
1933 | case LUA_TNIL: | 1935 | case LuaType::NIL: |
1934 | if( vt == VT_KEY) | 1936 | if (vt_ == VT::KEY) |
1935 | { | 1937 | { |
1936 | ret = FALSE; | 1938 | ret = false; |
1937 | break; | 1939 | break; |
1938 | } | 1940 | } |
1939 | lua_pushnil( L2); | 1941 | lua_pushnil( L2); |
1940 | break; | 1942 | break; |
1941 | 1943 | ||
1942 | case LUA_TFUNCTION: | 1944 | case LuaType::FUNCTION: |
1943 | ret = inter_copy_function( U, L2, L2_cache_i, L, i, vt, mode_, upName_); | 1945 | ret = inter_copy_function(U, L2, L2_cache_i, L, i, vt_, mode_, upName_); |
1944 | break; | 1946 | break; |
1945 | 1947 | ||
1946 | case LUA_TTABLE: | 1948 | case LuaType::TABLE: |
1947 | ret = inter_copy_table( U, L2, L2_cache_i, L, i, vt, mode_, upName_); | 1949 | ret = inter_copy_table(U, L2, L2_cache_i, L, i, vt_, mode_, upName_); |
1948 | break; | 1950 | break; |
1949 | 1951 | ||
1950 | /* The following types cannot be copied */ | 1952 | /* The following types cannot be copied */ |
1951 | 1953 | ||
1952 | case 10: // LuaJIT CDATA | 1954 | case LuaType::CDATA: |
1953 | case LUA_TTHREAD: | 1955 | case LuaType::THREAD: |
1954 | ret = FALSE; | 1956 | ret = false; |
1955 | break; | 1957 | break; |
1956 | } | 1958 | } |
1957 | 1959 | ||
1958 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); | 1960 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed)); |
1959 | 1961 | ||
1960 | STACK_END( L2, ret ? 1 : 0); | 1962 | STACK_CHECK( L2, ret ? 1 : 0); |
1961 | STACK_END( L, 0); | 1963 | STACK_CHECK( L, 0); |
1962 | return ret; | 1964 | return ret; |
1963 | } | 1965 | } |
1964 | 1966 | ||
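The __lanesignore check above means a non-POD value whose metatable carries __lanesignore = true is transferred as nil rather than being copied. A hedged sketch of how host code can tag a table or full userdata that way through the C API (illustrative; the equivalent can of course be done from Lua by setting the metatable field directly):

    // with the table or full userdata to tag on top of the stack of L
    static void sketch_mark_lanesignore(lua_State* L)
    {
        lua_newtable(L);                      // v mt
        lua_pushboolean(L, 1);                // v mt true
        lua_setfield(L, -2, "__lanesignore"); // v mt
        lua_setmetatable(L, -2);              // v
    }
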
1967 | // ################################################################################################# | ||
1968 | |||
1965 | /* | 1969 | /* |
1966 | * Akin to 'lua_xmove' but copies values between _any_ Lua states. | 1970 | * Akin to 'lua_xmove' but copies values between _any_ Lua states. |
1967 | * | 1971 | * |
@@ -1969,128 +1973,137 @@ bool_t inter_copy_one( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* | |||
1969 | * | 1973 | * |
1970 | * Note: Parameters are in this order ('L' = from first) to be same as 'lua_xmove'. | 1974 | * Note: Parameters are in this order ('L' = from first) to be same as 'lua_xmove'. |
1971 | */ | 1975 | */ |
1972 | InterCopyResult luaG_inter_copy( Universe* U, lua_State* L, lua_State* L2, uint_t n, LookupMode mode_) | 1976 | [[nodiscard]] InterCopyResult luaG_inter_copy(Universe* U, Source L, Dest L2, int n, LookupMode mode_) |
1973 | { | 1977 | { |
1974 | uint_t top_L = lua_gettop( L); // ... {}n | 1978 | int const top_L{ lua_gettop(L) }; // ... {}n |
1975 | uint_t top_L2 = lua_gettop( L2); // ... | 1979 | int const top_L2{ lua_gettop(L2) }; // ... |
1976 | uint_t i, j; | ||
1977 | char tmpBuf[16]; | 1980 | char tmpBuf[16]; |
1978 | char const* pBuf = U->verboseErrors ? tmpBuf : "?"; | 1981 | char const* pBuf{ U->verboseErrors ? tmpBuf : "?" }; |
1979 | bool_t copyok = TRUE; | ||
1980 | 1982 | ||
1981 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_inter_copy()\n" INDENT_END)); | 1983 | DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "luaG_inter_copy()\n" INDENT_END)); |
1982 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); | 1984 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed)); |
1983 | 1985 | ||
1984 | if( n > top_L) | 1986 | if (n > top_L) |
1985 | { | 1987 | { |
1986 | // requesting to copy more than is available? | 1988 | // requesting to copy more than is available? |
1987 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "nothing to copy()\n" INDENT_END)); | 1989 | DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "nothing to copy()\n" INDENT_END)); |
1988 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); | 1990 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed)); |
1989 | return eICR_NotEnoughValues; | 1991 | return InterCopyResult::NotEnoughValues; |
1990 | } | 1992 | } |
1991 | 1993 | ||
1992 | STACK_CHECK( L2, 0); | 1994 | STACK_CHECK_START_REL(L2, 0); |
1993 | STACK_GROW( L2, n + 1); | 1995 | STACK_GROW(L2, n + 1); |
1994 | 1996 | ||
1995 | /* | 1997 | /* |
1996 | * Make a cache table for the duration of this copy. Collects tables and | 1998 | * Make a cache table for the duration of this copy. Collects tables and |
1997 | * function entries, avoiding the same entries to be passed on as multiple | 1999 | * function entries, avoiding the same entries to be passed on as multiple |
1998 | * copies. ESSENTIAL i.e. for handling upvalue tables in the right manner! | 2000 | * copies. ESSENTIAL i.e. for handling upvalue tables in the right manner! |
1999 | */ | 2001 | */ |
2000 | lua_newtable( L2); // ... cache | 2002 | lua_newtable(L2); // ... cache |
2001 | 2003 | ||
2002 | STACK_CHECK( L, 0); | 2004 | STACK_CHECK_START_REL(L, 0); |
2003 | for( i = top_L - n + 1, j = 1; i <= top_L; ++ i, ++ j) | 2005 | bool copyok{ true }; |
2006 | for (int i = top_L - n + 1, j = 1; i <= top_L; ++i, ++j) | ||
2004 | { | 2007 | { |
2005 | if( U->verboseErrors) | 2008 | if (U->verboseErrors) |
2006 | { | 2009 | { |
2007 | sprintf( tmpBuf, "arg_%d", j); | 2010 | sprintf(tmpBuf, "arg_%d", j); |
2008 | } | 2011 | } |
2009 | copyok = inter_copy_one( U, L2, top_L2 + 1, L, i, VT_NORMAL, mode_, pBuf); // ... cache {}n | 2012 | copyok = inter_copy_one(U, L2, top_L2 + 1, L, i, VT::NORMAL, mode_, pBuf); // ... cache {}n |
2010 | if( !copyok) | 2013 | if (!copyok) |
2011 | { | 2014 | { |
2012 | break; | 2015 | break; |
2013 | } | 2016 | } |
2014 | } | 2017 | } |
2015 | STACK_END( L, 0); | 2018 | STACK_CHECK(L, 0); |
2016 | 2019 | ||
2017 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); | 2020 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed)); |
2018 | 2021 | ||
2019 | if( copyok) | 2022 | if (copyok) |
2020 | { | 2023 | { |
2021 | STACK_MID( L2, n + 1); | 2024 | STACK_CHECK(L2, n + 1); |
2022 | // Remove the cache table. Persistent caching would cause i.e. multiple | 2025 | // Remove the cache table. Persistent caching would cause i.e. multiple |
2023 | // messages passed in the same table to use the same table also in receiving end. | 2026 | // messages passed in the same table to use the same table also in receiving end. |
2024 | lua_remove( L2, top_L2 + 1); | 2027 | lua_remove(L2, top_L2 + 1); |
2025 | return eICR_Success; | 2028 | return InterCopyResult::Success; |
2026 | } | 2029 | } |
2027 | 2030 | ||
2028 | // error -> pop everything from the target state stack | 2031 | // error -> pop everything from the target state stack |
2029 | lua_settop( L2, top_L2); | 2032 | lua_settop(L2, top_L2); |
2030 | STACK_END( L2, 0); | 2033 | STACK_CHECK(L2, 0); |
2031 | return eICR_Error; | 2034 | return InterCopyResult::Error; |
2032 | } | 2035 | } |
2033 | 2036 | ||
2037 | // ################################################################################################# | ||
2034 | 2038 | ||
2035 | InterCopyResult luaG_inter_move( Universe* U, lua_State* L, lua_State* L2, uint_t n, LookupMode mode_) | 2039 | [[nodiscard]] InterCopyResult luaG_inter_move(Universe* U, Source L, Dest L2, int n_, LookupMode mode_) |
2036 | { | 2040 | { |
2037 | InterCopyResult ret = luaG_inter_copy( U, L, L2, n, mode_); | 2041 | InterCopyResult const ret{ luaG_inter_copy(U, L, L2, n_, mode_) }; |
2038 | lua_pop( L, (int) n); | 2042 | lua_pop( L, n_); |
2039 | return ret; | 2043 | return ret; |
2040 | } | 2044 | } |
2041 | 2045 | ||
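Taken together, luaG_inter_copy() leaves the copied values on the source stack while luaG_inter_move() pops them, mirroring lua_xmove()'s argument order ('L' = from first). A hedged usage sketch (illustrative: the surrounding wrapper and the choice of LookupMode::LaneBody are assumptions, not taken from this hunk; Source and Dest are the strong-typed lua_State wrappers used in the new signatures and convert back to lua_State* as at the call sites above):

    // transfer the 'n' values currently on top of 'from' to 'to'
    static void sketch_transfer(Universe* U, Source from, Dest to, int n)
    {
        InterCopyResult const r{ luaG_inter_move(U, from, to, n, LookupMode::LaneBody) };
        if (r != InterCopyResult::Success)
        {
            luaL_error(from, "transfer failed"); // doesn't return
        }
        // on success the values now sit on top of 'to' and are gone from 'from';
        // luaG_inter_copy() would have left them on 'from' as well
    }
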
2042 | InterCopyResult luaG_inter_copy_package( Universe* U, lua_State* L, lua_State* L2, int package_idx_, LookupMode mode_) | 2046 | // ################################################################################################# |
2047 | |||
2048 | // transfers stuff from L->_G["package"] to L2->_G["package"] | ||
2049 | // returns InterCopyResult::Success if everything is fine | ||
2050 | // returns InterCopyResult::Error if it pushed an error message in L | ||
2051 | // else raise an error in L | ||
2052 | [[nodiscard]] InterCopyResult luaG_inter_copy_package(Universe* U, Source L, Dest L2, int package_idx_, LookupMode mode_) | ||
2043 | { | 2053 | { |
2044 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_inter_copy_package()\n" INDENT_END)); | 2054 | DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "luaG_inter_copy_package()\n" INDENT_END)); |
2045 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); | 2055 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed)); |
2046 | // package | 2056 | // package |
2047 | STACK_CHECK( L, 0); | 2057 | STACK_CHECK_START_REL(L, 0); |
2048 | STACK_CHECK( L2, 0); | 2058 | STACK_CHECK_START_REL(L2, 0); |
2049 | package_idx_ = lua_absindex( L, package_idx_); | 2059 | package_idx_ = lua_absindex(L, package_idx_); |
2050 | if( lua_type( L, package_idx_) != LUA_TTABLE) | 2060 | if (lua_type(L, package_idx_) != LUA_TTABLE) |
2051 | { | 2061 | { |
2052 | lua_pushfstring( L, "expected package as table, got %s", luaL_typename( L, package_idx_)); | 2062 | lua_pushfstring(L, "expected package as table, got %s", luaL_typename(L, package_idx_)); |
2053 | STACK_MID( L, 1); | 2063 | STACK_CHECK(L, 1); |
2054 | // raise the error when copying from lane to lane, else just leave it on the stack to be raised later | 2064 | // raise the error when copying from lane to lane, else just leave it on the stack to be raised later |
2055 | if (mode_ == eLM_LaneBody) | 2065 | if (mode_ == LookupMode::LaneBody) |
2056 | { | 2066 | { |
2057 | lua_error(L); // doesn't return | 2067 | lua_error(L); // doesn't return |
2058 | } | 2068 | } |
2059 | return eICR_Error; | 2069 | return InterCopyResult::Error; |
2060 | } | 2070 | } |
2061 | lua_getglobal( L2, "package"); | 2071 | lua_getglobal(L2, "package"); |
2062 | if( !lua_isnil( L2, -1)) // package library not loaded: do nothing | 2072 | if (!lua_isnil(L2, -1)) // package library not loaded: do nothing |
2063 | { | 2073 | { |
2064 | int i; | ||
2065 | // package.loaders is renamed package.searchers in Lua 5.2 | 2074 | // package.loaders is renamed package.searchers in Lua 5.2 |
2066 | // but don't copy it anyway, as the function names change depending on the slot index! | 2075 | // but don't copy it anyway, as the function names change depending on the slot index! |
2067 | // users should provide an on_state_create function to setup custom loaders instead | 2076 | // users should provide an on_state_create function to setup custom loaders instead |
2068 | // don't copy package.preload in keeper states (they don't know how to translate functions) | 2077 | // don't copy package.preload in keeper states (they don't know how to translate functions) |
2069 | char const* entries[] = { "path", "cpath", (mode_ == eLM_LaneBody) ? "preload" : NULL/*, (LUA_VERSION_NUM == 501) ? "loaders" : "searchers"*/, NULL}; | 2078 | char const* entries[] = { "path", "cpath", (mode_ == LookupMode::LaneBody) ? "preload" : nullptr /*, (LUA_VERSION_NUM == 501) ? "loaders" : "searchers"*/, nullptr }; |
2070 | for( i = 0; entries[i]; ++ i) | 2079 | for (char const* const entry : entries) |
2071 | { | 2080 | { |
2072 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "package.%s\n" INDENT_END, entries[i])); | 2081 | if (!entry) |
2073 | lua_getfield( L, package_idx_, entries[i]); | 2082 | { |
2074 | if( lua_isnil( L, -1)) | 2083 | continue; |
2084 | } | ||
2085 | DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "package.%s\n" INDENT_END, entry)); | ||
2086 | lua_getfield(L, package_idx_, entry); | ||
2087 | if (lua_isnil(L, -1)) | ||
2075 | { | 2088 | { |
2076 | lua_pop( L, 1); | 2089 | lua_pop(L, 1); |
2077 | } | 2090 | } |
2078 | else | 2091 | else |
2079 | { | 2092 | { |
2080 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); | 2093 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed)); |
2081 | luaG_inter_move( U, L, L2, 1, mode_); // moves the entry to L2 | 2094 | std::ignore = luaG_inter_move(U, L, L2, 1, mode_); // moves the entry to L2 |
2082 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); | 2095 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed)); |
2083 | lua_setfield( L2, -2, entries[i]); // set package[entries[i]] | 2096 | lua_setfield(L2, -2, entry); // set package[entry] |
2084 | } | 2097 | } |
2085 | } | 2098 | } |
2086 | } | 2099 | } |
2087 | else | 2100 | else |
2088 | { | 2101 | { |
2089 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "'package' not loaded, nothing to do\n" INDENT_END)); | 2102 | DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "'package' not loaded, nothing to do\n" INDENT_END)); |
2090 | } | 2103 | } |
2091 | lua_pop( L2, 1); | 2104 | lua_pop(L2, 1); |
2092 | STACK_END( L2, 0); | 2105 | STACK_CHECK(L2, 0); |
2093 | STACK_END( L, 0); | 2106 | STACK_CHECK(L, 0); |
2094 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); | 2107 | DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed)); |
2095 | return eICR_Success; | 2108 | return InterCopyResult::Success; |
2096 | } | 2109 | } |
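For reference, the luaG_inter_copy/luaG_inter_move pair above differ only in their source-stack semantics: copy leaves the transferred values on L, while move pops them once the copy attempt is done. A minimal sketch of a caller, assuming a valid Universe* and the Source/Dest state wrappers are already in hand (transfer_top_value is an invented helper, not part of the Lanes API):

    // hypothetical helper: move the value on top of L_src onto L_dst
    static bool transfer_top_value(Universe* U, Source L_src, Dest L_dst)
    {
        // move semantics: the value is popped from L_src in all cases,
        // and lands on L_dst only when the result is Success
        InterCopyResult const r{ luaG_inter_move(U, L_src, L_dst, 1, LookupMode::LaneBody) };
        return r == InterCopyResult::Success;
    }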
diff --git a/src/tools.h b/src/tools.h
index 6c08734..dce7378 100644
--- a/src/tools.h
+++ b/src/tools.h
@@ -1,65 +1,53 @@ | |||
1 | #ifndef __LANES_TOOLS_H__ | 1 | #pragma once |
2 | #define __LANES_TOOLS_H__ | ||
3 | 2 | ||
4 | //#include "lauxlib.h" | ||
5 | #include "threading.h" | ||
6 | #include "deep.h" | 3 | #include "deep.h" |
7 | |||
8 | #include "macros_and_utils.h" | 4 | #include "macros_and_utils.h" |
9 | 5 | ||
10 | // forwards | 6 | // forwards |
11 | struct s_Universe; | 7 | class Universe; |
12 | typedef struct s_Universe Universe; | ||
13 | 8 | ||
14 | // ################################################################################################ | 9 | // ################################################################################################ |
15 | 10 | ||
16 | #define luaG_optunsigned(L,i,d) ((uint_t) luaL_optinteger(L,i,d)) | ||
17 | #define luaG_tounsigned(L,i) ((uint_t) lua_tointeger(L,i)) | ||
18 | |||
19 | #ifdef _DEBUG | 11 | #ifdef _DEBUG |
20 | void luaG_dump( lua_State* L); | 12 | void luaG_dump(lua_State* L); |
21 | #endif // _DEBUG | 13 | #endif // _DEBUG |
22 | 14 | ||
23 | // ################################################################################################ | 15 | // ################################################################################################ |
24 | 16 | ||
25 | void push_registry_subtable_mode( lua_State* L, UniqueKey key_, const char* mode_); | 17 | void push_registry_subtable_mode(lua_State* L, UniqueKey key_, const char* mode_); |
26 | void push_registry_subtable( lua_State* L, UniqueKey key_); | 18 | void push_registry_subtable(lua_State* L, UniqueKey key_); |
27 | 19 | ||
28 | enum e_vt | 20 | enum class VT |
29 | { | 21 | { |
30 | VT_NORMAL, | 22 | NORMAL, |
31 | VT_KEY, | 23 | KEY, |
32 | VT_METATABLE | 24 | METATABLE |
33 | }; | 25 | }; |
34 | 26 | ||
35 | enum eInterCopyResult | 27 | enum class InterCopyResult |
36 | { | 28 | { |
37 | eICR_Success, | 29 | Success, |
38 | eICR_NotEnoughValues, | 30 | NotEnoughValues, |
39 | eICR_Error | 31 | Error |
40 | }; | 32 | }; |
41 | typedef enum eInterCopyResult InterCopyResult; | ||
42 | 33 | ||
43 | bool_t inter_copy_one( Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, enum e_vt vt, LookupMode mode_, char const* upName_); | 34 | [[nodiscard]] bool inter_copy_one(Universe* U, Dest L2, int L2_cache_i, Source L, int i, VT vt_, LookupMode mode_, char const* upName_); |
44 | 35 | ||
45 | // ################################################################################################ | 36 | // ################################################################################################ |
46 | 37 | ||
47 | InterCopyResult luaG_inter_copy_package( Universe* U, lua_State* L, lua_State* L2, int package_idx_, LookupMode mode_); | 38 | [[nodiscard]] InterCopyResult luaG_inter_copy_package(Universe* U, Source L, Dest L2, int package_idx_, LookupMode mode_); |
48 | InterCopyResult luaG_inter_copy( Universe* U, lua_State* L, lua_State* L2, uint_t n, LookupMode mode_); | 39 | [[nodiscard]] InterCopyResult luaG_inter_copy(Universe* U, Source L, Dest L2, int n, LookupMode mode_); |
49 | InterCopyResult luaG_inter_move( Universe* U, lua_State* L, lua_State* L2, uint_t n, LookupMode mode_); | 40 | [[nodiscard]] InterCopyResult luaG_inter_move(Universe* U, Source L, Dest L2, int n, LookupMode mode_); |
50 | 41 | ||
51 | int luaG_nameof( lua_State* L); | 42 | [[nodiscard]] int luaG_nameof(lua_State* L); |
52 | 43 | ||
53 | void populate_func_lookup_table( lua_State* L, int _i, char const* _name); | 44 | void populate_func_lookup_table(lua_State* L, int _i, char const* _name); |
54 | void initialize_allocator_function( Universe* U, lua_State* L); | 45 | void initialize_allocator_function(Universe* U, lua_State* L); |
55 | void cleanup_allocator_function( Universe* U, lua_State* L); | ||
56 | 46 | ||
57 | // ################################################################################################ | 47 | // ################################################################################################ |
58 | 48 | ||
59 | // crc64/we of string "CONFIG_REGKEY" generated at http://www.nitrxgen.net/hashgen/ | 49 | // crc64/we of string "CONFIG_REGKEY" generated at http://www.nitrxgen.net/hashgen/ |
60 | static DECLARE_CONST_UNIQUE_KEY( CONFIG_REGKEY, 0x31cd24894eae8624); // 'cancel_error' sentinel | 50 | static constexpr UniqueKey CONFIG_REGKEY{ 0x31cd24894eae8624ull }; // registry key to access the configuration |
61 | 51 | ||
62 | // crc64/we of string "LOOKUP_REGKEY" generated at http://www.nitrxgen.net/hashgen/ | 52 | // crc64/we of string "LOOKUP_REGKEY" generated at http://www.nitrxgen.net/hashgen/ |
63 | static DECLARE_CONST_UNIQUE_KEY( LOOKUP_REGKEY, 0x5051ed67ee7b51a1); // 'cancel_error' sentinel | 53 | static constexpr UniqueKey LOOKUP_REGKEY{ 0x5051ed67ee7b51a1ull }; // registry key to access the lookup database |
64 | |||
65 | #endif // __LANES_TOOLS_H__ | ||
diff --git a/src/uniquekey.h b/src/uniquekey.h
index 7162753..a89ecd3 100644
--- a/src/uniquekey.h
+++ b/src/uniquekey.h
@@ -1,25 +1,76 @@ | |||
1 | #if !defined __LANES_UNIQUEKEY_H__ | 1 | #pragma once |
2 | #define __LANES_UNIQUEKEY_H__ 1 | ||
3 | 2 | ||
4 | #include "compat.h" | 3 | #include "compat.h" |
4 | #include "macros_and_utils.h" | ||
5 | 5 | ||
6 | // Lua light userdata can hold a pointer. | 6 | #include <bit> |
7 | struct s_UniqueKey | 7 | |
8 | class UniqueKey | ||
8 | { | 9 | { |
9 | void* value; | 10 | private: |
10 | }; | 11 | |
11 | typedef struct s_UniqueKey UniqueKey; | 12 | uintptr_t m_storage; |
13 | |||
14 | public: | ||
12 | 15 | ||
16 | constexpr explicit UniqueKey(uint64_t val_) | ||
13 | #if LUAJIT_FLAVOR() == 64 // building against LuaJIT headers for 64 bits, light userdata is restricted to 47 significant bits, because LuaJIT uses the other bits for internal optimizations | 17 | #if LUAJIT_FLAVOR() == 64 // building against LuaJIT headers for 64 bits, light userdata is restricted to 47 significant bits, because LuaJIT uses the other bits for internal optimizations |
14 | #define MAKE_UNIQUE_KEY( p_) ((void*)((uintptr_t)(p_) & 0x7fffffffffffull)) | 18 | : m_storage{ static_cast<uintptr_t>(val_ & 0x7fffffffffffull) } |
15 | #else // LUAJIT_FLAVOR() | 19 | #else // LUAJIT_FLAVOR() |
16 | #define MAKE_UNIQUE_KEY( p_) ((void*)(uintptr_t)(p_)) | 20 | : m_storage{ static_cast<uintptr_t>(val_) } |
17 | #endif // LUAJIT_FLAVOR() | 21 | #endif // LUAJIT_FLAVOR() |
22 | { | ||
23 | } | ||
24 | constexpr UniqueKey(UniqueKey const& rhs_) = default; | ||
25 | constexpr bool operator!=(UniqueKey const& rhs_) const | ||
26 | { | ||
27 | return m_storage != rhs_.m_storage; | ||
28 | } | ||
29 | constexpr bool operator==(UniqueKey const& rhs_) const | ||
30 | { | ||
31 | return m_storage == rhs_.m_storage; | ||
32 | } | ||
18 | 33 | ||
19 | #define DECLARE_UNIQUE_KEY( name_) UniqueKey name_ | 34 | void pushKey(lua_State* const L) const |
20 | #define DECLARE_CONST_UNIQUE_KEY( name_, p_) UniqueKey const name_ = { MAKE_UNIQUE_KEY( p_)} | 35 | { |
21 | 36 | lua_pushlightuserdata(L, std::bit_cast<void*>(m_storage)); | |
22 | #define push_unique_key( L, key_) lua_pushlightuserdata( L, key_.value) | 37 | } |
23 | #define equal_unique_key( L, i, key_) (lua_touserdata( L, i) == key_.value) | 38 | bool equals(lua_State* const L, int i) const |
24 | 39 | { | |
25 | #endif // __LANES_UNIQUEKEY_H__ | 40 | return lua_touserdata(L, i) == std::bit_cast<void*>(m_storage); |
41 | } | ||
42 | void pushValue(lua_State* const L) const | ||
43 | { | ||
44 | pushKey(L); | ||
45 | lua_rawget(L, LUA_REGISTRYINDEX); | ||
46 | } | ||
47 | template <typename OP> | ||
48 | void setValue(lua_State* L, OP operation_) const | ||
49 | { | ||
50 | // Note we can't check stack consistency because operation is not always a push (could be insert, replace, whatever) | ||
51 | pushKey(L); // ... key | ||
52 | operation_(L); // ... key value | ||
53 | lua_rawset(L, LUA_REGISTRYINDEX); // ... | ||
54 | } | ||
55 | template <typename T> | ||
56 | T* readLightUserDataValue(lua_State* const L) const | ||
57 | { | ||
58 | STACK_GROW(L, 1); | ||
59 | STACK_CHECK_START_REL(L, 0); | ||
60 | pushValue(L); | ||
61 | T* const value{ lua_tolightuserdata<T>(L, -1) }; // lightuserdata/nil | ||
62 | lua_pop(L, 1); | ||
63 | STACK_CHECK(L, 0); | ||
64 | return value; | ||
65 | } | ||
66 | bool readBoolValue(lua_State* const L) const | ||
67 | { | ||
68 | STACK_GROW(L, 1); | ||
69 | STACK_CHECK_START_REL(L, 0); | ||
70 | pushValue(L); | ||
71 | bool const value{ lua_toboolean(L, -1) ? true : false}; // bool/nil | ||
72 | lua_pop(L, 1); | ||
73 | STACK_CHECK(L, 0); | ||
74 | return value; | ||
75 | } | ||
76 | }; | ||
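As a usage sketch for the UniqueKey class above (the key value and both helpers are invented for illustration, not actual Lanes symbols): a key is declared constexpr from a 64-bit hash, and all registry traffic then goes through pushKey/setValue/readBoolValue so the LuaJIT 47-bit masking stays in one place.

    // xxh64/crc64 of some string, as for the other keys; the value here is made up
    static constexpr UniqueKey EXAMPLE_REGKEY{ 0x0123456789abcdefull };

    // store a boolean flag in the registry under the key
    void set_example_flag(lua_State* L, bool enabled_)
    {
        EXAMPLE_REGKEY.setValue(L, [enabled_](lua_State* L) { lua_pushboolean(L, enabled_ ? 1 : 0); });
    }

    // read it back; a nil slot (never set) reads as false
    bool get_example_flag(lua_State* L)
    {
        return EXAMPLE_REGKEY.readBoolValue(L);
    }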
diff --git a/src/universe.c b/src/universe.c
deleted file mode 100644
index 9f84baf..0000000
--- a/src/universe.c
+++ /dev/null
@@ -1,75 +0,0 @@ | |||
1 | /* | ||
2 | * UNIVERSE.C Copyright (c) 2017, Benoit Germain | ||
3 | */ | ||
4 | |||
5 | /* | ||
6 | =============================================================================== | ||
7 | |||
8 | Copyright (C) 2017 Benoit Germain <bnt.germain@gmail.com> | ||
9 | |||
10 | Permission is hereby granted, free of charge, to any person obtaining a copy | ||
11 | of this software and associated documentation files (the "Software"), to deal | ||
12 | in the Software without restriction, including without limitation the rights | ||
13 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||
14 | copies of the Software, and to permit persons to whom the Software is | ||
15 | furnished to do so, subject to the following conditions: | ||
16 | |||
17 | The above copyright notice and this permission notice shall be included in | ||
18 | all copies or substantial portions of the Software. | ||
19 | |||
20 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
21 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
22 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
23 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
24 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
25 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||
26 | THE SOFTWARE. | ||
27 | |||
28 | =============================================================================== | ||
29 | */ | ||
30 | |||
31 | #include <string.h> | ||
32 | #include <assert.h> | ||
33 | |||
34 | #include "universe.h" | ||
35 | #include "compat.h" | ||
36 | #include "macros_and_utils.h" | ||
37 | #include "uniquekey.h" | ||
38 | |||
39 | // crc64/we of string "UNIVERSE_REGKEY" generated at http://www.nitrxgen.net/hashgen/ | ||
40 | static DECLARE_CONST_UNIQUE_KEY( UNIVERSE_REGKEY, 0x9f877b2cf078f17f); | ||
41 | |||
42 | // ################################################################################################ | ||
43 | |||
44 | Universe* universe_create( lua_State* L) | ||
45 | { | ||
46 | Universe* U = (Universe*) lua_newuserdatauv( L, sizeof(Universe), 0); // universe | ||
47 | memset( U, 0, sizeof( Universe)); | ||
48 | STACK_CHECK( L, 1); | ||
49 | REGISTRY_SET( L, UNIVERSE_REGKEY, lua_pushvalue(L, -2)); // universe | ||
50 | STACK_END( L, 1); | ||
51 | return U; | ||
52 | } | ||
53 | |||
54 | // ################################################################################################ | ||
55 | |||
56 | void universe_store( lua_State* L, Universe* U) | ||
57 | { | ||
58 | STACK_CHECK( L, 0); | ||
59 | REGISTRY_SET( L, UNIVERSE_REGKEY, (NULL != U) ? lua_pushlightuserdata( L, U) : lua_pushnil( L)); | ||
60 | STACK_END( L, 0); | ||
61 | } | ||
62 | |||
63 | // ################################################################################################ | ||
64 | |||
65 | Universe* universe_get( lua_State* L) | ||
66 | { | ||
67 | Universe* universe; | ||
68 | STACK_GROW( L, 2); | ||
69 | STACK_CHECK( L, 0); | ||
70 | REGISTRY_GET( L, UNIVERSE_REGKEY); | ||
71 | universe = lua_touserdata( L, -1); // NULL if nil | ||
72 | lua_pop( L, 1); | ||
73 | STACK_END( L, 0); | ||
74 | return universe; | ||
75 | } | ||
diff --git a/src/universe.cpp b/src/universe.cpp
new file mode 100644
index 0000000..4c53987
--- /dev/null
+++ b/src/universe.cpp
@@ -0,0 +1,106 @@ | |||
1 | /* | ||
2 | * UNIVERSE.C Copyright (c) 2017, Benoit Germain | ||
3 | */ | ||
4 | |||
5 | /* | ||
6 | =============================================================================== | ||
7 | |||
8 | Copyright (C) 2017 Benoit Germain <bnt.germain@gmail.com> | ||
9 | |||
10 | Permission is hereby granted, free of charge, to any person obtaining a copy | ||
11 | of this software and associated documentation files (the "Software"), to deal | ||
12 | in the Software without restriction, including without limitation the rights | ||
13 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||
14 | copies of the Software, and to permit persons to whom the Software is | ||
15 | furnished to do so, subject to the following conditions: | ||
16 | |||
17 | The above copyright notice and this permission notice shall be included in | ||
18 | all copies or substantial portions of the Software. | ||
19 | |||
20 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
21 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
22 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
23 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
24 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
25 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||
26 | THE SOFTWARE. | ||
27 | |||
28 | =============================================================================== | ||
29 | */ | ||
30 | |||
31 | #include <string.h> | ||
32 | #include <assert.h> | ||
33 | |||
34 | #include "universe.h" | ||
35 | #include "compat.h" | ||
36 | #include "macros_and_utils.h" | ||
37 | #include "uniquekey.h" | ||
38 | |||
39 | // xxh64 of string "UNIVERSE_FULL_REGKEY" generated at http://www.nitrxgen.net/hashgen/ | ||
40 | static constexpr UniqueKey UNIVERSE_FULL_REGKEY{ 0x99CA130C09EDC074ull }; | ||
41 | // xxh64 of string "UNIVERSE_LIGHT_REGKEY" generated at http://www.nitrxgen.net/hashgen/ | ||
42 | static constexpr UniqueKey UNIVERSE_LIGHT_REGKEY{ 0x3663C07C742CEB81ull }; | ||
43 | |||
44 | // ################################################################################################ | ||
45 | |||
46 | Universe::Universe() | ||
47 | { | ||
48 | //--- | ||
49 | // Linux needs SCHED_RR to change thread priorities, and that is only | ||
50 | // allowed for sudo'ers. SCHED_OTHER (default) has no priorities. | ||
51 | // SCHED_OTHER threads are always lower priority than SCHED_RR. | ||
52 | // | ||
53 | // ^-- those apply to 2.6 kernel. IF **wishful thinking** these | ||
54 | // constraints will change in the future, non-sudo priorities can | ||
55 | // be enabled also for Linux. | ||
56 | // | ||
57 | #ifdef PLATFORM_LINUX | ||
58 | // If lower priorities (-2..-1) are wanted, we need to lift the main | ||
59 | // thread to SCHED_RR and 50 (medium) level. Otherwise, we're always below | ||
60 | // the launched threads (even -2). | ||
61 | // | ||
62 | #ifdef LINUX_SCHED_RR | ||
63 | if (m_sudo) | ||
64 | { | ||
65 | struct sched_param sp; | ||
66 | sp.sched_priority = _PRIO_0; | ||
67 | PT_CALL(pthread_setschedparam(pthread_self(), SCHED_RR, &sp)); | ||
68 | } | ||
69 | #endif // LINUX_SCHED_RR | ||
70 | #endif // PLATFORM_LINUX | ||
71 | } | ||
72 | |||
73 | // ################################################################################################ | ||
74 | |||
75 | // only called from the master state | ||
76 | Universe* universe_create(lua_State* L) | ||
77 | { | ||
78 | ASSERT_L(universe_get(L) == nullptr); | ||
79 | Universe* const U = static_cast<Universe*>(lua_newuserdatauv(L, sizeof(Universe), 0)); // universe | ||
80 | U->Universe::Universe(); | ||
81 | STACK_CHECK_START_REL(L, 1); | ||
82 | UNIVERSE_FULL_REGKEY.setValue(L, [](lua_State* L) { lua_pushvalue(L, -2); }); | ||
83 | UNIVERSE_LIGHT_REGKEY.setValue(L, [U](lua_State* L) { lua_pushlightuserdata(L, U); }); | ||
84 | STACK_CHECK(L, 1); | ||
85 | return U; | ||
86 | } | ||
87 | |||
88 | // ################################################################################################ | ||
89 | |||
90 | void universe_store(lua_State* L, Universe* U) | ||
91 | { | ||
92 | ASSERT_L(!U || universe_get(L) == nullptr); | ||
93 | STACK_CHECK_START_REL(L, 0); | ||
94 | UNIVERSE_LIGHT_REGKEY.setValue(L, [U](lua_State* L) { U ? lua_pushlightuserdata(L, U) : lua_pushnil(L); }); | ||
95 | STACK_CHECK(L, 0); | ||
96 | } | ||
97 | |||
98 | // ################################################################################################ | ||
99 | |||
100 | Universe* universe_get(lua_State* L) | ||
101 | { | ||
102 | STACK_CHECK_START_REL(L, 0); | ||
103 | Universe* const universe{ UNIVERSE_LIGHT_REGKEY.readLightUserDataValue<Universe>(L) }; | ||
104 | STACK_CHECK(L, 0); | ||
105 | return universe; | ||
106 | } | ||
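The two keys above split the job: the full key anchors the Universe userdata in the registry so it is never collected, while the light key caches a raw pointer that universe_get() can read cheaply. A hedged sketch of an embedder-side C function relying on that lookup (my_entry_point and its error message are invented):

    static int my_entry_point(lua_State* L)
    {
        // universe_get() yields nullptr when universe_create()/universe_store()
        // has not run for this state (the light registry slot is still nil)
        Universe* const U{ universe_get(L) };
        if (U == nullptr)
        {
            return luaL_error(L, "Lanes universe not initialized in this state");
        }
        // ... work with U here
        return 0;
    }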
diff --git a/src/universe.h b/src/universe.h
index 03c78cf..113ed21 100644
--- a/src/universe.h
+++ b/src/universe.h
@@ -1,23 +1,25 @@ | |||
1 | /* | 1 | #pragma once |
2 | * UNIVERSE.H | ||
3 | */ | ||
4 | #ifndef UNIVERSE_H | ||
5 | #define UNIVERSE_H | ||
6 | 2 | ||
3 | #ifdef __cplusplus | ||
4 | extern "C" { | ||
5 | #endif // __cplusplus | ||
7 | #include "lua.h" | 6 | #include "lua.h" |
8 | #include "threading.h" | 7 | #ifdef __cplusplus |
8 | } | ||
9 | #endif // __cplusplus | ||
10 | |||
11 | #include "compat.h" | ||
9 | #include "macros_and_utils.h" | 12 | #include "macros_and_utils.h" |
10 | 13 | ||
11 | // forwards | 14 | #include <mutex> |
12 | struct s_DeepPrelude; | ||
13 | typedef struct s_DeepPrelude DeepPrelude; | ||
14 | struct s_Keepers; | ||
15 | typedef struct s_Keepers Keepers; | ||
16 | struct s_Lane; | ||
17 | typedef struct s_Lane Lane; | ||
18 | 15 | ||
19 | // ################################################################################################ | 16 | // ################################################################################################ |
20 | 17 | ||
18 | // forwards | ||
19 | struct DeepPrelude; | ||
20 | struct Keepers; | ||
21 | class Lane; | ||
22 | |||
21 | /* | 23 | /* |
22 | * Do we want to activate full lane tracking feature? (EXPERIMENTAL) | 24 | * Do we want to activate full lane tracking feature? (EXPERIMENTAL) |
23 | */ | 25 | */ |
@@ -26,38 +28,118 @@ typedef struct s_Lane Lane; | |||
26 | // ################################################################################################ | 28 | // ################################################################################################ |
27 | 29 | ||
28 | // everything we need to provide to lua_newstate() | 30 | // everything we need to provide to lua_newstate() |
29 | struct AllocatorDefinition_s | 31 | class AllocatorDefinition |
30 | { | 32 | { |
31 | lua_Alloc allocF; | 33 | public: |
32 | void* allocUD; | 34 | |
35 | lua_Alloc m_allocF{ nullptr }; | ||
36 | void* m_allocUD{ nullptr }; | ||
37 | |||
38 | [[nodiscard]] static void* operator new(size_t size_, lua_State* L) noexcept { return lua_newuserdatauv(L, size_, 0); } | ||
39 | // always embedded somewhere else or "in-place constructed" as a full userdata | ||
40 | // can't actually delete the operator because the compiler generates stack unwinding code that could call it in case of exception | ||
41 | static void operator delete([[maybe_unused]] void* p_, lua_State* L) { ASSERT_L(!"should never be called") }; | ||
42 | |||
43 | AllocatorDefinition(lua_Alloc allocF_, void* allocUD_) noexcept | ||
44 | : m_allocF{ allocF_ } | ||
45 | , m_allocUD{ allocUD_ } | ||
46 | { | ||
47 | } | ||
48 | AllocatorDefinition() = default; | ||
49 | AllocatorDefinition(AllocatorDefinition const& rhs_) = default; | ||
50 | AllocatorDefinition(AllocatorDefinition&& rhs_) = default; | ||
51 | AllocatorDefinition& operator=(AllocatorDefinition const& rhs_) = default; | ||
52 | AllocatorDefinition& operator=(AllocatorDefinition&& rhs_) = default; | ||
53 | |||
54 | void initFrom(lua_State* L) | ||
55 | { | ||
56 | m_allocF = lua_getallocf(L, &m_allocUD); | ||
57 | } | ||
58 | |||
59 | void* lua_alloc(void* ptr_, size_t osize_, size_t nsize_) | ||
60 | { | ||
61 | m_allocF(m_allocUD, ptr_, osize_, nsize_); | ||
62 | } | ||
63 | |||
64 | void* alloc(size_t nsize_) | ||
65 | { | ||
66 | return m_allocF(m_allocUD, nullptr, 0, nsize_); | ||
67 | } | ||
68 | |||
69 | void free(void* ptr_, size_t osize_) | ||
70 | { | ||
71 | std::ignore = m_allocF(m_allocUD, ptr_, osize_, 0); | ||
72 | } | ||
33 | }; | 73 | }; |
34 | typedef struct AllocatorDefinition_s AllocatorDefinition; | 74 | |
75 | // ################################################################################################ | ||
35 | 76 | ||
36 | // mutex-protected allocator for use with Lua states that share a non-threadsafe allocator | 77 | // mutex-protected allocator for use with Lua states that share a non-threadsafe allocator |
37 | struct ProtectedAllocator_s | 78 | class ProtectedAllocator : public AllocatorDefinition |
38 | { | 79 | { |
39 | AllocatorDefinition definition; | 80 | private: |
40 | MUTEX_T lock; | 81 | |
82 | std::mutex m_lock; | ||
83 | |||
84 | [[nodiscard]] static void* protected_lua_Alloc(void* ud_, void* ptr_, size_t osize_, size_t nsize_) | ||
85 | { | ||
86 | ProtectedAllocator* const allocator{ static_cast<ProtectedAllocator*>(ud_) }; | ||
87 | std::lock_guard<std::mutex> guard{ allocator->m_lock }; | ||
88 | return allocator->m_allocF(allocator->m_allocUD, ptr_, osize_, nsize_); | ||
89 | } | ||
90 | |||
91 | public: | ||
92 | |||
93 | // we are not like our base class: we can't be created inside a full userdata (or we would have to install a metatable and __gc handler to destroy ourselves properly) | ||
94 | [[nodiscard]] static void* operator new(size_t size_, lua_State* L) noexcept = delete; | ||
95 | static void operator delete(void* p_, lua_State* L) = delete; | ||
96 | |||
97 | AllocatorDefinition makeDefinition() | ||
98 | { | ||
99 | return AllocatorDefinition{ protected_lua_Alloc, this}; | ||
100 | } | ||
101 | |||
102 | void installIn(lua_State* L) | ||
103 | { | ||
104 | lua_setallocf(L, protected_lua_Alloc, this); | ||
105 | } | ||
106 | |||
107 | void removeFrom(lua_State* L) | ||
108 | { | ||
109 | // remove the protected allocator, if any | ||
110 | if (m_allocF != nullptr) | ||
111 | { | ||
112 | // install the non-protected allocator | ||
113 | lua_setallocf(L, m_allocF, m_allocUD); | ||
114 | } | ||
115 | } | ||
41 | }; | 116 | }; |
42 | typedef struct ProtectedAllocator_s ProtectedAllocator; | ||
43 | 117 | ||
44 | // ################################################################################################ | 118 | // ################################################################################################ |
45 | 119 | ||
46 | // everything regarding the Lanes universe is stored in that global structure | 120 | // everything regarding the Lanes universe is stored in that global structure |
47 | // held as a full userdata in the master Lua state that required it for the first time | 121 | // held as a full userdata in the master Lua state that required it for the first time |
48 | // don't forget to initialize all members in LG_configure() | 122 | class Universe |
49 | struct s_Universe | ||
50 | { | 123 | { |
124 | public: | ||
125 | |||
126 | #ifdef PLATFORM_LINUX | ||
127 | // Linux needs to check, whether it's been run as root | ||
128 | bool const m_sudo{ geteuid() == 0 }; | ||
129 | #else | ||
130 | bool const m_sudo{ false }; | ||
131 | #endif // PLATFORM_LINUX | ||
132 | |||
51 | // for verbose errors | 133 | // for verbose errors |
52 | bool_t verboseErrors; | 134 | bool verboseErrors{ false }; |
53 | 135 | ||
54 | bool_t demoteFullUserdata; | 136 | bool demoteFullUserdata{ false }; |
55 | 137 | ||
56 | // before a state is created, this function will be called to obtain the allocator | 138 | // before a state is created, this function will be called to obtain the allocator |
57 | lua_CFunction provide_allocator; | 139 | lua_CFunction provide_allocator{ nullptr }; |
58 | 140 | ||
59 | // after a state is created, this function will be called right after the bases libraries are loaded | 141 | // after a state is created, this function will be called right after the bases libraries are loaded |
60 | lua_CFunction on_state_create_func; | 142 | lua_CFunction on_state_create_func{ nullptr }; |
61 | 143 | ||
62 | // if allocator="protected" is found in the configuration settings, a wrapper allocator will protect all allocator calls with a mutex | 144 | // if allocator="protected" is found in the configuration settings, a wrapper allocator will protect all allocator calls with a mutex |
63 | // contains a mutex and the original allocator definition | 145 | // contains a mutex and the original allocator definition |
@@ -65,41 +147,44 @@ struct s_Universe | |||
65 | 147 | ||
66 | AllocatorDefinition internal_allocator; | 148 | AllocatorDefinition internal_allocator; |
67 | 149 | ||
68 | Keepers* keepers; | 150 | Keepers* keepers{ nullptr }; |
69 | 151 | ||
70 | // Initialized by 'init_once_LOCKED()': the deep userdata Linda object | 152 | // Initialized by 'init_once_LOCKED()': the deep userdata Linda object |
71 | // used for timers (each lane will get a proxy to this) | 153 | // used for timers (each lane will get a proxy to this) |
72 | volatile DeepPrelude* timer_deep; // = NULL | 154 | DeepPrelude* timer_deep{ nullptr }; |
73 | 155 | ||
74 | #if HAVE_LANE_TRACKING() | 156 | #if HAVE_LANE_TRACKING() |
75 | MUTEX_T tracking_cs; | 157 | std::mutex tracking_cs; |
76 | Lane* volatile tracking_first; // will change to TRACKING_END if we want to activate tracking | 158 | Lane* volatile tracking_first{ nullptr }; // will change to TRACKING_END if we want to activate tracking |
77 | #endif // HAVE_LANE_TRACKING() | 159 | #endif // HAVE_LANE_TRACKING() |
78 | 160 | ||
79 | MUTEX_T selfdestruct_cs; | 161 | std::mutex selfdestruct_cs; |
80 | 162 | ||
81 | // require() serialization | 163 | // require() serialization |
82 | MUTEX_T require_cs; | 164 | std::recursive_mutex require_cs; |
83 | |||
84 | // Lock for reference counter inc/dec locks (to be initialized by outside code) TODO: get rid of this and use atomics instead! | ||
85 | MUTEX_T deep_lock; | ||
86 | MUTEX_T mtid_lock; | ||
87 | 165 | ||
88 | lua_Integer last_mt_id; | 166 | // metatable unique identifiers |
167 | std::atomic<lua_Integer> next_mt_id{ 1 }; | ||
89 | 168 | ||
90 | #if USE_DEBUG_SPEW() | 169 | #if USE_DEBUG_SPEW() |
91 | int debugspew_indent_depth; | 170 | std::atomic<int> debugspew_indent_depth{ 0 }; |
92 | #endif // USE_DEBUG_SPEW() | 171 | #endif // USE_DEBUG_SPEW() |
93 | 172 | ||
94 | Lane* volatile selfdestruct_first; | 173 | Lane* volatile selfdestruct_first{ nullptr }; |
95 | // After a lane has removed itself from the chain, it still performs some processing. | 174 | // After a lane has removed itself from the chain, it still performs some processing. |
96 | // The terminal desinit sequence should wait for all such processing to terminate before force-killing threads | 175 | // The terminal desinit sequence should wait for all such processing to terminate before force-killing threads |
97 | int volatile selfdestructing_count; | 176 | std::atomic<int> selfdestructing_count{ 0 }; |
177 | |||
178 | Universe(); | ||
179 | ~Universe() = default; | ||
180 | Universe(Universe const&) = delete; | ||
181 | Universe(Universe&&) = delete; | ||
182 | Universe& operator=(Universe const&) = delete; | ||
183 | Universe& operator=(Universe&&) = delete; | ||
98 | }; | 184 | }; |
99 | typedef struct s_Universe Universe; | ||
100 | 185 | ||
101 | Universe* universe_get( lua_State* L); | 186 | // ################################################################################################ |
102 | Universe* universe_create( lua_State* L); | ||
103 | void universe_store( lua_State* L, Universe* U); | ||
104 | 187 | ||
105 | #endif // UNIVERSE_H | 188 | [[nodiscard]] Universe* universe_get(lua_State* L); |
189 | [[nodiscard]] Universe* universe_create(lua_State* L); | ||
190 | void universe_store(lua_State* L, Universe* U); | ||
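The ProtectedAllocator declared above simply serializes every call to the wrapped lua_Alloc behind a std::mutex. A minimal sketch of the intended pattern, assuming an embedder-owned instance that outlives every state it serves (g_shared_alloc, adopt_state and release_state are placeholders, not Lanes symbols):

    static ProtectedAllocator g_shared_alloc; // default-constructed: m_allocF is nullptr

    void adopt_state(lua_State* L)
    {
        if (g_shared_alloc.m_allocF == nullptr)
        {
            g_shared_alloc.initFrom(L); // capture the original, non-threadsafe allocator once
        }
        g_shared_alloc.installIn(L); // route this state's allocations through the mutex
    }

    void release_state(lua_State* L)
    {
        g_shared_alloc.removeFrom(L); // restore the unprotected allocator before lua_close()
    }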
diff --git a/tests/cancel.lua b/tests/cancel.lua
index 4e57184..c22103f 100644
--- a/tests/cancel.lua
+++ b/tests/cancel.lua
@@ -220,25 +220,10 @@ if not next(which_tests) or which_tests.hard_unprotected then | |||
220 | waitCancellation( h, "cancelled") | 220 | waitCancellation( h, "cancelled") |
221 | end | 221 | end |
222 | 222 | ||
223 | if not next(which_tests) or which_tests.kill then | ||
224 | remaining_tests.kill = nil | ||
225 | print "\n\n####################################################################\nbegin kill cancel test\n" | ||
226 | h = lanes.gen( "*", laneBody)( "busy", 50000000) -- start a pure Lua busy loop lane | ||
227 | |||
228 | -- wait 1/3s before cancelling the lane, before the busy loop can finish | ||
229 | print "wait 0.3s" | ||
230 | linda:receive( 0.3, "yeah") | ||
231 | |||
232 | -- hard cancel with kill: the lane thread will be forcefully terminated. kill timeout is pthread-specific | ||
233 | print "cancelling" | ||
234 | h:cancel( true, 1.0) | ||
235 | |||
236 | -- wait until cancellation is effective. the lane will be stopped by the linda operation throwing an error | ||
237 | waitCancellation( h, "killed") | ||
238 | end | ||
239 | --#################################################################### | 223 | --#################################################################### |
240 | 224 | ||
241 | local unknown_test, val = next(remaining_tests) | 225 | local unknown_test, val = next(remaining_tests) |
242 | assert(not unknown_test, tostring(unknown_test) .. " test is unknown") | 226 | assert(not unknown_test, tostring(unknown_test) .. " test is unknown") |
243 | 227 | ||
244 | print "\nTHE END" | 228 | print "\nTHE END" |
229 | |||
diff --git a/tests/keeper.lua b/tests/keeper.lua
index 3333938..6dbbd15 100644
--- a/tests/keeper.lua
+++ b/tests/keeper.lua
@@ -97,4 +97,4 @@ assert( A.some==2 ) | |||
97 | PRINT("C.some = " .. C.some ) | 97 | PRINT("C.some = " .. C.some ) |
98 | assert( C.some==3 ) | 98 | assert( C.some==3 ) |
99 | lindaC:set("some") | 99 | lindaC:set("some") |
100 | assert( C.some==nil ) \ No newline at end of file | 100 | assert( C.some==nil ) |
diff --git a/tests/perftest.lua b/tests/perftest.lua
index 6ffc064..35f522c 100644
--- a/tests/perftest.lua
+++ b/tests/perftest.lua
@@ -83,46 +83,53 @@ PRIO_EVEN= PRIO_EVEN or 0 | |||
83 | -- | 83 | -- |
84 | -- AKa 20-Jul-2008: Now the wrapping to one function is no longer needed; | 84 | -- AKa 20-Jul-2008: Now the wrapping to one function is no longer needed; |
85 | -- Lanes 2008 can take the used functions as upvalues. | 85 | -- Lanes 2008 can take the used functions as upvalues. |
86 | -- | 86 | |
87 | -- It looks like this implementation uses a lot of C stack, possibly resulting in stack overflow with Lua54 | ||
88 | -- this is reproducible with the original sieve.lua implementation found at https://www.lua.org/extras/ | ||
89 | -- for example: | ||
90 | -- Lua54 exe built with 1Mb of C stack crashes for M above 230, C stack at 500 calls | ||
91 | -- Lua53 exe built with 1Mb of C stack crashes for M above 491, C stack at 740 calls | ||
92 | -- Lua52 exe built with 1Mb of C stack crashes for M above 672, C stack at 1000 calls | ||
93 | -- Lua51 exe built with 1Mb of C stack crashes for M above 718, C stack at 900 calls | ||
87 | local function sieve_lane(N,id) | 94 | local function sieve_lane(N,id) |
88 | 95 | ||
89 | if MSYS then | 96 | if MSYS then |
90 | io.stderr:setvbuf "no" | 97 | io.stderr:setvbuf "no" |
91 | end | 98 | end |
99 | |||
100 | -- generate all the numbers from 2 to n | ||
101 | local function gen (n) | ||
102 | return coroutine.wrap(function () | ||
103 | for i=2,n do coroutine.yield(i) end | ||
104 | end) | ||
105 | end | ||
92 | 106 | ||
93 | -- generate all the numbers from 2 to n | 107 | -- filter the numbers generated by `g', removing multiples of `p' |
94 | local function gen (n) | 108 | local function filter (p, g) |
95 | return coroutine.wrap(function () | 109 | return coroutine.wrap(function () |
96 | for i=2,n do coroutine.yield(i) end | 110 | while 1 do |
97 | end) | 111 | local n = g() |
98 | end | 112 | if n == nil then return end |
113 | if math.fmod(n, p) ~= 0 then coroutine.yield(n) end | ||
114 | end | ||
115 | end) | ||
116 | end | ||
99 | 117 | ||
100 | -- filter the numbers generated by `g', removing multiples of `p' | 118 | local ret= {} -- returned values: { 2, 3, 5, 7, 11, ... } |
101 | local function filter (p, g) | 119 | N=N or 1000 -- from caller |
102 | return coroutine.wrap(function () | 120 | local x = gen(N) -- generate primes up to N |
103 | while 1 do | 121 | while 1 do |
104 | local n = g() | 122 | local n = x() -- pick a number until done |
105 | if n == nil then return end | 123 | if n == nil then break end |
106 | if math.fmod(n, p) ~= 0 then coroutine.yield(n) end | 124 | --print(n) -- must be a prime number |
125 | table.insert( ret, n ) | ||
126 | |||
127 | x = filter(n, x) -- now remove its multiples | ||
107 | end | 128 | end |
108 | end) | ||
109 | end | ||
110 | |||
111 | local ret= {} -- returned values: { 2, 3, 5, 7, 11, ... } | ||
112 | N=N or 1000 -- from caller | ||
113 | local x = gen(N) -- generate primes up to N | ||
114 | while 1 do | ||
115 | local n = x() -- pick a number until done | ||
116 | if n == nil then break end | ||
117 | --print(n) -- must be a prime number | ||
118 | table.insert( ret, n ) | ||
119 | |||
120 | x = filter(n, x) -- now remove its multiples | ||
121 | end | ||
122 | 129 | ||
123 | io.stderr:write(id..(MSYS and "\n" or "\t")) -- mark we're ready | 130 | io.stderr:write(id..(MSYS and "\n" or "\t")) -- mark we're ready |
124 | 131 | ||
125 | return ret | 132 | return ret |
126 | end | 133 | end |
127 | -- ** END OF LANE ** -- | 134 | -- ** END OF LANE ** -- |
128 | 135 | ||
@@ -169,6 +176,7 @@ else | |||
169 | -- | 176 | -- |
170 | for i=1,N do | 177 | for i=1,N do |
171 | local tmp= t[i]:join() | 178 | local tmp= t[i]:join() |
179 | -- this assert will trigger if you change M to values below 1000 in order to solve C stack overflow | ||
172 | assert( type(tmp)=="table" and tmp[1]==2 and tmp[168]==997 ) | 180 | assert( type(tmp)=="table" and tmp[1]==2 and tmp[168]==997 ) |
173 | end | 181 | end |
174 | end | 182 | end |
@@ -180,5 +188,7 @@ if TIME then | |||
180 | io.stderr:write( "*** TIMING: "..t.." seconds ***\n" ) | 188 | io.stderr:write( "*** TIMING: "..t.." seconds ***\n" ) |
181 | end | 189 | end |
182 | 190 | ||
191 | io.stderr:write "done\n" | ||
192 | |||
183 | -- | 193 | -- |
184 | -- end | 194 | -- end |