diff options
author | jsing <> | 2024-01-24 14:02:52 +0000 |
---|---|---|
committer | jsing <> | 2024-01-24 14:02:52 +0000 |
commit | 8cf7a8d06866dd151552f63a2f8bf289d3e48683 (patch) | |
tree | 23db63fc3b4137172ed05920d323daacd2d92049 | |
parent | 304c4bea814902359fc956dde611b1bf4fe6c276 (diff) | |
download | openbsd-8cf7a8d06866dd151552f63a2f8bf289d3e48683.tar.gz openbsd-8cf7a8d06866dd151552f63a2f8bf289d3e48683.tar.bz2 openbsd-8cf7a8d06866dd151552f63a2f8bf289d3e48683.zip |
Make it safe to delete entries from an lhash doall callback.
Currently, the callback cannot safely delete entries as it could lead to
contraction of the hash table, which in turn could lead to doall skipping
entries (and that typically leads to memory leaks). The recommended
workaround is to reach in and fiddle with the hash table internals in
order to prevent contraction, call the doall function and then restore
the internals that were changed.
Rather than just improving our documentation, actually make it safe to
delete entries from an lhash doall callback by pausing contractions prior
to starting the callback loop, then restoring the down load factor and
triggering contraction once completed. This means that callers no longer
need to reach into the hash table internals in order to achieve this same
behaviour.
ok tb@
-rw-r--r-- | src/lib/libcrypto/lhash/lhash.c | 17 | ||||
-rw-r--r-- | src/lib/libcrypto/man/lh_new.3 | 15 |
2 files changed, 19 insertions, 13 deletions
diff --git a/src/lib/libcrypto/lhash/lhash.c b/src/lib/libcrypto/lhash/lhash.c index 3adec71ed6..81660419c7 100644 --- a/src/lib/libcrypto/lhash/lhash.c +++ b/src/lib/libcrypto/lhash/lhash.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* $OpenBSD: lhash.c,v 1.20 2023/07/07 13:40:44 beck Exp $ */ | 1 | /* $OpenBSD: lhash.c,v 1.21 2024/01/24 14:02:52 jsing Exp $ */ |
2 | /* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) | 2 | /* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) |
3 | * All rights reserved. | 3 | * All rights reserved. |
4 | * | 4 | * |
@@ -250,12 +250,21 @@ static void | |||
250 | doall_util_fn(_LHASH *lh, int use_arg, LHASH_DOALL_FN_TYPE func, | 250 | doall_util_fn(_LHASH *lh, int use_arg, LHASH_DOALL_FN_TYPE func, |
251 | LHASH_DOALL_ARG_FN_TYPE func_arg, void *arg) | 251 | LHASH_DOALL_ARG_FN_TYPE func_arg, void *arg) |
252 | { | 252 | { |
253 | int i; | ||
254 | LHASH_NODE *a, *n; | 253 | LHASH_NODE *a, *n; |
254 | int down_load; | ||
255 | int i; | ||
255 | 256 | ||
256 | if (lh == NULL) | 257 | if (lh == NULL) |
257 | return; | 258 | return; |
258 | 259 | ||
260 | /* | ||
261 | * Disable contraction of the hash while walking, as some consumers use | ||
262 | * it to delete hash entries. A better option would be to snapshot the | ||
263 | * hash, making it insert safe as well. | ||
264 | */ | ||
265 | down_load = lh->down_load; | ||
266 | lh->down_load = 0; | ||
267 | |||
259 | /* reverse the order so we search from 'top to bottom' | 268 | /* reverse the order so we search from 'top to bottom' |
260 | * We were having memory leaks otherwise */ | 269 | * We were having memory leaks otherwise */ |
261 | for (i = lh->num_nodes - 1; i >= 0; i--) { | 270 | for (i = lh->num_nodes - 1; i >= 0; i--) { |
@@ -273,6 +282,10 @@ doall_util_fn(_LHASH *lh, int use_arg, LHASH_DOALL_FN_TYPE func, | |||
273 | a = n; | 282 | a = n; |
274 | } | 283 | } |
275 | } | 284 | } |
285 | |||
286 | /* Restore down load factor and trigger contraction. */ | ||
287 | lh->down_load = down_load; | ||
288 | contract(lh); | ||
276 | } | 289 | } |
277 | 290 | ||
278 | void | 291 | void |
diff --git a/src/lib/libcrypto/man/lh_new.3 b/src/lib/libcrypto/man/lh_new.3 index c848eed825..d672b4d2d8 100644 --- a/src/lib/libcrypto/man/lh_new.3 +++ b/src/lib/libcrypto/man/lh_new.3 | |||
@@ -1,4 +1,4 @@ | |||
1 | .\" $OpenBSD: lh_new.3,v 1.9 2022/03/31 17:27:17 naddy Exp $ | 1 | .\" $OpenBSD: lh_new.3,v 1.10 2024/01/24 14:02:52 jsing Exp $ |
2 | .\" full merge up to: | 2 | .\" full merge up to: |
3 | .\" OpenSSL doc/crypto/lhash.pod 1bc74519 May 20 08:11:46 2016 -0400 | 3 | .\" OpenSSL doc/crypto/lhash.pod 1bc74519 May 20 08:11:46 2016 -0400 |
4 | .\" selective merge up to: | 4 | .\" selective merge up to: |
@@ -118,7 +118,7 @@ | |||
118 | .\" copied and put under another distribution licence | 118 | .\" copied and put under another distribution licence |
119 | .\" [including the GNU Public Licence.] | 119 | .\" [including the GNU Public Licence.] |
120 | .\" | 120 | .\" |
121 | .Dd $Mdocdate: March 31 2022 $ | 121 | .Dd $Mdocdate: January 24 2024 $ |
122 | .Dt LH_NEW 3 | 122 | .Dt LH_NEW 3 |
123 | .Os | 123 | .Os |
124 | .Sh NAME | 124 | .Sh NAME |
@@ -342,15 +342,8 @@ lh_STUFF_doall(hashtable, LHASH_DOALL_FN(STUFF_cleanup)); | |||
342 | lh_STUFF_free(hashtable); | 342 | lh_STUFF_free(hashtable); |
343 | .Ed | 343 | .Ed |
344 | .Pp | 344 | .Pp |
345 | When doing this, be careful if you delete entries from the hash table in | 345 | A callback may delete entries from the hash table, however, it is |
346 | your callbacks: the table may decrease in size, moving the item that you | 346 | not safe to insert new entries. |
347 | are currently on down lower in the hash table \(em this could cause some | ||
348 | entries to be skipped during the iteration. | ||
349 | The second best solution to this problem is to set hash->down_load=0 | ||
350 | before you start (which will stop the hash table ever decreasing in | ||
351 | size). | ||
352 | The best solution is probably to avoid deleting items from the hash | ||
353 | table inside a doall callback! | ||
354 | .Pp | 347 | .Pp |
355 | .Fn lh_<type>_doall_arg | 348 | .Fn lh_<type>_doall_arg |
356 | is the same as | 349 | is the same as |