
Commit 68f7b2b

cota authored and stsquad committed
util/qht: use striped locks under TSAN
Fixes this tsan crash, easy to reproduce with any large enough program:

$ tests/unit/test-qht
1..2
ThreadSanitizer: CHECK failed: sanitizer_deadlock_detector.h:67 "((n_all_locks_)) < (((sizeof(all_locks_with_contexts_)/sizeof((all_locks_with_contexts_)[0]))))" (0x40, 0x40) (tid=1821568)
    #0 __tsan::CheckUnwind() ../../../../src/libsanitizer/tsan/tsan_rtl.cpp:353 (libtsan.so.2+0x90034)
    #1 __sanitizer::CheckFailed(char const*, int, char const*, unsigned long long, unsigned long long) ../../../../src/libsanitizer/sanitizer_common/sanitizer_termination.cpp:86 (libtsan.so.2+0xca555)
    #2 __sanitizer::DeadlockDetectorTLS<__sanitizer::TwoLevelBitVector<1ul, __sanitizer::BasicBitVector<unsigned long> > >::addLock(unsigned long, unsigned long, unsigned int) ../../../../src/libsanitizer/sanitizer_common/sanitizer_deadlock_detector.h:67 (libtsan.so.2+0xb3616)
    #3 __sanitizer::DeadlockDetectorTLS<__sanitizer::TwoLevelBitVector<1ul, __sanitizer::BasicBitVector<unsigned long> > >::addLock(unsigned long, unsigned long, unsigned int) ../../../../src/libsanitizer/sanitizer_common/sanitizer_deadlock_detector.h:59 (libtsan.so.2+0xb3616)
    #4 __sanitizer::DeadlockDetector<__sanitizer::TwoLevelBitVector<1ul, __sanitizer::BasicBitVector<unsigned long> > >::onLockAfter(__sanitizer::DeadlockDetectorTLS<__sanitizer::TwoLevelBitVector<1ul, __sanitizer::BasicBitVector<unsigned long> > >*, unsigned long, unsigned int) ../../../../src/libsanitizer/sanitizer_common/sanitizer_deadlock_detector.h:216 (libtsan.so.2+0xb3616)
    #5 __sanitizer::DD::MutexAfterLock(__sanitizer::DDCallback*, __sanitizer::DDMutex*, bool, bool) ../../../../src/libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cpp:169 (libtsan.so.2+0xb3616)
    #6 __tsan::MutexPostLock(__tsan::ThreadState*, unsigned long, unsigned long, unsigned int, int) ../../../../src/libsanitizer/tsan/tsan_rtl_mutex.cpp:200 (libtsan.so.2+0xa3382)
    #7 __tsan_mutex_post_lock ../../../../src/libsanitizer/tsan/tsan_interface_ann.cpp:384 (libtsan.so.2+0x76bc3)
    #8 qemu_spin_lock /home/cota/src/qemu/include/qemu/thread.h:259 (test-qht+0x44a97)
    #9 qht_map_lock_buckets ../util/qht.c:253 (test-qht+0x44a97)
    #10 do_qht_iter ../util/qht.c:809 (test-qht+0x45f33)
    #11 qht_iter ../util/qht.c:821 (test-qht+0x45f33)
    #12 iter_check ../tests/unit/test-qht.c:121 (test-qht+0xe473)
    #13 qht_do_test ../tests/unit/test-qht.c:202 (test-qht+0xe473)
    #14 qht_test ../tests/unit/test-qht.c:240 (test-qht+0xe7c1)
    #15 test_default ../tests/unit/test-qht.c:246 (test-qht+0xe828)
    #16 <null> <null> (libglib-2.0.so.0+0x7daed)
    #17 <null> <null> (libglib-2.0.so.0+0x7d80a)
    #18 <null> <null> (libglib-2.0.so.0+0x7d80a)
    #19 g_test_run_suite <null> (libglib-2.0.so.0+0x7dfe9)
    #20 g_test_run <null> (libglib-2.0.so.0+0x7e055)
    #21 main ../tests/unit/test-qht.c:259 (test-qht+0xd2c6)
    #22 __libc_start_call_main ../sysdeps/nptl/libc_start_call_main.h:58 (libc.so.6+0x29d8f)
    #23 __libc_start_main_impl ../csu/libc-start.c:392 (libc.so.6+0x29e3f)
    #24 _start <null> (test-qht+0xdb44)

Signed-off-by: Emilio Cota <[email protected]>
Reviewed-by: Richard Henderson <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Alex Bennée <[email protected]>
Message-Id: <[email protected]>
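For context on the fix itself: the failing check above compares the number of locks currently held (0x40, i.e. 64) against the capacity of TSAN's deadlock-detector table, and frame #9 shows the limit being hit while qht_map_lock_buckets() takes every bucket lock. The patch switches to classic lock striping under TSAN: a small power-of-two pool of locks is shared by all buckets, so even a whole-map lock holds at most the pool size. The snippet below is a minimal standalone sketch of that idea, using hypothetical names (NUM_STRIPE_BITS, stripe_lock, lock_all_buckets); it is an illustration, not code from the patch.

/*
 * Standalone sketch of lock striping (hypothetical names, not from the patch).
 * A power-of-two pool size lets a mask replace a modulo.
 */
#include <pthread.h>
#include <stddef.h>

#define NUM_STRIPE_BITS 4
#define NUM_STRIPES     (1 << NUM_STRIPE_BITS)

static pthread_mutex_t stripe_locks[NUM_STRIPES];

static void stripes_init(void)
{
    for (size_t i = 0; i < NUM_STRIPES; i++) {
        pthread_mutex_init(&stripe_locks[i], NULL);
    }
}

/* Every bucket index maps onto one of NUM_STRIPES shared locks. */
static pthread_mutex_t *stripe_lock(size_t bucket_idx)
{
    return &stripe_locks[bucket_idx & (NUM_STRIPES - 1)];
}

/*
 * "Lock all buckets" only needs to take each stripe lock once, so at most
 * NUM_STRIPES (here 16) locks are held simultaneously -- comfortably below
 * the 64-lock limit hardcoded in TSAN's deadlock detector.
 */
static void lock_all_buckets(void)
{
    for (size_t i = 0; i < NUM_STRIPES; i++) {
        pthread_mutex_lock(stripe_lock(i));
    }
}

static void unlock_all_buckets(void)
{
    for (size_t i = 0; i < NUM_STRIPES; i++) {
        pthread_mutex_unlock(stripe_lock(i));
    }
}

The patch keeps the per-bucket QemuSpin for non-TSAN builds and only compiles the striped pool under CONFIG_TSAN, so production builds are unchanged.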
1 parent 047e2bd · commit 68f7b2b

File tree

1 file changed: +81 -14 lines


util/qht.c

Lines changed: 81 additions & 14 deletions
@@ -151,6 +151,22 @@ struct qht_bucket {
 
 QEMU_BUILD_BUG_ON(sizeof(struct qht_bucket) > QHT_BUCKET_ALIGN);
 
+/*
+ * Under TSAN, we use striped locks instead of one lock per bucket chain.
+ * This avoids crashing under TSAN, since TSAN aborts the program if more than
+ * 64 locks are held (this is a hardcoded limit in TSAN).
+ * When resizing a QHT we grab all the buckets' locks, which can easily
+ * go over TSAN's limit. By using striped locks, we avoid this problem.
+ *
+ * Note: this number must be a power of two for easy index computation.
+ */
+#define QHT_TSAN_BUCKET_LOCKS_BITS 4
+#define QHT_TSAN_BUCKET_LOCKS (1 << QHT_TSAN_BUCKET_LOCKS_BITS)
+
+struct qht_tsan_lock {
+    QemuSpin lock;
+} QEMU_ALIGNED(QHT_BUCKET_ALIGN);
+
 /**
  * struct qht_map - structure to track an array of buckets
  * @rcu: used by RCU. Keep it as the top field in the struct to help valgrind
@@ -160,6 +176,7 @@ QEMU_BUILD_BUG_ON(sizeof(struct qht_bucket) > QHT_BUCKET_ALIGN);
  * @n_added_buckets: number of added (i.e. "non-head") buckets
  * @n_added_buckets_threshold: threshold to trigger an upward resize once the
  *                             number of added buckets surpasses it.
+ * @tsan_bucket_locks: Array of striped locks to be used only under TSAN.
  *
  * Buckets are tracked in what we call a "map", i.e. this structure.
  */
@@ -169,6 +186,9 @@ struct qht_map {
     size_t n_buckets;
     size_t n_added_buckets;
     size_t n_added_buckets_threshold;
+#ifdef CONFIG_TSAN
+    struct qht_tsan_lock tsan_bucket_locks[QHT_TSAN_BUCKET_LOCKS];
+#endif
 };
 
 /* trigger a resize when n_added_buckets > n_buckets / div */
@@ -229,10 +249,56 @@ static inline size_t qht_elems_to_buckets(size_t n_elems)
     return pow2ceil(n_elems / QHT_BUCKET_ENTRIES);
 }
 
-static inline void qht_head_init(struct qht_bucket *b)
+/*
+ * When using striped locks (i.e. under TSAN), we have to be careful not
+ * to operate on the same lock twice (e.g. when iterating through all buckets).
+ * We achieve this by operating only on each stripe's first matching lock.
+ */
+static inline void qht_do_if_first_in_stripe(struct qht_map *map,
+                                             struct qht_bucket *b,
+                                             void (*func)(QemuSpin *spin))
+{
+#ifdef CONFIG_TSAN
+    unsigned long bucket_idx = b - map->buckets;
+    bool is_first_in_stripe = (bucket_idx >> QHT_TSAN_BUCKET_LOCKS_BITS) == 0;
+    if (is_first_in_stripe) {
+        unsigned long lock_idx = bucket_idx & (QHT_TSAN_BUCKET_LOCKS - 1);
+        func(&map->tsan_bucket_locks[lock_idx].lock);
+    }
+#else
+    func(&b->lock);
+#endif
+}
+
+static inline void qht_bucket_lock_do(struct qht_map *map,
+                                      struct qht_bucket *b,
+                                      void (*func)(QemuSpin *lock))
+{
+#ifdef CONFIG_TSAN
+    unsigned long bucket_idx = b - map->buckets;
+    unsigned long lock_idx = bucket_idx & (QHT_TSAN_BUCKET_LOCKS - 1);
+    func(&map->tsan_bucket_locks[lock_idx].lock);
+#else
+    func(&b->lock);
+#endif
+}
+
+static inline void qht_bucket_lock(struct qht_map *map,
+                                   struct qht_bucket *b)
+{
+    qht_bucket_lock_do(map, b, qemu_spin_lock);
+}
+
+static inline void qht_bucket_unlock(struct qht_map *map,
+                                     struct qht_bucket *b)
+{
+    qht_bucket_lock_do(map, b, qemu_spin_unlock);
+}
+
+static inline void qht_head_init(struct qht_map *map, struct qht_bucket *b)
 {
     memset(b, 0, sizeof(*b));
-    qemu_spin_init(&b->lock);
+    qht_do_if_first_in_stripe(map, b, qemu_spin_init);
     seqlock_init(&b->sequence);
 }
 
@@ -250,7 +316,7 @@ static void qht_map_lock_buckets(struct qht_map *map)
     for (i = 0; i < map->n_buckets; i++) {
         struct qht_bucket *b = &map->buckets[i];
 
-        qemu_spin_lock(&b->lock);
+        qht_do_if_first_in_stripe(map, b, qemu_spin_lock);
     }
 }
 
@@ -261,7 +327,7 @@ static void qht_map_unlock_buckets(struct qht_map *map)
     for (i = 0; i < map->n_buckets; i++) {
         struct qht_bucket *b = &map->buckets[i];
 
-        qemu_spin_unlock(&b->lock);
+        qht_do_if_first_in_stripe(map, b, qemu_spin_unlock);
    }
 }
 
@@ -308,7 +374,7 @@ void qht_map_lock_buckets__no_stale(struct qht *ht, struct qht_map **pmap)
  * Get a head bucket and lock it, making sure its parent map is not stale.
  * @pmap is filled with a pointer to the bucket's parent map.
  *
- * Unlock with qemu_spin_unlock(&b->lock).
+ * Unlock with qht_bucket_unlock.
  *
  * Note: callers cannot have ht->lock held.
  */
@@ -322,18 +388,18 @@ struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash,
     map = qatomic_rcu_read(&ht->map);
     b = qht_map_to_bucket(map, hash);
 
-    qemu_spin_lock(&b->lock);
+    qht_bucket_lock(map, b);
     if (likely(!qht_map_is_stale__locked(ht, map))) {
         *pmap = map;
         return b;
     }
-    qemu_spin_unlock(&b->lock);
+    qht_bucket_unlock(map, b);
 
     /* we raced with a resize; acquire ht->lock to see the updated ht->map */
     qht_lock(ht);
     map = ht->map;
     b = qht_map_to_bucket(map, hash);
-    qemu_spin_lock(&b->lock);
+    qht_bucket_lock(map, b);
     qht_unlock(ht);
     *pmap = map;
     return b;
@@ -345,12 +411,13 @@ static inline bool qht_map_needs_resize(const struct qht_map *map)
            map->n_added_buckets_threshold;
 }
 
-static inline void qht_chain_destroy(const struct qht_bucket *head)
+static inline void qht_chain_destroy(struct qht_map *map,
+                                     struct qht_bucket *head)
 {
     struct qht_bucket *curr = head->next;
     struct qht_bucket *prev;
 
-    qemu_spin_destroy(&head->lock);
+    qht_do_if_first_in_stripe(map, head, qemu_spin_destroy);
     while (curr) {
         prev = curr;
         curr = curr->next;
@@ -364,7 +431,7 @@ static void qht_map_destroy(struct qht_map *map)
     size_t i;
 
     for (i = 0; i < map->n_buckets; i++) {
-        qht_chain_destroy(&map->buckets[i]);
+        qht_chain_destroy(map, &map->buckets[i]);
     }
     qemu_vfree(map->buckets);
     g_free(map);
@@ -390,7 +457,7 @@ static struct qht_map *qht_map_create(size_t n_buckets)
     map->buckets = qemu_memalign(QHT_BUCKET_ALIGN,
                                  sizeof(*map->buckets) * n_buckets);
     for (i = 0; i < n_buckets; i++) {
-        qht_head_init(&map->buckets[i]);
+        qht_head_init(map, &map->buckets[i]);
     }
     return map;
 }
@@ -638,7 +705,7 @@ bool qht_insert(struct qht *ht, void *p, uint32_t hash, void **existing)
     b = qht_bucket_lock__no_stale(ht, hash, &map);
     prev = qht_insert__locked(ht, map, b, p, hash, &needs_resize);
     qht_bucket_debug__locked(b);
-    qemu_spin_unlock(&b->lock);
+    qht_bucket_unlock(map, b);
 
     if (unlikely(needs_resize) && ht->mode & QHT_MODE_AUTO_RESIZE) {
         qht_grow_maybe(ht);
@@ -749,7 +816,7 @@ bool qht_remove(struct qht *ht, const void *p, uint32_t hash)
     b = qht_bucket_lock__no_stale(ht, hash, &map);
     ret = qht_remove__locked(b, p, hash);
     qht_bucket_debug__locked(b);
-    qemu_spin_unlock(&b->lock);
+    qht_bucket_unlock(map, b);
     return ret;
 }
 