aboutsummaryrefslogtreecommitdiff
path: root/fs/bcachefs/six.c
diff options
context:
space:
mode:
authorGravatar Kent Overstreet <kent.overstreet@linux.dev> 2023-08-12 15:05:06 -0400
committerGravatar Kent Overstreet <kent.overstreet@linux.dev> 2023-10-22 17:10:10 -0400
commitc294ea50da4b1a0ee84253f46391aa87a6efe91c (patch)
tree681f242bac712ffaf718ffb54012b5b7c23e9d8c /fs/bcachefs/six.c
parentbcachefs: Check for directories in deleted inodes btree (diff)
downloadlinux-c294ea50da4b1a0ee84253f46391aa87a6efe91c.tar.gz
linux-c294ea50da4b1a0ee84253f46391aa87a6efe91c.tar.bz2
linux-c294ea50da4b1a0ee84253f46391aa87a6efe91c.zip
bcachefs: six locks: Fix missing barrier on wait->lock_acquired
Six locks do lock handoff via the wakeup path: the thread doing the wakeup also takes the lock on behalf of the waiter, which means the waiter only has to look at its waitlist entry, and doesn't have to touch the lock cacheline while another thread is using it. Linus noticed that this needs a real barrier, which this patch fixes. Also add a comment for the should_sleep_fn() error path. Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Boqun Feng <boqun.feng@gmail.com> Cc: linux-bcachefs@vger.kernel.org Cc: linux-kernel@vger.kernel.org
Diffstat (limited to 'fs/bcachefs/six.c')
-rw-r--r--fs/bcachefs/six.c33
1 file changed, 25 insertions, 8 deletions
diff --git a/fs/bcachefs/six.c b/fs/bcachefs/six.c
index 7ce45aeaee8d..0473aa4dd18a 100644
--- a/fs/bcachefs/six.c
+++ b/fs/bcachefs/six.c
@@ -221,14 +221,16 @@ again:
if (ret <= 0)
goto unlock;
- __list_del(w->list.prev, w->list.next);
task = w->task;
+ __list_del(w->list.prev, w->list.next);
/*
- * Do no writes to @w besides setting lock_acquired - otherwise
- * we would need a memory barrier:
+ * The release barrier here ensures the ordering of the
+ * __list_del before setting w->lock_acquired; @w is on the
+ * stack of the thread doing the waiting and will be reused
+ * after it sees w->lock_acquired with no other locking:
+ * pairs with smp_load_acquire() in six_lock_slowpath()
*/
- barrier();
- w->lock_acquired = true;
+ smp_store_release(&w->lock_acquired, true);
wake_up_process(task);
}
@@ -499,17 +501,32 @@ static int six_lock_slowpath(struct six_lock *lock, enum six_lock_type type,
while (1) {
set_current_state(TASK_UNINTERRUPTIBLE);
- if (wait->lock_acquired)
+ /*
+ * Ensures that writes to the waitlist entry happen after we see
+ * wait->lock_acquired: pairs with the smp_store_release in
+ * __six_lock_wakeup
+ */
+ if (smp_load_acquire(&wait->lock_acquired))
break;
ret = should_sleep_fn ? should_sleep_fn(lock, p) : 0;
if (unlikely(ret)) {
+ bool acquired;
+
+ /*
+ * If should_sleep_fn() returns an error, we are
+ * required to return that error even if we already
+ * acquired the lock - should_sleep_fn() might have
+ * modified external state (e.g. when the deadlock cycle
+ * detector in bcachefs issued a transaction restart)
+ */
raw_spin_lock(&lock->wait_lock);
- if (!wait->lock_acquired)
+ acquired = wait->lock_acquired;
+ if (!acquired)
list_del(&wait->list);
raw_spin_unlock(&lock->wait_lock);
- if (unlikely(wait->lock_acquired))
+ if (unlikely(acquired))
do_six_unlock_type(lock, type);
break;
}