path: root/fs/bcachefs/btree_locking.c
author	Kent Overstreet <kent.overstreet@linux.dev>	2024-04-09 19:45:41 -0400
committer	Kent Overstreet <kent.overstreet@linux.dev>	2024-05-08 17:29:19 -0400
commit	923ed0ae5ebd0bb0dbc64a88348fa1835a3644ee (patch)
tree	2edf3f7b2b77fed3a715b1fc1c9d477bd8ec778a /fs/bcachefs/btree_locking.c
parent	bcachefs: bch2_dir_emit() - drop_locks_do() conversion (diff)
bcachefs: bch2_trans_relock_fail() - factor out slowpath
Factor out slowpath into a separate helper

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
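The shape of the change, roughly: the relock loop stays small and inlinable, the rarely taken failure path (tracing, unlocking, restart) moves into a separate function marked noinline and __cold so the compiler keeps it out of the hot path, and the trace/notrace entry points become thin wrappers around one inline worker taking a bool. Below is a minimal standalone sketch of that pattern; the names (do_operation, handle_failure_slowpath, etc.) are invented for illustration and are not bcachefs functions.

/*
 * Standalone sketch of the "factor out the cold slowpath" pattern;
 * compiles with gcc/clang.  None of these names exist in bcachefs.
 */
#include <stdbool.h>
#include <stdio.h>

/* Cold, out-of-line failure handling: optional tracing, cleanup, error return. */
static __attribute__((noinline, cold)) int handle_failure_slowpath(int idx, bool trace)
{
	if (trace)
		fprintf(stderr, "item %d failed, restarting\n", idx);
	/* ...unlock / verify / restart bookkeeping would live here... */
	return -1;
}

/*
 * Hot path: small enough that the compiler can inline it into both wrappers,
 * constant-folding the 'trace' flag away.
 */
static inline int do_operation(int n, bool trace)
{
	for (int i = 0; i < n; i++)
		if (i == 3)			/* pretend the 4th item fails */
			return handle_failure_slowpath(i, trace);
	return 0;
}

int do_operation_traced(int n)  { return do_operation(n, true); }
int do_operation_notrace(int n) { return do_operation(n, false); }

int main(void)
{
	printf("traced: %d  notrace: %d\n",
	       do_operation_traced(5), do_operation_notrace(2));
	return 0;
}

Keeping the slowpath out of line keeps the fast path's code footprint small and lets the two public entry points share one body, which is what the diff below does with __bch2_trans_relock() and bch2_trans_relock_fail().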
Diffstat (limited to 'fs/bcachefs/btree_locking.c')
-rw-r--r--	fs/bcachefs/btree_locking.c	| 98
1 file changed, 52 insertions(+), 46 deletions(-)
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index 8d1c4f78db5e..e6adb2df3a57 100644
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -723,51 +723,51 @@ void bch2_trans_downgrade(struct btree_trans *trans)
bch2_btree_path_downgrade(trans, path);
}
-int bch2_trans_relock(struct btree_trans *trans)
+static inline void __bch2_trans_unlock(struct btree_trans *trans)
{
struct btree_path *path;
unsigned i;
- if (unlikely(trans->restarted))
- return -((int) trans->restarted);
+ trans_for_each_path(trans, path, i)
+ __bch2_btree_path_unlock(trans, path);
+}
- trans_for_each_path(trans, path, i) {
- struct get_locks_fail f;
+static noinline __cold int bch2_trans_relock_fail(struct btree_trans *trans, struct btree_path *path,
+ struct get_locks_fail *f, bool trace)
+{
+ if (!trace)
+ goto out;
- if (path->should_be_locked &&
- !btree_path_get_locks(trans, path, false, &f)) {
- if (trace_trans_restart_relock_enabled()) {
- struct printbuf buf = PRINTBUF;
-
- bch2_bpos_to_text(&buf, path->pos);
- prt_printf(&buf, " l=%u seq=%u node seq=",
- f.l, path->l[f.l].lock_seq);
- if (IS_ERR_OR_NULL(f.b)) {
- prt_str(&buf, bch2_err_str(PTR_ERR(f.b)));
- } else {
- prt_printf(&buf, "%u", f.b->c.lock.seq);
-
- struct six_lock_count c =
- bch2_btree_node_lock_counts(trans, NULL, &f.b->c, f.l);
- prt_printf(&buf, " self locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
-
- c = six_lock_counts(&f.b->c.lock);
- prt_printf(&buf, " total locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
- }
+ if (trace_trans_restart_relock_enabled()) {
+ struct printbuf buf = PRINTBUF;
- trace_trans_restart_relock(trans, _RET_IP_, buf.buf);
- printbuf_exit(&buf);
- }
+ bch2_bpos_to_text(&buf, path->pos);
+ prt_printf(&buf, " l=%u seq=%u node seq=", f->l, path->l[f->l].lock_seq);
+ if (IS_ERR_OR_NULL(f->b)) {
+ prt_str(&buf, bch2_err_str(PTR_ERR(f->b)));
+ } else {
+ prt_printf(&buf, "%u", f->b->c.lock.seq);
- count_event(trans->c, trans_restart_relock);
- return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
+ struct six_lock_count c =
+ bch2_btree_node_lock_counts(trans, NULL, &f->b->c, f->l);
+ prt_printf(&buf, " self locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
+
+ c = six_lock_counts(&f->b->c.lock);
+ prt_printf(&buf, " total locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
}
+
+ trace_trans_restart_relock(trans, _RET_IP_, buf.buf);
+ printbuf_exit(&buf);
}
- return 0;
+ count_event(trans->c, trans_restart_relock);
+out:
+ __bch2_trans_unlock(trans);
+ bch2_trans_verify_locks(trans);
+ return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
}
-int bch2_trans_relock_notrace(struct btree_trans *trans)
+static inline int __bch2_trans_relock(struct btree_trans *trans, bool trace)
{
struct btree_path *path;
unsigned i;
@@ -775,30 +775,36 @@ int bch2_trans_relock_notrace(struct btree_trans *trans)
if (unlikely(trans->restarted))
return -((int) trans->restarted);
- trans_for_each_path(trans, path, i)
+ trans_for_each_path(trans, path, i) {
+ struct get_locks_fail f;
+
if (path->should_be_locked &&
- !bch2_btree_path_relock_norestart(trans, path)) {
- return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
- }
+ !btree_path_get_locks(trans, path, false, &f))
+ return bch2_trans_relock_fail(trans, path, &f, trace);
+ }
+
+ bch2_trans_verify_locks(trans);
return 0;
}
-void bch2_trans_unlock_noassert(struct btree_trans *trans)
+int bch2_trans_relock(struct btree_trans *trans)
{
- struct btree_path *path;
- unsigned i;
+ return __bch2_trans_relock(trans, true);
+}
- trans_for_each_path(trans, path, i)
- __bch2_btree_path_unlock(trans, path);
+int bch2_trans_relock_notrace(struct btree_trans *trans)
+{
+ return __bch2_trans_relock(trans, false);
}
-void bch2_trans_unlock(struct btree_trans *trans)
+void bch2_trans_unlock_noassert(struct btree_trans *trans)
{
- struct btree_path *path;
- unsigned i;
+ __bch2_trans_unlock(trans);
+}
- trans_for_each_path(trans, path, i)
- __bch2_btree_path_unlock(trans, path);
+void bch2_trans_unlock(struct btree_trans *trans)
+{
+ __bch2_trans_unlock(trans);
}
void bch2_trans_unlock_long(struct btree_trans *trans)