author    Qu Wenruo <wqu@suse.com>          2023-08-29 19:48:57 +0800
committer David Sterba <dsterba@suse.com>   2023-10-12 16:44:03 +0200
commit    8d54518b5e52e5b351cb5893f83990093699e415
tree      7c23fcecebee21d7a905d54a74a1fde1be0868dd  /fs/btrfs/qgroup.c
parent    btrfs: qgroup: use qgroup_iterator_nested to in qgroup_update_refcnt()
btrfs: qgroup: pre-allocate btrfs_qgroup to reduce GFP_ATOMIC usage
Qgroup is the heaviest user of GFP_ATOMIC, but one call site does not
really need GFP_ATOMIC, and that is add_qgroup_rb().

That function only searches the rbtree to find out if we already have
such an entry. If not, then it would try to allocate memory for it.

This means we can afford to pre-allocate such a structure
unconditionally, then free the memory if it's not needed.

This function is not a hot path; it is only utilized by the following
functions:

- btrfs_qgroup_inherit()
  For the "btrfs subvolume snapshot -i" option.

- btrfs_read_qgroup_config()
  At mount time, where we're ensured there would be no existing rb tree
  entry for each qgroup.

- btrfs_create_qgroup()

Thus we're completely safe to pre-allocate the extra memory for the
btrfs_qgroup structure, and reduce unnecessary GFP_ATOMIC usage.

Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
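The pattern behind the change: do the sleeping allocation before taking the
spinlock, hand the buffer to the insert helper, and let the helper free it
when the key already exists. Below is a minimal user-space sketch of that
contract. All names are hypothetical stand-ins: the kernel's add_qgroup_rb()
inserts into an rbtree under fs_info->qgroup_lock, while this sketch uses a
linked list and a pthread mutex.

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

struct qgroup {
	uint64_t id;
	struct qgroup *next;
};

static struct qgroup *qgroup_list;	/* stand-in for fs_info->qgroup_tree */
static pthread_mutex_t qgroup_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Ownership of @prealloc transfers here: if @id already exists, the
 * buffer is freed and the existing entry returned. The function itself
 * never allocates and never fails -- the add_qgroup_rb() contract.
 */
static struct qgroup *add_qgroup(struct qgroup *prealloc, uint64_t id)
{
	struct qgroup *cur;

	for (cur = qgroup_list; cur; cur = cur->next) {
		if (cur->id == id) {
			free(prealloc);		/* entry exists, buffer unused */
			return cur;
		}
	}
	prealloc->id = id;
	prealloc->next = qgroup_list;
	qgroup_list = prealloc;
	return prealloc;
}

static struct qgroup *create_qgroup(uint64_t id)
{
	struct qgroup *prealloc, *qgroup;

	/* Sleeping allocation is fine: no lock held yet (was GFP_ATOMIC). */
	prealloc = calloc(1, sizeof(*prealloc));
	if (!prealloc)
		return NULL;

	pthread_mutex_lock(&qgroup_lock);
	qgroup = add_qgroup(prealloc, id);	/* consumes prealloc */
	pthread_mutex_unlock(&qgroup_lock);
	return qgroup;
}

int main(void)
{
	struct qgroup *a = create_qgroup(5);
	struct qgroup *b = create_qgroup(5);	/* duplicate: prealloc freed */

	return !(a && a == b);
}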
Diffstat (limited to 'fs/btrfs/qgroup.c')
-rw-r--r--  fs/btrfs/qgroup.c | 87
1 file changed, 61 insertions(+), 26 deletions(-)
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index cfe2a6ed6229..da38b38cc6ad 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -180,30 +180,40 @@ static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
return NULL;
}
-/* must be called with qgroup_lock held */
+/*
+ * Add qgroup to the filesystem's qgroup tree.
+ *
+ * Must be called with qgroup_lock held and @prealloc preallocated.
+ *
+ * The control on the lifespan of @prealloc would be transferred to this
+ * function, thus caller should no longer touch @prealloc.
+ */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
+ struct btrfs_qgroup *prealloc,
u64 qgroupid)
{
struct rb_node **p = &fs_info->qgroup_tree.rb_node;
struct rb_node *parent = NULL;
struct btrfs_qgroup *qgroup;
+ /* Caller must have pre-allocated @prealloc. */
+ ASSERT(prealloc);
+
while (*p) {
parent = *p;
qgroup = rb_entry(parent, struct btrfs_qgroup, node);
- if (qgroup->qgroupid < qgroupid)
+ if (qgroup->qgroupid < qgroupid) {
p = &(*p)->rb_left;
- else if (qgroup->qgroupid > qgroupid)
+ } else if (qgroup->qgroupid > qgroupid) {
p = &(*p)->rb_right;
- else
+ } else {
+ kfree(prealloc);
return qgroup;
+ }
}
- qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
- if (!qgroup)
- return ERR_PTR(-ENOMEM);
-
+ qgroup = prealloc;
qgroup->qgroupid = qgroupid;
INIT_LIST_HEAD(&qgroup->groups);
INIT_LIST_HEAD(&qgroup->members);
@@ -436,11 +446,14 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
qgroup_mark_inconsistent(fs_info);
}
if (!qgroup) {
- qgroup = add_qgroup_rb(fs_info, found_key.offset);
- if (IS_ERR(qgroup)) {
- ret = PTR_ERR(qgroup);
+ struct btrfs_qgroup *prealloc;
+
+ prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
+ if (!prealloc) {
+ ret = -ENOMEM;
goto out;
}
+ qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
}
ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
if (ret < 0)
@@ -961,6 +974,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
struct btrfs_key key;
struct btrfs_key found_key;
struct btrfs_qgroup *qgroup = NULL;
+ struct btrfs_qgroup *prealloc = NULL;
struct btrfs_trans_handle *trans = NULL;
struct ulist *ulist = NULL;
int ret = 0;
@@ -1096,6 +1110,15 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
/* Release locks on tree_root before we access quota_root */
btrfs_release_path(path);
+ /* We should not have a stray @prealloc pointer. */
+ ASSERT(prealloc == NULL);
+ prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
+ if (!prealloc) {
+ ret = -ENOMEM;
+ btrfs_abort_transaction(trans, ret);
+ goto out_free_path;
+ }
+
ret = add_qgroup_item(trans, quota_root,
found_key.offset);
if (ret) {
@@ -1103,7 +1126,8 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
goto out_free_path;
}
- qgroup = add_qgroup_rb(fs_info, found_key.offset);
+ qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
+ prealloc = NULL;
if (IS_ERR(qgroup)) {
ret = PTR_ERR(qgroup);
btrfs_abort_transaction(trans, ret);
@@ -1146,12 +1170,14 @@ out_add_root:
goto out_free_path;
}
- qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
- if (IS_ERR(qgroup)) {
- ret = PTR_ERR(qgroup);
- btrfs_abort_transaction(trans, ret);
+ ASSERT(prealloc == NULL);
+ prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
+ if (!prealloc) {
+ ret = -ENOMEM;
goto out_free_path;
}
+ qgroup = add_qgroup_rb(fs_info, prealloc, BTRFS_FS_TREE_OBJECTID);
+ prealloc = NULL;
ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
if (ret < 0) {
btrfs_abort_transaction(trans, ret);
@@ -1224,6 +1250,7 @@ out:
else if (trans)
ret = btrfs_end_transaction(trans);
ulist_free(ulist);
+ kfree(prealloc);
return ret;
}
@@ -1581,6 +1608,7 @@ int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *quota_root;
struct btrfs_qgroup *qgroup;
+ struct btrfs_qgroup *prealloc = NULL;
int ret = 0;
mutex_lock(&fs_info->qgroup_ioctl_lock);
@@ -1595,21 +1623,25 @@ int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
goto out;
}
+ prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
+ if (!prealloc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
ret = add_qgroup_item(trans, quota_root, qgroupid);
if (ret)
goto out;
spin_lock(&fs_info->qgroup_lock);
- qgroup = add_qgroup_rb(fs_info, qgroupid);
+ qgroup = add_qgroup_rb(fs_info, prealloc, qgroupid);
spin_unlock(&fs_info->qgroup_lock);
+ prealloc = NULL;
- if (IS_ERR(qgroup)) {
- ret = PTR_ERR(qgroup);
- goto out;
- }
ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
out:
mutex_unlock(&fs_info->qgroup_ioctl_lock);
+ kfree(prealloc);
return ret;
}
@@ -2854,10 +2886,15 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
struct btrfs_root *quota_root;
struct btrfs_qgroup *srcgroup;
struct btrfs_qgroup *dstgroup;
+ struct btrfs_qgroup *prealloc;
bool need_rescan = false;
u32 level_size = 0;
u64 nums;
+ prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
+ if (!prealloc)
+ return -ENOMEM;
+
/*
* There are only two callers of this function.
*
@@ -2935,11 +2972,8 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
spin_lock(&fs_info->qgroup_lock);
- dstgroup = add_qgroup_rb(fs_info, objectid);
- if (IS_ERR(dstgroup)) {
- ret = PTR_ERR(dstgroup);
- goto unlock;
- }
+ dstgroup = add_qgroup_rb(fs_info, prealloc, objectid);
+ prealloc = NULL;
if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
dstgroup->lim_flags = inherit->lim.flags;
@@ -3050,6 +3084,7 @@ out:
mutex_unlock(&fs_info->qgroup_ioctl_lock);
if (need_rescan)
qgroup_mark_inconsistent(fs_info);
+ kfree(prealloc);
return ret;
}
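A note on the caller-side discipline visible in btrfs_quota_enable() above:
each iteration allocates @prealloc with a sleeping allocator, ownership
passes to add_qgroup_rb(), the local pointer is set to NULL right after the
call, and the single kfree(prealloc) at the exit label is then safe
unconditionally -- it sees either NULL or a buffer that was never handed
over. A compact sketch of that shape, with hypothetical names and the
on-disk step stubbed out; this is an illustration of the pattern, not the
kernel code.

#include <stdlib.h>

struct entry {
	unsigned long id;
};

/* Stand-in for add_qgroup_item(): the on-disk insertion that can fail. */
static int add_item_on_disk(unsigned long id)
{
	(void)id;
	return 0;
}

/* Consumes @prealloc and cannot fail, like the reworked add_qgroup_rb(). */
static void insert_entry(struct entry *prealloc, unsigned long id)
{
	prealloc->id = id;
	/* A real version would link @prealloc into a shared tree here. */
}

static int enable_all(const unsigned long *ids, int nr)
{
	struct entry *prealloc = NULL;
	int ret = 0;
	int i;

	for (i = 0; i < nr; i++) {
		/* Mirrors ASSERT(prealloc == NULL): no stray pointer. */
		prealloc = calloc(1, sizeof(*prealloc));
		if (!prealloc) {
			ret = -1;
			goto out;
		}

		ret = add_item_on_disk(ids[i]);
		if (ret)
			goto out;	/* prealloc released at out: */

		insert_entry(prealloc, ids[i]);
		prealloc = NULL;	/* ownership transferred */
	}
out:
	/* Safe either way: NULL, or a buffer that was never handed over. */
	free(prealloc);
	return ret;
}

int main(void)
{
	const unsigned long ids[] = { 1, 2, 3 };

	return enable_all(ids, 3) ? 1 : 0;
}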