about summary refs log tree commit diff
path: root/fs/f2fs/segment.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/f2fs/segment.c')
-rw-r--r--  fs/f2fs/segment.c  390
1 file changed, 211 insertions, 179 deletions
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 227e25836173..6db410f1bb8c 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -217,7 +217,7 @@ static int __replace_atomic_write_block(struct inode *inode, pgoff_t index,
retry:
set_new_dnode(&dn, inode, NULL, NULL, 0);
- err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE_RA);
+ err = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
if (err) {
if (err == -ENOMEM) {
f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
@@ -246,10 +246,16 @@ retry:
} else {
blkcnt_t count = 1;
+ err = inc_valid_block_count(sbi, inode, &count);
+ if (err) {
+ f2fs_put_dnode(&dn);
+ return err;
+ }
+
*old_addr = dn.data_blkaddr;
f2fs_truncate_data_blocks_range(&dn, 1);
dec_valid_block_count(sbi, F2FS_I(inode)->cow_inode, count);
- inc_valid_block_count(sbi, inode, &count);
+
f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
ni.version, true, false);
}
@@ -257,7 +263,7 @@ retry:
f2fs_put_dnode(&dn);
trace_f2fs_replace_atomic_write_block(inode, F2FS_I(inode)->cow_inode,
- index, *old_addr, new_addr, recover);
+ index, old_addr ? *old_addr : 0, new_addr, recover);
return 0;
}
@@ -406,27 +412,28 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
* We should do GC or end up with checkpoint, if there are so many dirty
* dir/node pages without enough free segments.
*/
- if (has_not_enough_free_secs(sbi, 0, 0)) {
- if (test_opt(sbi, GC_MERGE) && sbi->gc_thread &&
- sbi->gc_thread->f2fs_gc_task) {
- DEFINE_WAIT(wait);
-
- prepare_to_wait(&sbi->gc_thread->fggc_wq, &wait,
- TASK_UNINTERRUPTIBLE);
- wake_up(&sbi->gc_thread->gc_wait_queue_head);
- io_schedule();
- finish_wait(&sbi->gc_thread->fggc_wq, &wait);
- } else {
- struct f2fs_gc_control gc_control = {
- .victim_segno = NULL_SEGNO,
- .init_gc_type = BG_GC,
- .no_bg_gc = true,
- .should_migrate_blocks = false,
- .err_gc_skipped = false,
- .nr_free_secs = 1 };
- f2fs_down_write(&sbi->gc_lock);
- f2fs_gc(sbi, &gc_control);
- }
+ if (has_enough_free_secs(sbi, 0, 0))
+ return;
+
+ if (test_opt(sbi, GC_MERGE) && sbi->gc_thread &&
+ sbi->gc_thread->f2fs_gc_task) {
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(&sbi->gc_thread->fggc_wq, &wait,
+ TASK_UNINTERRUPTIBLE);
+ wake_up(&sbi->gc_thread->gc_wait_queue_head);
+ io_schedule();
+ finish_wait(&sbi->gc_thread->fggc_wq, &wait);
+ } else {
+ struct f2fs_gc_control gc_control = {
+ .victim_segno = NULL_SEGNO,
+ .init_gc_type = BG_GC,
+ .no_bg_gc = true,
+ .should_migrate_blocks = false,
+ .err_gc_skipped = false,
+ .nr_free_secs = 1 };
+ f2fs_down_write(&sbi->gc_lock);
+ f2fs_gc(sbi, &gc_control);
}
}
@@ -933,9 +940,9 @@ static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS, true, NULL);
INIT_LIST_HEAD(&dc->list);
dc->bdev = bdev;
- dc->lstart = lstart;
- dc->start = start;
- dc->len = len;
+ dc->di.lstart = lstart;
+ dc->di.start = start;
+ dc->di.len = len;
dc->ref = 0;
dc->state = D_PREP;
dc->queued = 0;
@@ -950,20 +957,108 @@ static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
return dc;
}
-static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
- struct block_device *bdev, block_t lstart,
- block_t start, block_t len,
- struct rb_node *parent, struct rb_node **p,
- bool leftmost)
+static bool f2fs_check_discard_tree(struct f2fs_sb_info *sbi)
+{
+#ifdef CONFIG_F2FS_CHECK_FS
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct rb_node *cur = rb_first_cached(&dcc->root), *next;
+ struct discard_cmd *cur_dc, *next_dc;
+
+ while (cur) {
+ next = rb_next(cur);
+ if (!next)
+ return true;
+
+ cur_dc = rb_entry(cur, struct discard_cmd, rb_node);
+ next_dc = rb_entry(next, struct discard_cmd, rb_node);
+
+ if (cur_dc->di.lstart + cur_dc->di.len > next_dc->di.lstart) {
+ f2fs_info(sbi, "broken discard_rbtree, "
+ "cur(%u, %u) next(%u, %u)",
+ cur_dc->di.lstart, cur_dc->di.len,
+ next_dc->di.lstart, next_dc->di.len);
+ return false;
+ }
+ cur = next;
+ }
+#endif
+ return true;
+}
+
+static struct discard_cmd *__lookup_discard_cmd(struct f2fs_sb_info *sbi,
+ block_t blkaddr)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct rb_node *node = dcc->root.rb_root.rb_node;
struct discard_cmd *dc;
- dc = __create_discard_cmd(sbi, bdev, lstart, start, len);
+ while (node) {
+ dc = rb_entry(node, struct discard_cmd, rb_node);
- rb_link_node(&dc->rb_node, parent, p);
- rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost);
+ if (blkaddr < dc->di.lstart)
+ node = node->rb_left;
+ else if (blkaddr >= dc->di.lstart + dc->di.len)
+ node = node->rb_right;
+ else
+ return dc;
+ }
+ return NULL;
+}
+
+static struct discard_cmd *__lookup_discard_cmd_ret(struct rb_root_cached *root,
+ block_t blkaddr,
+ struct discard_cmd **prev_entry,
+ struct discard_cmd **next_entry,
+ struct rb_node ***insert_p,
+ struct rb_node **insert_parent)
+{
+ struct rb_node **pnode = &root->rb_root.rb_node;
+ struct rb_node *parent = NULL, *tmp_node;
+ struct discard_cmd *dc;
+
+ *insert_p = NULL;
+ *insert_parent = NULL;
+ *prev_entry = NULL;
+ *next_entry = NULL;
+
+ if (RB_EMPTY_ROOT(&root->rb_root))
+ return NULL;
+
+ while (*pnode) {
+ parent = *pnode;
+ dc = rb_entry(*pnode, struct discard_cmd, rb_node);
+
+ if (blkaddr < dc->di.lstart)
+ pnode = &(*pnode)->rb_left;
+ else if (blkaddr >= dc->di.lstart + dc->di.len)
+ pnode = &(*pnode)->rb_right;
+ else
+ goto lookup_neighbors;
+ }
+
+ *insert_p = pnode;
+ *insert_parent = parent;
+
+ dc = rb_entry(parent, struct discard_cmd, rb_node);
+ tmp_node = parent;
+ if (parent && blkaddr > dc->di.lstart)
+ tmp_node = rb_next(parent);
+ *next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
+
+ tmp_node = parent;
+ if (parent && blkaddr < dc->di.lstart)
+ tmp_node = rb_prev(parent);
+ *prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
+ return NULL;
+lookup_neighbors:
+ /* lookup prev node for merging backward later */
+ tmp_node = rb_prev(&dc->rb_node);
+ *prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
+
+ /* lookup next node for merging frontward later */
+ tmp_node = rb_next(&dc->rb_node);
+ *next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
return dc;
}
@@ -975,7 +1070,7 @@ static void __detach_discard_cmd(struct discard_cmd_control *dcc,
list_del(&dc->list);
rb_erase_cached(&dc->rb_node, &dcc->root);
- dcc->undiscard_blks -= dc->len;
+ dcc->undiscard_blks -= dc->di.len;
kmem_cache_free(discard_cmd_slab, dc);
@@ -988,7 +1083,7 @@ static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
unsigned long flags;
- trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len);
+ trace_f2fs_remove_discard(dc->bdev, dc->di.start, dc->di.len);
spin_lock_irqsave(&dc->lock, flags);
if (dc->bio_ref) {
@@ -1006,7 +1101,7 @@ static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
printk_ratelimited(
"%sF2FS-fs (%s): Issue discard(%u, %u, %u) failed, ret: %d",
KERN_INFO, sbi->sb->s_id,
- dc->lstart, dc->start, dc->len, dc->error);
+ dc->di.lstart, dc->di.start, dc->di.len, dc->error);
__detach_discard_cmd(dcc, dc);
}
@@ -1122,14 +1217,14 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
return 0;
- trace_f2fs_issue_discard(bdev, dc->start, dc->len);
+ trace_f2fs_issue_discard(bdev, dc->di.start, dc->di.len);
- lstart = dc->lstart;
- start = dc->start;
- len = dc->len;
+ lstart = dc->di.lstart;
+ start = dc->di.start;
+ len = dc->di.len;
total_len = len;
- dc->len = 0;
+ dc->di.len = 0;
while (total_len && *issued < dpolicy->max_requests && !err) {
struct bio *bio = NULL;
@@ -1145,7 +1240,7 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
if (*issued == dpolicy->max_requests)
last = true;
- dc->len += len;
+ dc->di.len += len;
if (time_to_inject(sbi, FAULT_DISCARD)) {
err = -EIO;
@@ -1207,34 +1302,41 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
return err;
}
-static void __insert_discard_tree(struct f2fs_sb_info *sbi,
+static void __insert_discard_cmd(struct f2fs_sb_info *sbi,
struct block_device *bdev, block_t lstart,
- block_t start, block_t len,
- struct rb_node **insert_p,
- struct rb_node *insert_parent)
+ block_t start, block_t len)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
- struct rb_node **p;
+ struct rb_node **p = &dcc->root.rb_root.rb_node;
struct rb_node *parent = NULL;
+ struct discard_cmd *dc;
bool leftmost = true;
- if (insert_p && insert_parent) {
- parent = insert_parent;
- p = insert_p;
- goto do_insert;
+ /* look up rb tree to find parent node */
+ while (*p) {
+ parent = *p;
+ dc = rb_entry(parent, struct discard_cmd, rb_node);
+
+ if (lstart < dc->di.lstart) {
+ p = &(*p)->rb_left;
+ } else if (lstart >= dc->di.lstart + dc->di.len) {
+ p = &(*p)->rb_right;
+ leftmost = false;
+ } else {
+ f2fs_bug_on(sbi, 1);
+ }
}
- p = f2fs_lookup_rb_tree_for_insert(sbi, &dcc->root, &parent,
- lstart, &leftmost);
-do_insert:
- __attach_discard_cmd(sbi, bdev, lstart, start, len, parent,
- p, leftmost);
+ dc = __create_discard_cmd(sbi, bdev, lstart, start, len);
+
+ rb_link_node(&dc->rb_node, parent, p);
+ rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost);
}
static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
struct discard_cmd *dc)
{
- list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]);
+ list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->di.len)]);
}
static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
@@ -1244,7 +1346,7 @@ static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
struct discard_info di = dc->di;
bool modified = false;
- if (dc->state == D_DONE || dc->len == 1) {
+ if (dc->state == D_DONE || dc->di.len == 1) {
__remove_discard_cmd(sbi, dc);
return;
}
@@ -1252,23 +1354,22 @@ static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
dcc->undiscard_blks -= di.len;
if (blkaddr > di.lstart) {
- dc->len = blkaddr - dc->lstart;
- dcc->undiscard_blks += dc->len;
+ dc->di.len = blkaddr - dc->di.lstart;
+ dcc->undiscard_blks += dc->di.len;
__relocate_discard_cmd(dcc, dc);
modified = true;
}
if (blkaddr < di.lstart + di.len - 1) {
if (modified) {
- __insert_discard_tree(sbi, dc->bdev, blkaddr + 1,
+ __insert_discard_cmd(sbi, dc->bdev, blkaddr + 1,
di.start + blkaddr + 1 - di.lstart,
- di.lstart + di.len - 1 - blkaddr,
- NULL, NULL);
+ di.lstart + di.len - 1 - blkaddr);
} else {
- dc->lstart++;
- dc->len--;
- dc->start++;
- dcc->undiscard_blks += dc->len;
+ dc->di.lstart++;
+ dc->di.len--;
+ dc->di.start++;
+ dcc->undiscard_blks += dc->di.len;
__relocate_discard_cmd(dcc, dc);
}
}
@@ -1287,17 +1388,14 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev));
block_t end = lstart + len;
- dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
- NULL, lstart,
- (struct rb_entry **)&prev_dc,
- (struct rb_entry **)&next_dc,
- &insert_p, &insert_parent, true, NULL);
+ dc = __lookup_discard_cmd_ret(&dcc->root, lstart,
+ &prev_dc, &next_dc, &insert_p, &insert_parent);
if (dc)
prev_dc = dc;
if (!prev_dc) {
di.lstart = lstart;
- di.len = next_dc ? next_dc->lstart - lstart : len;
+ di.len = next_dc ? next_dc->di.lstart - lstart : len;
di.len = min(di.len, len);
di.start = start;
}
@@ -1308,16 +1406,16 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
struct discard_cmd *tdc = NULL;
if (prev_dc) {
- di.lstart = prev_dc->lstart + prev_dc->len;
+ di.lstart = prev_dc->di.lstart + prev_dc->di.len;
if (di.lstart < lstart)
di.lstart = lstart;
if (di.lstart >= end)
break;
- if (!next_dc || next_dc->lstart > end)
+ if (!next_dc || next_dc->di.lstart > end)
di.len = end - di.lstart;
else
- di.len = next_dc->lstart - di.lstart;
+ di.len = next_dc->di.lstart - di.lstart;
di.start = start + di.lstart - lstart;
}
@@ -1350,10 +1448,9 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
merged = true;
}
- if (!merged) {
- __insert_discard_tree(sbi, bdev, di.lstart, di.start,
- di.len, NULL, NULL);
- }
+ if (!merged)
+ __insert_discard_cmd(sbi, bdev,
+ di.lstart, di.start, di.len);
next:
prev_dc = next_dc;
if (!prev_dc)
@@ -1392,15 +1489,11 @@ static void __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
struct rb_node **insert_p = NULL, *insert_parent = NULL;
struct discard_cmd *dc;
struct blk_plug plug;
- unsigned int pos = dcc->next_pos;
bool io_interrupted = false;
mutex_lock(&dcc->cmd_lock);
- dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
- NULL, pos,
- (struct rb_entry **)&prev_dc,
- (struct rb_entry **)&next_dc,
- &insert_p, &insert_parent, true, NULL);
+ dc = __lookup_discard_cmd_ret(&dcc->root, dcc->next_pos,
+ &prev_dc, &next_dc, &insert_p, &insert_parent);
if (!dc)
dc = next_dc;
@@ -1418,7 +1511,7 @@ static void __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
break;
}
- dcc->next_pos = dc->lstart + dc->len;
+ dcc->next_pos = dc->di.lstart + dc->di.len;
err = __submit_discard_cmd(sbi, dpolicy, dc, issued);
if (*issued >= dpolicy->max_requests)
@@ -1477,8 +1570,7 @@ retry:
if (list_empty(pend_list))
goto next;
if (unlikely(dcc->rbtree_check))
- f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
- &dcc->root, false));
+ f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi));
blk_start_plug(&plug);
list_for_each_entry_safe(dc, tmp, pend_list, list) {
f2fs_bug_on(sbi, dc->state != D_PREP);
@@ -1556,7 +1648,7 @@ static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
dc->ref--;
if (!dc->ref) {
if (!dc->error)
- len = dc->len;
+ len = dc->di.len;
__remove_discard_cmd(sbi, dc);
}
mutex_unlock(&dcc->cmd_lock);
@@ -1579,14 +1671,15 @@ next:
mutex_lock(&dcc->cmd_lock);
list_for_each_entry_safe(iter, tmp, wait_list, list) {
- if (iter->lstart + iter->len <= start || end <= iter->lstart)
+ if (iter->di.lstart + iter->di.len <= start ||
+ end <= iter->di.lstart)
continue;
- if (iter->len < dpolicy->granularity)
+ if (iter->di.len < dpolicy->granularity)
continue;
if (iter->state == D_DONE && !iter->ref) {
wait_for_completion_io(&iter->wait);
if (!iter->error)
- trimmed += iter->len;
+ trimmed += iter->di.len;
__remove_discard_cmd(sbi, iter);
} else {
iter->ref++;
@@ -1630,8 +1723,7 @@ static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
bool need_wait = false;
mutex_lock(&dcc->cmd_lock);
- dc = (struct discard_cmd *)f2fs_lookup_rb_tree(&dcc->root,
- NULL, blkaddr);
+ dc = __lookup_discard_cmd(sbi, blkaddr);
if (dc) {
if (dc->state == D_PREP) {
__punch_discard_cmd(sbi, dc, blkaddr);
@@ -1760,6 +1852,7 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
sector_t sector, nr_sects;
block_t lblkstart = blkstart;
int devi = 0;
+ u64 remainder = 0;
if (f2fs_is_multi_device(sbi)) {
devi = f2fs_target_device_index(sbi, blkstart);
@@ -1775,9 +1868,9 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
if (f2fs_blkz_is_seq(sbi, devi, blkstart)) {
sector = SECTOR_FROM_BLOCK(blkstart);
nr_sects = SECTOR_FROM_BLOCK(blklen);
+ div64_u64_rem(sector, bdev_zone_sectors(bdev), &remainder);
- if (sector & (bdev_zone_sectors(bdev) - 1) ||
- nr_sects != bdev_zone_sectors(bdev)) {
+ if (remainder || nr_sects != bdev_zone_sectors(bdev)) {
f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)",
devi, sbi->s_ndevs ? FDEV(devi).path : "",
blkstart, blklen);
@@ -1982,9 +2075,11 @@ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
if (force && start >= cpc->trim_start &&
(end - 1) <= cpc->trim_end)
- continue;
+ continue;
- if (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi)) {
+ /* Should cover 2MB zoned device for zone-based reset */
+ if (!f2fs_sb_has_blkzoned(sbi) &&
+ (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi))) {
f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
(end - start) << sbi->log_blocks_per_seg);
continue;
@@ -2787,7 +2882,6 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
int alloc_mode, unsigned long long age)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
- const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
unsigned segno = NULL_SEGNO;
unsigned short seg_type = curseg->seg_type;
int i, cnt;
@@ -2796,7 +2890,7 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
sanity_check_seg_type(sbi, seg_type);
/* f2fs_need_SSR() already forces to do this */
- if (!v_ops->get_victim(sbi, &segno, BG_GC, seg_type, alloc_mode, age)) {
+ if (!f2fs_get_victim(sbi, &segno, BG_GC, seg_type, alloc_mode, age)) {
curseg->next_segno = segno;
return 1;
}
@@ -2823,7 +2917,7 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
for (; cnt-- > 0; reversed ? i-- : i++) {
if (i == seg_type)
continue;
- if (!v_ops->get_victim(sbi, &segno, BG_GC, i, alloc_mode, age)) {
+ if (!f2fs_get_victim(sbi, &segno, BG_GC, i, alloc_mode, age)) {
curseg->next_segno = segno;
return 1;
}
@@ -2964,24 +3058,20 @@ next:
mutex_lock(&dcc->cmd_lock);
if (unlikely(dcc->rbtree_check))
- f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
- &dcc->root, false));
-
- dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
- NULL, start,
- (struct rb_entry **)&prev_dc,
- (struct rb_entry **)&next_dc,
- &insert_p, &insert_parent, true, NULL);
+ f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi));
+
+ dc = __lookup_discard_cmd_ret(&dcc->root, start,
+ &prev_dc, &next_dc, &insert_p, &insert_parent);
if (!dc)
dc = next_dc;
blk_start_plug(&plug);
- while (dc && dc->lstart <= end) {
+ while (dc && dc->di.lstart <= end) {
struct rb_node *node;
int err = 0;
- if (dc->len < dpolicy->granularity)
+ if (dc->di.len < dpolicy->granularity)
goto skip;
if (dc->state != D_PREP) {
@@ -2992,7 +3082,7 @@ next:
err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
if (issued >= dpolicy->max_requests) {
- start = dc->lstart + dc->len;
+ start = dc->di.lstart + dc->di.len;
if (err)
__remove_discard_cmd(sbi, dc);
@@ -4859,9 +4949,8 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
"New zone for curseg[%d] is not yet discarded. "
"Reset the zone: curseg[0x%x,0x%x]",
type, cs->segno, cs->next_blkoff);
- err = __f2fs_issue_discard_zone(sbi, zbd->bdev,
- zone_sector >> log_sectors_per_block,
- zone.len >> log_sectors_per_block);
+ err = __f2fs_issue_discard_zone(sbi, zbd->bdev, cs_zone_block,
+ zone.len >> log_sectors_per_block);
if (err) {
f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
zbd->path, err);
@@ -4920,48 +5009,6 @@ int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
return 0;
}
-static bool is_conv_zone(struct f2fs_sb_info *sbi, unsigned int zone_idx,
- unsigned int dev_idx)
-{
- if (!bdev_is_zoned(FDEV(dev_idx).bdev))
- return true;
- return !test_bit(zone_idx, FDEV(dev_idx).blkz_seq);
-}
-
-/* Return the zone index in the given device */
-static unsigned int get_zone_idx(struct f2fs_sb_info *sbi, unsigned int secno,
- int dev_idx)
-{
- block_t sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
-
- return (sec_start_blkaddr - FDEV(dev_idx).start_blk) >>
- sbi->log_blocks_per_blkz;
-}
-
-/*
- * Return the usable segments in a section based on the zone's
- * corresponding zone capacity. Zone is equal to a section.
- */
-static inline unsigned int f2fs_usable_zone_segs_in_sec(
- struct f2fs_sb_info *sbi, unsigned int segno)
-{
- unsigned int dev_idx, zone_idx;
-
- dev_idx = f2fs_target_device_index(sbi, START_BLOCK(sbi, segno));
- zone_idx = get_zone_idx(sbi, GET_SEC_FROM_SEG(sbi, segno), dev_idx);
-
- /* Conventional zone's capacity is always equal to zone size */
- if (is_conv_zone(sbi, zone_idx, dev_idx))
- return sbi->segs_per_sec;
-
- if (!sbi->unusable_blocks_per_sec)
- return sbi->segs_per_sec;
-
- /* Get the segment count beyond zone capacity block */
- return sbi->segs_per_sec - (sbi->unusable_blocks_per_sec >>
- sbi->log_blocks_per_seg);
-}
-
/*
* Return the number of usable blocks in a segment. The number of blocks
* returned is always equal to the number of blocks in a segment for
@@ -4974,23 +5021,13 @@ static inline unsigned int f2fs_usable_zone_blks_in_seg(
struct f2fs_sb_info *sbi, unsigned int segno)
{
block_t seg_start, sec_start_blkaddr, sec_cap_blkaddr;
- unsigned int zone_idx, dev_idx, secno;
-
- secno = GET_SEC_FROM_SEG(sbi, segno);
- seg_start = START_BLOCK(sbi, segno);
- dev_idx = f2fs_target_device_index(sbi, seg_start);
- zone_idx = get_zone_idx(sbi, secno, dev_idx);
-
- /*
- * Conventional zone's capacity is always equal to zone size,
- * so, blocks per segment is unchanged.
- */
- if (is_conv_zone(sbi, zone_idx, dev_idx))
- return sbi->blocks_per_seg;
+ unsigned int secno;
if (!sbi->unusable_blocks_per_sec)
return sbi->blocks_per_seg;
+ secno = GET_SEC_FROM_SEG(sbi, segno);
+ seg_start = START_BLOCK(sbi, segno);
sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
sec_cap_blkaddr = sec_start_blkaddr + CAP_BLKS_PER_SEC(sbi);
@@ -5024,11 +5061,6 @@ static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi
return 0;
}
-static inline unsigned int f2fs_usable_zone_segs_in_sec(struct f2fs_sb_info *sbi,
- unsigned int segno)
-{
- return 0;
-}
#endif
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
unsigned int segno)
@@ -5043,7 +5075,7 @@ unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
unsigned int segno)
{
if (f2fs_sb_has_blkzoned(sbi))
- return f2fs_usable_zone_segs_in_sec(sbi, segno);
+ return CAP_SEGS_PER_SEC(sbi);
return sbi->segs_per_sec;
}