
Commit a20135f

htejun authored and axboe committed
writeback: don't drain bdi_writeback_congested on bdi destruction
52ebea7 ("writeback: make backing_dev_info host cgroup-specific bdi_writebacks") made bdi (backing_dev_info) host per-cgroup wb's (bdi_writeback's). As the congested state needs to be per-wb and referenced from the blkcg side and from multiple wbs, the patch made all non-root cong's (bdi_writeback_congested's) reference counted and indexed on bdi.

When a bdi is destroyed, cgwb_bdi_destroy() tries to drain all non-root cong's; however, this can hang indefinitely because wb's can also be referenced from blkcg_gq's which are destroyed after bdi destruction is complete.

This patch fixes the bug by updating bdi destruction to not wait for cong's to drain. A cong is unlinked from bdi->cgwb_congested_tree on bdi destruction regardless of its reference count, as the bdi may go away at any point after destruction. wb_congested_put() checks whether the cong is already unlinked on release.

Signed-off-by: Tejun Heo <[email protected]>
Reported-by: Jon Christopherson <[email protected]>
Link: https://bugzilla.kernel.org/show_bug.cgi?id=100681
Fixes: 52ebea7 ("writeback: make backing_dev_info host cgroup-specific bdi_writebacks")
Tested-by: Jon Christopherson <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
1 parent a13f35e commit a20135f

File tree

1 file changed: +16 -6 lines changed

mm/backing-dev.c

Lines changed: 16 additions & 6 deletions

@@ -425,7 +425,6 @@ wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
 		new_congested = NULL;
 		rb_link_node(&congested->rb_node, parent, node);
 		rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
-		atomic_inc(&bdi->usage_cnt);
 		goto found;
 	}
 
@@ -456,7 +455,6 @@ wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
  */
 void wb_congested_put(struct bdi_writeback_congested *congested)
 {
-	struct backing_dev_info *bdi = congested->bdi;
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -465,12 +463,15 @@ void wb_congested_put(struct bdi_writeback_congested *congested)
 		return;
 	}
 
-	rb_erase(&congested->rb_node, &congested->bdi->cgwb_congested_tree);
+	/* bdi might already have been destroyed leaving @congested unlinked */
+	if (congested->bdi) {
+		rb_erase(&congested->rb_node,
+			 &congested->bdi->cgwb_congested_tree);
+		congested->bdi = NULL;
+	}
+
 	spin_unlock_irqrestore(&cgwb_lock, flags);
 	kfree(congested);
-
-	if (atomic_dec_and_test(&bdi->usage_cnt))
-		wake_up_all(&cgwb_release_wait);
 }
 
 static void cgwb_release_workfn(struct work_struct *work)
@@ -675,13 +676,22 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
 static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
 {
 	struct radix_tree_iter iter;
+	struct bdi_writeback_congested *congested, *congested_n;
 	void **slot;
 
 	WARN_ON(test_bit(WB_registered, &bdi->wb.state));
 
 	spin_lock_irq(&cgwb_lock);
+
 	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
 		cgwb_kill(*slot);
+
+	rbtree_postorder_for_each_entry_safe(congested, congested_n,
+					     &bdi->cgwb_congested_tree, rb_node) {
+		rb_erase(&congested->rb_node, &bdi->cgwb_congested_tree);
+		congested->bdi = NULL;	/* mark @congested unlinked */
+	}
+
 	spin_unlock_irq(&cgwb_lock);
 
 	/*
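
For readers who want the pattern in isolation, here is a minimal userspace C sketch of the approach the patch takes: the owner unlinks every entry during its own destruction instead of waiting for references to drain, and the release path only erases an entry from the owner's structure if it is still linked. The struct names, the plain linked list, and the pthread mutex are simplified stand-ins for the kernel's bdi, rbtree, and cgwb_lock, not the actual kernel API.

/* Simplified illustration of "unlink on owner destruction, check in put()". */
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct owner;                            /* stands in for backing_dev_info */

struct congested {                       /* stands in for bdi_writeback_congested */
	struct owner *owner;             /* NULL once unlinked from the owner */
	struct congested *next;          /* plain list instead of an rbtree */
	int refcnt;
};

struct owner {
	struct congested *head;          /* stands in for bdi->cgwb_congested_tree */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;   /* stands in for cgwb_lock */

/* Analogous to wb_congested_put() after the patch: erase from the owner's
 * list only if the owner has not already unlinked us during destruction. */
static void congested_put(struct congested *c)
{
	pthread_mutex_lock(&lock);
	if (--c->refcnt > 0) {
		pthread_mutex_unlock(&lock);
		return;
	}
	if (c->owner) {                  /* still linked: unlink ourselves */
		struct congested **p = &c->owner->head;
		while (*p != c)
			p = &(*p)->next;
		*p = c->next;
		c->owner = NULL;
	}
	pthread_mutex_unlock(&lock);
	free(c);
}

/* Analogous to cgwb_bdi_destroy() after the patch: unlink every entry
 * without waiting for its reference count to drop to zero. */
static void owner_destroy(struct owner *o)
{
	pthread_mutex_lock(&lock);
	while (o->head) {
		struct congested *c = o->head;
		o->head = c->next;
		c->owner = NULL;         /* mark unlinked; the last holder frees it */
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	struct owner o = { .head = NULL };
	struct congested *c = calloc(1, sizeof(*c));

	c->owner = &o;                   /* link one entry to the owner */
	c->next = NULL;
	o.head = c;
	c->refcnt = 1;                   /* reference held elsewhere (blkcg side) */

	owner_destroy(&o);               /* returns immediately, no draining */
	congested_put(c);                /* last ref: freed without touching &o */
	printf("entry released after owner destruction\n");
	return 0;
}

The point mirrors the commit message: owner_destroy() no longer blocks on outstanding references, and the final congested_put() frees the entry without dereferencing the already-destroyed owner because the linkage was cleared up front.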
