author     Baolin Wang <baolin.wang@linux.alibaba.com>  2021-01-28 13:58:15 +0800
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2021-02-13 13:52:56 +0100
commit     513fee2aee13cc4fd92dbeca0004581860c0ed26 (patch)
tree       17fdb5b578f715ccd39b82a2968fdafec5f29a54 /block
parent     d1eb41833408f8f5d980ebcc37b15fd8e14632d9 (diff)
download   linux-rt-513fee2aee13cc4fd92dbeca0004581860c0ed26.tar.gz
blk-cgroup: Use cond_resched() when destroy blkgs
[ Upstream commit 6c635caef410aa757befbd8857c1eadde5cc22ed ]

On a !PREEMPT kernel, we can hit the softlockup below when doing stress testing that repeatedly creates and destroys block cgroups. The reason is that acquiring the queue's lock in the loop of blkcg_destroy_blkgs() may take a long time, or the system can accumulate a huge number of blkgs in pathological cases. Since blkcg_destroy_blkgs() is not called from atomic context, we can add a need_resched() check to each loop iteration and, if it returns true, release the locks and call cond_resched() to avoid the issue.

[ 4757.010308] watchdog: BUG: soft lockup - CPU#11 stuck for 94s!
[ 4757.010698] Call trace:
[ 4757.010700]  blkcg_destroy_blkgs+0x68/0x150
[ 4757.010701]  cgwb_release_workfn+0x104/0x158
[ 4757.010702]  process_one_work+0x1bc/0x3f0
[ 4757.010704]  worker_thread+0x164/0x468
[ 4757.010705]  kthread+0x108/0x138

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
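For illustration, here is a minimal, self-contained sketch of the pattern the fix applies: a long teardown loop under a spinlock that yields the CPU with cond_resched() whenever the scheduler asks for it. The struct my_cache / struct my_item types and the destroy_one_item() helper are hypothetical and exist only for this example; the sketch also omits the nested spin_trylock() on the queue lock that the real patch keeps.

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical example types, not part of the patch. */
struct my_item {
        struct list_head node;
};

struct my_cache {
        spinlock_t lock;                /* protects @items */
        struct list_head items;
};

static void destroy_one_item(struct my_item *item)
{
        list_del(&item->node);
        kfree(item);
}

/*
 * Tear down every item.  The list can be arbitrarily long, so on a
 * !PREEMPT kernel the loop must not hog the CPU: whenever the scheduler
 * asks for it, drop the lock, yield with cond_resched(), and retry.
 */
static void my_cache_destroy_all(struct my_cache *cache)
{
        might_sleep();          /* callers must not be in atomic context */

        spin_lock_irq(&cache->lock);
        while (!list_empty(&cache->items)) {
                struct my_item *item = list_first_entry(&cache->items,
                                                        struct my_item, node);

                if (need_resched()) {
                        spin_unlock_irq(&cache->lock);
                        cond_resched();
                        spin_lock_irq(&cache->lock);
                        continue;
                }

                destroy_one_item(item);
        }
        spin_unlock_irq(&cache->lock);
}

Note how both the patch and the sketch release the spinlock before calling cond_resched() and then retry the loop from the top: rescheduling while holding a spinlock is not allowed.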
Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c  18
1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 3d34ac02d76e..cb3d44d20005 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1089,6 +1089,8 @@ static void blkcg_css_offline(struct cgroup_subsys_state *css)
  */
 void blkcg_destroy_blkgs(struct blkcg *blkcg)
 {
+        might_sleep();
+
         spin_lock_irq(&blkcg->lock);
 
         while (!hlist_empty(&blkcg->blkg_list)) {
@@ -1096,14 +1098,20 @@ void blkcg_destroy_blkgs(struct blkcg *blkcg)
                                                 struct blkcg_gq, blkcg_node);
                 struct request_queue *q = blkg->q;
 
-                if (spin_trylock(&q->queue_lock)) {
-                        blkg_destroy(blkg);
-                        spin_unlock(&q->queue_lock);
-                } else {
+                if (need_resched() || !spin_trylock(&q->queue_lock)) {
+                        /*
+                         * Given that the system can accumulate a huge number
+                         * of blkgs in pathological cases, check to see if we
+                         * need to rescheduling to avoid softlockup.
+                         */
                         spin_unlock_irq(&blkcg->lock);
-                        cpu_relax();
+                        cond_resched();
                         spin_lock_irq(&blkcg->lock);
+                        continue;
                 }
+
+                blkg_destroy(blkg);
+                spin_unlock(&q->queue_lock);
         }
 
         spin_unlock_irq(&blkcg->lock);
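
A side note on the might_sleep() added at the top of blkcg_destroy_blkgs(): it documents that the function may now block, and with CONFIG_DEBUG_ATOMIC_SLEEP enabled it also warns at runtime if an atomic-context caller ever appears. A small, hypothetical illustration of that behaviour (my_teardown(), good_caller() and bad_caller() are made-up names, not kernel code):

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

/* Hypothetical helper: it may reschedule, so callers must be able to sleep. */
static void my_teardown(void)
{
        might_sleep();          /* warns if called from atomic context */
        cond_resched();
}

static DEFINE_SPINLOCK(demo_lock);

/* Fine: process context, no locks held. */
static void good_caller(void)
{
        my_teardown();
}

/*
 * Broken: calling a sleeping function under a spinlock triggers
 * "BUG: sleeping function called from invalid context" when
 * CONFIG_DEBUG_ATOMIC_SLEEP is enabled.
 */
static void bad_caller(void)
{
        spin_lock(&demo_lock);
        my_teardown();
        spin_unlock(&demo_lock);
}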