workqueue: lock cwq access in drain_workqueue
commit fa2563e41c3d6d6e8af437643981ed28ae0cb56d upstream.

Take cwq->gcwq->lock to avoid racing between drain_workqueue() checking to
make sure the workqueues are empty and cwq_dec_nr_in_flight() decrementing
and then incrementing nr_active when it activates a delayed work.

We discovered this when a corner case in one of our drivers resulted in us
trying to destroy a workqueue in which the remaining work would always
requeue itself again in the same workqueue. We would hit this race
condition and trip the BUG_ON on workqueue.c:3080.

Signed-off-by: Thomas Tuttle <ttuttle@chromium.org>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
This commit is contained in:
Committed by: Greg Kroah-Hartman
Parent: c780713f78
Commit: d5b1a08d0d
@@ -3026,8 +3026,13 @@ reflush:
 	for_each_cwq_cpu(cpu, wq) {
 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+		bool drained;
 
-		if (!cwq->nr_active && list_empty(&cwq->delayed_works))
+		spin_lock_irq(&cwq->gcwq->lock);
+		drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
+		spin_unlock_irq(&cwq->gcwq->lock);
+
+		if (drained)
 			continue;
 
 		if (++flush_cnt == 10 ||
Reference in New Issue
Block a user