@@ -137,6 +137,7 @@ struct worker {
 	};
 
 	struct work_struct	*current_work;	/* L: work being processed */
+	work_func_t		current_func;	/* L: current_work's fn */
 	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
 	struct list_head	scheduled;	/* L: scheduled works */
 	struct task_struct	*task;		/* I: worker task */
@@ -861,9 +862,27 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
  * @gcwq: gcwq of interest
  * @work: work to find worker for
  *
- * Find a worker which is executing @work on @gcwq.  This function is
- * identical to __find_worker_executing_work() except that this
- * function calculates @bwh itself.
+ * Find a worker which is executing @work on @gcwq by searching
+ * @gcwq->busy_hash which is keyed by the address of @work.  For a worker
+ * to match, its current execution should match the address of @work and
+ * its work function.  This is to avoid unwanted dependency between
+ * unrelated work executions through a work item being recycled while
+ * still being executed.
+ *
+ * This is a bit tricky.  A work item may be freed once its execution
+ * starts and nothing prevents the freed area from being recycled for
+ * another work item.  If the same work item address ends up being reused
+ * before the original execution finishes, workqueue will identify the
+ * recycled work item as currently executing and make it wait until the
+ * current execution finishes, introducing an unwanted dependency.
+ *
+ * This function checks the work item address and work function to avoid
+ * false positives.  Note that this isn't complete as one may construct a
+ * work function which can introduce dependency onto itself through a
+ * recycled work item.  Well, if somebody wants to shoot oneself in the
+ * foot that badly, there's only so much we can do, and if such deadlock
+ * actually occurs, it should be easy to locate the culprit work
+ * function.
  *
  * CONTEXT:
  * spin_lock_irq(gcwq->lock).
@@ -878,8 +897,10 @@ static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
 	struct worker *worker;
 	struct hlist_node *tmp;
 
-	hash_for_each_possible(gcwq->busy_hash, worker, tmp, hentry, (unsigned long)work)
-		if (worker->current_work == work)
+	hash_for_each_possible(gcwq->busy_hash, worker, tmp, hentry,
+			       (unsigned long)work)
+		if (worker->current_work == work &&
+		    worker->current_func == work->func)
 			return worker;
 
 	return NULL;
@@ -2114,7 +2135,6 @@ __acquires(&gcwq->lock)
 	struct worker_pool *pool = worker->pool;
 	struct global_cwq *gcwq = pool->gcwq;
 	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
-	work_func_t f = work->func;
 	int work_color;
 	struct worker *collision;
 #ifdef CONFIG_LOCKDEP
@@ -2154,6 +2174,7 @@ __acquires(&gcwq->lock)
 	debug_work_deactivate(work);
 	hash_add(gcwq->busy_hash, &worker->hentry, (unsigned long)work);
 	worker->current_work = work;
+	worker->current_func = work->func;
 	worker->current_cwq = cwq;
 	work_color = get_work_color(work);
 
@@ -2186,7 +2207,7 @@ __acquires(&gcwq->lock)
 	lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
 	trace_workqueue_execute_start(work);
-	f(work);
+	worker->current_func(work);
 	/*
 	 * While we must be careful to not use "work" after this, the trace
 	 * point will only record its address.
@@ -2198,7 +2219,8 @@ __acquires(&gcwq->lock)
 	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
 		pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
 		       "    last function: %pf\n",
-		       current->comm, preempt_count(), task_pid_nr(current), f);
+		       current->comm, preempt_count(), task_pid_nr(current),
+		       worker->current_func);
 		debug_show_held_locks(current);
 		dump_stack();
 	}
@@ -2212,6 +2234,7 @@ __acquires(&gcwq->lock)
 	/* we're done with it, release */
 	hash_del(&worker->hentry);
 	worker->current_work = NULL;
+	worker->current_func = NULL;
 	worker->current_cwq = NULL;
 	cwq_dec_nr_in_flight(cwq, work_color);
 }
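
Side note for review: the hazard described in the new comment is easy to reproduce outside the kernel. Below is a minimal userspace sketch, not kernel code; work_item, busy_match_by_addr() and the other names are hypothetical stand-ins for the busy_hash lookup. It shows how an address-only match reports a recycled, unrelated work item as already executing, while the address-plus-function test added by this patch rejects it. Comparing a pointer value after free() is formally undefined behaviour; the demo merely relies on the allocator commonly reusing a just-freed chunk of the same size.

/* recycle_demo.c - build with: cc -o recycle_demo recycle_demo.c */
#include <stdio.h>
#include <stdlib.h>

typedef void (*work_func_t)(void *);

struct work_item {
	work_func_t func;
};

/* Stand-ins for one busy worker's state (current_work/current_func). */
static struct work_item *current_work;
static work_func_t current_func;

static void flush_fn(void *w) { (void)w; }
static void unrelated_fn(void *w) { (void)w; }

/* Pre-patch test: work item address only. */
static int busy_match_by_addr(const struct work_item *w)
{
	return current_work == w;
}

/* Patched test: address and work function must both match. */
static int busy_match_by_addr_and_func(const struct work_item *w)
{
	return current_work == w && current_func == w->func;
}

int main(void)
{
	/* A work item starts executing; the worker records it as busy. */
	struct work_item *old = malloc(sizeof(*old));
	old->func = flush_fn;
	current_work = old;
	current_func = old->func;

	/*
	 * The work function frees its own item while the worker is still
	 * marked as executing it...
	 */
	free(old);

	/* ...and the allocator may hand the same address to a new item. */
	struct work_item *recycled = malloc(sizeof(*recycled));
	recycled->func = unrelated_fn;

	if (recycled != old) {
		puts("allocator did not recycle the address; rerun");
		return 0;
	}

	printf("addr-only match:  %d  <- false positive\n",
	       busy_match_by_addr(recycled));
	printf("addr+func match:  %d  <- correctly rejected\n",
	       busy_match_by_addr_and_func(recycled));

	free(recycled);
	return 0;
}

As the updated comment acknowledges, the check stays heuristic: if the recycled item happens to use the same work function, the false dependency remains, which is the accepted trade-off.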