|
@@ -667,21 +667,38 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
|
|
|
int schedule_on_each_cpu(work_func_t func)
|
|
|
{
|
|
|
int cpu;
|
|
|
+ int orig = -1;
|
|
|
struct work_struct *works;
|
|
|
|
|
|
works = alloc_percpu(struct work_struct);
|
|
|
if (!works)
|
|
|
return -ENOMEM;
|
|
|
|
|
|
+ /*
|
|
|
+ * When running in keventd, don't schedule a work item on itself.
|
|
|
+ * Can just call directly because the work queue is already bound.
|
|
|
+ * This is also faster.
|
|
|
+ * Make this a generic parameter for other workqueues?
|
|
|
+ */
|
|
|
+ if (current_is_keventd()) {
|
|
|
+ orig = raw_smp_processor_id();
|
|
|
+ INIT_WORK(per_cpu_ptr(works, orig), func);
|
|
|
+ func(per_cpu_ptr(works, orig));
|
|
|
+ }
|
|
|
+
|
|
|
get_online_cpus();
|
|
|
for_each_online_cpu(cpu) {
|
|
|
struct work_struct *work = per_cpu_ptr(works, cpu);
|
|
|
|
|
|
+ if (cpu == orig)
|
|
|
+ continue;
|
|
|
INIT_WORK(work, func);
|
|
|
schedule_work_on(cpu, work);
|
|
|
}
|
|
|
- for_each_online_cpu(cpu)
|
|
|
- flush_work(per_cpu_ptr(works, cpu));
|
|
|
+ for_each_online_cpu(cpu) {
|
|
|
+ if (cpu != orig)
|
|
|
+ flush_work(per_cpu_ptr(works, cpu));
|
|
|
+ }
|
|
|
put_online_cpus();
|
|
|
free_percpu(works);
|
|
|
return 0;
|