@@ -61,7 +61,6 @@ struct acpi_os_dpc {
 	acpi_osd_exec_callback function;
 	void *context;
 	struct work_struct work;
-	int wait;
 };
 
 #ifdef CONFIG_ACPI_CUSTOM_DSDT
@@ -1067,9 +1066,6 @@ static void acpi_os_execute_deferred(struct work_struct *work)
 {
 	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
 
-	if (dpc->wait)
-		acpi_os_wait_events_complete();
-
 	dpc->function(dpc->context);
 	kfree(dpc);
 }
@@ -1089,8 +1085,8 @@ static void acpi_os_execute_deferred(struct work_struct *work)
  *
  ******************************************************************************/
 
-static acpi_status __acpi_os_execute(acpi_execute_type type,
-	acpi_osd_exec_callback function, void *context, int hp)
+acpi_status acpi_os_execute(acpi_execute_type type,
+	acpi_osd_exec_callback function, void *context)
 {
 	acpi_status status = AE_OK;
 	struct acpi_os_dpc *dpc;
@@ -1117,20 +1113,11 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
 	dpc->context = context;
 
 	/*
-	 * We can't run hotplug code in keventd_wq/kacpid_wq/kacpid_notify_wq
-	 * because the hotplug code may call driver .remove() functions,
-	 * which invoke flush_scheduled_work/acpi_os_wait_events_complete
-	 * to flush these workqueues.
-	 *
 	 * To prevent lockdep from complaining unnecessarily, make sure that
 	 * there is a different static lockdep key for each workqueue by using
 	 * INIT_WORK() for each of them separately.
 	 */
-	if (hp) {
-		queue = kacpi_hotplug_wq;
-		dpc->wait = 1;
-		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
-	} else if (type == OSL_NOTIFY_HANDLER) {
+	if (type == OSL_NOTIFY_HANDLER) {
 		queue = kacpi_notify_wq;
 		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
 	} else {
@@ -1155,28 +1142,59 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
 	}
 	return status;
 }
+EXPORT_SYMBOL(acpi_os_execute);
 
-acpi_status acpi_os_execute(acpi_execute_type type,
-	acpi_osd_exec_callback function, void *context)
+void acpi_os_wait_events_complete(void)
 {
-	return __acpi_os_execute(type, function, context, 0);
+	flush_workqueue(kacpid_wq);
+	flush_workqueue(kacpi_notify_wq);
 }
-EXPORT_SYMBOL(acpi_os_execute);
 
-acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
-	void *context)
+struct acpi_hp_work {
+	struct work_struct work;
+	acpi_hp_callback func;
+	void *data;
+	u32 src;
+};
+
+static void acpi_hotplug_work_fn(struct work_struct *work)
 {
-	return __acpi_os_execute(0, function, context, 1);
+	struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);
+
+	acpi_os_wait_events_complete();
+	hpw->func(hpw->data, hpw->src);
+	kfree(hpw);
 }
-EXPORT_SYMBOL(acpi_os_hotplug_execute);
 
-void acpi_os_wait_events_complete(void)
+acpi_status acpi_hotplug_execute(acpi_hp_callback func, void *data, u32 src)
 {
-	flush_workqueue(kacpid_wq);
-	flush_workqueue(kacpi_notify_wq);
+	struct acpi_hp_work *hpw;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+		  "Scheduling function [%p(%p, %u)] for deferred execution.\n",
+		  func, data, src));
+
+	hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
+	if (!hpw)
+		return AE_NO_MEMORY;
+
+	INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
+	hpw->func = func;
+	hpw->data = data;
+	hpw->src = src;
+	/*
+	 * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
+	 * the hotplug code may call driver .remove() functions, which may
+	 * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
+	 * these workqueues.
+	 */
+	if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
+		kfree(hpw);
+		return AE_ERROR;
+	}
+	return AE_OK;
 }
 
-EXPORT_SYMBOL(acpi_os_wait_events_complete);
 
 acpi_status
 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
@@ -1825,25 +1843,3 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
 {
 	__acpi_os_prepare_extended_sleep = func;
 }
-
-
-void alloc_acpi_hp_work(acpi_handle handle, u32 type, void *context,
-	void (*func)(struct work_struct *work))
-{
-	struct acpi_hp_work *hp_work;
-	int ret;
-
-	hp_work = kmalloc(sizeof(*hp_work), GFP_KERNEL);
-	if (!hp_work)
-		return;
-
-	hp_work->handle = handle;
-	hp_work->type = type;
-	hp_work->context = context;
-
-	INIT_WORK(&hp_work->work, func);
-	ret = queue_work(kacpi_hotplug_wq, &hp_work->work);
-	if (!ret)
-		kfree(hp_work);
-}
-EXPORT_SYMBOL_GPL(alloc_acpi_hp_work);
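
For orientation, here is a minimal caller-side sketch of the interface the patch introduces. It is not part of the patch: the names example_hotplug_event() and example_notify() are invented for illustration, and it assumes the acpi_hotplug_execute() declaration (added elsewhere in the series, in an ACPI-internal header) is visible to the caller.

#include <linux/acpi.h>
#include <linux/printk.h>

/* Illustrative deferred handler; its shape matches the callback used above:
 * a void * cookie plus a u32 event/source code. */
static void example_hotplug_event(void *data, u32 src)
{
	/*
	 * Runs on kacpi_hotplug_wq. acpi_hotplug_work_fn() calls
	 * acpi_os_wait_events_complete() first, so pending kacpid_wq and
	 * kacpi_notify_wq work has drained before this handler executes.
	 */
	pr_info("handling hotplug event %u (data %p)\n", src, data);
}

/* Illustrative notify handler that defers the heavy lifting instead of
 * doing hotplug work directly on the notify workqueue. */
static void example_notify(acpi_handle handle, u32 event, void *data)
{
	if (ACPI_FAILURE(acpi_hotplug_execute(example_hotplug_event,
					      data, event)))
		pr_err("failed to queue hotplug work for event %u\n", event);
}

A handler like example_notify() would typically be registered with acpi_install_notify_handler(). Deferring to kacpi_hotplug_wq matters for the reason the patch's own comment gives: hotplug paths may call driver .remove() functions, which may in turn flush the kacpid/kacpi_notify workqueues.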