forked from Minki/linux
ACPI: Defer enabling of level GPE until all pending notifies done
A level-triggered GPE should not be re-enabled until all work caused by it is done, e.g. until all of its Notify() methods have completed. This is accomplished by appending the enable_gpe function to the end of the notify queue.

Signed-off-by: Alexey Starikovskiy <astarikovskiy@suse.de>
Acked-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
This commit is contained in:
parent f194d132e4
commit 17bc54eef9
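The ordering guarantee the patch relies on: kacpi_notify_wq is created as a single-threaded workqueue in osl.c, so work items on it run strictly in the order they were queued, and an enable item queued after the pending Notify() items cannot run before they finish. A minimal sketch of the pattern, assuming a single-threaded workqueue; every name below except the workqueue API itself is illustrative, not code from the patch:

#include <linux/workqueue.h>

static struct workqueue_struct *notify_wq;      /* stands in for kacpi_notify_wq */

static void notify_fn(struct work_struct *work)
{
        /* ... execute one pending Notify() method ... */
}

static void enable_gpe_fn(struct work_struct *work)
{
        /* ... clear the GPE status bit and write the enable register ... */
}

static DECLARE_WORK(notify_item, notify_fn);
static DECLARE_WORK(enable_item, enable_gpe_fn);

static void example(void)
{
        notify_wq = create_singlethread_workqueue("example");
        if (!notify_wq)
                return;

        queue_work(notify_wq, &notify_item);    /* pending notify work ... */
        queue_work(notify_wq, &enable_item);    /* ... runs only after it completes */
}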
drivers/acpi/events/evgpe.c:

@@ -501,6 +501,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
  * an interrupt handler.
  *
  ******************************************************************************/
+static void acpi_ev_asynch_enable_gpe(void *context);
 
 static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
 {
@@ -576,22 +577,30 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
                                                 method_node)));
                 }
         }
+        /* Defer enabling of GPE until all notify handlers are done */
+        acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe,
+                        gpe_event_info);
+        return_VOID;
+}
 
-        if ((local_gpe_event_info.flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
+static void acpi_ev_asynch_enable_gpe(void *context)
+{
+        struct acpi_gpe_event_info *gpe_event_info = context;
+        acpi_status status;
+        if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
             ACPI_GPE_LEVEL_TRIGGERED) {
                 /*
                  * GPE is level-triggered, we clear the GPE status bit after
                  * handling the event.
                  */
-                status = acpi_hw_clear_gpe(&local_gpe_event_info);
+                status = acpi_hw_clear_gpe(gpe_event_info);
                 if (ACPI_FAILURE(status)) {
                         return_VOID;
                 }
         }
 
         /* Enable this GPE */
 
-        (void)acpi_hw_write_gpe_enable_reg(&local_gpe_event_info);
+        (void)acpi_hw_write_gpe_enable_reg(gpe_event_info);
         return_VOID;
 }
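Note the change folded into this hunk: the clear/enable calls switch from &local_gpe_event_info to the gpe_event_info pointer. The deferred work runs after acpi_ev_asynch_execute_gpe_method has returned, so the on-stack copy local_gpe_event_info no longer exists at that point; only the persistent struct acpi_gpe_event_info entry can be handed through the void *context argument. An abbreviated sketch of the hazard this avoids; the commented-out call is the mistake, not code from the patch:

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
        struct acpi_gpe_event_info *gpe_event_info = context;   /* persistent table entry */
        struct acpi_gpe_event_info local_gpe_event_info;        /* dies with this frame */

        /* ... copy *gpe_event_info into local_gpe_event_info, run the method ... */

        /* WRONG: &local_gpe_event_info would dangle by the time the work runs:
         * acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe,
         *                 &local_gpe_event_info);
         */

        /* RIGHT (what the patch does): pass the long-lived entry */
        acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe,
                        gpe_event_info);
        return_VOID;
}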
drivers/acpi/osl.c:

@@ -618,25 +618,6 @@ static void acpi_os_execute_deferred(struct work_struct *work)
         dpc->function(dpc->context);
         kfree(dpc);
 
-        /* Yield cpu to notify thread */
-        cond_resched();
-
         return;
 }
 
-static void acpi_os_execute_notify(struct work_struct *work)
-{
-        struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
-
-        if (!dpc) {
-                printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
-                return;
-        }
-
-        dpc->function(dpc->context);
-
-        kfree(dpc);
-
-        return;
-}
-
@@ -660,7 +641,7 @@ acpi_status acpi_os_execute(acpi_execute_type type,
 {
         acpi_status status = AE_OK;
         struct acpi_os_dpc *dpc;
-
+        struct workqueue_struct *queue;
         ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
                           "Scheduling function [%p(%p)] for deferred execution.\n",
                           function, context));
@@ -684,20 +665,13 @@ acpi_status acpi_os_execute(acpi_execute_type type,
         dpc->function = function;
         dpc->context = context;
 
-        if (type == OSL_NOTIFY_HANDLER) {
-                INIT_WORK(&dpc->work, acpi_os_execute_notify);
-                if (!queue_work(kacpi_notify_wq, &dpc->work)) {
-                        status = AE_ERROR;
-                        kfree(dpc);
-                }
-        } else {
-                INIT_WORK(&dpc->work, acpi_os_execute_deferred);
-                if (!queue_work(kacpid_wq, &dpc->work)) {
-                        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-                                          "Call to queue_work() failed.\n"));
-                        status = AE_ERROR;
-                        kfree(dpc);
-                }
+        INIT_WORK(&dpc->work, acpi_os_execute_deferred);
+        queue = (type == OSL_NOTIFY_HANDLER) ? kacpi_notify_wq : kacpid_wq;
+        if (!queue_work(queue, &dpc->work)) {
+                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+                                  "Call to queue_work() failed.\n"));
+                status = AE_ERROR;
+                kfree(dpc);
         }
         return_ACPI_STATUS(status);
 }
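With the notify-specific work function gone, both execution types share acpi_os_execute_deferred and only the destination workqueue differs, chosen by the ternary at queueing time. From a caller's point of view nothing changes. A hedged usage sketch, where my_handler and schedule_my_handler are illustrative names and not part of the patch:

#include <linux/errno.h>
#include <acpi/acpi.h>          /* acpi_status, acpi_os_execute(), OSL_NOTIFY_HANDLER */

static void my_handler(void *context)
{
        /* runs on kacpi_notify_wq, strictly after notify work queued before it */
}

static int schedule_my_handler(void *context)
{
        acpi_status status = acpi_os_execute(OSL_NOTIFY_HANDLER, my_handler, context);

        if (ACPI_FAILURE(status))
                return -EIO;    /* queue_work() failed; the dpc was already freed */
        return 0;
}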