/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_enable_gpe
 *
 * PARAMETERS:  gpe_event_info      - GPE to enable
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Clear a GPE of stale events and enable it.
 *
 ******************************************************************************/

acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
    acpi_status status;

    ACPI_FUNCTION_TRACE(ev_enable_gpe);

    /*
     * We will only allow a GPE to be enabled if it has either an associated
     * method (_Lxx/_Exx) or a handler, or is using the implicit notify
     * feature. Otherwise, the GPE will be immediately disabled by
     * acpi_ev_gpe_dispatch the first time it fires.
     */
    if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
        ACPI_GPE_DISPATCH_NONE) {
        return_ACPI_STATUS(AE_NO_HANDLER);
    }

    /* Clear the GPE (of stale events) */

    status = acpi_hw_clear_gpe(gpe_event_info);
    if (ACPI_FAILURE(status)) {
        return_ACPI_STATUS(status);
    }

    /* Enable the requested GPE */

    status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);

    return_ACPI_STATUS(status);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_clear_gpe
 *
 * PARAMETERS:  gpe_device      - Parent GPE Device
 *              gpe_number      - GPE level within the GPE block
 *              flags           - Called from an ISR or not
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Clear an ACPI event (general purpose)
 *
 ******************************************************************************/

acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
{
    acpi_status status = AE_OK;
    struct acpi_gpe_event_info *gpe_event_info;

    ACPI_FUNCTION_TRACE(acpi_clear_gpe);

    /* Use semaphore lock if not executing at interrupt level */

    if (flags & ACPI_NOT_ISR) {
        status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
        if (ACPI_FAILURE(status)) {
            return_ACPI_STATUS(status);
        }
    }

    /* Ensure that we have a valid GPE number */

    gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
    if (!gpe_event_info) {
        status = AE_BAD_PARAMETER;
        goto unlock_and_exit;
    }

    status = acpi_hw_clear_gpe(gpe_event_info);

unlock_and_exit:
    if (flags & ACPI_NOT_ISR) {
        (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
    }
    return_ACPI_STATUS(status);
}
acpi_status
acpi_ev_enable_gpe (
    struct acpi_gpe_event_info *gpe_event_info,
    u8 write_to_hardware)
{
    acpi_status status;


    ACPI_FUNCTION_TRACE ("ev_enable_gpe");


    /* Make sure HW enable masks are updated */

    status = acpi_ev_update_gpe_enable_masks (gpe_event_info, ACPI_GPE_ENABLE);
    if (ACPI_FAILURE (status)) {
        return_ACPI_STATUS (status);
    }

    /* Mark wake-enabled or HW enable, or both */

    switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
    case ACPI_GPE_TYPE_WAKE:
        ACPI_SET_BIT (gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
        break;

    case ACPI_GPE_TYPE_WAKE_RUN:
        ACPI_SET_BIT (gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);

        /*lint -fallthrough */

    case ACPI_GPE_TYPE_RUNTIME:
        ACPI_SET_BIT (gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);

        if (write_to_hardware) {
            /* Clear the GPE (of stale events), then enable it */

            status = acpi_hw_clear_gpe (gpe_event_info);
            if (ACPI_FAILURE (status)) {
                return_ACPI_STATUS (status);
            }

            /* Enable the requested runtime GPE */

            status = acpi_hw_write_gpe_enable_reg (gpe_event_info);
        }
        break;

    default:
        return_ACPI_STATUS (AE_BAD_PARAMETER);
    }

    return_ACPI_STATUS (AE_OK);
}
acpi_status
acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info,
                   u8 write_to_hardware)
{
    acpi_status status;

    ACPI_FUNCTION_TRACE(ev_enable_gpe);

    /* Update the run/wake enable masks before touching hardware */

    status = acpi_ev_update_gpe_enable_masks(gpe_event_info, ACPI_GPE_ENABLE);
    if (ACPI_FAILURE(status)) {
        return_ACPI_STATUS(status);
    }

    /* Mark the GPE as wake-enabled, run-enabled, or both */

    switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
    case ACPI_GPE_TYPE_WAKE:
        ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
        break;

    case ACPI_GPE_TYPE_WAKE_RUN:
        ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);

        /* fall through - a wake/run GPE is also runtime-enabled */

    case ACPI_GPE_TYPE_RUNTIME:
        ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);

        if (write_to_hardware) {
            /* Clear stale events, then enable the GPE */

            status = acpi_hw_clear_gpe(gpe_event_info);
            if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
            }

            status = acpi_hw_write_gpe_enable_reg(gpe_event_info);
        }
        break;

    default:
        return_ACPI_STATUS(AE_BAD_PARAMETER);
    }

    return_ACPI_STATUS(AE_OK);
}
acpi_status
acpi_install_gpe_handler (
    u32 gpe_number,
    u32 type,
    acpi_gpe_handler handler,
    void *context)
{
    acpi_status status = AE_OK;


    FUNCTION_TRACE ("Acpi_install_gpe_handler");


    /* Parameter validation */

    if (!handler || (gpe_number > ACPI_GPE_MAX)) {
        return_ACPI_STATUS (AE_BAD_PARAMETER);
    }

    /* Ensure that we have a valid GPE number */

    if (acpi_gbl_gpe_valid[gpe_number] == ACPI_GPE_INVALID) {
        return_ACPI_STATUS (AE_BAD_PARAMETER);
    }

    acpi_ut_acquire_mutex (ACPI_MTX_EVENTS);

    /* Make sure that there isn't a handler there already */

    if (acpi_gbl_gpe_info[gpe_number].handler) {
        status = AE_EXIST;
        goto cleanup;
    }

    /* Install the handler */

    acpi_gbl_gpe_info[gpe_number].handler = handler;
    acpi_gbl_gpe_info[gpe_number].context = context;
    acpi_gbl_gpe_info[gpe_number].type = (u8) type;

    /* Clear the GPE (of stale events), then enable it */

    acpi_hw_clear_gpe (gpe_number);
    acpi_hw_enable_gpe (gpe_number);

cleanup:
    acpi_ut_release_mutex (ACPI_MTX_EVENTS);
    return_ACPI_STATUS (status);
}
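A minimal caller sketch for the legacy installer above, under stated assumptions: the handler type takes only its context pointer (as it is invoked in the older acpi_ev_gpe_dispatch further below), and the GPE number, trigger type, handler name, and init-function name are illustrative only.

/* Hypothetical handler matching the legacy acpi_gpe_handler type used above */
static void my_gpe_handler (void *context)
{
    /* Respond to the event; context is whatever was passed at install time */
}

static acpi_status my_driver_init (void)
{
    /* GPE number 0x16 and the level-triggered type are illustrative only */
    return acpi_install_gpe_handler (0x16, ACPI_EVENT_LEVEL_TRIGGERED,
        my_gpe_handler, NULL);
}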
static void acpi_ev_asynch_enable_gpe(void *context)
{
    struct acpi_gpe_event_info *gpe_event_info = context;
    acpi_status status;

    if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
        ACPI_GPE_LEVEL_TRIGGERED) {
        /* Level-triggered: clear the status bit after the event is handled */

        status = acpi_hw_clear_gpe(gpe_event_info);
        if (ACPI_FAILURE(status)) {
            return_VOID;
        }
    }

    /* Re-enable this GPE */

    (void)acpi_hw_write_gpe_enable_reg(gpe_event_info);
    return_VOID;
}
acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
    acpi_status status;

    ACPI_FUNCTION_TRACE(ev_enable_gpe);

    /* Clear the GPE (of stale events) */

    status = acpi_hw_clear_gpe(gpe_event_info);
    if (ACPI_FAILURE(status)) {
        return_ACPI_STATUS(status);
    }

    /* Enable the requested GPE */

    status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
    return_ACPI_STATUS(status);
}
static void
acpi_ev_asynch_execute_gpe_method (
    void *context)
{
    unsigned long gpe_number = (unsigned long) context;
    acpi_gpe_level_info gpe_info;


    FUNCTION_TRACE ("Ev_asynch_execute_gpe_method");


    /*
     * Take a snapshot of the GPE info for this level
     */
    acpi_ut_acquire_mutex (ACPI_MTX_EVENTS);
    gpe_info = acpi_gbl_gpe_info [gpe_number];
    acpi_ut_release_mutex (ACPI_MTX_EVENTS);

    /*
     * Method Handler (_Lxx, _Exx):
     * ----------------------------
     * Evaluate the _Lxx/_Exx control method that corresponds to this GPE.
     */
    if (gpe_info.method_handle) {
        acpi_ns_evaluate_by_handle (gpe_info.method_handle, NULL, NULL);
    }

    /*
     * Level-Triggered?
     * ----------------
     * If level-triggered we clear the GPE status bit after handling the event.
     */
    if (gpe_info.type & ACPI_EVENT_LEVEL_TRIGGERED) {
        acpi_hw_clear_gpe (gpe_number);
    }

    /*
     * Enable the GPE.
     */
    acpi_hw_enable_gpe (gpe_number);

    return_VOID;
}
static void acpi_ev_asynch_enable_gpe(void *context)
{
    struct acpi_gpe_event_info *gpe_event_info = context;
    acpi_status status;

    if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
        ACPI_GPE_LEVEL_TRIGGERED) {
        /*
         * GPE is level-triggered, we clear the GPE status bit after handling
         * the event.
         */
        status = acpi_hw_clear_gpe(gpe_event_info);
        if (ACPI_FAILURE(status)) {
            return_VOID;
        }
    }

    /* Enable this GPE */

    (void)acpi_hw_write_gpe_enable_reg(gpe_event_info);
    return_VOID;
}
static void acpi_ev_asynch_enable_gpe(void *context)
{
    struct acpi_gpe_event_info *gpe_event_info = context;
    acpi_status status;

    if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
        ACPI_GPE_LEVEL_TRIGGERED) {
        /*
         * GPE is level-triggered, we clear the GPE status bit after handling
         * the event.
         */
        status = acpi_hw_clear_gpe(gpe_event_info);
        if (ACPI_FAILURE(status)) {
            return_VOID;
        }
    }

    /*
     * Enable this GPE, conditionally. This means that the GPE will only be
     * physically enabled if the enable_for_run bit is set in the event_info.
     */
    (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_COND_ENABLE);
    return_VOID;
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_clear_gpe
 *
 * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
 *              gpe_number      - GPE level within the GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Clear an ACPI event (general purpose)
 *
 ******************************************************************************/

acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number)
{
    acpi_status status = AE_OK;
    struct acpi_gpe_event_info *gpe_event_info;
    acpi_cpu_flags flags;

    ACPI_FUNCTION_TRACE(acpi_clear_gpe);

    flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

    /* Ensure that we have a valid GPE number */

    gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
    if (!gpe_event_info) {
        status = AE_BAD_PARAMETER;
        goto unlock_and_exit;
    }

    status = acpi_hw_clear_gpe(gpe_event_info);

unlock_and_exit:
    acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
    return_ACPI_STATUS(status);
}
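A minimal usage sketch for the newer two-argument form above; the GPE number and the wrapper function name are illustrative, and passing NULL for gpe_device selects the FADT-defined GPE0/GPE1 blocks as noted in the header comment.

/* Hypothetical call site: discard any stale status for GPE 0x10 in the
 * permanent GPE0/GPE1 blocks before the event is (re)enabled elsewhere. */
static acpi_status my_clear_stale_gpe (void)
{
    return acpi_clear_gpe (NULL, 0x10);
}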
acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
    acpi_status status;

    if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
        ACPI_GPE_LEVEL_TRIGGERED) {
        /*
         * GPE is level-triggered, we clear the GPE status bit after
         * handling the event.
         */
        status = acpi_hw_clear_gpe(gpe_event_info);
        if (ACPI_FAILURE(status)) {
            return (status);
        }
    }

    /*
     * Enable this GPE, conditionally. This means that the GPE will
     * only be physically enabled if the enable_mask bit is set
     * in the event_info.
     */
    (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE);
    return (AE_OK);
}
u32 acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info,
                         u32 gpe_number)
{
    acpi_status status;

    ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

    acpi_os_gpe_count(gpe_number);

    /*
     * If edge-triggered, clear the GPE status bit now. Note that
     * level-triggered events are cleared after the GPE is serviced.
     */
    if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
        ACPI_GPE_EDGE_TRIGGERED) {
        status = acpi_hw_clear_gpe(gpe_event_info);
        if (ACPI_FAILURE(status)) {
            ACPI_EXCEPTION((AE_INFO, status,
                            "Unable to clear GPE[%2X]", gpe_number));
            return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
        }
    }

    /*
     * Dispatch the GPE to either an installed handler, or the control method
     * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke
     * it and do not attempt to run the method. If there is neither a handler
     * nor a method, we disable this GPE to prevent further such pointless
     * events from firing.
     */
    switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
    case ACPI_GPE_DISPATCH_HANDLER:

        /*
         * Invoke the installed handler (at interrupt level)
         * Ignore return status for now.
         * TBD: leave GPE disabled on error?
         */
        (void)gpe_event_info->dispatch.handler->address(gpe_event_info->
                                                        dispatch.handler->
                                                        context);

        /* It is now safe to clear level-triggered events. */

        if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
            ACPI_GPE_LEVEL_TRIGGERED) {
            status = acpi_hw_clear_gpe(gpe_event_info);
            if (ACPI_FAILURE(status)) {
                ACPI_EXCEPTION((AE_INFO, status,
                                "Unable to clear GPE[%2X]", gpe_number));
                return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
            }
        }
        break;

    case ACPI_GPE_DISPATCH_METHOD:

        /*
         * Disable the GPE, so it doesn't keep firing before the method has a
         * chance to run (it runs asynchronously with interrupts enabled).
         */
        status = acpi_ev_disable_gpe(gpe_event_info);
        if (ACPI_FAILURE(status)) {
            ACPI_EXCEPTION((AE_INFO, status,
                            "Unable to disable GPE[%2X]", gpe_number));
            return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
        }

        /*
         * Execute the method associated with the GPE
         * NOTE: Level-triggered GPEs are cleared after the method completes.
         */
        status = acpi_os_execute(OSL_GPE_HANDLER,
                                 acpi_ev_asynch_execute_gpe_method,
                                 gpe_event_info);
        if (ACPI_FAILURE(status)) {
            ACPI_EXCEPTION((AE_INFO, status,
                            "Unable to queue handler for GPE[%2X] - event disabled",
                            gpe_number));
        }
        break;

    default:

        /* No handler or method to run! */

        ACPI_ERROR((AE_INFO,
                    "No handler or method for GPE[%2X], disabling event",
                    gpe_number));

        /*
         * Disable the GPE. The GPE will remain disabled until the ACPICA
         * Core Subsystem is restarted, or a handler is installed.
         */
        status = acpi_ev_disable_gpe(gpe_event_info);
        if (ACPI_FAILURE(status)) {
            ACPI_EXCEPTION((AE_INFO, status,
                            "Unable to disable GPE[%2X]", gpe_number));
            return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
        }
        break;
    }

    return_UINT32(ACPI_INTERRUPT_HANDLED);
}
u32 acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info,
                         u32 gpe_number)
{
    acpi_status status;

    ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

    acpi_os_gpe_count(gpe_number);

    if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
        ACPI_GPE_EDGE_TRIGGERED) {
        status = acpi_hw_clear_gpe(gpe_event_info);
        if (ACPI_FAILURE(status)) {
            ACPI_EXCEPTION((AE_INFO, status,
                            "Unable to clear GPE[%2X]", gpe_number));
            return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
        }
    }

    switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
    case ACPI_GPE_DISPATCH_HANDLER:

        (void)gpe_event_info->dispatch.handler->address(gpe_event_info->
                                                        dispatch.handler->
                                                        context);

        if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
            ACPI_GPE_LEVEL_TRIGGERED) {
            status = acpi_hw_clear_gpe(gpe_event_info);
            if (ACPI_FAILURE(status)) {
                ACPI_EXCEPTION((AE_INFO, status,
                                "Unable to clear GPE[%2X]", gpe_number));
                return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
            }
        }
        break;

    case ACPI_GPE_DISPATCH_METHOD:

        status = acpi_ev_disable_gpe(gpe_event_info);
        if (ACPI_FAILURE(status)) {
            ACPI_EXCEPTION((AE_INFO, status,
                            "Unable to disable GPE[%2X]", gpe_number));
            return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
        }

        status = acpi_os_execute(OSL_GPE_HANDLER,
                                 acpi_ev_asynch_execute_gpe_method,
                                 gpe_event_info);
        if (ACPI_FAILURE(status)) {
            ACPI_EXCEPTION((AE_INFO, status,
                            "Unable to queue handler for GPE[%2X] - event disabled",
                            gpe_number));
        }
        break;

    default:

        ACPI_ERROR((AE_INFO,
                    "No handler or method for GPE[%2X], disabling event",
                    gpe_number));

        status = acpi_ev_disable_gpe(gpe_event_info);
        if (ACPI_FAILURE(status)) {
            ACPI_EXCEPTION((AE_INFO, status,
                            "Unable to disable GPE[%2X]", gpe_number));
            return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
        }
        break;
    }

    return_UINT32(ACPI_INTERRUPT_HANDLED);
}
static void ACPI_SYSTEM_XFACE
acpi_ev_asynch_execute_gpe_method (
    void *context)
{
    struct acpi_gpe_event_info *gpe_event_info = (void *) context;
    u32 gpe_number = 0;
    acpi_status status;
    struct acpi_gpe_event_info local_gpe_event_info;


    ACPI_FUNCTION_TRACE ("ev_asynch_execute_gpe_method");


    status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS);
    if (ACPI_FAILURE (status)) {
        return_VOID;
    }

    /* Must revalidate the gpe_number/gpe_block */

    if (!acpi_ev_valid_gpe_event (gpe_event_info)) {
        status = acpi_ut_release_mutex (ACPI_MTX_EVENTS);
        return_VOID;
    }

    /*
     * Take a snapshot of the GPE info for this level - we copy the
     * info to prevent a race condition with remove_handler/remove_block.
     */
    ACPI_MEMCPY (&local_gpe_event_info, gpe_event_info,
        sizeof (struct acpi_gpe_event_info));

    status = acpi_ut_release_mutex (ACPI_MTX_EVENTS);
    if (ACPI_FAILURE (status)) {
        return_VOID;
    }

    if (local_gpe_event_info.method_node) {
        /*
         * Invoke the GPE Method (_Lxx, _Exx):
         * (Evaluate the _Lxx/_Exx control method that corresponds to this GPE.)
         */
        status = acpi_ns_evaluate_by_handle (local_gpe_event_info.method_node,
            NULL, NULL);
        if (ACPI_FAILURE (status)) {
            ACPI_REPORT_ERROR ((
                "%s while evaluating method [%4.4s] for GPE[%2X]\n",
                acpi_format_exception (status),
                local_gpe_event_info.method_node->name.ascii, gpe_number));
        }
    }

    if (local_gpe_event_info.flags & ACPI_EVENT_LEVEL_TRIGGERED) {
        /*
         * GPE is level-triggered, we clear the GPE status bit after handling
         * the event.
         */
        status = acpi_hw_clear_gpe (&local_gpe_event_info);
        if (ACPI_FAILURE (status)) {
            return_VOID;
        }
    }

    /* Enable this GPE */

    (void) acpi_hw_enable_gpe (&local_gpe_event_info);
    return_VOID;
}
u32
acpi_ev_gpe_dispatch (
    struct acpi_gpe_event_info *gpe_event_info,
    u32 gpe_number)
{
    acpi_status status;


    ACPI_FUNCTION_TRACE ("ev_gpe_dispatch");


    /*
     * If edge-triggered, clear the GPE status bit now. Note that
     * level-triggered events are cleared after the GPE is serviced.
     */
    if (gpe_event_info->flags & ACPI_EVENT_EDGE_TRIGGERED) {
        status = acpi_hw_clear_gpe (gpe_event_info);
        if (ACPI_FAILURE (status)) {
            ACPI_REPORT_ERROR ((
                "acpi_ev_gpe_dispatch: Unable to clear GPE[%2X]\n",
                gpe_number));
            return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
        }
    }

    /*
     * Dispatch the GPE to either an installed handler, or the control
     * method associated with this GPE (_Lxx or _Exx).
     * If a handler exists, we invoke it and do not attempt to run the method.
     * If there is neither a handler nor a method, we disable the level to
     * prevent further events from coming in here.
     */
    if (gpe_event_info->handler) {
        /* Invoke the installed handler (at interrupt level) */

        gpe_event_info->handler (gpe_event_info->context);
    }
    else if (gpe_event_info->method_node) {
        /*
         * Disable GPE, so it doesn't keep firing before the method has a
         * chance to run.
         */
        status = acpi_hw_disable_gpe (gpe_event_info);
        if (ACPI_FAILURE (status)) {
            ACPI_REPORT_ERROR ((
                "acpi_ev_gpe_dispatch: Unable to disable GPE[%2X]\n",
                gpe_number));
            return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
        }

        /* Execute the method associated with the GPE. */

        if (ACPI_FAILURE (acpi_os_queue_for_execution (OSD_PRIORITY_GPE,
                acpi_ev_asynch_execute_gpe_method, gpe_event_info))) {
            ACPI_REPORT_ERROR ((
                "acpi_ev_gpe_dispatch: Unable to queue handler for GPE[%2X], event is disabled\n",
                gpe_number));
        }
    }
    else {
        /* No handler or method to run! */

        ACPI_REPORT_ERROR ((
            "acpi_ev_gpe_dispatch: No handler or method for GPE[%2X], disabling event\n",
            gpe_number));

        /*
         * Disable the GPE. The GPE will remain disabled until the ACPI
         * Core Subsystem is restarted, or the handler is reinstalled.
         */
        status = acpi_hw_disable_gpe (gpe_event_info);
        if (ACPI_FAILURE (status)) {
            ACPI_REPORT_ERROR ((
                "acpi_ev_gpe_dispatch: Unable to disable GPE[%2X]\n",
                gpe_number));
            return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
        }
    }

    /* It is now safe to clear level-triggered events. */

    if (gpe_event_info->flags & ACPI_EVENT_LEVEL_TRIGGERED) {
        status = acpi_hw_clear_gpe (gpe_event_info);
        if (ACPI_FAILURE (status)) {
            ACPI_REPORT_ERROR ((
                "acpi_ev_gpe_dispatch: Unable to clear GPE[%2X]\n",
                gpe_number));
            return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
        }
    }

    return_VALUE (ACPI_INTERRUPT_HANDLED);
}
u32
acpi_ev_gpe_dispatch (
    u32 gpe_number)
{
    acpi_gpe_level_info gpe_info;


    FUNCTION_TRACE ("Ev_gpe_dispatch");


    /*
     * Valid GPE number?
     */
    if (acpi_gbl_gpe_valid[gpe_number] == ACPI_GPE_INVALID) {
        ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid GPE bit [%X].\n",
            gpe_number));
        return_VALUE (INTERRUPT_NOT_HANDLED);
    }

    /*
     * Disable the GPE.
     */
    acpi_hw_disable_gpe (gpe_number);

    gpe_info = acpi_gbl_gpe_info [gpe_number];

    /*
     * Edge-Triggered?
     * ---------------
     * If edge-triggered, clear the GPE status bit now. Note that
     * level-triggered events are cleared after the GPE is serviced.
     */
    if (gpe_info.type & ACPI_EVENT_EDGE_TRIGGERED) {
        acpi_hw_clear_gpe (gpe_number);
    }

    /*
     * Function Handler (e.g. EC)?
     */
    if (gpe_info.handler) {
        /* Invoke function handler (at interrupt level). */

        gpe_info.handler (gpe_info.context);

        /* Level-Triggered? */

        if (gpe_info.type & ACPI_EVENT_LEVEL_TRIGGERED) {
            acpi_hw_clear_gpe (gpe_number);
        }

        /* Enable GPE */

        acpi_hw_enable_gpe (gpe_number);
    }

    /*
     * Method Handler (e.g. _Exx/_Lxx)?
     */
    else if (gpe_info.method_handle) {
        if (ACPI_FAILURE (acpi_os_queue_for_execution (OSD_PRIORITY_GPE,
                acpi_ev_asynch_execute_gpe_method, (void *) (u64) gpe_number))) {
            /*
             * Shouldn't occur, but if it does, report an error. Note that
             * the GPE will remain disabled until the ACPI Core Subsystem
             * is restarted, or the handler is removed/reinstalled.
             */
            REPORT_ERROR ((
                "Acpi_ev_gpe_dispatch: Unable to queue handler for GPE bit [%X]\n",
                gpe_number));
        }
    }

    /*
     * No Handler? Report an error and leave the GPE disabled.
     */
    else {
        REPORT_ERROR ((
            "Acpi_ev_gpe_dispatch: No installed handler for GPE [%X]\n",
            gpe_number));

        /* Level-Triggered? */

        if (gpe_info.type & ACPI_EVENT_LEVEL_TRIGGERED) {
            acpi_hw_clear_gpe (gpe_number);
        }
    }

    return_VALUE (INTERRUPT_HANDLED);
}
u32
acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
                     struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
    acpi_status status;
    u32 return_value;

    ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

    /*
     * Always disable the GPE so that it does not keep firing before
     * any asynchronous activity completes (either from the execution
     * of a GPE method or an asynchronous GPE handler.)
     *
     * If there is no handler or method to run, just disable the
     * GPE and leave it disabled permanently to prevent further such
     * pointless events from firing.
     */
    status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
    if (ACPI_FAILURE(status)) {
        ACPI_EXCEPTION((AE_INFO, status,
                        "Unable to disable GPE %02X", gpe_number));
        return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
    }

    /*
     * If edge-triggered, clear the GPE status bit now. Note that
     * level-triggered events are cleared after the GPE is serviced.
     */
    if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
        ACPI_GPE_EDGE_TRIGGERED) {
        status = acpi_hw_clear_gpe(gpe_event_info);
        if (ACPI_FAILURE(status)) {
            ACPI_EXCEPTION((AE_INFO, status,
                            "Unable to clear GPE %02X", gpe_number));
            (void)acpi_hw_low_set_gpe(gpe_event_info,
                                      ACPI_GPE_CONDITIONAL_ENABLE);
            return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
        }
    }

    /*
     * Dispatch the GPE to either an installed handler or the control
     * method associated with this GPE (_Lxx or _Exx). If a handler
     * exists, we invoke it and do not attempt to run the method.
     * If there is neither a handler nor a method, leave the GPE
     * disabled.
     */
    switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) {
    case ACPI_GPE_DISPATCH_HANDLER:

        /* Invoke the installed handler (at interrupt level) */

        return_value =
            gpe_event_info->dispatch.handler->address(gpe_device, gpe_number,
                                                      gpe_event_info->
                                                      dispatch.handler->
                                                      context);

        /* If requested, clear (if level-triggered) and reenable the GPE */

        if (return_value & ACPI_REENABLE_GPE) {
            (void)acpi_ev_finish_gpe(gpe_event_info);
        }
        break;

    case ACPI_GPE_DISPATCH_METHOD:
    case ACPI_GPE_DISPATCH_NOTIFY:

        /*
         * Execute the method associated with the GPE
         * NOTE: Level-triggered GPEs are cleared after the method completes.
         */
        status = acpi_os_execute(OSL_GPE_HANDLER,
                                 acpi_ev_asynch_execute_gpe_method,
                                 gpe_event_info);
        if (ACPI_FAILURE(status)) {
            ACPI_EXCEPTION((AE_INFO, status,
                            "Unable to queue handler for GPE %02X - event disabled",
                            gpe_number));
        }
        break;

    default:

        /*
         * No handler or method to run!
         * 03/2010: This case should no longer be possible. We will not allow
         * a GPE to be enabled if it has no handler or method.
         */
        ACPI_ERROR((AE_INFO,
                    "No handler or method for GPE %02X, disabling event",
                    gpe_number));
        break;
    }

    return_UINT32(ACPI_INTERRUPT_HANDLED);
}
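A minimal handler sketch to go with the dispatch path above, assuming the three-argument handler signature it invokes; the handler name and its body are illustrative. Returning ACPI_REENABLE_GPE in addition to ACPI_INTERRUPT_HANDLED is what makes the dispatcher call acpi_ev_finish_gpe, which clears a level-triggered GPE and conditionally re-enables it.

/* Hypothetical GPE handler matching the dispatch.handler->address call above */
static u32 my_gpe_handler(acpi_handle gpe_device, u32 gpe_number, void *context)
{
    /* Service the event here (runs at interrupt level) */

    /* Ask the dispatcher to clear a level-triggered GPE and re-enable it */
    return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
}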
u32 acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info,
                         u32 gpe_number)
{
    acpi_status status;

    ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

    acpi_os_gpe_count(gpe_number);

    /*
     * If edge-triggered, clear the GPE status bit now. Note that
     * level-triggered events are cleared after the GPE is serviced.
     */
    if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
        ACPI_GPE_EDGE_TRIGGERED) {
        status = acpi_hw_clear_gpe(gpe_event_info);
        if (ACPI_FAILURE(status)) {
            ACPI_EXCEPTION((AE_INFO, status,
                            "Unable to clear GPE[0x%2X]", gpe_number));
            return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
        }
    }

    /*
     * Dispatch the GPE to either any installed handler or control
     * method associated with this GPE (_Lxx or _Exx). We invoke
     * the method first in case it has side effects that would be
     * interfered with if the handler has already altered hardware
     * state. If there is neither a handler nor a method, we
     * disable this GPE to prevent further such pointless events
     * from firing.
     */
    if (gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD) {
        /*
         * Disable the GPE, so it doesn't keep firing before the method has a
         * chance to run (it runs asynchronously with interrupts enabled).
         */
        status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
        if (ACPI_FAILURE(status)) {
            ACPI_EXCEPTION((AE_INFO, status,
                            "Unable to disable GPE[0x%2X]", gpe_number));
            return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
        }

        /*
         * Execute the method associated with the GPE
         * NOTE: Level-triggered GPEs are cleared after the method completes.
         */
        status = acpi_os_execute(OSL_GPE_HANDLER,
                                 acpi_ev_asynch_execute_gpe_method,
                                 gpe_event_info);
        if (ACPI_FAILURE(status)) {
            ACPI_EXCEPTION((AE_INFO, status,
                            "Unable to queue handler for GPE[0x%2X] - event disabled",
                            gpe_number));
        }
    }

    if (gpe_event_info->flags & ACPI_GPE_DISPATCH_HANDLER) {
        /*
         * Invoke the installed handler (at interrupt level)
         * Ignore return status for now.
         * TBD: leave GPE disabled on error?
         */
        (void)gpe_event_info->dispatch.handler->address(gpe_event_info->
                                                        dispatch.handler->
                                                        context);

        /* It is now safe to clear level-triggered events. */

        if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
            ACPI_GPE_LEVEL_TRIGGERED) {
            status = acpi_hw_clear_gpe(gpe_event_info);
            if (ACPI_FAILURE(status)) {
                ACPI_EXCEPTION((AE_INFO, status,
                                "Unable to clear GPE[0x%2X]", gpe_number));
                return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
            }
        }
    }

    if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) {
        /*
         * No handler or method to run!
         * 03/2010: This case should no longer be possible. We will not allow
         * a GPE to be enabled if it has no handler or method.
         */
        ACPI_ERROR((AE_INFO,
                    "No handler or method for GPE[0x%2X], disabling event",
                    gpe_number));

        /*
         * Disable the GPE. The GPE will remain disabled until a handler
         * is installed or ACPICA is restarted.
         */
        status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
        if (ACPI_FAILURE(status)) {
            ACPI_EXCEPTION((AE_INFO, status,
                            "Unable to disable GPE[0x%2X]", gpe_number));
            return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
        }
    }

    return_UINT32(ACPI_INTERRUPT_HANDLED);
}