acpi_status
acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_set_gpe_type);

	/* Validate type and update register enable masks */

	switch (type) {
	case ACPI_GPE_TYPE_WAKE:
	case ACPI_GPE_TYPE_RUNTIME:
	case ACPI_GPE_TYPE_WAKE_RUN:
		break;

	default:
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/* Disable the GPE if currently enabled */

	status = acpi_ev_disable_gpe(gpe_event_info);

	/* Clear the type bits and insert the new type */

	gpe_event_info->flags &= ~ACPI_GPE_TYPE_MASK;
	gpe_event_info->flags |= type;

	return_ACPI_STATUS(status);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_disable_gpe
 *
 * PARAMETERS:  gpe_device      - Parent GPE Device
 *              gpe_number      - GPE level within the GPE block
 *              flags           - Just disable, or also wake disable?
 *                                Called from ISR or not
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Disable an ACPI event (general purpose)
 *
 ******************************************************************************/
acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
{
	acpi_status status = AE_OK;
	struct acpi_gpe_event_info *gpe_event_info;

	ACPI_FUNCTION_TRACE(acpi_disable_gpe);

	/* Use semaphore lock if not executing at interrupt level */

	if (flags & ACPI_NOT_ISR) {
		status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	}

	/* Ensure that we have a valid GPE number */

	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
	if (!gpe_event_info) {
		status = AE_BAD_PARAMETER;
		goto unlock_and_exit;
	}

	status = acpi_ev_disable_gpe(gpe_event_info);

unlock_and_exit:
	if (flags & ACPI_NOT_ISR) {
		(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	}
	return_ACPI_STATUS(status);
}
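/*
 * Usage sketch (not part of ACPICA): calling the flags-based variant of
 * acpi_disable_gpe() from the two contexts it supports. The device handle
 * "my_gpe_device" and GPE number 0x10 are hypothetical placeholders, and
 * ACPI_ISR is assumed to be the companion flag to ACPI_NOT_ISR in this API
 * revision.
 */
static void my_disable_gpe_sketch(acpi_handle my_gpe_device)
{
	/* Process context: the events mutex is taken internally */

	(void)acpi_disable_gpe(my_gpe_device, 0x10, ACPI_NOT_ISR);
}

static void my_disable_gpe_from_isr_sketch(acpi_handle my_gpe_device)
{
	/* Interrupt context: the mutex must be skipped */

	(void)acpi_disable_gpe(my_gpe_device, 0x10, ACPI_ISR);
}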
/*******************************************************************************
 *
 * FUNCTION:    acpi_set_gpe
 *
 * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
 *              gpe_number      - GPE level within the GPE block
 *              action          - ACPI_GPE_ENABLE or ACPI_GPE_DISABLE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Enable or disable an individual GPE. This function bypasses
 *              the reference count mechanism used in the acpi_enable_gpe and
 *              acpi_disable_gpe interfaces -- and should be used with care.
 *
 * Note: Typically used to disable a runtime GPE for a short period of time,
 * then re-enable it, without disturbing the existing reference counts. This
 * is useful, for example, in the Embedded Controller (EC) driver.
 *
 ******************************************************************************/
acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action)
{
	struct acpi_gpe_event_info *gpe_event_info;
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(acpi_set_gpe);

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Ensure that we have a valid GPE number */

	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
	if (!gpe_event_info) {
		status = AE_BAD_PARAMETER;
		goto unlock_and_exit;
	}

	/* Perform the action */

	switch (action) {
	case ACPI_GPE_ENABLE:
		status = acpi_ev_enable_gpe(gpe_event_info);
		break;

	case ACPI_GPE_DISABLE:
		status = acpi_ev_disable_gpe(gpe_event_info);
		break;

	default:
		status = AE_BAD_PARAMETER;
		break;
	}

unlock_and_exit:
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return_ACPI_STATUS(status);
}
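/*
 * Usage sketch (not part of ACPICA): how an EC-style driver might use
 * acpi_set_gpe() to mask a runtime GPE around a transaction without touching
 * the reference counts. The handle "ec_gpe_device", GPE number
 * "ec_gpe_number", and the helper "ec_do_transaction" are hypothetical
 * placeholders.
 */
static void ec_do_transaction(void)
{
	/* Placeholder for work performed while the GPE is masked */
}

static acpi_status ec_transaction_sketch(acpi_handle ec_gpe_device,
					 u32 ec_gpe_number)
{
	acpi_status status;

	/* Temporarily mask the GPE; reference counts are left untouched */

	status = acpi_set_gpe(ec_gpe_device, ec_gpe_number, ACPI_GPE_DISABLE);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	ec_do_transaction();

	/* Unmask the GPE again */

	return (acpi_set_gpe(ec_gpe_device, ec_gpe_number, ACPI_GPE_ENABLE));
}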
/*******************************************************************************
 *
 * FUNCTION:    acpi_remove_gpe_handler
 *
 * PARAMETERS:  gpe_device      - Namespace node for the GPE (NULL for FADT
 *                                defined GPEs)
 *              gpe_number      - The event to remove a handler
 *              address         - Address of the handler
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove a handler for a General Purpose acpi_event.
 *
 ******************************************************************************/
acpi_status
acpi_remove_gpe_handler(acpi_handle gpe_device,
			u32 gpe_number, acpi_event_handler address)
{
	struct acpi_gpe_event_info *gpe_event_info;
	struct acpi_handler_info *handler;
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(acpi_remove_gpe_handler);

	/* Parameter validation */

	if (!address) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Ensure that we have a valid GPE number */

	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
	if (!gpe_event_info) {
		status = AE_BAD_PARAMETER;
		goto unlock_and_exit;
	}

	/* Make sure that a handler is indeed installed */

	if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) !=
	    ACPI_GPE_DISPATCH_HANDLER) {
		status = AE_NOT_EXIST;
		goto unlock_and_exit;
	}

	/* Make sure that the installed handler is the same */

	if (gpe_event_info->dispatch.handler->address != address) {
		status = AE_BAD_PARAMETER;
		goto unlock_and_exit;
	}

	/* Disable the GPE before removing the handler */

	status = acpi_ev_disable_gpe(gpe_event_info);
	if (ACPI_FAILURE(status)) {
		goto unlock_and_exit;
	}

	/* Make sure all deferred tasks are completed */

	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	acpi_os_wait_events_complete(NULL);
	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Remove the handler */

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	handler = gpe_event_info->dispatch.handler;

	/* Restore Method node (if any), set dispatch flags */

	gpe_event_info->dispatch.method_node = handler->method_node;
	gpe_event_info->flags &= ~ACPI_GPE_DISPATCH_MASK;	/* Clear bits */
	if (handler->method_node) {
		gpe_event_info->flags |= ACPI_GPE_DISPATCH_METHOD;
	}
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

	/* Now we can free the handler object */

	ACPI_FREE(handler);

unlock_and_exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return_ACPI_STATUS(status);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_install_gpe_handler
 *
 * PARAMETERS:  gpe_device      - Namespace node for the GPE (NULL for FADT
 *                                defined GPEs)
 *              gpe_number      - The GPE number within the GPE block
 *              type            - Whether this GPE should be treated as an
 *                                edge- or level-triggered interrupt
 *              address         - Address of the handler
 *              context         - Value passed to the handler on each GPE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Install a handler for a General Purpose Event.
 *
 ******************************************************************************/
acpi_status
acpi_install_gpe_handler(acpi_handle gpe_device,
			 u32 gpe_number,
			 u32 type, acpi_event_handler address, void *context)
{
	struct acpi_gpe_event_info *gpe_event_info;
	struct acpi_handler_info *handler;
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(acpi_install_gpe_handler);

	/* Parameter validation */

	if ((!address) || (type > ACPI_GPE_XRUPT_TYPE_MASK)) {
		status = AE_BAD_PARAMETER;
		goto exit;
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		goto exit;
	}

	/* Ensure that we have a valid GPE number */

	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
	if (!gpe_event_info) {
		status = AE_BAD_PARAMETER;
		goto unlock_and_exit;
	}

	/* Make sure that there isn't a handler there already */

	if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
	    ACPI_GPE_DISPATCH_HANDLER) {
		status = AE_ALREADY_EXISTS;
		goto unlock_and_exit;
	}

	/* Allocate and init handler object */

	handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_handler_info));
	if (!handler) {
		status = AE_NO_MEMORY;
		goto unlock_and_exit;
	}

	handler->address = address;
	handler->context = context;
	handler->method_node = gpe_event_info->dispatch.method_node;

	/* Disable the GPE before installing the handler */

	status = acpi_ev_disable_gpe(gpe_event_info);
	if (ACPI_FAILURE(status)) {
		goto unlock_and_exit;
	}

	/* Install the handler */

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	gpe_event_info->dispatch.handler = handler;

	/* Set up dispatch flags to indicate handler (vs. method) */

	gpe_event_info->flags &=
	    ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
	gpe_event_info->flags |= (u8) (type | ACPI_GPE_DISPATCH_HANDLER);

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

unlock_and_exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);

exit:
	if (ACPI_FAILURE(status))
		ACPI_EXCEPTION((AE_INFO, status,
				"Installing GPE handler failed"));
	return_ACPI_STATUS(status);
}
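/*
 * Usage sketch (not part of ACPICA): installing and later removing a GPE
 * handler with the interfaces above. The handler "my_gpe_handler", the handle
 * "my_gpe_device", and GPE number 0x18 are hypothetical placeholders. Note
 * that the install path above leaves the GPE disabled, so a real driver would
 * typically still have to enable the GPE afterwards.
 */
static u32 my_gpe_handler(void *context)
{
	/* Runs at interrupt level; keep the work here short */

	return (ACPI_INTERRUPT_HANDLED);
}

static acpi_status my_gpe_handler_sketch(acpi_handle my_gpe_device)
{
	acpi_status status;

	/* Treat the GPE as level-triggered and dispatch it to my_gpe_handler */

	status = acpi_install_gpe_handler(my_gpe_device, 0x18,
					  ACPI_GPE_LEVEL_TRIGGERED,
					  my_gpe_handler, NULL);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* On teardown, the same handler address must be passed back */

	return (acpi_remove_gpe_handler(my_gpe_device, 0x18, my_gpe_handler));
}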
u32
acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

	acpi_os_gpe_count(gpe_number);

	/*
	 * If edge-triggered, clear the GPE status bit now. Note that
	 * level-triggered events are cleared after the GPE is serviced.
	 */
	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_EDGE_TRIGGERED) {
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to clear GPE[%2X]",
					gpe_number));
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
	}

	/*
	 * Dispatch the GPE to either an installed handler, or the control
	 * method associated with this GPE (_Lxx or _Exx). If a handler
	 * exists, we invoke it and do not attempt to run the method. If
	 * there is neither a handler nor a method, we disable this GPE to
	 * prevent further such pointless events from firing.
	 */
	switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
	case ACPI_GPE_DISPATCH_HANDLER:

		/*
		 * Invoke the installed handler (at interrupt level)
		 * Ignore return status for now.
		 * TBD: leave GPE disabled on error?
		 */
		(void)gpe_event_info->dispatch.handler->
		    address(gpe_event_info->dispatch.handler->context);

		/* It is now safe to clear level-triggered events. */

		if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
		    ACPI_GPE_LEVEL_TRIGGERED) {
			status = acpi_hw_clear_gpe(gpe_event_info);
			if (ACPI_FAILURE(status)) {
				ACPI_EXCEPTION((AE_INFO, status,
						"Unable to clear GPE[%2X]",
						gpe_number));
				return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
			}
		}
		break;

	case ACPI_GPE_DISPATCH_METHOD:

		/*
		 * Disable the GPE, so it doesn't keep firing before the
		 * method has a chance to run (it runs asynchronously with
		 * interrupts enabled).
		 */
		status = acpi_ev_disable_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to disable GPE[%2X]",
					gpe_number));
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}

		/*
		 * Execute the method associated with the GPE
		 * NOTE: Level-triggered GPEs are cleared after the method
		 * completes.
		 */
		status = acpi_os_execute(OSL_GPE_HANDLER,
					 acpi_ev_asynch_execute_gpe_method,
					 gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to queue handler for GPE[%2X] - event disabled",
					gpe_number));
		}
		break;

	default:

		/* No handler or method to run! */

		ACPI_ERROR((AE_INFO,
			    "No handler or method for GPE[%2X], disabling event",
			    gpe_number));

		/*
		 * Disable the GPE. The GPE will remain disabled until the
		 * ACPICA Core Subsystem is restarted, or a handler is
		 * installed.
		 */
		status = acpi_ev_disable_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to disable GPE[%2X]",
					gpe_number));
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
		break;
	}

	return_UINT32(ACPI_INTERRUPT_HANDLED);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_disable_gpe
 *
 * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
 *              gpe_number      - GPE level within the GPE block
 *              gpe_type        - ACPI_GPE_TYPE_RUNTIME or ACPI_GPE_TYPE_WAKE
 *                                or both
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove a reference to a GPE. When the last reference is
 *              removed, only then is the GPE disabled (for runtime GPEs), or
 *              the GPE mask bit disabled (for wake GPEs)
 *
 ******************************************************************************/
acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type)
{
	acpi_status status = AE_OK;
	struct acpi_gpe_event_info *gpe_event_info;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(acpi_disable_gpe);

	/* Parameter validation */

	if (!gpe_type || (gpe_type & ~ACPI_GPE_TYPE_WAKE_RUN)) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Ensure that we have a valid GPE number */

	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
	if (!gpe_event_info) {
		status = AE_BAD_PARAMETER;
		goto unlock_and_exit;
	}

	/* Hardware-disable a runtime GPE on removal of the last reference */

	if (gpe_type & ACPI_GPE_TYPE_RUNTIME) {
		if (!gpe_event_info->runtime_count) {
			status = AE_LIMIT;	/* There are no references to remove */
			goto unlock_and_exit;
		}

		gpe_event_info->runtime_count--;
		if (!gpe_event_info->runtime_count) {
			status = acpi_ev_disable_gpe(gpe_event_info);
			if (ACPI_FAILURE(status)) {
				gpe_event_info->runtime_count++;
				goto unlock_and_exit;
			}
		}
	}

	/*
	 * Update masks for wake GPE on removal of the last reference.
	 * No need to hardware-disable wake GPEs here, they are not currently
	 * enabled.
	 */
	if (gpe_type & ACPI_GPE_TYPE_WAKE) {
		if (!gpe_event_info->wakeup_count) {
			status = AE_LIMIT;	/* There are no references to remove */
			goto unlock_and_exit;
		}

		gpe_event_info->wakeup_count--;
		if (!gpe_event_info->wakeup_count) {
			(void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
		}
	}

unlock_and_exit:
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return_ACPI_STATUS(status);
}
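/*
 * Usage sketch (not part of ACPICA): balanced use of the reference-counted
 * interfaces. It assumes the matching acpi_enable_gpe(gpe_device, gpe_number,
 * gpe_type) counterpart from the same API revision; the handle
 * "my_gpe_device" and GPE number 0x10 are hypothetical placeholders.
 */
static acpi_status my_runtime_gpe_sketch(acpi_handle my_gpe_device)
{
	acpi_status status;

	/* First reference: the GPE is enabled in hardware */

	status = acpi_enable_gpe(my_gpe_device, 0x10, ACPI_GPE_TYPE_RUNTIME);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* ... the GPE is live here ... */

	/* Last reference removed: the GPE is disabled in hardware again */

	return (acpi_disable_gpe(my_gpe_device, 0x10, ACPI_GPE_TYPE_RUNTIME));
}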