static u32
acpi_ev_global_lock_handler (
	void                            *context)
{
	u8                              acquired = FALSE;
	acpi_status                     status;


	/*
	 * Attempt to get the lock
	 * If we don't get it now, it will be marked pending and we will
	 * take another interrupt when it becomes free.
	 */
	ACPI_ACQUIRE_GLOBAL_LOCK (acpi_gbl_common_fACS.global_lock, acquired);
	if (acquired) {
		/* Got the lock, now wake all threads waiting for it */

		acpi_gbl_global_lock_acquired = TRUE;

		/* Run the Global Lock thread which will signal all waiting threads */

		status = acpi_os_queue_for_execution (OSD_PRIORITY_HIGH,
				  acpi_ev_global_lock_thread, context);
		if (ACPI_FAILURE (status)) {
			ACPI_REPORT_ERROR (("Could not queue Global Lock thread, %s\n",
				acpi_format_exception (status)));

			return (ACPI_INTERRUPT_NOT_HANDLED);
		}
	}

	return (ACPI_INTERRUPT_HANDLED);
}
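All of these examples defer work through the same primitive. For reference, here is a minimal sketch of how an OS services layer might implement acpi_os_queue_for_execution on top of the Linux workqueue API. It assumes the pre-2.6.20 three-argument INIT_WORK and ignores the priority argument; it is not the actual drivers/acpi/osl.c implementation.

#include <linux/slab.h>
#include <linux/workqueue.h>
#include <acpi/acpi.h>

/* Deferred procedure call record: the callback plus its context */

struct acpi_os_dpc {
	acpi_osd_exec_callback          function;
	void                            *context;
	struct work_struct              work;
};

static void
acpi_os_execute_deferred (
	void                            *data)
{
	struct acpi_os_dpc              *dpc = data;

	/* Now at task level; run the deferred callback and free its record */

	dpc->function (dpc->context);
	kfree (dpc);
}

acpi_status
acpi_os_queue_for_execution (
	u32                             priority,
	acpi_osd_exec_callback          function,
	void                            *context)
{
	struct acpi_os_dpc              *dpc;

	if (!function) {
		return (AE_BAD_PARAMETER);
	}

	/* Callers may be at interrupt level, so the allocation must be atomic */

	dpc = kmalloc (sizeof (*dpc), GFP_ATOMIC);
	if (!dpc) {
		return (AE_NO_MEMORY);
	}

	dpc->function = function;
	dpc->context  = context;

	/* The priority argument is ignored in this sketch */

	INIT_WORK (&dpc->work, acpi_os_execute_deferred, dpc);
	if (!schedule_work (&dpc->work)) {
		kfree (dpc);
		return (AE_ERROR);
	}

	return (AE_OK);
}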
void
tz_policy_run (
	unsigned long           data)
{
	acpi_status             status = AE_OK;

	FUNCTION_TRACE("tz_policy_run");

	if (!data) {
		ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid (NULL) context.\n"));
		return_VOID;
	}

	/*
	 * Defer to Non-Interrupt Level:
	 * -----------------------------
	 * Note that all Linux kernel timers run at interrupt-level (ack!).
	 */
	status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE, tz_policy_check, (void *) data);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Error invoking thermal policy.\n"));
	}

	return_VOID;
}
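tz_policy_run is meant to be installed as a kernel timer callback, which is why it immediately re-queues the real work to task level. Below is a minimal sketch of how such a timer might be armed with the classic (pre-4.15) timer API; the thermal-zone structure and its field names are assumptions for illustration, not the actual driver's layout.

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Hypothetical thermal-zone context; the real driver's structure differs */

struct tz_context {
	struct timer_list       timer;
	u32                     polling_freq;   /* polling period in seconds */
};

static void
tz_start_polling (
	struct tz_context       *tz)
{
	/* Classic timer API: the callback receives 'data' as an unsigned long */

	init_timer (&tz->timer);
	tz->timer.function = tz_policy_run;
	tz->timer.data     = (unsigned long) tz;
	tz->timer.expires  = jiffies + (tz->polling_freq * HZ);

	add_timer (&tz->timer);
}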
int
acpi_db_initialize (void)
{


	/* Init globals */

	acpi_gbl_db_buffer = acpi_os_callocate (ACPI_DEBUG_BUFFER_SIZE);
	if (!acpi_gbl_db_buffer) {
		return (-1);
	}

	/* Initial scope is the root */

	acpi_gbl_db_scope_buf [0] = '\\';
	acpi_gbl_db_scope_buf [1] = 0;


	/*
	 * If configured for multi-thread support, the debug executor runs in
	 * a separate thread so that the front end can be in another address
	 * space, environment, or even another machine.
	 */
	if (acpi_gbl_debugger_configuration & DEBUGGER_MULTI_THREADED) {
		/* These were created with one unit, grab it */

		acpi_ut_acquire_mutex (ACPI_MTX_DEBUG_CMD_COMPLETE);
		acpi_ut_acquire_mutex (ACPI_MTX_DEBUG_CMD_READY);

		/* Create the debug execution thread to execute commands */

		acpi_os_queue_for_execution (0, acpi_db_execute_thread, NULL);
	}

	if (!acpi_gbl_db_opt_verbose) {
		acpi_gbl_db_disasm_indent = " ";
		acpi_gbl_db_opt_disasm = TRUE;
		acpi_gbl_db_opt_stats = FALSE;
	}

	return (0);
}
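The thread queued above pairs with the two command mutexes acquired here: the front end releases ACPI_MTX_DEBUG_CMD_READY when a command is available, and the executor releases ACPI_MTX_DEBUG_CMD_COMPLETE when it is done. A rough sketch of that handshake loop follows; it is not the actual ACPICA acpi_db_execute_thread, and the command-dispatch call and shared line buffer are assumptions.

static void
acpi_db_execute_thread (
	void                            *context)
{
	acpi_status                     status = AE_OK;

	while (status != AE_CTRL_TERMINATE) {
		/* Block until the front end has a command ready */

		acpi_ut_acquire_mutex (ACPI_MTX_DEBUG_CMD_READY);

		/* Execute the command placed in the shared line buffer (assumed helper) */

		status = acpi_db_command_dispatch (acpi_gbl_db_line_buf, NULL, NULL);

		/* Tell the front end the command has completed */

		acpi_ut_release_mutex (ACPI_MTX_DEBUG_CMD_COMPLETE);
	}
}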
acpi_status
acpi_ev_queue_notify_request (
	struct acpi_namespace_node      *node,
	u32                             notify_value)
{
	union acpi_operand_object       *obj_desc;
	union acpi_operand_object       *handler_obj = NULL;
	union acpi_generic_state        *notify_info;
	acpi_status                     status = AE_OK;


	ACPI_FUNCTION_NAME ("ev_queue_notify_request");


	/*
	 * For value 3 (Ejection Request), some device method may need to be run.
	 * For value 2 (Device Wake) if _PRW exists, the _PS0 method may need to be run.
	 * For value 0x80 (Status Change) on the power button or sleep button,
	 * initiate soft-off or sleep operation?
	 */
	ACPI_DEBUG_PRINT ((ACPI_DB_INFO,
		"Dispatching Notify(%X) on node %p\n", notify_value, node));

	if (notify_value <= 7) {
		ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Notify value: %s\n",
				acpi_notify_value_names[notify_value]));
	}
	else {
		ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "notify value: 0x2.2_x **Device Specific**\n",
				notify_value));
	}

	/*
	 * Get the notify object attached to the NS Node
	 */
	obj_desc = acpi_ns_get_attached_object (node);
	if (obj_desc) {
		/* We have the notify object; get the correct handler */

		switch (node->type) {
		case ACPI_TYPE_DEVICE:
		case ACPI_TYPE_THERMAL:
		case ACPI_TYPE_PROCESSOR:
		case ACPI_TYPE_POWER:

			if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
				handler_obj = obj_desc->common_notify.system_notify;
			}
			else {
				handler_obj = obj_desc->common_notify.device_notify;
			}
			break;

		default:
			/* All other types are not supported */
			return (AE_TYPE);
		}
	}

	/* If there is any handler to run, schedule the dispatcher */

	if ((acpi_gbl_system_notify.handler && (notify_value <= ACPI_MAX_SYS_NOTIFY)) ||
		(acpi_gbl_device_notify.handler && (notify_value > ACPI_MAX_SYS_NOTIFY)) ||
		handler_obj) {
		notify_info = acpi_ut_create_generic_state ();
		if (!notify_info) {
			return (AE_NO_MEMORY);
		}

		notify_info->common.data_type = ACPI_DESC_TYPE_STATE_NOTIFY;
		notify_info->notify.node      = node;
		notify_info->notify.value     = (u16) notify_value;
		notify_info->notify.handler_obj = handler_obj;

		status = acpi_os_queue_for_execution (OSD_PRIORITY_HIGH,
				  acpi_ev_notify_dispatch, notify_info);
		if (ACPI_FAILURE (status)) {
			acpi_ut_delete_generic_state (notify_info);
		}
	}

	if (!handler_obj) {
		/* There is no per-device notify handler for this device */

		ACPI_DEBUG_PRINT ((ACPI_DB_INFO,
			"No notify handler for [%4.4s] node %p\n",
			acpi_ut_get_node_name (node), node));
	}

	return (status);
}
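For completeness, the dispatcher queued above roughly does the following: invoke the matching global (system or device) notify handler if one is installed, invoke the per-device handler object if the node had one attached, then free the state object allocated here. This is a sketch, not the actual ACPICA acpi_ev_notify_dispatch; the handler-object field names are assumptions.

static void
acpi_ev_notify_dispatch (
	void                            *context)
{
	union acpi_generic_state        *notify_info = context;
	acpi_notify_handler             global_handler = NULL;
	void                            *global_context = NULL;
	union acpi_operand_object       *handler_obj;

	/* Pick the global handler that matches the notify value range */

	if (notify_info->notify.value <= ACPI_MAX_SYS_NOTIFY) {
		global_handler = acpi_gbl_system_notify.handler;
		global_context = acpi_gbl_system_notify.context;
	}
	else {
		global_handler = acpi_gbl_device_notify.handler;
		global_context = acpi_gbl_device_notify.context;
	}

	if (global_handler) {
		global_handler (notify_info->notify.node, notify_info->notify.value,
			global_context);
	}

	/* Then the per-device handler, if the node had one attached */

	handler_obj = notify_info->notify.handler_obj;
	if (handler_obj) {
		handler_obj->notify.handler (notify_info->notify.node,
			notify_info->notify.value, handler_obj->notify.context);
	}

	/* The queuing code allocated this state object; free it here */

	acpi_ut_delete_generic_state (notify_info);
}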
Example #5
void
ec_gpe_handler (
	void                    *context)
{
	acpi_status             status = AE_OK;
	EC_CONTEXT              *ec = (EC_CONTEXT*)context;
	EC_STATUS               ec_status = 0;

	FUNCTION_TRACE("ec_gpe_handler");

	if (!ec) {
		ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid (NULL) context.\n"));
		return_VOID;
	}

	/* TBD: synchronize w/ transaction (ectransx). */

	/*
	 * EC_SCI?
	 * -------
	 * Check the EC_SCI bit to see if this is an EC_SCI event.  If not (e.g.
	 * OBF/IBE) just return, as we already poll to detect these events.
	 */
	acpi_os_read_port(ec->status_port, &ec_status, 8);
	if (!(ec_status & EC_FLAG_SCI)) {
		return_VOID;
	}

	ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "EC_SCI event detected on ec [%02x] - running query.\n", ec->device_handle));

	/*
	 * Run Query:
	 * ----------
	 * Query the EC to find out which _Qxx method we need to evaluate.
	 * Note that successful completion of the query causes the EC_SCI
	 * bit to be cleared (and thus clearing the interrupt source).
	 */
	status = ec_io_write(ec, ec->command_port, EC_COMMAND_QUERY,
		EC_EVENT_OUTPUT_BUFFER_FULL);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Unable to send 'query command' to EC.\n"));
		return_VOID;
	}

	status = ec_io_read(ec, ec->data_port, &(ec->query_data),
		EC_EVENT_NONE);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Error reading query data.\n"));
		return_VOID;
	}

	/* TBD: un-synchronize w/ transaction (ectransx). */

	/*
	 * Spurious EC_SCI?
	 * ----------------
	 */
	if (!ec->query_data) {
		ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Spurious EC SCI detected.\n"));
		return_VOID;
	}

	/*
	 * Defer _Qxx Execution:
	 * ---------------------
	 * Can't evaluate this method now because we're at interrupt level.
	 */
	status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE,
		ec_query_handler, ec);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Unable to defer _Qxx method evaluation.\n"));
		return_VOID;
	}

	return_VOID;
}
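The deferred worker queued at the end of this handler typically turns the query byte into a _Qxx method name and evaluates it at task level. A minimal sketch follows, assuming the EC context carries its namespace handle in a field named acpi_handle (an assumption about this driver's EC_CONTEXT).

static void
ec_query_handler (
	void                    *context)
{
	EC_CONTEXT              *ec = (EC_CONTEXT *) context;
	char                    method_name[5];

	if (!ec) {
		return;
	}

	/* Build the method name from the query byte, e.g. 0x34 -> "_Q34" */

	sprintf (method_name, "_Q%02X", ec->query_data);

	/* Evaluate the _Qxx control method; no arguments or return value needed */

	if (ACPI_FAILURE (acpi_evaluate_object (ec->acpi_handle, method_name,
			NULL, NULL))) {
		ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Evaluation of %s failed.\n",
			method_name));
	}
}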
Example #6
static void
acpi_thermal_run (
	unsigned long		data)
{
	acpi_os_queue_for_execution(OSD_PRIORITY_GPE, acpi_thermal_check, (void *) data);
}
Example #7
u32
acpi_ev_gpe_dispatch (
	struct acpi_gpe_event_info      *gpe_event_info,
	u32                             gpe_number)
{
	acpi_status                     status;


	ACPI_FUNCTION_TRACE ("ev_gpe_dispatch");


	/*
	 * If edge-triggered, clear the GPE status bit now.  Note that
	 * level-triggered events are cleared after the GPE is serviced.
	 */
	if (gpe_event_info->flags & ACPI_EVENT_EDGE_TRIGGERED) {
		status = acpi_hw_clear_gpe (gpe_event_info);
		if (ACPI_FAILURE (status)) {
			ACPI_REPORT_ERROR (("acpi_ev_gpe_dispatch: Unable to clear GPE[%2X]\n",
				gpe_number));
			return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
		}
	}

	/*
	 * Dispatch the GPE to either an installed handler, or the control
	 * method associated with this GPE (_Lxx or _Exx).
	 * If a handler exists, we invoke it and do not attempt to run the method.
	 * If there is neither a handler nor a method, we disable the level to
	 * prevent further events from coming in here.
	 */
	if (gpe_event_info->handler) {
		/* Invoke the installed handler (at interrupt level) */

		gpe_event_info->handler (gpe_event_info->context);
	}
	else if (gpe_event_info->method_node) {
		/*
		 * Disable GPE, so it doesn't keep firing before the method has a
		 * chance to run.
		 */
		status = acpi_hw_disable_gpe (gpe_event_info);
		if (ACPI_FAILURE (status)) {
			ACPI_REPORT_ERROR (("acpi_ev_gpe_dispatch: Unable to disable GPE[%2X]\n",
				gpe_number));
			return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
		}

		/* Execute the method associated with the GPE. */

		if (ACPI_FAILURE (acpi_os_queue_for_execution (OSD_PRIORITY_GPE,
				 acpi_ev_asynch_execute_gpe_method,
				 gpe_event_info))) {
			ACPI_REPORT_ERROR ((
				"acpi_ev_gpe_dispatch: Unable to queue handler for GPE[%2X], event is disabled\n",
				gpe_number));
		}
	}
	else {
		/* No handler or method to run! */

		ACPI_REPORT_ERROR ((
			"acpi_ev_gpe_dispatch: No handler or method for GPE[%2X], disabling event\n",
			gpe_number));

		/*
		 * Disable the GPE.  The GPE will remain disabled until the ACPI
		 * Core Subsystem is restarted, or the handler is reinstalled.
		 */
		status = acpi_hw_disable_gpe (gpe_event_info);
		if (ACPI_FAILURE (status)) {
			ACPI_REPORT_ERROR (("acpi_ev_gpe_dispatch: Unable to disable GPE[%2X]\n",
				gpe_number));
			return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
		}
	}

	/* It is now safe to clear level-triggered events. */

	if (gpe_event_info->flags & ACPI_EVENT_LEVEL_TRIGGERED) {
		status = acpi_hw_clear_gpe (gpe_event_info);
		if (ACPI_FAILURE (status)) {
			ACPI_REPORT_ERROR (("acpi_ev_gpe_dispatch: Unable to clear GPE[%2X]\n",
				gpe_number));
			return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
		}
	}

	return_VALUE (ACPI_INTERRUPT_HANDLED);
}
Example #8
void
acpi_db_create_execution_threads (
	NATIVE_CHAR             *num_threads_arg,
	NATIVE_CHAR             *num_loops_arg,
	NATIVE_CHAR             *method_name_arg)
{
	acpi_status             status;
	u32                     num_threads;
	u32                     num_loops;
	u32                     i;
	acpi_handle             thread_gate;


	/* Get the arguments */

	num_threads = STRTOUL (num_threads_arg, NULL, 0);
	num_loops = STRTOUL (num_loops_arg, NULL, 0);

	if (!num_threads || !num_loops) {
		acpi_os_printf ("Bad argument: Threads %X, Loops %X\n", num_threads, num_loops);
		return;
	}


	/* Create the synchronization semaphore */

	status = acpi_os_create_semaphore (1, 0, &thread_gate);
	if (ACPI_FAILURE (status)) {
		acpi_os_printf ("Could not create semaphore, %s\n", acpi_format_exception (status));
		return;
	}

	/* Setup the context to be passed to each thread */

	acpi_gbl_db_method_info.name = method_name_arg;
	acpi_gbl_db_method_info.args = NULL;
	acpi_gbl_db_method_info.flags = 0;
	acpi_gbl_db_method_info.num_loops = num_loops;
	acpi_gbl_db_method_info.thread_gate = thread_gate;

	acpi_db_execute_setup (&acpi_gbl_db_method_info);


	/* Create the threads */

	acpi_os_printf ("Creating %X threads to execute %X times each\n", num_threads, num_loops);

	for (i = 0; i < (num_threads); i++) {
		acpi_os_queue_for_execution (OSD_PRIORITY_MED, acpi_db_method_thread, &acpi_gbl_db_method_info);
	}


	/* Wait for all threads to complete */

	i = num_threads;
	/* Wait one unit per thread; some OSD implementations only support single-unit waits */

	while (i) {
		status = acpi_os_wait_semaphore (thread_gate, 1, WAIT_FOREVER);
		i--;
	}

	/* Cleanup and exit */

	acpi_os_delete_semaphore (thread_gate);

	acpi_db_set_output_destination (DB_DUPLICATE_OUTPUT);
	acpi_os_printf ("All threads (%X) have completed\n", num_threads);
	acpi_db_set_output_destination (DB_CONSOLE_OUTPUT);
}
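Each worker created above is expected to run the target method num_loops times and then release one unit of thread_gate so the wait loop at the end can count completions. A sketch of such a worker follows; the context type name is taken from acpi_gbl_db_method_info above and assumed, and evaluation is done by pathname via acpi_evaluate_object rather than the debugger's internal execute helpers.

static void
acpi_db_method_thread (
	void                    *context)
{
	struct acpi_db_method_info *info = context;
	u32                     i;

	for (i = 0; i < info->num_loops; i++) {
		/* Evaluate the control method named on the debugger command line */

		if (ACPI_FAILURE (acpi_evaluate_object (NULL, info->name, NULL, NULL))) {
			acpi_os_printf ("%s: evaluation failed on iteration %X\n",
				info->name, i);
		}
	}

	/* Signal the creator that this thread is done (one unit per thread) */

	acpi_os_signal_semaphore (info->thread_gate, 1);
}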
u32
acpi_ev_gpe_dispatch (
	u32                     gpe_number)
{
	acpi_gpe_level_info     gpe_info;


	FUNCTION_TRACE ("Ev_gpe_dispatch");


	/*
	 * Valid GPE number?
	 */
	if (acpi_gbl_gpe_valid[gpe_number] == ACPI_GPE_INVALID) {
		ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid GPE bit [%X].\n", gpe_number));
		return_VALUE (INTERRUPT_NOT_HANDLED);
	}

	/*
	 * Disable the GPE.
	 */
	acpi_hw_disable_gpe (gpe_number);

	gpe_info = acpi_gbl_gpe_info [gpe_number];

	/*
	 * Edge-Triggered?
	 * ---------------
	 * If edge-triggered, clear the GPE status bit now.  Note that
	 * level-triggered events are cleared after the GPE is serviced.
	 */
	if (gpe_info.type & ACPI_EVENT_EDGE_TRIGGERED) {
		acpi_hw_clear_gpe (gpe_number);
	}
		/*
		 * Function Handler (e.g. EC)?
		 */
	if (gpe_info.handler) {
		/* Invoke function handler (at interrupt level). */

		gpe_info.handler (gpe_info.context);

		/* Level-Triggered? */

		if (gpe_info.type & ACPI_EVENT_LEVEL_TRIGGERED) {
			acpi_hw_clear_gpe (gpe_number);
		}

		/* Enable GPE */

		acpi_hw_enable_gpe (gpe_number);
	}

	/*
	 * Method Handler (e.g. _Exx/_Lxx)?
	 */
	else if (gpe_info.method_handle) {
		if (ACPI_FAILURE(acpi_os_queue_for_execution (OSD_PRIORITY_GPE,
			acpi_ev_asynch_execute_gpe_method, (void *) (NATIVE_UINT) gpe_number))) {
			/*
			 * Shouldn't occur, but if it does, report an error. Note that
			 * the GPE will remain disabled until the ACPI Core Subsystem
			 * is restarted, or the handler is removed/reinstalled.
			 */
			REPORT_ERROR (("Acpi_ev_gpe_dispatch: Unable to queue handler for GPE bit [%X]\n", gpe_number));
		}
	}

	/*
	 * No Handler? Report an error and leave the GPE disabled.
	 */
	else {
		REPORT_ERROR (("Acpi_ev_gpe_dispatch: No installed handler for GPE [%X]\n", gpe_number));

		/* Level-Triggered? */

		if (gpe_info.type & ACPI_EVENT_LEVEL_TRIGGERED) {
			acpi_hw_clear_gpe (gpe_number);
		}
	}

	return_VALUE (INTERRUPT_HANDLED);
}