Example #1
acpi_status
acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
			       union acpi_operand_object *obj_desc,
			       struct acpi_walk_state *walk_state)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_PTR(ds_begin_method_execution, method_node);

	if (!method_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	/* Prevent wraparound of thread count */

	if (obj_desc->method.thread_count == ACPI_UINT8_MAX) {
		ACPI_ERROR((AE_INFO,
			    "Method reached maximum reentrancy limit (255)"));
		return_ACPI_STATUS(AE_AML_METHOD_LIMIT);
	}

	/*
	 * If this method is serialized, we need to acquire the method mutex.
	 */
	if (obj_desc->method.method_flags & AML_METHOD_SERIALIZED) {
		/*
		 * Create a mutex for the method if it is defined to be Serialized
		 * and a mutex has not already been created. We defer the mutex creation
		 * until a method is actually executed, to minimize the object count
		 */
		if (!obj_desc->method.mutex) {
			status = acpi_ds_create_method_mutex(obj_desc);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}
		}

		/*
		 * The current_sync_level (per-thread) must be less than or equal to
		 * the sync level of the method. This mechanism provides some
		 * deadlock prevention
		 *
		 * Top-level method invocation has no walk state at this point
		 */
		if (walk_state &&
		    (walk_state->thread->current_sync_level >
		     obj_desc->method.mutex->mutex.sync_level)) {
			ACPI_ERROR((AE_INFO,
				    "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%d)",
				    acpi_ut_get_node_name(method_node),
				    walk_state->thread->current_sync_level));

			return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
		}

		/*
		 * Obtain the method mutex if necessary. Do not acquire mutex for a
		 * recursive call.
		 */
		if (acpi_os_get_thread_id() !=
		    obj_desc->method.mutex->mutex.owner_thread_id) {
			/*
			 * Acquire the method mutex. This releases the interpreter if we
			 * block (and reacquires it before it returns)
			 */
			status =
			    acpi_ex_system_wait_mutex(obj_desc->method.mutex->
						      mutex.os_mutex,
						      ACPI_WAIT_FOREVER);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}

			/* Update the mutex and walk info and save the original sync_level */
			obj_desc->method.mutex->mutex.owner_thread_id =
				acpi_os_get_thread_id();

			if (walk_state) {
				obj_desc->method.mutex->mutex.
				    original_sync_level =
				    walk_state->thread->current_sync_level;

				walk_state->thread->current_sync_level =
				    obj_desc->method.sync_level;
			} else {
				obj_desc->method.mutex->mutex.
				    original_sync_level =
				    obj_desc->method.mutex->mutex.sync_level;
			}
		}

		/* Always increase acquisition depth */

		obj_desc->method.mutex->mutex.acquisition_depth++;
	}

	/*
	 * Allocate an Owner ID for this method, only if this is the first thread
	 * to begin concurrent execution. We only need one owner_id, even if the
	 * method is invoked recursively.
	 */
	if (!obj_desc->method.owner_id) {
		status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
		if (ACPI_FAILURE(status)) {
			goto cleanup;
		}
	}

	/*
	 * Increment the method parse tree thread count since it has been
	 * reentered one more time (even if it is the same thread)
	 */
	obj_desc->method.thread_count++;
	return_ACPI_STATUS(status);

      cleanup:
	/* On error, must release the method mutex (if present) */

	if (obj_desc->method.mutex) {
		acpi_os_release_mutex(obj_desc->method.mutex->mutex.os_mutex);
	}
	return_ACPI_STATUS(status);
}
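
The serialized-method path above is essentially a recursive (re-entrant) lock built on a plain OS mutex: the owning thread skips the OS-level acquire and only bumps the acquisition depth. A minimal standalone sketch of that pattern, using POSIX threads and hypothetical names (not ACPICA API):

#include <pthread.h>

struct recursive_lock {
	pthread_mutex_t os_mutex;	/* e.g. initialized with PTHREAD_MUTEX_INITIALIZER */
	pthread_t owner;		/* valid only while owner_valid != 0 */
	int owner_valid;
	unsigned int depth;		/* acquisition depth, as in the code above */
};

static void recursive_lock_acquire(struct recursive_lock *lock)
{
	/*
	 * As in the ACPICA code, the unlocked owner check is safe for the
	 * "already mine" case only because no other thread can have stored
	 * this thread's own id.
	 */
	if (!(lock->owner_valid && pthread_equal(lock->owner, pthread_self()))) {
		pthread_mutex_lock(&lock->os_mutex);
		lock->owner = pthread_self();
		lock->owner_valid = 1;
	}

	/* Always increase acquisition depth */
	lock->depth++;
}

static void recursive_lock_release(struct recursive_lock *lock)
{
	/* Only the owner may call this; depth handles recursive calls */
	if (--lock->depth == 0) {
		lock->owner_valid = 0;
		pthread_mutex_unlock(&lock->os_mutex);
	}
}
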
Example #2
acpi_status
acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
		      union acpi_operand_object *obj_desc,
		      struct acpi_walk_state *walk_state)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE_PTR(ex_acquire_mutex, obj_desc);

	if (!obj_desc) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/* Sanity check: we must have a valid thread ID */

	if (!walk_state->thread) {
		ACPI_ERROR((AE_INFO,
			    "Cannot acquire Mutex [%4.4s], null thread info",
			    acpi_ut_get_node_name(obj_desc->mutex.node)));
		return_ACPI_STATUS(AE_AML_INTERNAL);
	}

	/*
	 * The current sync level must be less than or equal to the sync level
	 * of the mutex. This mechanism provides some deadlock prevention.
	 */
	if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) {
		ACPI_ERROR((AE_INFO,
			    "Cannot acquire Mutex [%4.4s], current SyncLevel is too large (%d)",
			    acpi_ut_get_node_name(obj_desc->mutex.node),
			    walk_state->thread->current_sync_level));
		return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
	}

	/* Support for multiple acquires by the owning thread */

	if (obj_desc->mutex.owner_thread_id == acpi_os_get_thread_id()) {
		/*
		 * The mutex is already owned by this thread, just increment the
		 * acquisition depth
		 */
		obj_desc->mutex.acquisition_depth++;
		return_ACPI_STATUS(AE_OK);
	}

	/* Acquire the mutex, wait if necessary. Special case for Global Lock */

	if (obj_desc->mutex.os_mutex == acpi_gbl_global_lock_mutex) {
		status =
		    acpi_ev_acquire_global_lock((u16) time_desc->integer.value);
	} else {
		status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex,
						   (u16) time_desc->integer.
						   value);
	}

	if (ACPI_FAILURE(status)) {

		/* Includes failure from a timeout on time_desc */

		return_ACPI_STATUS(status);
	}

	/* Have the mutex: update mutex and walk info and save the sync_level */

	obj_desc->mutex.owner_thread_id = acpi_os_get_thread_id();
	obj_desc->mutex.acquisition_depth = 1;
	obj_desc->mutex.original_sync_level =
	    walk_state->thread->current_sync_level;

	walk_state->thread->current_sync_level = obj_desc->mutex.sync_level;

	/* Link the mutex to the current thread for force-unlock at method exit */

	acpi_ex_link_mutex(obj_desc, walk_state->thread);
	return_ACPI_STATUS(AE_OK);
}
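
The SyncLevel test above enforces the AML ordering rule: a thread may only acquire a mutex whose sync level is greater than or equal to its current level, and the previous level is saved so that release can restore it. A small self-contained sketch of just that bookkeeping (hypothetical types, not ACPICA API):

#include <stdio.h>

struct sketch_thread {
	unsigned char current_sync_level;
};

struct sketch_mutex {
	unsigned char sync_level;
	unsigned char original_sync_level;
};

/* Returns 0 on success, -1 if the acquire would violate the ordering rule */
static int sketch_acquire(struct sketch_thread *thr, struct sketch_mutex *mtx)
{
	if (thr->current_sync_level > mtx->sync_level) {
		return -1;	/* corresponds to AE_AML_MUTEX_ORDER above */
	}

	/* Save the caller's level and raise it to the mutex level */
	mtx->original_sync_level = thr->current_sync_level;
	thr->current_sync_level = mtx->sync_level;
	return 0;
}

static void sketch_release(struct sketch_thread *thr, struct sketch_mutex *mtx)
{
	thr->current_sync_level = mtx->original_sync_level;
}

int main(void)
{
	struct sketch_thread thr = { 0 };
	struct sketch_mutex low = { .sync_level = 1 };
	struct sketch_mutex high = { .sync_level = 4 };

	printf("%d\n", sketch_acquire(&thr, &high));	/* 0: level 0 -> 4 */
	printf("%d\n", sketch_acquire(&thr, &low));	/* -1: 4 > 1, ordering violation */
	sketch_release(&thr, &high);			/* level restored to 0 */
	return 0;
}
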
Example #3
acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
{
	acpi_status status;
	acpi_thread_id this_thread_id;

	ACPI_FUNCTION_NAME(ut_acquire_mutex);

	if (mutex_id > ACPI_MAX_MUTEX) {
		return (AE_BAD_PARAMETER);
	}

	this_thread_id = acpi_os_get_thread_id();

#ifdef ACPI_MUTEX_DEBUG
	{
		u32 i;
		/*
		 * Mutex debug code, for internal debugging only.
		 *
		 * Deadlock prevention. Check if this thread owns any mutexes of value
		 * greater than or equal to this one. If so, the thread has violated
		 * the mutex ordering rule. This indicates a coding error somewhere in
		 * the ACPI subsystem code.
		 */
		for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) {
			if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
				if (i == mutex_id) {
					ACPI_ERROR((AE_INFO,
						    "Mutex [%s] already acquired by this thread [%u]",
						    acpi_ut_get_mutex_name
						    (mutex_id),
						    (u32)this_thread_id));

					return (AE_ALREADY_ACQUIRED);
				}

				ACPI_ERROR((AE_INFO,
					    "Invalid acquire order: Thread %u owns [%s], wants [%s]",
					    (u32)this_thread_id,
					    acpi_ut_get_mutex_name(i),
					    acpi_ut_get_mutex_name(mutex_id)));

				return (AE_ACQUIRE_DEADLOCK);
			}
		}
	}
#endif

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			  "Thread %u attempting to acquire Mutex [%s]\n",
			  (u32)this_thread_id,
			  acpi_ut_get_mutex_name(mutex_id)));

	status = acpi_os_acquire_mutex(acpi_gbl_mutex_info[mutex_id].mutex,
				       ACPI_WAIT_FOREVER);
	if (ACPI_SUCCESS(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Thread %u acquired Mutex [%s]\n",
				  (u32)this_thread_id,
				  acpi_ut_get_mutex_name(mutex_id)));

		acpi_gbl_mutex_info[mutex_id].use_count++;
		acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id;
	} else {
		ACPI_EXCEPTION((AE_INFO, status,
				"Thread %u could not acquire Mutex [0x%X]",
				(u32)this_thread_id, mutex_id));
	}

	return (status);
}
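
Inside ACPICA this function is used as a simple bracket around a critical section, keyed by a fixed mutex id and always paired with acpi_ut_release_mutex (see Example #6). A hedged usage sketch, assuming it is built inside the ACPICA tree where acpi.h/accommon.h and the ACPI_MTX_NAMESPACE id are visible:

#include <acpi/acpi.h>
#include "accommon.h"

/* Hypothetical helper: serialize a namespace walk with the namespace mutex */
static acpi_status example_walk_namespace_locked(void)
{
	acpi_status status;

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* ... examine or modify the namespace here ... */

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return (AE_OK);
}
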
Example #4
acpi_status
acpi_db_single_step(struct acpi_walk_state *walk_state,
		    union acpi_parse_object *op, u32 opcode_class)
{
	union acpi_parse_object *next;
	acpi_status status = AE_OK;
	u32 original_debug_level;
	union acpi_parse_object *display_op;
	union acpi_parse_object *parent_op;
	u32 aml_offset;

	ACPI_FUNCTION_ENTRY();

#ifndef ACPI_APPLICATION
	if (acpi_gbl_db_thread_id != acpi_os_get_thread_id()) {
		return (AE_OK);
	}
#endif

	/* Check the abort flag */

	if (acpi_gbl_abort_method) {
		acpi_gbl_abort_method = FALSE;
		return (AE_ABORT_METHOD);
	}

	aml_offset = (u32)ACPI_PTR_DIFF(op->common.aml,
					walk_state->parser_state.aml_start);

	/* Check for single-step breakpoint */

	if (walk_state->method_breakpoint &&
	    (walk_state->method_breakpoint <= aml_offset)) {

		/* Check if the breakpoint has been reached or passed */
		/* Hit the breakpoint, resume single step, reset breakpoint */

		acpi_os_printf("***Break*** at AML offset %X\n", aml_offset);
		acpi_gbl_cm_single_step = TRUE;
		acpi_gbl_step_to_next_call = FALSE;
		walk_state->method_breakpoint = 0;
	}

	/* Check for user breakpoint (Must be on exact Aml offset) */

	else if (walk_state->user_breakpoint &&
		 (walk_state->user_breakpoint == aml_offset)) {
		acpi_os_printf("***UserBreakpoint*** at AML offset %X\n",
			       aml_offset);
		acpi_gbl_cm_single_step = TRUE;
		acpi_gbl_step_to_next_call = FALSE;
		walk_state->method_breakpoint = 0;
	}

	/*
	 * Check if this is an opcode that we are interested in --
	 * namely, opcodes that have arguments
	 */
	if (op->common.aml_opcode == AML_INT_NAMEDFIELD_OP) {
		return (AE_OK);
	}

	switch (opcode_class) {
	case AML_CLASS_UNKNOWN:
	case AML_CLASS_ARGUMENT:	/* constants, literals, etc. do nothing */

		return (AE_OK);

	default:

		/* All other opcodes -- continue */
		break;
	}

	/*
	 * Under certain debug conditions, display this opcode and its operands
	 */
	if ((acpi_gbl_db_output_to_file) ||
	    (acpi_gbl_cm_single_step) || (acpi_dbg_level & ACPI_LV_PARSE)) {
		if ((acpi_gbl_db_output_to_file) ||
		    (acpi_dbg_level & ACPI_LV_PARSE)) {
			acpi_os_printf
			    ("\n[AmlDebug] Next AML Opcode to execute:\n");
		}

		/*
		 * Display this op (and only this op - zero out the NEXT field
		 * temporarily, and disable parser trace output for the duration of
		 * the display because we don't want the extraneous debug output)
		 */
		original_debug_level = acpi_dbg_level;
		acpi_dbg_level &= ~(ACPI_LV_PARSE | ACPI_LV_FUNCTIONS);
		next = op->common.next;
		op->common.next = NULL;

		display_op = op;
		parent_op = op->common.parent;
		if (parent_op) {
			if ((walk_state->control_state) &&
			    (walk_state->control_state->common.state ==
			     ACPI_CONTROL_PREDICATE_EXECUTING)) {
				/*
				 * We are executing the predicate of an IF or WHILE statement
				 * Search upwards for the containing IF or WHILE so that the
				 * entire predicate can be displayed.
				 */
				while (parent_op) {
					if ((parent_op->common.aml_opcode ==
					     AML_IF_OP)
					    || (parent_op->common.aml_opcode ==
						AML_WHILE_OP)) {
						display_op = parent_op;
						break;
					}
					parent_op = parent_op->common.parent;
				}
			} else {
				while (parent_op) {
					if ((parent_op->common.aml_opcode ==
					     AML_IF_OP)
					    || (parent_op->common.aml_opcode ==
						AML_ELSE_OP)
					    || (parent_op->common.aml_opcode ==
						AML_SCOPE_OP)
					    || (parent_op->common.aml_opcode ==
						AML_METHOD_OP)
					    || (parent_op->common.aml_opcode ==
						AML_WHILE_OP)) {
						break;
					}
					display_op = parent_op;
					parent_op = parent_op->common.parent;
				}
			}
		}

		/* Now we can display it */

#ifdef ACPI_DISASSEMBLER
		acpi_dm_disassemble(walk_state, display_op, ACPI_UINT32_MAX);
#endif

		if ((op->common.aml_opcode == AML_IF_OP) ||
		    (op->common.aml_opcode == AML_WHILE_OP)) {
			if (walk_state->control_state->common.value) {
				acpi_os_printf
				    ("Predicate = [True], IF block was executed\n");
			} else {
				acpi_os_printf
				    ("Predicate = [False], Skipping IF block\n");
			}
		} else if (op->common.aml_opcode == AML_ELSE_OP) {
			acpi_os_printf
			    ("Predicate = [False], ELSE block was executed\n");
		}

		/* Restore everything */

		op->common.next = next;
		acpi_os_printf("\n");
		if ((acpi_gbl_db_output_to_file) ||
		    (acpi_dbg_level & ACPI_LV_PARSE)) {
			acpi_os_printf("\n");
		}
		acpi_dbg_level = original_debug_level;
	}

	/* If we are not single stepping, just continue executing the method */

	if (!acpi_gbl_cm_single_step) {
		return (AE_OK);
	}

	/*
	 * If we are executing a step-to-call command,
	 * Check if this is a method call.
	 */
	if (acpi_gbl_step_to_next_call) {
		if (op->common.aml_opcode != AML_INT_METHODCALL_OP) {

			/* Not a method call, just keep executing */

			return (AE_OK);
		}

		/* Found a method call, stop executing */

		acpi_gbl_step_to_next_call = FALSE;
	}

	/*
	 * If the next opcode is a method call, we will "step over" it
	 * by default.
	 */
	if (op->common.aml_opcode == AML_INT_METHODCALL_OP) {

		/* Force no more single stepping while executing called method */

		acpi_gbl_cm_single_step = FALSE;

		/*
		 * Set the breakpoint on/before the call, it will stop execution
		 * as soon as we return
		 */
		walk_state->method_breakpoint = 1;	/* Must be non-zero! */
	}

	status = acpi_db_start_command(walk_state, op);

	/* User commands complete, continue execution of the interrupted method */

	return (status);
}
Example #5
acpi_status acpi_initialize_debugger(void)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_initialize_debugger);

	/* Init globals */

	acpi_gbl_db_buffer = NULL;
	acpi_gbl_db_filename = NULL;
	acpi_gbl_db_output_to_file = FALSE;

	acpi_gbl_db_debug_level = ACPI_LV_VERBOSITY2;
	acpi_gbl_db_console_debug_level = ACPI_NORMAL_DEFAULT | ACPI_LV_TABLES;
	acpi_gbl_db_output_flags = ACPI_DB_CONSOLE_OUTPUT;

	acpi_gbl_db_opt_no_ini_methods = FALSE;

	acpi_gbl_db_buffer = acpi_os_allocate(ACPI_DEBUG_BUFFER_SIZE);
	if (!acpi_gbl_db_buffer) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}
	memset(acpi_gbl_db_buffer, 0, ACPI_DEBUG_BUFFER_SIZE);

	/* Initial scope is the root */

	acpi_gbl_db_scope_buf[0] = AML_ROOT_PREFIX;
	acpi_gbl_db_scope_buf[1] = 0;
	acpi_gbl_db_scope_node = acpi_gbl_root_node;

	/* Initialize user commands loop */

	acpi_gbl_db_terminate_loop = FALSE;

	/*
	 * If configured for multi-thread support, the debug executor runs in
	 * a separate thread so that the front end can be in another address
	 * space, environment, or even another machine.
	 */
	if (acpi_gbl_debugger_configuration & DEBUGGER_MULTI_THREADED) {

		/* These were created with one unit, grab it */

		status = acpi_os_acquire_mutex(acpi_gbl_db_command_complete,
					       ACPI_WAIT_FOREVER);
		if (ACPI_FAILURE(status)) {
			acpi_os_printf("Could not get debugger mutex\n");
			return_ACPI_STATUS(status);
		}

		status = acpi_os_acquire_mutex(acpi_gbl_db_command_ready,
					       ACPI_WAIT_FOREVER);
		if (ACPI_FAILURE(status)) {
			acpi_os_printf("Could not get debugger mutex\n");
			return_ACPI_STATUS(status);
		}

		/* Create the debug execution thread to execute commands */

		acpi_gbl_db_threads_terminated = FALSE;
		status = acpi_os_execute(OSL_DEBUGGER_MAIN_THREAD,
					 acpi_db_execute_thread, NULL);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Could not start debugger thread"));
			acpi_gbl_db_threads_terminated = TRUE;
			return_ACPI_STATUS(status);
		}
	} else {
		acpi_gbl_db_thread_id = acpi_os_get_thread_id();
	}

	return_ACPI_STATUS(AE_OK);
}
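
In the multi-threaded case, acpi_gbl_db_command_ready and acpi_gbl_db_command_complete are created with one unit and drained here, so they behave like binary semaphores that the front end and the executor thread can then use as a signal pair (a release acts as "post", an acquire as "wait"). A rough standalone illustration of that rendezvous with POSIX semaphores (just the idea, not the ACPICA OSL):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t command_ready;	/* front end -> executor: a command is waiting */
static sem_t command_complete;	/* executor -> front end: command finished */

static void *executor_thread(void *arg)
{
	(void)arg;
	sem_wait(&command_ready);	/* block until a command is handed over */
	puts("executor: running command");
	sem_post(&command_complete);	/* tell the front end we are done */
	return NULL;
}

int main(void)
{
	pthread_t tid;

	/* Both start at zero ("already grabbed"), matching the init code above */
	sem_init(&command_ready, 0, 0);
	sem_init(&command_complete, 0, 0);
	pthread_create(&tid, NULL, executor_thread, NULL);

	sem_post(&command_ready);	/* hand a command to the executor */
	sem_wait(&command_complete);	/* wait until it has been processed */
	pthread_join(tid, NULL);
	return 0;
}
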
Example #6
acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
{
	acpi_thread_id this_thread_id;

	ACPI_FUNCTION_NAME(ut_release_mutex);

	this_thread_id = acpi_os_get_thread_id();
	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			  "Thread %lX releasing Mutex [%s]\n",
			  (unsigned long) this_thread_id,
			  acpi_ut_get_mutex_name(mutex_id)));

	if (mutex_id > ACPI_MAX_MUTEX) {
		return (AE_BAD_PARAMETER);
	}

	/*
	 * Mutex must be acquired in order to release it!
	 */
	if (acpi_gbl_mutex_info[mutex_id].thread_id == ACPI_MUTEX_NOT_ACQUIRED) {
		ACPI_ERROR((AE_INFO,
			    "Mutex [%X] is not acquired, cannot release",
			    mutex_id));

		return (AE_NOT_ACQUIRED);
	}
#ifdef ACPI_MUTEX_DEBUG
	{
		u32 i;
		/*
		 * Mutex debug code, for internal debugging only.
		 *
		 * Deadlock prevention.  Check if this thread owns any mutexes of value
		 * greater than this one.  If so, the thread has violated the mutex
		 * ordering rule.  This indicates a coding error somewhere in
		 * the ACPI subsystem code.
		 */
		for (i = mutex_id; i < ACPI_MAX_MUTEX; i++) {
			if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
				if (i == mutex_id) {
					continue;
				}

				ACPI_ERROR((AE_INFO,
					    "Invalid release order: owns [%s], releasing [%s]",
					    acpi_ut_get_mutex_name(i),
					    acpi_ut_get_mutex_name(mutex_id)));

				return (AE_RELEASE_DEADLOCK);
			}
		}
	}
#endif

	/* Mark unlocked FIRST */
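	/*
	 * Clearing the owner id before releasing the OS mutex matters: once the
	 * mutex is released, a waiting thread may acquire it and record its own
	 * thread_id immediately, which a later store here would overwrite.
	 */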

	acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED;

	acpi_os_release_mutex(acpi_gbl_mutex_info[mutex_id].mutex);
	return (AE_OK);
}
Example #7
acpi_status
acpi_ut_release_mutex (
	acpi_mutex_handle               mutex_id)
{
	acpi_status                     status;
	u32                             i;
	u32                             this_thread_id;


	ACPI_FUNCTION_NAME ("ut_release_mutex");


	this_thread_id = acpi_os_get_thread_id ();
	ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX,
		"Thread %X releasing Mutex [%s]\n", this_thread_id,
		acpi_ut_get_mutex_name (mutex_id)));

	if (mutex_id > MAX_MUTEX) {
		return (AE_BAD_PARAMETER);
	}

	/*
	 * Mutex must be acquired in order to release it!
	 */
	if (acpi_gbl_mutex_info[mutex_id].owner_id == ACPI_MUTEX_NOT_ACQUIRED) {
		ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
			"Mutex [%s] is not acquired, cannot release\n",
			acpi_ut_get_mutex_name (mutex_id)));

		return (AE_NOT_ACQUIRED);
	}

	/*
	 * Deadlock prevention.  Check if this thread owns any mutexes of value
	 * greater than this one.  If so, the thread has violated the mutex
	 * ordering rule.  This indicates a coding error somewhere in
	 * the ACPI subsystem code.
	 */
	for (i = mutex_id; i < MAX_MUTEX; i++) {
		if (acpi_gbl_mutex_info[i].owner_id == this_thread_id) {
			if (i == mutex_id) {
				continue;
			}

			ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
				"Invalid release order: owns [%s], releasing [%s]\n",
				acpi_ut_get_mutex_name (i), acpi_ut_get_mutex_name (mutex_id)));

			return (AE_RELEASE_DEADLOCK);
		}
	}

	/* Mark unlocked FIRST */

	acpi_gbl_mutex_info[mutex_id].owner_id = ACPI_MUTEX_NOT_ACQUIRED;

	status = acpi_os_signal_semaphore (acpi_gbl_mutex_info[mutex_id].mutex, 1);

	if (ACPI_FAILURE (status)) {
		ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
			"Thread %X could not release Mutex [%s] %s\n",
			this_thread_id, acpi_ut_get_mutex_name (mutex_id),
			acpi_format_exception (status)));
	}
	else {
		ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Thread %X released Mutex [%s]\n",
			this_thread_id, acpi_ut_get_mutex_name (mutex_id)));
	}

	return (status);
}
Example #8
acpi_status acpi_ev_acquire_global_lock(u16 timeout)
{
	acpi_status status = AE_OK;
	u8 acquired = FALSE;

	ACPI_FUNCTION_TRACE(ev_acquire_global_lock);

	/*
	 * Only one thread can acquire the GL at a time, the global_lock_mutex
	 * enforces this. This interface releases the interpreter if we must wait.
	 */
	status = acpi_ex_system_wait_mutex(
			acpi_gbl_global_lock_mutex->mutex.os_mutex, 0);
	if (status == AE_TIME) {
		if (acpi_ev_global_lock_thread_id == acpi_os_get_thread_id()) {
			acpi_ev_global_lock_acquired++;
			return_ACPI_STATUS(AE_OK);
		}
	}

	if (ACPI_FAILURE(status)) {
		status = acpi_ex_system_wait_mutex(
				acpi_gbl_global_lock_mutex->mutex.os_mutex,
				timeout);
	}
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	acpi_ev_global_lock_thread_id = acpi_os_get_thread_id();
	acpi_ev_global_lock_acquired++;

	/*
	 * Update the global lock handle and check for wraparound. The handle is
	 * only used for the external global lock interfaces, but it is updated
	 * here to properly handle the case where a single thread may acquire the
	 * lock via both the AML and the acpi_acquire_global_lock interfaces. The
	 * handle is therefore updated on the first acquire from a given thread
	 * regardless of where the acquisition request originated.
	 */
	acpi_gbl_global_lock_handle++;
	if (acpi_gbl_global_lock_handle == 0) {
		acpi_gbl_global_lock_handle = 1;
	}

	/*
	 * Make sure that a global lock actually exists. If not, just treat the
	 * lock as a standard mutex.
	 */
	if (!acpi_gbl_global_lock_present) {
		acpi_gbl_global_lock_acquired = TRUE;
		return_ACPI_STATUS(AE_OK);
	}

	/* Attempt to acquire the actual hardware lock */

	ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
	if (acquired) {

		/* We got the lock */

		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "Acquired hardware Global Lock\n"));

		acpi_gbl_global_lock_acquired = TRUE;
		return_ACPI_STATUS(AE_OK);
	}

	/*
	 * Did not get the lock. The pending bit was set above, and we must now
	 * wait until we get the global lock released interrupt.
	 */
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for hardware Global Lock\n"));

	/*
	 * Wait for handshake with the global lock interrupt handler.
	 * This interface releases the interpreter if we must wait.
	 */
	status = acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore,
					       ACPI_WAIT_FOREVER);

	return_ACPI_STATUS(status);
}
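
ACPI_ACQUIRE_GLOBAL_LOCK hides the firmware handshake: the Global Lock is a DWORD in the FACS with a pending bit (bit 0) and an owned bit (bit 1), updated in a compare-and-swap loop as described in the ACPI specification. A rough C11 sketch of that protocol (illustrative only; the real macro is per-architecture assembly, and all names below are local to the sketch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Bit layout of the Global Lock DWORD in the FACS */
#define GL_PENDING	0x01u	/* someone is waiting for the lock */
#define GL_OWNED	0x02u	/* the lock is currently owned */

/*
 * Try to take the firmware Global Lock. Returns true if acquired; false
 * means the owned bit was already set, the pending bit has been set
 * instead, and the caller must wait for the "lock released" event.
 */
static bool sketch_acquire_global_lock(_Atomic uint32_t *global_lock)
{
	uint32_t old_val, new_val;

	old_val = atomic_load(global_lock);
	do {
		new_val = (old_val & ~GL_PENDING) | GL_OWNED;
		if (old_val & GL_OWNED) {
			new_val |= GL_PENDING;	/* already owned: register our interest */
		}
	} while (!atomic_compare_exchange_weak(global_lock, &old_val, new_val));

	/* Acquired only if we did not have to set the pending bit */
	return (new_val & GL_PENDING) == 0;
}
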
Example #9
acpi_status acpi_ev_acquire_global_lock(u16 timeout)
{
	acpi_status status = AE_OK;
	u8 acquired = FALSE;

	ACPI_FUNCTION_TRACE(ev_acquire_global_lock);

	/*
	 * Only one thread can acquire the GL at a time, the global_lock_mutex
	 * enforces this. This interface releases the interpreter if we must wait.
	 */
	status = acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex, 0);
	if (status == AE_TIME) {
		if (acpi_ev_global_lock_thread_id == acpi_os_get_thread_id()) {
			acpi_ev_global_lock_acquired++;
			return_ACPI_STATUS(AE_OK);
		}
	}

	if (ACPI_FAILURE(status)) {
		status = acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex, timeout);
	}
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	acpi_ev_global_lock_thread_id = acpi_os_get_thread_id();
	acpi_ev_global_lock_acquired++;

	/*
	 * Make sure that a global lock actually exists. If not, just treat
	 * the lock as a standard mutex.
	 */
	if (!acpi_gbl_global_lock_present) {
		acpi_gbl_global_lock_acquired = TRUE;
		return_ACPI_STATUS(AE_OK);
	}

	/* Attempt to acquire the actual hardware lock */

	ACPI_ACQUIRE_GLOBAL_LOCK(facs, acquired);
	if (acquired) {

		/* We got the lock */

		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "Acquired hardware Global Lock\n"));

		acpi_gbl_global_lock_acquired = TRUE;
		return_ACPI_STATUS(AE_OK);
	}

	/*
	 * Did not get the lock. The pending bit was set above, and we must now
	 * wait until we get the global lock released interrupt.
	 */
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for hardware Global Lock\n"));

	/*
	 * Wait for handshake with the global lock interrupt handler.
	 * This interface releases the interpreter if we must wait.
	 */
	status = acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore,
					       ACPI_WAIT_FOREVER);

	return_ACPI_STATUS(status);
}
Example #10
acpi_status
acpi_ut_acquire_mutex (
	ACPI_MUTEX_HANDLE       mutex_id)
{
	acpi_status             status;
	u32                     i;
	u32                     this_thread_id;


	PROC_NAME ("Ut_acquire_mutex");


	if (mutex_id > MAX_MTX) {
		return (AE_BAD_PARAMETER);
	}


	this_thread_id = acpi_os_get_thread_id ();

	/*
	 * Deadlock prevention.  Check if this thread owns any mutexes of value
	 * greater than or equal to this one.  If so, the thread has violated
	 * the mutex ordering rule.  This indicates a coding error somewhere in
	 * the ACPI subsystem code.
	 */
	for (i = mutex_id; i < MAX_MTX; i++) {
		if (acpi_gbl_acpi_mutex_info[i].owner_id == this_thread_id) {
			if (i == mutex_id) {
				ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
						"Mutex [%s] already acquired by this thread [%X]\n",
						acpi_ut_get_mutex_name (mutex_id), this_thread_id));

				return (AE_ALREADY_ACQUIRED);
			}

			ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
					"Invalid acquire order: Thread %X owns [%s], wants [%s]\n",
					this_thread_id, acpi_ut_get_mutex_name (i),
					acpi_ut_get_mutex_name (mutex_id)));

			return (AE_ACQUIRE_DEADLOCK);
		}
	}


	ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX,
			 "Thread %X attempting to acquire Mutex [%s]\n",
			 this_thread_id, acpi_ut_get_mutex_name (mutex_id)));

	status = acpi_os_wait_semaphore (acpi_gbl_acpi_mutex_info[mutex_id].mutex,
			   1, WAIT_FOREVER);

	if (ACPI_SUCCESS (status)) {
		ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Thread %X acquired Mutex [%s]\n",
				 this_thread_id, acpi_ut_get_mutex_name (mutex_id)));

		acpi_gbl_acpi_mutex_info[mutex_id].use_count++;
		acpi_gbl_acpi_mutex_info[mutex_id].owner_id = this_thread_id;
	}

	else {
		ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Thread %X could not acquire Mutex [%s] %s\n",
				 this_thread_id, acpi_ut_get_mutex_name (mutex_id),
				 acpi_format_exception (status)));
	}

	return (status);
}