Example #1
acpi_status
acpi_ex_acquire_mutex_object(u16 timeout,
			     union acpi_operand_object *obj_desc,
			     acpi_thread_id thread_id)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE_PTR(ex_acquire_mutex_object, obj_desc);

	if (!obj_desc) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/* Support for multiple acquires by the owning thread */

	if (obj_desc->mutex.thread_id == thread_id) {
		/*
		 * The mutex is already owned by this thread, just increment the
		 * acquisition depth
		 */
		obj_desc->mutex.acquisition_depth++;
		return_ACPI_STATUS(AE_OK);
	}

	/* Acquire the mutex, wait if necessary. Special case for Global Lock */

	if (obj_desc == acpi_gbl_global_lock_mutex) {
		status = acpi_ev_acquire_global_lock(timeout);
	} else {
		status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex,
						   timeout);
	}

	if (ACPI_FAILURE(status)) {

		/* Includes failure from a timeout on the mutex */

		return_ACPI_STATUS(status);
	}

	/* Acquired the mutex: update mutex object */

	obj_desc->mutex.thread_id = thread_id;
	obj_desc->mutex.acquisition_depth = 1;
	obj_desc->mutex.original_sync_level = 0;
	obj_desc->mutex.owner_thread = NULL;	/* Used only for AML Acquire() */

	return_ACPI_STATUS(AE_OK);
}
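The key detail in this variant is the re-entrancy handling: the OS mutex is taken only on the first acquire, and nested acquires by the owning thread just bump acquisition_depth (the matching release path decrements it and only hands the OS mutex back at depth zero). Below is a minimal, self-contained sketch of that counting scheme, using illustrative toy types rather than the ACPICA ones.

/* Toy sketch of recursive-ownership counting; names are illustrative only */

#include <stdbool.h>
#include <stdint.h>

struct toy_mutex {
	uint64_t owner_thread_id;	/* 0 means "not owned" */
	uint16_t acquisition_depth;	/* nesting count for the owner */
};

static bool toy_mutex_acquire(struct toy_mutex *m, uint64_t thread_id)
{
	if (m->owner_thread_id == thread_id) {
		m->acquisition_depth++;	/* nested acquire, never blocks */
		return true;
	}
	if (m->owner_thread_id != 0) {
		return false;	/* would block on the real OS mutex */
	}
	m->owner_thread_id = thread_id;	/* first acquire by this thread */
	m->acquisition_depth = 1;
	return true;
}

static void toy_mutex_release(struct toy_mutex *m)
{
	if (--m->acquisition_depth == 0) {
		m->owner_thread_id = 0;	/* last release frees the mutex */
	}
}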
Example #2
acpi_status
acpi_ex_system_acquire_mutex(union acpi_operand_object *time_desc,
			     union acpi_operand_object *obj_desc)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_PTR(ex_system_acquire_mutex, obj_desc);

	if (!obj_desc) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/* Support for the _GL_ Mutex object -- go get the global lock */

	if (obj_desc->mutex.os_mutex == ACPI_GLOBAL_LOCK) {
		status =
		    acpi_ev_acquire_global_lock((u16) time_desc->integer.value);
		return_ACPI_STATUS(status);
	}

	status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex,
					   (u16) time_desc->integer.value);
	return_ACPI_STATUS(status);
}
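Both examples above funnel into acpi_ex_system_wait_mutex. Its important property, referenced in the comments ("this interface releases the interpreter if we must wait"), is that the global interpreter lock is dropped only when the acquire would actually block. A rough sketch of that pattern follows; it is a reconstruction under assumptions, not a verbatim copy of exsystem.c.

/* Sketch only: try without blocking, and release the interpreter lock
 * around the real wait. acpi_os_acquire_mutex, acpi_ex_exit_interpreter
 * and acpi_ex_enter_interpreter are real ACPICA interfaces; the exact
 * body of acpi_ex_system_wait_mutex may differ between versions. */

static acpi_status wait_mutex_sketch(acpi_mutex mutex, u16 timeout)
{
	acpi_status status;

	/* Fast path: uncontended, the interpreter lock is never released */

	status = acpi_os_acquire_mutex(mutex, ACPI_DO_NOT_WAIT);
	if (ACPI_SUCCESS(status)) {
		return (status);
	}

	if (status == AE_TIME) {

		/* Must block: let other threads run AML in the meantime */

		acpi_ex_exit_interpreter();
		status = acpi_os_acquire_mutex(mutex, timeout);
		acpi_ex_enter_interpreter();
	}

	return (status);
}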
Example #3
acpi_status
acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
			       union acpi_operand_object *obj_desc,
			       struct acpi_walk_state *walk_state)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_PTR(ds_begin_method_execution, method_node);

	if (!method_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	/* Prevent wraparound of thread count */

	if (obj_desc->method.thread_count == ACPI_UINT8_MAX) {
		ACPI_ERROR((AE_INFO,
			    "Method reached maximum reentrancy limit (255)"));
		return_ACPI_STATUS(AE_AML_METHOD_LIMIT);
	}

	/*
	 * If this method is serialized, we need to acquire the method mutex.
	 */
	if (obj_desc->method.method_flags & AML_METHOD_SERIALIZED) {
		/*
		 * Create a mutex for the method if it is defined to be Serialized
		 * and a mutex has not already been created. We defer the mutex creation
		 * until a method is actually executed, to minimize the object count
		 */
		if (!obj_desc->method.mutex) {
			status = acpi_ds_create_method_mutex(obj_desc);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}
		}

		/*
		 * The current_sync_level (per-thread) must be less than or equal to
		 * the sync level of the method. This mechanism provides some
		 * deadlock prevention
		 *
		 * Top-level method invocation has no walk state at this point
		 */
		if (walk_state &&
		    (walk_state->thread->current_sync_level >
		     obj_desc->method.mutex->mutex.sync_level)) {
			ACPI_ERROR((AE_INFO,
				    "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%d)",
				    acpi_ut_get_node_name(method_node),
				    walk_state->thread->current_sync_level));

			return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
		}

		/*
		 * Obtain the method mutex if necessary. Do not acquire mutex for a
		 * recursive call.
		 */
		if (acpi_os_get_thread_id() !=
		    obj_desc->method.mutex->mutex.owner_thread_id) {
			/*
			 * Acquire the method mutex. This releases the interpreter if we
			 * block (and reacquires it before it returns)
			 */
			status =
			    acpi_ex_system_wait_mutex(obj_desc->method.mutex->
						      mutex.os_mutex,
						      ACPI_WAIT_FOREVER);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}

			/* Update the mutex and walk info and save the original sync_level */
			obj_desc->method.mutex->mutex.owner_thread_id =
				acpi_os_get_thread_id();

			if (walk_state) {
				obj_desc->method.mutex->mutex.original_sync_level =
				    walk_state->thread->current_sync_level;

				walk_state->thread->current_sync_level =
				    obj_desc->method.sync_level;
			} else {
				obj_desc->method.mutex->mutex.original_sync_level =
				    obj_desc->method.mutex->mutex.sync_level;
			}
		}

		/* Always increase acquisition depth */

		obj_desc->method.mutex->mutex.acquisition_depth++;
	}

	/*
	 * Allocate an Owner ID for this method, only if this is the first thread
	 * to begin concurrent execution. We only need one owner_id, even if the
	 * method is invoked recursively.
	 */
	if (!obj_desc->method.owner_id) {
		status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
		if (ACPI_FAILURE(status)) {
			goto cleanup;
		}
	}

	/*
	 * Increment the method parse tree thread count since it has been
	 * reentered one more time (even if it is the same thread)
	 */
	obj_desc->method.thread_count++;
	return_ACPI_STATUS(status);

cleanup:
	/* On error, must release the method mutex (if present) */

	if (obj_desc->method.mutex) {
		acpi_os_release_mutex(obj_desc->method.mutex->mutex.os_mutex);
	}
	return_ACPI_STATUS(status);
}
Example #4
acpi_status acpi_ev_acquire_global_lock(u16 timeout)
{
	acpi_cpu_flags flags;
	acpi_status status;
	u8 acquired = FALSE;

	ACPI_FUNCTION_TRACE(ev_acquire_global_lock);

	/*
	 * Only one thread can acquire the GL at a time, the global_lock_mutex
	 * enforces this. This interface releases the interpreter if we must wait.
	 */
	status =
	    acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex,
				      timeout);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * Update the global lock handle and check for wraparound. The handle is
	 * only used for the external global lock interfaces, but it is updated
	 * here to properly handle the case where a single thread may acquire the
	 * lock via both the AML and the acpi_acquire_global_lock interfaces. The
	 * handle is therefore updated on the first acquire from a given thread
	 * regardless of where the acquisition request originated.
	 */
	acpi_gbl_global_lock_handle++;
	if (acpi_gbl_global_lock_handle == 0) {
		acpi_gbl_global_lock_handle = 1;
	}

	/*
	 * Make sure that a global lock actually exists. If not, just
	 * treat the lock as a standard mutex.
	 */
	if (!acpi_gbl_global_lock_present) {
		acpi_gbl_global_lock_acquired = TRUE;
		return_ACPI_STATUS(AE_OK);
	}

	flags = acpi_os_acquire_lock(acpi_gbl_global_lock_pending_lock);

	do {

		/* Attempt to acquire the actual hardware lock */

		ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
		if (acquired) {
			acpi_gbl_global_lock_acquired = TRUE;
			ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
					  "Acquired hardware Global Lock\n"));
			break;
		}

		/*
		 * Did not get the lock. The pending bit was set above, and
		 * we must now wait until we receive the global lock
		 * released interrupt.
		 */
		acpi_gbl_global_lock_pending = TRUE;
		acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags);

		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "Waiting for hardware Global Lock\n"));

		/*
		 * Wait for handshake with the global lock interrupt handler.
		 * This interface releases the interpreter if we must wait.
		 */
		status =
		    acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore,
						  ACPI_WAIT_FOREVER);

		flags = acpi_os_acquire_lock(acpi_gbl_global_lock_pending_lock);

	} while (ACPI_SUCCESS(status));

	acpi_gbl_global_lock_pending = FALSE;
	acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags);

	return_ACPI_STATUS(status);
}
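The interesting part of this function is the loop around ACPI_ACQUIRE_GLOBAL_LOCK. That macro is platform-specific (often inline assembly), but the protocol it implements is fixed by the ACPI specification: the Global Lock is a dword in the FACS with a Pending bit (bit 0) and an Owned bit (bit 1). The OS atomically tries to set Owned; if the firmware already owns the lock, it sets Pending instead and waits for the lock-released interrupt that drives the semaphore handshake above. A hedged C sketch of that compare-and-swap step follows, with illustrative names rather than the ACPICA macro.

#include <stdint.h>

#define GLOCK_PENDING	0x01u	/* a waiter wants the release interrupt */
#define GLOCK_OWNED	0x02u	/* lock currently held (OS or firmware) */

/* Returns nonzero if the lock was acquired, zero if we set Pending and
 * must wait for the Global Lock release interrupt. */
static int try_acquire_global_lock_sketch(uint32_t *facs_lock)
{
	uint32_t old_val, new_val;

	do {
		old_val = *facs_lock;

		/* Always claim ownership; if already owned, request the
		 * release interrupt by setting Pending instead. */
		new_val = (old_val & ~GLOCK_PENDING) | GLOCK_OWNED;
		if (old_val & GLOCK_OWNED) {
			new_val |= GLOCK_PENDING;
		}
	} while (!__atomic_compare_exchange_n(facs_lock, &old_val, new_val,
					      0, __ATOMIC_SEQ_CST,
					      __ATOMIC_SEQ_CST));

	/* Acquired only if we did not have to set Pending */

	return (new_val & GLOCK_PENDING) == 0;
}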
Example #5
acpi_status
acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
		      union acpi_operand_object *obj_desc,
		      struct acpi_walk_state *walk_state)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE_PTR(ex_acquire_mutex, obj_desc);

	if (!obj_desc) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/* Sanity check: we must have a valid thread ID */

	if (!walk_state->thread) {
		ACPI_ERROR((AE_INFO,
			    "Cannot acquire Mutex [%4.4s], null thread info",
			    acpi_ut_get_node_name(obj_desc->mutex.node)));
		return_ACPI_STATUS(AE_AML_INTERNAL);
	}

	/*
	 * Current Sync must be less than or equal to the sync level of the
	 * mutex. This mechanism provides some deadlock prevention
	 */
	if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) {
		ACPI_ERROR((AE_INFO,
			    "Cannot acquire Mutex [%4.4s], current SyncLevel is too large (%d)",
			    acpi_ut_get_node_name(obj_desc->mutex.node),
			    walk_state->thread->current_sync_level));
		return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
	}

	/* Support for multiple acquires by the owning thread */

	if (obj_desc->mutex.owner_thread_id == acpi_os_get_thread_id()) {
		/*
		 * The mutex is already owned by this thread, just increment the
		 * acquisition depth
		 */
		obj_desc->mutex.acquisition_depth++;
		return_ACPI_STATUS(AE_OK);
	}

	/* Acquire the mutex, wait if necessary. Special case for Global Lock */

	if (obj_desc->mutex.os_mutex == acpi_gbl_global_lock_mutex) {
		status =
		    acpi_ev_acquire_global_lock((u16) time_desc->integer.value);
	} else {
		status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex,
						   (u16) time_desc->integer.value);
	}

	if (ACPI_FAILURE(status)) {

		/* Includes failure from a timeout on time_desc */

		return_ACPI_STATUS(status);
	}

	/* Have the mutex: update mutex and walk info and save the sync_level */

	obj_desc->mutex.owner_thread_id = acpi_os_get_thread_id();
	obj_desc->mutex.acquisition_depth = 1;
	obj_desc->mutex.original_sync_level =
	    walk_state->thread->current_sync_level;

	walk_state->thread->current_sync_level = obj_desc->mutex.sync_level;

	/* Link the mutex to the current thread for force-unlock at method exit */

	acpi_ex_link_mutex(obj_desc, walk_state->thread);
	return_ACPI_STATUS(AE_OK);
}
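The SyncLevel check at the top of this function is the AML-level deadlock-avoidance rule from the ACPI specification: each Mutex is created with a fixed SyncLevel (0-15), a thread may only acquire mutexes whose SyncLevel is greater than or equal to its current level, and a successful acquire raises the thread to the mutex's level (the old level is saved and restored on Release). A compact sketch of that bookkeeping, with illustrative types rather than the ACPICA structures:

#include <stdbool.h>
#include <stdint.h>

struct aml_thread_sketch {
	uint8_t current_sync_level;	/* level of the last mutex acquired */
};

struct aml_mutex_sketch {
	uint8_t sync_level;		/* 0-15, fixed when the Mutex is defined */
	uint8_t original_sync_level;	/* saved thread level, restored on Release */
};

static bool sync_level_acquire_allowed(const struct aml_thread_sketch *thread,
				       const struct aml_mutex_sketch *mutex)
{
	/* Same ordering rule as the AE_AML_MUTEX_ORDER check above */
	return thread->current_sync_level <= mutex->sync_level;
}

static void sync_level_on_acquired(struct aml_thread_sketch *thread,
				   struct aml_mutex_sketch *mutex)
{
	/* Save the old level, then raise the thread to the mutex level */
	mutex->original_sync_level = thread->current_sync_level;
	thread->current_sync_level = mutex->sync_level;
}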
Example #6
acpi_status acpi_ev_acquire_global_lock(u16 timeout)
{
	acpi_status status = AE_OK;
	u8 acquired = FALSE;

	ACPI_FUNCTION_TRACE(ev_acquire_global_lock);

	/*
	 * Only one thread can acquire the GL at a time, the global_lock_mutex
	 * enforces this. This interface releases the interpreter if we must wait.
	 */
	status = acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex, 0);
	if (status == AE_TIME) {
		if (acpi_ev_global_lock_thread_id == acpi_os_get_thread_id()) {
			acpi_ev_global_lock_acquired++;
			return_ACPI_STATUS(AE_OK);
		}
	}

	if (ACPI_FAILURE(status)) {
		status = acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex, timeout);
	}
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	acpi_ev_global_lock_thread_id = acpi_os_get_thread_id();
	acpi_ev_global_lock_acquired++;

	/*
	 * Make sure that a global lock actually exists. If not, just treat
	 * the lock as a standard mutex.
	 */
	if (!acpi_gbl_global_lock_present) {
		acpi_gbl_global_lock_acquired = TRUE;
		return_ACPI_STATUS(AE_OK);
	}

	/* Attempt to acquire the actual hardware lock */

	ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
	if (acquired) {

		/* We got the lock */

		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "Acquired hardware Global Lock\n"));

		acpi_gbl_global_lock_acquired = TRUE;
		return_ACPI_STATUS(AE_OK);
	}

	/*
	 * Did not get the lock. The pending bit was set above, and we must now
	 * wait until we get the global lock released interrupt.
	 */
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for hardware Global Lock\n"));

	/*
	 * Wait for handshake with the global lock interrupt handler.
	 * This interface releases the interpreter if we must wait.
	 */
	status = acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore,
					       ACPI_WAIT_FOREVER);

	return_ACPI_STATUS(status);
}