/*
 * acpi_acquire_global_lock - Acquire the ACPI Global Lock.
 *
 * timeout: maximum time (ms) the caller is willing to wait
 * handle:  out parameter; receives the lock handle on success
 *
 * Returns AE_BAD_PARAMETER for a NULL handle pointer, otherwise the
 * status of the underlying global-lock mutex acquisition.
 */
acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
{
    acpi_status status;

    if (!handle) {
        return (AE_BAD_PARAMETER);
    }

    /* Hold the interpreter lock while manipulating the global lock state */

    acpi_ex_enter_interpreter();

    status = acpi_ex_acquire_mutex_object(timeout,
                                          acpi_gbl_global_lock_mutex,
                                          acpi_os_get_thread_id());
    if (ACPI_FAILURE(status)) {
        acpi_ex_exit_interpreter();
        return (status);
    }

    /* Hand the current global lock handle back to the caller */

    *handle = acpi_gbl_global_lock_handle;

    acpi_ex_exit_interpreter();
    return (status);
}
/*
 * acpi_ex_system_wait_mutex - Acquire an OS mutex, releasing the
 * interpreter lock if the acquisition must block.
 *
 * mutex:   the OS mutex to acquire
 * timeout: maximum wait time passed to the OS layer
 *
 * Returns the status of the (possibly blocking) mutex acquisition.
 */
acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
{
    acpi_status status;

    ACPI_FUNCTION_TRACE(ex_system_wait_mutex);

    /* First try a non-blocking acquisition */

    status = acpi_os_acquire_mutex(mutex, ACPI_DO_NOT_WAIT);
    if (status != AE_TIME) {

        /* Either we got it immediately, or a hard error occurred */

        return_ACPI_STATUS(status);
    }

    /* Mutex is contended; we must block, so unlock the interpreter */

    acpi_ex_exit_interpreter();

    status = acpi_os_acquire_mutex(mutex, timeout);

    ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
                      "*** Thread awake after blocking, %s\n",
                      acpi_format_exception(status)));

    /* Reacquire the interpreter */

    acpi_ex_enter_interpreter();

    return_ACPI_STATUS(status);
}
/*
 * acpi_acquire_global_lock - Acquire the ACPI Global Lock (legacy form).
 *
 * timeout: maximum time the caller is willing to wait
 * handle:  out parameter; receives a new lock handle on success
 *
 * Returns AE_BAD_PARAMETER for a NULL handle pointer, otherwise the
 * status of the event-manager lock acquisition.
 */
acpi_status acpi_acquire_global_lock(u16 timeout, u32 *handle)
{
    acpi_status status;

    if (!handle) {
        return (AE_BAD_PARAMETER);
    }

    /* The interpreter must be locked around the acquisition */

    status = acpi_ex_enter_interpreter();
    if (ACPI_FAILURE(status)) {
        return (status);
    }

    status = acpi_ev_acquire_global_lock(timeout);
    acpi_ex_exit_interpreter();

    if (ACPI_FAILURE(status)) {
        return (status);
    }

    /* A fresh handle is issued for every successful acquisition */

    *handle = ++acpi_gbl_global_lock_handle;
    return (status);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_acquire_global_lock
 *
 * PARAMETERS:  Timeout             - Maximum time to wait for the lock
 *              Handle              - Where the lock handle is returned
 *                                    (if acquired)
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Acquire the ACPI Global Lock.
 *
 * Note: A thread that already owns the global lock may acquire it again;
 * externally the lock behaves exactly like an AML mutex. The first acquire
 * produces a new handle, and every subsequent acquire by the same thread
 * returns that same handle.
 *
 ******************************************************************************/
acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
{
    acpi_status status;

    if (!handle) {
        return (AE_BAD_PARAMETER);
    }

    /* Serialize against the interpreter to avoid races on lock state */

    acpi_ex_enter_interpreter();

    status = acpi_ex_acquire_mutex_object(timeout,
                                          acpi_gbl_global_lock_mutex,
                                          acpi_os_get_thread_id());
    if (ACPI_FAILURE(status)) {
        acpi_ex_exit_interpreter();
        return (status);
    }

    /* Handle was updated by acpi_ev_acquire_global_lock; return it */

    *handle = acpi_gbl_global_lock_handle;

    acpi_ex_exit_interpreter();
    return (status);
}
acpi_status acpi_ds_auto_serialize_method(struct acpi_namespace_node *node, union acpi_operand_object *obj_desc) { acpi_status status; union acpi_parse_object *op = NULL; struct acpi_walk_state *walk_state; ACPI_FUNCTION_TRACE_PTR(ds_auto_serialize_method, node); ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Method auto-serialization parse [%4.4s] %p\n", acpi_ut_get_node_name(node), node)); acpi_ex_enter_interpreter(); /* Create/Init a root op for the method parse tree */ op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start); if (!op) { status = AE_NO_MEMORY; goto unlock; } acpi_ps_set_name(op, node->name.integer); op->common.node = node; /* Create and initialize a new walk state */ walk_state = acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL); if (!walk_state) { acpi_ps_free_op(op); status = AE_NO_MEMORY; goto unlock; } status = acpi_ds_init_aml_walk(walk_state, op, node, obj_desc->method.aml_start, obj_desc->method.aml_length, NULL, 0); if (ACPI_FAILURE(status)) { acpi_ds_delete_walk_state(walk_state); acpi_ps_free_op(op); return_ACPI_STATUS(status); } walk_state->descending_callback = acpi_ds_detect_named_opcodes; /* Parse the method, scan for creation of named objects */ status = acpi_ps_parse_aml(walk_state); acpi_ps_delete_parse_tree(op); unlock: acpi_ex_exit_interpreter(); return_ACPI_STATUS(status); }
/*
 * acpi_ns_execute_control_method - Execute a control method via the
 * interpreter.
 *
 * method_node:     namespace node of the method to execute
 * params:          list of operand objects to pass as arguments
 * return_obj_desc: where the method's return object is stored
 *
 * Returns AE_NULL_OBJECT if the node has no attached method object,
 * otherwise the status of interpreter entry or of acpi_psx_execute.
 *
 * NOTE(review): this function is entered with the namespace mutex
 * (ACPI_MTX_NAMESPACE) already held by the caller — both exit paths
 * release it. Confirm against callers before reuse.
 */
acpi_status
acpi_ns_execute_control_method (
    acpi_namespace_node     *method_node,
    acpi_operand_object     **params,
    acpi_operand_object     **return_obj_desc)
{
    acpi_status             status;
    acpi_operand_object     *obj_desc;

    FUNCTION_TRACE ("Ns_execute_control_method");

    /* Verify that there is a method associated with this object */

    obj_desc = acpi_ns_get_attached_object (method_node);
    if (!obj_desc) {
        ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "No attached method object\n"));

        /* Release the caller-held namespace lock before bailing out */

        acpi_ut_release_mutex (ACPI_MTX_NAMESPACE);
        return_ACPI_STATUS (AE_NULL_OBJECT);
    }

    ACPI_DEBUG_PRINT ((ACPI_DB_INFO,
        "Control method at Offset %p Length %x]\n",
        obj_desc->method.aml_start + 1, obj_desc->method.aml_length - 1));

    DUMP_PATHNAME (method_node, "Ns_execute_control_method: Executing",
        ACPI_LV_NAMES, _COMPONENT);

    ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "At offset %p\n",
        obj_desc->method.aml_start + 1));

    /*
     * Unlock the namespace before execution. This allows namespace access
     * via the external Acpi* interfaces while a method is being executed.
     * However, any namespace deletion must acquire both the namespace and
     * interpreter locks to ensure that no thread is using the portion of the
     * namespace that is being deleted.
     */
    acpi_ut_release_mutex (ACPI_MTX_NAMESPACE);

    /*
     * Execute the method via the interpreter. The interpreter is locked
     * here before calling into the AML parser
     */
    status = acpi_ex_enter_interpreter ();
    if (ACPI_FAILURE (status)) {
        return_ACPI_STATUS (status);
    }

    status = acpi_psx_execute (method_node, params, return_obj_desc);
    acpi_ex_exit_interpreter ();

    return_ACPI_STATUS (status);
}
/*
 * acpi_ds_method_error - Post-process an exception raised during method
 * execution: give the global exception handler a chance to remap it, clear
 * any implicit return, and dump diagnostics on failure.
 *
 * status:     the exception being processed
 * walk_state: current walk state (supplies opcode, AML position, nodes)
 *
 * Returns the (possibly remapped) status.
 */
acpi_status
acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
{
    u32 aml_offset;
    acpi_name name = 0;

    ACPI_FUNCTION_ENTRY();

    /* AE_OK and the control exception codes pass straight through */

    if (ACPI_SUCCESS(status) || (status & AE_CODE_CONTROL)) {
        return (status);
    }

    if (acpi_gbl_exception_handler) {

        /* Release the interpreter so the handler may execute methods */

        acpi_ex_exit_interpreter();

        /*
         * The handler may map the exception code to anything it wants,
         * including AE_OK, in which case the method is not aborted.
         */
        aml_offset = (u32)ACPI_PTR_DIFF(walk_state->aml,
                                        walk_state->parser_state.aml_start);

        name = walk_state->method_node ?
            walk_state->method_node->name.integer :
            (walk_state->deferred_node ?
             walk_state->deferred_node->name.integer : 0);

        status = acpi_gbl_exception_handler(status, name,
                                            walk_state->opcode,
                                            aml_offset, NULL);
        acpi_ex_enter_interpreter();
    }

    acpi_ds_clear_implicit_return(walk_state);

    if (ACPI_FAILURE(status)) {
        acpi_ds_dump_method_stack(status, walk_state, walk_state->op);

        /* Show method locals/args when the debugger is compiled in */
#ifdef ACPI_DEBUGGER
        acpi_db_dump_method_info(status, walk_state);
#endif
    }

    return (status);
}
/*
 * acpi_ds_method_error - Post-process an exception raised during method
 * execution (older variant): invoke the global exception handler if one is
 * installed, clear any implicit return, and dump info on failure.
 *
 * status:     the exception being processed
 * walk_state: current walk state
 *
 * Returns the (possibly remapped) status.
 */
acpi_status
acpi_ds_method_error(acpi_status status, struct acpi_walk_state * walk_state)
{
    acpi_name method_name;

    ACPI_FUNCTION_ENTRY();

    /* AE_OK and the control exception codes are not real errors */

    if (ACPI_SUCCESS(status) || (status & AE_CODE_CONTROL)) {
        return (status);
    }

    if (acpi_gbl_exception_handler) {

        /* Unlock the interpreter; the handler may execute methods */

        acpi_ex_exit_interpreter();

        /*
         * The handler can map the exception to anything, including
         * AE_OK, in which case the method is not aborted.
         */
        method_name = walk_state->method_node ?
            walk_state->method_node->name.integer : 0;

        status = acpi_gbl_exception_handler(status, method_name,
                                            walk_state->opcode,
                                            walk_state->aml_offset, NULL);

        acpi_ex_enter_interpreter();
    }

    acpi_ds_clear_implicit_return(walk_state);

#ifdef ACPI_DISASSEMBLER
    if (ACPI_FAILURE(status)) {

        /* Display method locals/args if disassembler is present */

        acpi_dm_dump_method_info(status, walk_state, walk_state->op);
    }
#endif

    return (status);
}
/*
 * acpi_ex_system_do_suspend - Sleep for the requested duration, releasing
 * the interpreter lock around the blocking call.
 *
 * how_long: sleep duration handed to acpi_os_sleep
 *
 * Returns the status of reacquiring the interpreter.
 */
acpi_status acpi_ex_system_do_suspend(acpi_integer how_long)
{
    ACPI_FUNCTION_ENTRY();

    /* This thread is about to block; release the interpreter first */

    acpi_ex_exit_interpreter();

    acpi_os_sleep(how_long);

    /* Relock the interpreter and propagate the result to the caller */

    return (acpi_ex_enter_interpreter());
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_acquire_global_lock
 *
 * PARAMETERS:  Timeout             - Maximum time to wait for the lock
 *              Handle              - Where the lock handle is returned
 *                                    (if acquired)
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Acquire the ACPI Global Lock and issue a new handle for it.
 *
 ******************************************************************************/
acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
{
    acpi_status status;

    if (!handle) {
        return (AE_BAD_PARAMETER);
    }

    /* Interpreter lock prevents races on the global lock state */

    acpi_ex_enter_interpreter();
    status = acpi_ev_acquire_global_lock(timeout);
    acpi_ex_exit_interpreter();

    if (ACPI_FAILURE(status)) {
        return (status);
    }

    /* Each successful acquisition produces a fresh handle */

    *handle = ++acpi_gbl_global_lock_handle;
    return (status);
}
/*
 * acpi_acquire_global_lock - Acquire the ACPI Global Lock (oldest variant,
 * no timeout or handle at this interface level).
 *
 * Returns the status of interpreter entry or of the lock acquisition.
 */
acpi_status
acpi_acquire_global_lock (
    void)
{
    acpi_status status;

    /* Interpreter must be locked before touching the global lock */

    status = acpi_ex_enter_interpreter ();
    if (ACPI_FAILURE (status)) {
        return (status);
    }

    /*
     * TBD: [Restructure] the internal interface should eventually take a
     * timeout parameter, and perhaps an INTERPRETER_LOCKED indication.
     */
    status = acpi_ev_acquire_global_lock ();

    acpi_ex_exit_interpreter ();
    return (status);
}
/*
 * acpi_ex_system_wait_semaphore - Wait on an OS semaphore, releasing the
 * interpreter lock if the wait must block.
 *
 * semaphore: the OS semaphore to wait on
 * timeout:   maximum wait time passed to the OS layer
 *
 * Returns the semaphore-wait status; if the interpreter cannot be
 * reacquired afterwards, that (fatal) status is returned instead.
 */
acpi_status
acpi_ex_system_wait_semaphore (
    acpi_handle semaphore,
    u16 timeout)
{
    acpi_status status;
    acpi_status status2;

    ACPI_FUNCTION_TRACE ("ex_system_wait_semaphore");

    /* First try a non-blocking wait */

    status = acpi_os_wait_semaphore (semaphore, 1, 0);
    if (status != AE_TIME) {

        /* Either acquired immediately, or a hard error occurred */

        return_ACPI_STATUS (status);
    }

    /* Semaphore is busy; we must block, so unlock the interpreter */

    acpi_ex_exit_interpreter ();

    status = acpi_os_wait_semaphore (semaphore, 1, timeout);

    ACPI_DEBUG_PRINT ((ACPI_DB_EXEC,
        "*** Thread awake after blocking, %s\n",
        acpi_format_exception (status)));

    /* Reacquire the interpreter */

    status2 = acpi_ex_enter_interpreter ();
    if (ACPI_FAILURE (status2)) {

        /* Report fatal error, could not acquire interpreter */

        return_ACPI_STATUS (status2);
    }

    return_ACPI_STATUS (status);
}
/*
 * acpi_ex_system_do_sleep - Sleep for the requested duration (clamped),
 * releasing the interpreter lock around the blocking call.
 *
 * how_long: requested sleep duration; clamped to ACPI_MAX_SLEEP
 *
 * Always returns AE_OK.
 */
acpi_status acpi_ex_system_do_sleep(u64 how_long)
{
    ACPI_FUNCTION_ENTRY();

    /* The sleep will block; drop the interpreter lock first */

    acpi_ex_exit_interpreter();

    /*
     * Clamp the sleep time for compatibility with other ACPI
     * implementations and to prevent accidental deep sleeps.
     */
    how_long = (how_long > ACPI_MAX_SLEEP) ? ACPI_MAX_SLEEP : how_long;

    acpi_os_sleep(how_long);

    /* Relock the interpreter before resuming AML execution */

    acpi_ex_enter_interpreter();
    return (AE_OK);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_evaluate_object
 *
 * PARAMETERS:  handle              - Object handle (optional)
 *              pathname            - Object pathname (optional)
 *              external_params     - List of parameters to pass to method,
 *                                    terminated by NULL. May be NULL
 *                                    if no parameters are being passed.
 *              return_buffer       - Where to put method's return value (if
 *                                    any). If NULL, no value is returned.
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Find and evaluate the given object, passing the given
 *              parameters if necessary. One of "Handle" or "Pathname" must
 *              be valid (non-null)
 *
 ******************************************************************************/
acpi_status
acpi_evaluate_object(acpi_handle handle,
                     acpi_string pathname,
                     struct acpi_object_list *external_params,
                     struct acpi_buffer *return_buffer)
{
    acpi_status status;
    struct acpi_evaluate_info *info;
    acpi_size buffer_space_needed;
    u32 i;

    ACPI_FUNCTION_TRACE(acpi_evaluate_object);

    /* Allocate and initialize the evaluation information block */

    info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
    if (!info) {
        return_ACPI_STATUS(AE_NO_MEMORY);
    }

    info->pathname = pathname;

    /* Convert and validate the device handle */

    info->prefix_node = acpi_ns_validate_handle(handle);
    if (!info->prefix_node) {
        status = AE_BAD_PARAMETER;
        goto cleanup;
    }

    /*
     * If there are parameters to be passed to a control method, the external
     * objects must all be converted to internal objects
     */
    if (external_params && external_params->count) {
        /*
         * Allocate a new parameter block for the internal objects
         * Add 1 to count to allow for null terminated internal list
         */
        info->parameters = ACPI_ALLOCATE_ZEROED(((acpi_size)
                                                 external_params->count +
                                                 1) * sizeof(void *));
        if (!info->parameters) {
            status = AE_NO_MEMORY;
            goto cleanup;
        }

        /* Convert each external object in the list to an internal object */

        for (i = 0; i < external_params->count; i++) {
            status =
                acpi_ut_copy_eobject_to_iobject(&external_params->pointer[i],
                                                &info->parameters[i]);
            if (ACPI_FAILURE(status)) {
                goto cleanup;
            }
        }

        /* The internal parameter list is NULL-terminated */

        info->parameters[external_params->count] = NULL;
    }

    /*
     * Three major cases:
     * 1) Fully qualified pathname
     * 2) No handle, not fully qualified pathname (error)
     * 3) Valid handle
     */
    if ((pathname) && (ACPI_IS_ROOT_PREFIX(pathname[0]))) {

        /* The path is fully qualified, just evaluate by name */

        info->prefix_node = NULL;
        status = acpi_ns_evaluate(info);
    } else if (!handle) {
        /*
         * A handle is optional iff a fully qualified pathname is specified.
         * Since we've already handled fully qualified names above, this is
         * an error
         */
        if (!pathname) {
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                              "Both Handle and Pathname are NULL"));
        } else {
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                              "Null Handle with relative pathname [%s]",
                              pathname));
        }

        status = AE_BAD_PARAMETER;
    } else {
        /* We have a namespace node and a possible relative path */

        status = acpi_ns_evaluate(info);
    }

    /*
     * If we are expecting a return value, and all went well above,
     * copy the return value to an external object.
     */
    if (return_buffer) {
        if (!info->return_object) {
            return_buffer->length = 0;
        } else {
            if (ACPI_GET_DESCRIPTOR_TYPE(info->return_object) ==
                ACPI_DESC_TYPE_NAMED) {
                /*
                 * If we received a NS Node as a return object, this means that
                 * the object we are evaluating has nothing interesting to
                 * return (such as a mutex, etc.) We return an error because
                 * these types are essentially unsupported by this interface.
                 * We don't check up front because this makes it easier to add
                 * support for various types at a later date if necessary.
                 */
                status = AE_TYPE;
                info->return_object = NULL; /* No need to delete a NS Node */
                return_buffer->length = 0;
            }

            if (ACPI_SUCCESS(status)) {

                /* Dereference Index and ref_of references */

                acpi_ns_resolve_references(info);

                /* Get the size of the returned object */

                status = acpi_ut_get_object_size(info->return_object,
                                                 &buffer_space_needed);
                if (ACPI_SUCCESS(status)) {

                    /* Validate/Allocate/Clear caller buffer */

                    status = acpi_ut_initialize_buffer(return_buffer,
                                                       buffer_space_needed);
                    if (ACPI_FAILURE(status)) {
                        /*
                         * Caller's buffer is too small or a new one can't
                         * be allocated
                         */
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                          "Needed buffer size %X, %s\n",
                                          (u32)buffer_space_needed,
                                          acpi_format_exception(status)));
                    } else {
                        /* We have enough space for the object, build it */

                        status =
                            acpi_ut_copy_iobject_to_eobject(info->return_object,
                                                            return_buffer);
                    }
                }
            }
        }
    }

    if (info->return_object) {
        /*
         * Delete the internal return object. NOTE: Interpreter must be
         * locked to avoid race condition.
         */
        acpi_ex_enter_interpreter();

        /* Remove one reference on the return object (should delete it) */

        acpi_ut_remove_reference(info->return_object);
        acpi_ex_exit_interpreter();
    }

cleanup:

    /* Free the input parameter list (if we created one) */

    if (info->parameters) {

        /* Free the allocated parameter block */

        acpi_ut_delete_internal_object_list(info->parameters);
    }

    ACPI_FREE(info);
    return_ACPI_STATUS(status);
}
/*
 * acpi_ns_load_table - Parse an ACPI table into the namespace and then
 * initialize the objects it created (interpreter held during init).
 *
 * table_index: index into the root table list
 * node:        namespace node under which the table is loaded
 *
 * Returns AE_ALREADY_EXISTS if the table was previously loaded; otherwise
 * the status of parsing / object initialization. On parse failure, all
 * namespace objects created by this table are deleted and its owner ID
 * released before returning.
 */
acpi_status
acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
{
    acpi_status status;

    ACPI_FUNCTION_TRACE(ns_load_table);

    /* If table already loaded into namespace, just return */

    if (acpi_tb_is_table_loaded(table_index)) {
        status = AE_ALREADY_EXISTS;
        goto unlock;
    }

    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                      "**** Loading table into namespace ****\n"));

    status = acpi_tb_allocate_owner_id(table_index);
    if (ACPI_FAILURE(status)) {
        goto unlock;
    }

    /*
     * Parse the table and load the namespace with all named
     * objects found within. Control methods are NOT parsed
     * at this time. In fact, the control methods cannot be
     * parsed until the entire namespace is loaded, because
     * if a control method makes a forward reference (call)
     * to another control method, we can't continue parsing
     * because we don't know how many arguments to parse next!
     */
    status = acpi_ns_parse_table(table_index, node);
    if (ACPI_SUCCESS(status)) {
        acpi_tb_set_table_loaded_flag(table_index, TRUE);
    } else {
        /*
         * On error, delete any namespace objects created by this table.
         * We cannot initialize these objects, so delete them. There are
         * a couple of especially bad cases:
         * AE_ALREADY_EXISTS - namespace collision.
         * AE_NOT_FOUND - the target of a Scope operator does not
         * exist. This target of Scope must already exist in the
         * namespace, as per the ACPI specification.
         */
        acpi_ns_delete_namespace_by_owner(acpi_gbl_root_table_list.
                                          tables[table_index].owner_id);

        acpi_tb_release_owner_id(table_index);
        return_ACPI_STATUS(status);
    }

unlock:
    if (ACPI_FAILURE(status)) {
        return_ACPI_STATUS(status);
    }

    /*
     * Now we can parse the control methods. We always parse
     * them here for a sanity check, and if configured for
     * just-in-time parsing, we delete the control method
     * parse trees.
     */
    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                      "**** Begin Table Object Initialization\n"));

    /* Object initialization may execute AML; hold the interpreter lock */

    acpi_ex_enter_interpreter();
    status = acpi_ds_initialize_objects(table_index, node);
    acpi_ex_exit_interpreter();

    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                      "**** Completed Table Object Initialization\n"));

    /*
     * This case handles the legacy option that groups all module-level
     * code blocks together and defers execution until all of the tables
     * are loaded. Execute all of these blocks at this time.
     * Execute any module-level code that was detected during the table
     * load phase.
     *
     * Note: this option is deprecated and will be eliminated in the
     * future. Use of this option can cause problems with AML code that
     * depends upon in-order immediate execution of module-level code.
     */
    acpi_ns_exec_module_code_list();
    return_ACPI_STATUS(status);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_evaluate_object
 *
 * PARAMETERS:  handle              - Object handle (optional)
 *              pathname            - Object pathname (optional)
 *              external_params     - List of parameters to pass to method,
 *                                    terminated by NULL. May be NULL
 *                                    if no parameters are being passed.
 *              return_buffer       - Where to put method's return value (if
 *                                    any). If NULL, no value is returned.
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Find and evaluate the given object, passing the given
 *              parameters if necessary. One of "Handle" or "Pathname" must
 *              be valid (non-null)
 *
 ******************************************************************************/
acpi_status
acpi_evaluate_object(acpi_handle handle,
                     acpi_string pathname,
                     struct acpi_object_list *external_params,
                     struct acpi_buffer *return_buffer)
{
    acpi_status status;
    struct acpi_evaluate_info *info;
    acpi_size buffer_space_needed;
    u32 i;

    ACPI_FUNCTION_TRACE(acpi_evaluate_object);

    /* Allocate and initialize the evaluation information block */

    info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
    if (!info) {
        return_ACPI_STATUS(AE_NO_MEMORY);
    }

    /* Convert and validate the device handle */

    info->prefix_node = acpi_ns_validate_handle(handle);
    if (!info->prefix_node) {
        status = AE_BAD_PARAMETER;
        goto cleanup;
    }

    /*
     * Get the actual namespace node for the target object.
     * Handles these cases:
     *
     * 1) Null node, valid pathname from root (absolute path)
     * 2) Node and valid pathname (path relative to Node)
     * 3) Node, Null pathname
     */
    if ((pathname) && (ACPI_IS_ROOT_PREFIX(pathname[0]))) {

        /* The path is fully qualified, just evaluate by name */

        info->prefix_node = NULL;
    } else if (!handle) {
        /*
         * A handle is optional iff a fully qualified pathname is specified.
         * Since we've already handled fully qualified names above, this is
         * an error.
         */
        if (!pathname) {
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                              "Both Handle and Pathname are NULL"));
        } else {
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                              "Null Handle with relative pathname [%s]",
                              pathname));
        }

        status = AE_BAD_PARAMETER;
        goto cleanup;
    }

    info->relative_pathname = pathname;

    /*
     * Convert all external objects passed as arguments to the
     * internal version(s).
     */
    if (external_params && external_params->count) {
        info->param_count = (u16)external_params->count;

        /* Warn on impossible argument count */

        if (info->param_count > ACPI_METHOD_NUM_ARGS) {
            ACPI_WARN_PREDEFINED((AE_INFO, pathname, ACPI_WARN_ALWAYS,
                                  "Excess arguments (%u) - using only %u",
                                  info->param_count, ACPI_METHOD_NUM_ARGS));

            /* Silently truncate to the architectural maximum */

            info->param_count = ACPI_METHOD_NUM_ARGS;
        }

        /*
         * Allocate a new parameter block for the internal objects
         * Add 1 to count to allow for null terminated internal list
         */
        info->parameters = ACPI_ALLOCATE_ZEROED(((acpi_size)info->
                                                 param_count +
                                                 1) * sizeof(void *));
        if (!info->parameters) {
            status = AE_NO_MEMORY;
            goto cleanup;
        }

        /* Convert each external object in the list to an internal object */

        for (i = 0; i < info->param_count; i++) {
            status =
                acpi_ut_copy_eobject_to_iobject(&external_params->pointer[i],
                                                &info->parameters[i]);
            if (ACPI_FAILURE(status)) {
                goto cleanup;
            }
        }

        info->parameters[info->param_count] = NULL;
    }

#ifdef _FUTURE_FEATURE
    /*
     * Begin incoming argument count analysis. Check for too few args
     * and too many args.
     */
    switch (acpi_ns_get_type(info->node)) {
    case ACPI_TYPE_METHOD:

        /* Check incoming argument count against the method definition */

        if (info->obj_desc->method.param_count > info->param_count) {
            ACPI_ERROR((AE_INFO,
                        "Insufficient arguments (%u) - %u are required",
                        info->param_count,
                        info->obj_desc->method.param_count));

            status = AE_MISSING_ARGUMENTS;
            goto cleanup;
        } else if (info->obj_desc->method.param_count < info->param_count) {
            ACPI_WARNING((AE_INFO,
                          "Excess arguments (%u) - only %u are required",
                          info->param_count,
                          info->obj_desc->method.param_count));

            /* Just pass the required number of arguments */

            info->param_count = info->obj_desc->method.param_count;
        }

        /*
         * Any incoming external objects to be passed as arguments to the
         * method must be converted to internal objects
         */
        if (info->param_count) {
            /*
             * Allocate a new parameter block for the internal objects
             * Add 1 to count to allow for null terminated internal list
             */
            info->parameters = ACPI_ALLOCATE_ZEROED(((acpi_size)info->
                                                     param_count +
                                                     1) * sizeof(void *));
            if (!info->parameters) {
                status = AE_NO_MEMORY;
                goto cleanup;
            }

            /* Convert each external object in the list to an internal object */

            for (i = 0; i < info->param_count; i++) {
                status =
                    acpi_ut_copy_eobject_to_iobject(&external_params->
                                                    pointer[i],
                                                    &info->parameters[i]);
                if (ACPI_FAILURE(status)) {
                    goto cleanup;
                }
            }

            info->parameters[info->param_count] = NULL;
        }
        break;

    default:

        /* Warn if arguments passed to an object that is not a method */

        if (info->param_count) {
            ACPI_WARNING((AE_INFO,
                          "%u arguments were passed to a non-method ACPI object",
                          info->param_count));
        }
        break;
    }
#endif

    /* Now we can evaluate the object */

    status = acpi_ns_evaluate(info);

    /*
     * If we are expecting a return value, and all went well above,
     * copy the return value to an external object.
     */
    if (!return_buffer) {
        goto cleanup_return_object;
    }

    if (!info->return_object) {
        return_buffer->length = 0;
        goto cleanup;
    }

    if (ACPI_GET_DESCRIPTOR_TYPE(info->return_object) ==
        ACPI_DESC_TYPE_NAMED) {
        /*
         * If we received a NS Node as a return object, this means that
         * the object we are evaluating has nothing interesting to
         * return (such as a mutex, etc.) We return an error because
         * these types are essentially unsupported by this interface.
         * We don't check up front because this makes it easier to add
         * support for various types at a later date if necessary.
         */
        status = AE_TYPE;
        info->return_object = NULL;     /* No need to delete a NS Node */
        return_buffer->length = 0;
    }

    if (ACPI_FAILURE(status)) {
        goto cleanup_return_object;
    }

    /* Dereference Index and ref_of references */

    acpi_ns_resolve_references(info);

    /* Get the size of the returned object */

    status = acpi_ut_get_object_size(info->return_object,
                                     &buffer_space_needed);
    if (ACPI_SUCCESS(status)) {

        /* Validate/Allocate/Clear caller buffer */

        status = acpi_ut_initialize_buffer(return_buffer,
                                           buffer_space_needed);
        if (ACPI_FAILURE(status)) {
            /*
             * Caller's buffer is too small or a new one can't
             * be allocated
             */
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                              "Needed buffer size %X, %s\n",
                              (u32)buffer_space_needed,
                              acpi_format_exception(status)));
        } else {
            /* We have enough space for the object, build it */

            status = acpi_ut_copy_iobject_to_eobject(info->return_object,
                                                     return_buffer);
        }
    }

cleanup_return_object:

    if (info->return_object) {
        /*
         * Delete the internal return object. NOTE: Interpreter must be
         * locked to avoid race condition.
         */
        acpi_ex_enter_interpreter();

        /* Remove one reference on the return object (should delete it) */

        acpi_ut_remove_reference(info->return_object);
        acpi_ex_exit_interpreter();
    }

cleanup:

    /* Free the input parameter list (if we created one) */

    if (info->parameters) {

        /* Free the allocated parameter block */

        acpi_ut_delete_internal_object_list(info->parameters);
    }

    ACPI_FREE(info);
    return_ACPI_STATUS(status);
}
/*
 * acpi_ev_address_space_dispatch - Dispatch an operation-region access to
 * the installed address-space handler, performing lazy region setup first.
 *
 * region_obj:    the operation region being accessed
 * field_obj:     field object (supplies Connection info for GSBUS/GPIO)
 * function:      read/write function code passed to the handler
 * region_offset: offset within the region
 * bit_width:     access width in bits
 * value:         in/out data pointer for the access
 *
 * Returns AE_NOT_EXIST if no secondary object, handler, or setup routine
 * exists; otherwise the status of region setup or the handler call.
 *
 * FIX: repaired two mojibake corruptions where "&reg" had been converted
 * to the (R) character: "&region_context" in the region_setup call and
 * "&region_obj->..." in the debug print.
 */
acpi_status
acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
                               union acpi_operand_object *field_obj,
                               u32 function,
                               u32 region_offset, u32 bit_width, u64 *value)
{
    acpi_status status;
    acpi_adr_space_handler handler;
    acpi_adr_space_setup region_setup;
    union acpi_operand_object *handler_desc;
    union acpi_operand_object *region_obj2;
    void *region_context = NULL;
    struct acpi_connection_info *context;
    acpi_physical_address address;

    ACPI_FUNCTION_TRACE(ev_address_space_dispatch);

    region_obj2 = acpi_ns_get_secondary_object(region_obj);
    if (!region_obj2) {
        return_ACPI_STATUS(AE_NOT_EXIST);
    }

    /* Ensure that there is a handler associated with this region */

    handler_desc = region_obj->region.handler;
    if (!handler_desc) {
        ACPI_ERROR((AE_INFO,
                    "No handler for Region [%4.4s] (%p) [%s]",
                    acpi_ut_get_node_name(region_obj->region.node),
                    region_obj,
                    acpi_ut_get_region_name(region_obj->region.space_id)));

        return_ACPI_STATUS(AE_NOT_EXIST);
    }

    context = handler_desc->address_space.context;

    /*
     * It may be the case that the region has never been initialized.
     * Some types of regions require special init code
     */
    if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) {

        /* This region has not been initialized yet, do it */

        region_setup = handler_desc->address_space.setup;
        if (!region_setup) {

            /* No initialization routine, exit with error */

            ACPI_ERROR((AE_INFO,
                        "No init routine for region(%p) [%s]",
                        region_obj,
                        acpi_ut_get_region_name(region_obj->region.
                                                space_id)));
            return_ACPI_STATUS(AE_NOT_EXIST);
        }

        /*
         * We must exit the interpreter because the region setup will
         * potentially execute control methods (for example, the _REG method
         * for this region)
         */
        acpi_ex_exit_interpreter();

        /* FIX: was "(R)ion_context" (mojibake for &region_context) */

        status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
                              context, &region_context);

        /* Re-enter the interpreter */

        acpi_ex_enter_interpreter();

        /* Check for failure of the Region Setup */

        if (ACPI_FAILURE(status)) {
            ACPI_EXCEPTION((AE_INFO, status,
                            "During region initialization: [%s]",
                            acpi_ut_get_region_name(region_obj->region.
                                                    space_id)));
            return_ACPI_STATUS(status);
        }

        /* Region initialization may have been completed by region_setup */

        if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) {
            region_obj->region.flags |= AOPOBJ_SETUP_COMPLETE;

            /*
             * Save the returned context for use in all accesses to
             * the handler for this particular region
             */
            if (!(region_obj2->extra.region_context)) {
                region_obj2->extra.region_context = region_context;
            }
        }
    }

    /* We have everything we need, we can invoke the address space handler */

    handler = handler_desc->address_space.handler;
    address = (region_obj->region.address + region_offset);

    /*
     * Special handling for generic_serial_bus and general_purpose_io:
     * There are three extra parameters that must be passed to the
     * handler via the context:
     *   1) Connection buffer, a resource template from Connection() op
     *   2) Length of the above buffer
     *   3) Actual access length from the access_as() op
     *
     * In addition, for general_purpose_io, the Address and bit_width fields
     * are defined as follows:
     *   1) Address is the pin number index of the field (bit offset from
     *      the previous Connection)
     *   2) bit_width is the actual bit length of the field (number of pins)
     */
    if ((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) &&
        context && field_obj) {

        /* Get the Connection (resource_template) buffer */

        context->connection = field_obj->field.resource_buffer;
        context->length = field_obj->field.resource_length;
        context->access_length = field_obj->field.access_length;
    }
    if ((region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) &&
        context && field_obj) {

        /* Get the Connection (resource_template) buffer */

        context->connection = field_obj->field.resource_buffer;
        context->length = field_obj->field.resource_length;
        context->access_length = field_obj->field.access_length;
        address = field_obj->field.pin_number_index;
        bit_width = field_obj->field.bit_length;
    }

    /* FIX: was "(R)ion_obj->..." (mojibake for &region_obj->...) */

    ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
                      "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
                      &region_obj->region.handler->address_space, handler,
                      ACPI_FORMAT_NATIVE_UINT(address),
                      acpi_ut_get_region_name(region_obj->region.space_id)));

    if (!(handler_desc->address_space.handler_flags &
          ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
        /*
         * For handlers other than the default (supplied) handlers, we must
         * exit the interpreter because the handler *might* block -- we don't
         * know what it will do, so we can't hold the lock on the intepreter.
         */
        acpi_ex_exit_interpreter();
    }

    /* Call the handler */

    status = handler(function, address, bit_width, value, context,
                     region_obj2->extra.region_context);

    if (ACPI_FAILURE(status)) {
        ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]",
                        acpi_ut_get_region_name(region_obj->region.
                                                space_id)));
    }

    if (!(handler_desc->address_space.handler_flags &
          ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
        /*
         * We just returned from a non-default handler, we must re-enter the
         * interpreter
         */
        acpi_ex_enter_interpreter();
    }

    return_ACPI_STATUS(status);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_init_one_object
 *
 * PARAMETERS:  obj_handle      - Node whose attached object may need init
 *              level           - Current walk nesting level (unused here)
 *              context         - Points to a struct acpi_init_walk_info;
 *                                its counters are updated as we go
 *              return_value    - Not used
 *
 * RETURN:      Always AE_OK -- per-object failures are logged but must never
 *              abort the namespace walk that invokes this callback.
 *
 * DESCRIPTION: Walk callback (for acpi_walk_namespace). Finds objects whose
 *              declarations contain executable AML (op_regions, fields,
 *              buffers, packages) and evaluates those deferred arguments
 *              now, under the interpreter lock.
 *
 ******************************************************************************/

static acpi_status
acpi_ns_init_one_object(acpi_handle obj_handle,
			u32 level, void *context, void **return_value)
{
	acpi_object_type type;
	acpi_status status = AE_OK;
	struct acpi_init_walk_info *info =
	    (struct acpi_init_walk_info *)context;
	struct acpi_namespace_node *node =
	    (struct acpi_namespace_node *)obj_handle;
	union acpi_operand_object *obj_desc;

	ACPI_FUNCTION_NAME(ns_init_one_object);

	info->object_count++;

	/* And even then, we are only interested in a few object types */

	type = acpi_ns_get_type(obj_handle);
	obj_desc = acpi_ns_get_attached_object(node);
	if (!obj_desc) {
		/* Nothing attached to this node -- nothing to initialize */
		return (AE_OK);
	}

	/* Increment counters for object types we are looking for */

	switch (type) {
	case ACPI_TYPE_REGION:

		info->op_region_count++;
		break;

	case ACPI_TYPE_BUFFER_FIELD:

		info->field_count++;
		break;

	case ACPI_TYPE_LOCAL_BANK_FIELD:

		info->field_count++;
		break;

	case ACPI_TYPE_BUFFER:

		info->buffer_count++;
		break;

	case ACPI_TYPE_PACKAGE:

		info->package_count++;
		break;

	default:

		/* No init required, just exit now */

		return (AE_OK);
	}

	/* If the object is already initialized, nothing else to do */

	if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
		return (AE_OK);
	}

	/* Must lock the interpreter before executing AML code */

	acpi_ex_enter_interpreter();

	/*
	 * Each of these types can contain executable AML code within the
	 * declaration.
	 */
	switch (type) {
	case ACPI_TYPE_REGION:

		info->op_region_init++;
		status = acpi_ds_get_region_arguments(obj_desc);
		break;

	case ACPI_TYPE_BUFFER_FIELD:

		info->field_init++;
		status = acpi_ds_get_buffer_field_arguments(obj_desc);
		break;

	case ACPI_TYPE_LOCAL_BANK_FIELD:

		info->field_init++;
		status = acpi_ds_get_bank_field_arguments(obj_desc);
		break;

	case ACPI_TYPE_BUFFER:

		info->buffer_init++;
		status = acpi_ds_get_buffer_arguments(obj_desc);
		break;

	case ACPI_TYPE_PACKAGE:

		info->package_init++;
		status = acpi_ds_get_package_arguments(obj_desc);
		break;

	default:

		/* No other types can get here (filtered by first switch) */

		break;
	}

	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"Could not execute arguments for [%4.4s] (%s)",
				acpi_ut_get_node_name(node),
				acpi_ut_get_type_name(type)));
	}

	/*
	 * We ignore errors from above, and always return OK, since we don't want
	 * to abort the walk on any single error.
	 */
	acpi_ex_exit_interpreter();
	return (AE_OK);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_load_table
 *
 * PARAMETERS:  table_index     - Index of the table in the root table list
 *              node            - Namespace node where the table is loaded
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Load one ACPI table into the namespace, then initialize the
 *              objects defined by the table and (optionally) execute any
 *              module-level code it contained.
 *
 *              BUG FIX: the previous version returned directly from the
 *              parse-failure branch without calling acpi_ex_exit_interpreter,
 *              leaking the interpreter lock acquired on entry. The error
 *              path now exits through the common unlock_interp label.
 *
 ******************************************************************************/

acpi_status
acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ns_load_table);

	/* Table load executes AML; must hold the interpreter lock */

	acpi_ex_enter_interpreter();

	/*
	 * Parse the table and load the namespace with all named
	 * objects found within. Control methods are NOT parsed
	 * at this time. In fact, the control methods cannot be
	 * parsed until the entire namespace is loaded, because
	 * if a control method makes a forward reference (call)
	 * to another control method, we can't continue parsing
	 * because we don't know how many arguments to parse next!
	 */
	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		goto unlock_interp;
	}

	/* If table already loaded into namespace, just return */

	if (acpi_tb_is_table_loaded(table_index)) {
		status = AE_ALREADY_EXISTS;
		goto unlock;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "**** Loading table into namespace ****\n"));

	status = acpi_tb_allocate_owner_id(table_index);
	if (ACPI_FAILURE(status)) {
		goto unlock;
	}

	status = acpi_ns_parse_table(table_index, node);
	if (ACPI_SUCCESS(status)) {
		acpi_tb_set_table_loaded_flag(table_index, TRUE);
	} else {
		/*
		 * On error, delete any namespace objects created by this table.
		 * We cannot initialize these objects, so delete them. There are
		 * a couple of especially bad cases:
		 * AE_ALREADY_EXISTS - namespace collision.
		 * AE_NOT_FOUND - the target of a Scope operator does not
		 * exist. This target of Scope must already exist in the
		 * namespace, as per the ACPI specification.
		 */
		(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
		acpi_ns_delete_namespace_by_owner(acpi_gbl_root_table_list.
						  tables[table_index].owner_id);

		acpi_tb_release_owner_id(table_index);

		/*
		 * The namespace mutex was already released above; jump past
		 * the "unlock" label so it is not released twice, but still
		 * exit the interpreter (this is the bug fix -- the old code
		 * returned here and leaked the interpreter lock).
		 */
		goto unlock_interp;
	}

unlock:
	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
unlock_interp:
	(void)acpi_ex_exit_interpreter();

	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * Now we can parse the control methods. We always parse
	 * them here for a sanity check, and if configured for
	 * just-in-time parsing, we delete the control method
	 * parse trees.
	 */
	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "**** Begin Table Object Initialization\n"));

	status = acpi_ds_initialize_objects(table_index, node);

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "**** Completed Table Object Initialization\n"));

	/*
	 * Execute any module-level code that was detected during the table
	 * load phase. Although illegal since ACPI 2.0, there are many machines
	 * that contain this type of code. Each block of detected executable AML
	 * code outside of any control method is wrapped with a temporary
	 * control method object and placed on a global list. The methods on
	 * this list are executed below.
	 *
	 * This case executes the module-level code for each table immediately
	 * after the table has been loaded. This provides compatibility with
	 * other ACPI implementations. Optionally, the execution can be deferred
	 * until later, see acpi_initialize_objects.
	 */
	if (!acpi_gbl_group_module_level_code) {
		acpi_ns_exec_module_code_list();
	}

	return_ACPI_STATUS(status);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_terminate_control_method
 *
 * PARAMETERS:  method_desc     - Method object (required)
 *              walk_state      - State of the method's parse walk (optional)
 *
 * RETURN:      None
 *
 * DESCRIPTION: Terminate a control method: delete everything created during
 *              its execution (locals/args, per-method namespace objects),
 *              release the serialization mutex if held, decrement the thread
 *              count, and release the owner_id when the last thread exits.
 *
 *              NOTE(review): callers appear to hold the interpreter lock --
 *              the namespace deletions below temporarily drop and re-take
 *              it. Confirm against callers before changing lock order.
 *
 ******************************************************************************/

void
acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
				 struct acpi_walk_state *walk_state)
{

	ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state);

	/* method_desc is required, walk_state is optional */

	if (!method_desc) {
		return_VOID;
	}

	if (walk_state) {

		/* Delete all arguments and locals */

		acpi_ds_method_data_delete_all(walk_state);

		/*
		 * Delete any namespace objects created anywhere within the
		 * namespace by the execution of this method. Unless:
		 * 1) This method is a module-level executable code method, in which
		 *    case we want make the objects permanent.
		 * 2) There are other threads executing the method, in which case we
		 *    will wait until the last thread has completed.
		 */
		if (!(method_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL)
		    && (method_desc->method.thread_count == 1)) {

			/* Delete any direct children of (created by) this method */

			/* Namespace deletion may block; release interpreter lock around it */

			(void)acpi_ex_exit_interpreter();
			acpi_ns_delete_namespace_subtree(walk_state->
							 method_node);
			(void)acpi_ex_enter_interpreter();

			/*
			 * Delete any objects that were created by this method
			 * elsewhere in the namespace (if any were created).
			 * Use of the ACPI_METHOD_MODIFIED_NAMESPACE optimizes the
			 * deletion such that we don't have to perform an entire
			 * namespace walk for every control method execution.
			 */
			if (method_desc->method.
			    info_flags & ACPI_METHOD_MODIFIED_NAMESPACE) {
				(void)acpi_ex_exit_interpreter();
				acpi_ns_delete_namespace_by_owner(method_desc->
								  method.
								  owner_id);
				(void)acpi_ex_enter_interpreter();
				method_desc->method.info_flags &=
				    ~ACPI_METHOD_MODIFIED_NAMESPACE;
			}
		}

		/*
		 * If method is serialized, release the mutex and restore the
		 * current sync level for this thread
		 */
		if (method_desc->method.mutex) {

			/* Acquisition Depth handles recursive calls */

			method_desc->method.mutex->mutex.acquisition_depth--;
			if (!method_desc->method.mutex->mutex.
			    acquisition_depth) {
				/* Fully released: restore caller's sync level, free OS mutex */

				walk_state->thread->current_sync_level =
				    method_desc->method.mutex->mutex.
				    original_sync_level;

				acpi_os_release_mutex(method_desc->method.
						      mutex->mutex.os_mutex);
				method_desc->method.mutex->mutex.thread_id = 0;
			}
		}
	}

	/* Decrement the thread count on the method */

	if (method_desc->method.thread_count) {
		method_desc->method.thread_count--;
	} else {
		ACPI_ERROR((AE_INFO, "Invalid zero thread count in method"));
	}

	/* Are there any other threads currently executing this method? */

	if (method_desc->method.thread_count) {
		/*
		 * Additional threads. Do not release the owner_id in this case,
		 * we immediately reuse it for the next thread executing this method
		 */
		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
				  "*** Completed execution of one thread, %u threads remaining\n",
				  method_desc->method.thread_count));
	} else {
		/* This is the only executing thread for this method */

		/*
		 * Support to dynamically change a method from not_serialized to
		 * Serialized if it appears that the method is incorrectly written and
		 * does not support multiple thread execution. The best example of this
		 * is if such a method creates namespace objects and blocks. A second
		 * thread will fail with an AE_ALREADY_EXISTS exception.
		 *
		 * This code is here because we must wait until the last thread exits
		 * before marking the method as serialized.
		 */
		if (method_desc->method.
		    info_flags & ACPI_METHOD_SERIALIZED_PENDING) {
			if (walk_state) {
				ACPI_INFO(("Marking method %4.4s as Serialized "
					   "because of AE_ALREADY_EXISTS error",
					   walk_state->method_node->name.
					   ascii));
			}

			/*
			 * Method tried to create an object twice and was marked as
			 * "pending serialized". The probable cause is that the method
			 * cannot handle reentrancy.
			 *
			 * The method was created as not_serialized, but it tried to create
			 * a named object and then blocked, causing the second thread
			 * entrance to begin and then fail. Workaround this problem by
			 * marking the method permanently as Serialized when the last
			 * thread exits here.
			 */
			method_desc->method.info_flags &=
			    ~ACPI_METHOD_SERIALIZED_PENDING;

			method_desc->method.info_flags |=
			    (ACPI_METHOD_SERIALIZED |
			     ACPI_METHOD_IGNORE_SYNC_LEVEL);
			method_desc->method.sync_level = 0;
		}

		/* No more threads, we can free the owner_id */

		if (!
		    (method_desc->method.
		     info_flags & ACPI_METHOD_MODULE_LEVEL)) {
			acpi_ut_release_owner_id(&method_desc->method.
						 owner_id);
		}
	}

	acpi_ex_stop_trace_method((struct acpi_namespace_node *)method_desc->
				  method.node, method_desc, walk_state);

	return_VOID;
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_one_complete_parse
 *
 * PARAMETERS:  pass_number     - 1 or 2 (load pass)
 *              table_index     - Index of the table in the root table list
 *              start_node      - Where to enter the namespace (default: root)
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Perform one complete parse of an ACPI/AML table under the
 *              interpreter lock.
 *
 *              NOTE(review): on the success path walk_state is not deleted
 *              here -- presumably it is consumed/freed by acpi_ps_parse_aml
 *              (compare acpi_ps_execute_table); confirm before changing the
 *              cleanup logic.
 *
 ******************************************************************************/

acpi_status
acpi_ns_one_complete_parse(u32 pass_number,
			   u32 table_index,
			   struct acpi_namespace_node *start_node)
{
	union acpi_parse_object *parse_root;
	acpi_status status;
	u32 aml_length;
	u8 *aml_start;
	struct acpi_walk_state *walk_state;
	struct acpi_table_header *table;
	acpi_owner_id owner_id;

	ACPI_FUNCTION_TRACE(ns_one_complete_parse);

	status = acpi_get_table_by_index(table_index, &table);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Table must consist of at least a complete header */

	if (table->length < sizeof(struct acpi_table_header)) {
		return_ACPI_STATUS(AE_BAD_HEADER);
	}

	/* AML payload immediately follows the standard table header */

	aml_start = (u8 *)table + sizeof(struct acpi_table_header);
	aml_length = table->length - sizeof(struct acpi_table_header);

	status = acpi_tb_get_owner_id(table_index, &owner_id);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Create and init a Root Node */

	parse_root = acpi_ps_create_scope_op(aml_start);
	if (!parse_root) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Create and initialize a new walk state */

	walk_state = acpi_ds_create_walk_state(owner_id, NULL, NULL, NULL);
	if (!walk_state) {
		acpi_ps_free_op(parse_root);
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	status = acpi_ds_init_aml_walk(walk_state, parse_root, NULL,
				       aml_start, aml_length, NULL,
				       (u8)pass_number);
	if (ACPI_FAILURE(status)) {
		acpi_ds_delete_walk_state(walk_state);
		goto cleanup;
	}

	/* Found OSDT table, enable the namespace override feature */

	if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_OSDT) &&
	    pass_number == ACPI_IMODE_LOAD_PASS1) {
		walk_state->namespace_override = TRUE;
	}

	/* start_node is the default location to load the table */

	if (start_node && start_node != acpi_gbl_root_node) {
		status =
		    acpi_ds_scope_stack_push(start_node, ACPI_TYPE_METHOD,
					     walk_state);
		if (ACPI_FAILURE(status)) {
			acpi_ds_delete_walk_state(walk_state);
			goto cleanup;
		}
	}

	/* Parse the AML (must hold the interpreter lock) */

	ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "*PARSE* pass %u parse\n",
			  pass_number));
	acpi_ex_enter_interpreter();
	status = acpi_ps_parse_aml(walk_state);
	acpi_ex_exit_interpreter();

cleanup:
	acpi_ps_delete_parse_tree(parse_root);
	return_ACPI_STATUS(status);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_get_object_value
 *
 * PARAMETERS:  node                - Namespace node whose value is wanted
 *              return_obj_desc     - Where the value object is returned
 *
 * RETURN:      Status; AE_CTRL_RETURN_VALUE on success with the value in
 *              *return_obj_desc.
 *
 * DESCRIPTION: Return the value of the named object. The caller holds the
 *              namespace mutex on entry; this function releases it on every
 *              return path (it must be dropped before entering the
 *              interpreter).
 *
 *              BUG FIX: in the Processor/Power branch, the object created by
 *              acpi_ut_create_internal_object was leaked when the node had
 *              no attached object; it is now deleted before bailing out.
 *
 ******************************************************************************/

acpi_status
acpi_ns_get_object_value (
	acpi_namespace_node     *node,
	acpi_operand_object     **return_obj_desc)
{
	acpi_status             status = AE_OK;
	acpi_operand_object     *obj_desc;
	acpi_operand_object     *source_desc;


	FUNCTION_TRACE ("Ns_get_object_value");


	/*
	 * We take the value from certain objects directly
	 */
	if ((node->type == ACPI_TYPE_PROCESSOR) ||
		(node->type == ACPI_TYPE_POWER)) {
		/*
		 * Create a Reference object to contain the object
		 */
		obj_desc = acpi_ut_create_internal_object (node->type);
		if (!obj_desc) {
			status = AE_NO_MEMORY;
			goto unlock_and_exit;
		}

		/*
		 * Get the attached object
		 */
		source_desc = acpi_ns_get_attached_object (node);
		if (!source_desc) {
			/*
			 * BUG FIX: delete the object created above before
			 * exiting, otherwise it is leaked on this error path.
			 */
			acpi_ut_remove_reference (obj_desc);
			status = AE_NULL_OBJECT;
			goto unlock_and_exit;
		}

		/*
		 * Just copy from the original to the return object
		 *
		 * TBD: [Future] - need a low-level object copy that handles
		 * the reference count automatically. (Don't want to copy it)
		 */
		MEMCPY (obj_desc, source_desc, sizeof (acpi_operand_object));
		obj_desc->common.reference_count = 1;
		acpi_ut_release_mutex (ACPI_MTX_NAMESPACE);
	}

	/*
	 * Other objects require a reference object wrapper which we
	 * then attempt to resolve.
	 */
	else {
		/* Create a Reference object to contain the object */

		obj_desc = acpi_ut_create_internal_object (INTERNAL_TYPE_REFERENCE);
		if (!obj_desc) {
			status = AE_NO_MEMORY;
			goto unlock_and_exit;
		}

		/* Construct a descriptor pointing to the name */

		obj_desc->reference.opcode = (u8) AML_NAME_OP;
		obj_desc->reference.object = (void *) node;

		/*
		 * Use Resolve_to_value() to get the associated value. This call
		 * always deletes Obj_desc (allocated above).
		 *
		 * NOTE: we can get away with passing in NULL for a walk state
		 * because Obj_desc is guaranteed to not be a reference to either
		 * a method local or a method argument
		 *
		 * Even though we do not directly invoke the interpreter
		 * for this, we must enter it because we could access an opregion.
		 * The opregion access code assumes that the interpreter
		 * is locked.
		 *
		 * We must release the namespace lock before entering the
		 * interpreter.
		 */
		acpi_ut_release_mutex (ACPI_MTX_NAMESPACE);
		status = acpi_ex_enter_interpreter ();
		if (ACPI_SUCCESS (status)) {
			status = acpi_ex_resolve_to_value (&obj_desc, NULL);

			acpi_ex_exit_interpreter ();
		}
	}

	/*
	 * If Acpi_ex_resolve_to_value() succeeded, the return value was
	 * placed in Obj_desc.
	 */
	if (ACPI_SUCCESS (status)) {
		status = AE_CTRL_RETURN_VALUE;

		*return_obj_desc = obj_desc;
		ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Returning obj %p\n",
			*return_obj_desc));
	}

	/* Namespace is unlocked */

	return_ACPI_STATUS (status);


unlock_and_exit:

	/* Unlock the namespace */

	acpi_ut_release_mutex (ACPI_MTX_NAMESPACE);
	return_ACPI_STATUS (status);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_init_one_object
 *
 * PARAMETERS:  obj_handle      - Node whose attached object may need init
 *              level           - Walk nesting level (unused here)
 *              context         - Points to an acpi_init_walk_info; counters
 *                                are updated as objects are examined
 *              return_value    - Not used
 *
 * RETURN:      AE_OK (per-object init errors are only logged), or a failure
 *              status if the interpreter cannot be entered.
 *
 * DESCRIPTION: Walk callback: evaluates the deferred AML arguments of
 *              operation regions and buffer fields under the interpreter
 *              lock.
 *
 ******************************************************************************/

acpi_status
acpi_ns_init_one_object (
	acpi_handle             obj_handle,
	u32                     level,
	void                    *context,
	void                    **return_value)
{
	acpi_object_type8       type;
	acpi_status             status;
	acpi_init_walk_info     *info = (acpi_init_walk_info *) context;
	acpi_namespace_node     *node = (acpi_namespace_node *) obj_handle;
	acpi_operand_object     *obj_desc;


	PROC_NAME ("Ns_init_one_object");


	info->object_count++;

	/* And even then, we are only interested in a few object types */

	type = acpi_ns_get_type (obj_handle);
	obj_desc = node->object;
	if (!obj_desc) {
		/* Nothing attached -- nothing to initialize */
		return (AE_OK);
	}

	if ((type != ACPI_TYPE_REGION) &&
		(type != ACPI_TYPE_BUFFER_FIELD)) {
		return (AE_OK);
	}

	/*
	 * Must lock the interpreter before executing AML code
	 */
	status = acpi_ex_enter_interpreter ();
	if (ACPI_FAILURE (status)) {
		return (status);
	}

	switch (type) {
	case ACPI_TYPE_REGION:

		info->op_region_count++;

		/* Skip regions whose arguments were already evaluated */

		if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
			break;
		}

		info->op_region_init++;
		status = acpi_ds_get_region_arguments (obj_desc);
		if (ACPI_FAILURE (status)) {
			ACPI_DEBUG_PRINT_RAW ((ACPI_DB_ERROR, "\n"));
			ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
				"%s while getting region arguments [%4.4s]\n",
				acpi_format_exception (status), (char*)&node->name));
		}

		/* Progress dot, unless verbose init output is enabled */

		if (!(acpi_dbg_level & ACPI_LV_INIT)) {
			ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, "."));
		}

		break;

	case ACPI_TYPE_BUFFER_FIELD:

		info->field_count++;

		/* Skip fields whose arguments were already evaluated */

		if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
			break;
		}

		info->field_init++;
		status = acpi_ds_get_buffer_field_arguments (obj_desc);
		if (ACPI_FAILURE (status)) {
			ACPI_DEBUG_PRINT_RAW ((ACPI_DB_ERROR, "\n"));
			ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
				"%s while getting buffer field arguments [%4.4s]\n",
				acpi_format_exception (status), (char*)&node->name));
		}

		/* Progress dot, unless verbose init output is enabled */

		if (!(acpi_dbg_level & ACPI_LV_INIT)) {
			ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, "."));
		}

		break;

	default:
		break;
	}

	/*
	 * We ignore errors from above, and always return OK, since
	 * we don't want to abort the walk on a single error.
	 */
	acpi_ex_exit_interpreter ();
	return (AE_OK);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_init_one_object
 *
 * PARAMETERS:  obj_handle      - Node whose attached object may need init
 *              level           - Walk nesting level (unused here)
 *              context         - Points to a struct acpi_init_walk_info;
 *                                counters are updated as objects are seen
 *              return_value    - Not used
 *
 * RETURN:      Always AE_OK so the namespace walk is never aborted by a
 *              failure on one individual object.
 *
 * DESCRIPTION: Walk callback: evaluates the deferred AML arguments of
 *              op_regions, fields, buffers and packages under the
 *              interpreter lock.
 *
 ******************************************************************************/

static acpi_status
acpi_ns_init_one_object(acpi_handle obj_handle,
			u32 level, void *context, void **return_value)
{
	acpi_object_type type;
	acpi_status status = AE_OK;
	struct acpi_init_walk_info *info =
	    (struct acpi_init_walk_info *)context;
	struct acpi_namespace_node *node =
	    (struct acpi_namespace_node *)obj_handle;
	union acpi_operand_object *obj_desc;

	ACPI_FUNCTION_NAME(ns_init_one_object);

	info->object_count++;

	/* Only nodes with an attached object are of interest */

	type = acpi_ns_get_type(obj_handle);
	obj_desc = acpi_ns_get_attached_object(node);
	if (!obj_desc) {
		return (AE_OK);
	}

	/* Increment the per-type counters; bail on types with no deferred args */

	switch (type) {
	case ACPI_TYPE_REGION:

		info->op_region_count++;
		break;

	case ACPI_TYPE_BUFFER_FIELD:

		info->field_count++;
		break;

	case ACPI_TYPE_LOCAL_BANK_FIELD:

		info->field_count++;
		break;

	case ACPI_TYPE_BUFFER:

		info->buffer_count++;
		break;

	case ACPI_TYPE_PACKAGE:

		info->package_count++;
		break;

	default:

		/* No init required for any other type */

		return (AE_OK);
	}

	/* If the object is already initialized, nothing else to do */

	if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
		return (AE_OK);
	}

	/* Must lock the interpreter before executing AML code */

	acpi_ex_enter_interpreter();

	/* Evaluate the deferred arguments contained in the declaration */

	switch (type) {
	case ACPI_TYPE_REGION:

		info->op_region_init++;
		status = acpi_ds_get_region_arguments(obj_desc);
		break;

	case ACPI_TYPE_BUFFER_FIELD:

		info->field_init++;
		status = acpi_ds_get_buffer_field_arguments(obj_desc);
		break;

	case ACPI_TYPE_LOCAL_BANK_FIELD:

		info->field_init++;
		status = acpi_ds_get_bank_field_arguments(obj_desc);
		break;

	case ACPI_TYPE_BUFFER:

		info->buffer_init++;
		status = acpi_ds_get_buffer_arguments(obj_desc);
		break;

	case ACPI_TYPE_PACKAGE:

		info->package_init++;
		status = acpi_ds_get_package_arguments(obj_desc);
		break;

	default:
		break;
	}

	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"Could not execute arguments for [%4.4s] (%s)",
				acpi_ut_get_node_name(node),
				acpi_ut_get_type_name(type)));
	}

	/* Progress dot, unless verbose init-names output is enabled */

	if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) {
		ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
	}

	/* Errors above are ignored; never abort the walk */

	acpi_ex_exit_interpreter();
	return (AE_OK);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_evaluate_object
 *
 * PARAMETERS:  handle              - Object handle (optional if pathname is
 *                                    fully qualified)
 *              pathname            - Object pathname (optional)
 *              external_params     - List of parameters to pass to the
 *                                    method, external format (optional)
 *              return_buffer       - Where the evaluation result is placed
 *                                    (optional)
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Find and evaluate the given object, passing the given
 *              parameters if necessary. One of Handle or Pathname must be
 *              valid (non-null).
 *
 ******************************************************************************/

acpi_status
acpi_evaluate_object (
	acpi_handle                     handle,
	acpi_string                     pathname,
	struct acpi_object_list         *external_params,
	struct acpi_buffer              *return_buffer)
{
	acpi_status                     status;
	acpi_status                     status2;
	struct acpi_parameter_info      info;
	acpi_size                       buffer_space_needed;
	u32                             i;


	ACPI_FUNCTION_TRACE ("acpi_evaluate_object");


	info.node = handle;
	info.parameters = NULL;
	info.return_object = NULL;
	info.parameter_type = ACPI_PARAM_ARGS;

	/*
	 * If there are parameters to be passed to the object
	 * (which must be a control method), the external objects
	 * must be converted to internal objects
	 */
	if (external_params && external_params->count) {
		/*
		 * Allocate a new parameter block for the internal objects
		 * Add 1 to count to allow for null terminated internal list
		 */
		info.parameters = ACPI_MEM_CALLOCATE (
				 ((acpi_size) external_params->count + 1) *
				 sizeof (void *));
		if (!info.parameters) {
			return_ACPI_STATUS (AE_NO_MEMORY);
		}

		/*
		 * Convert each external object in the list to an
		 * internal object
		 */
		for (i = 0; i < external_params->count; i++) {
			status = acpi_ut_copy_eobject_to_iobject (
					 &external_params->pointer[i],
					 &info.parameters[i]);
			if (ACPI_FAILURE (status)) {
				/* Conversion failed: free everything converted so far */

				acpi_ut_delete_internal_object_list (info.parameters);
				return_ACPI_STATUS (status);
			}
		}
		info.parameters[external_params->count] = NULL;
	}

	/*
	 * Three major cases:
	 * 1) Fully qualified pathname
	 * 2) No handle, not fully qualified pathname (error)
	 * 3) Valid handle
	 */
	if ((pathname) &&
		(acpi_ns_valid_root_prefix (pathname[0]))) {
		/*
		 * The path is fully qualified, just evaluate by name
		 */
		status = acpi_ns_evaluate_by_name (pathname, &info);
	}
	else if (!handle) {
		/*
		 * A handle is optional iff a fully qualified pathname
		 * is specified. Since we've already handled fully
		 * qualified names above, this is an error
		 */
		if (!pathname) {
			ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
				"Both Handle and Pathname are NULL\n"));
		}
		else {
			ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
				"Handle is NULL and Pathname is relative\n"));
		}

		status = AE_BAD_PARAMETER;
	}
	else {
		/*
		 * We get here if we have a handle -- and if we have a
		 * pathname it is relative. The handle will be validated
		 * in the lower procedures
		 */
		if (!pathname) {
			/*
			 * The null pathname case means the handle is for
			 * the actual object to be evaluated
			 */
			status = acpi_ns_evaluate_by_handle (&info);
		}
		else {
			/*
			 * Both a Handle and a relative Pathname
			 */
			status = acpi_ns_evaluate_relative (pathname, &info);
		}
	}

	/*
	 * If we are expecting a return value, and all went well above,
	 * copy the return value to an external object.
	 */
	if (return_buffer) {
		if (!info.return_object) {
			return_buffer->length = 0;
		}
		else {
			if (ACPI_GET_DESCRIPTOR_TYPE (info.return_object) ==
				ACPI_DESC_TYPE_NAMED) {
				/*
				 * If we received a NS Node as a return object, this means that
				 * the object we are evaluating has nothing interesting to
				 * return (such as a mutex, etc.)  We return an error because
				 * these types are essentially unsupported by this interface.
				 * We don't check up front because this makes it easier to add
				 * support for various types at a later date if necessary.
				 */
				status = AE_TYPE;
				info.return_object = NULL; /* No need to delete a NS Node */
				return_buffer->length = 0;
			}

			if (ACPI_SUCCESS (status)) {
				/*
				 * Find out how large a buffer is needed
				 * to contain the returned object
				 */
				status = acpi_ut_get_object_size (info.return_object,
						   &buffer_space_needed);
				if (ACPI_SUCCESS (status)) {
					/* Validate/Allocate/Clear caller buffer */

					status = acpi_ut_initialize_buffer (return_buffer,
							 buffer_space_needed);
					if (ACPI_FAILURE (status)) {
						/*
						 * Caller's buffer is too small or a new one can't be allocated
						 */
						ACPI_DEBUG_PRINT ((ACPI_DB_INFO,
							"Needed buffer size %X, %s\n",
							(u32) buffer_space_needed,
							acpi_format_exception (status)));
					}
					else {
						/*
						 * We have enough space for the object, build it
						 */
						status = acpi_ut_copy_iobject_to_eobject (
								  info.return_object,
								  return_buffer);
					}
				}
			}
		}
	}

	if (info.return_object) {
		/*
		 * Delete the internal return object. NOTE: Interpreter
		 * must be locked to avoid race condition.
		 */
		status2 = acpi_ex_enter_interpreter ();
		if (ACPI_SUCCESS (status2)) {
			/*
			 * Delete the internal return object. (Or at least
			 * decrement the reference count by one)
			 */
			acpi_ut_remove_reference (info.return_object);
			acpi_ex_exit_interpreter ();
		}
	}

	/*
	 * Free the input parameter list (if we created one),
	 */
	if (info.parameters) {
		/* Free the allocated parameter block */

		acpi_ut_delete_internal_object_list (info.parameters);
	}

	return_ACPI_STATUS (status);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_address_space_dispatch
 *
 * PARAMETERS:  region_obj      - Internal region object
 *              function        - Read or Write operation
 *              address         - Where in the space to read or write
 *              bit_width       - Field width in bits (8, 16, 32, or 64)
 *              value           - Pointer to in or out value
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Dispatch an address-space (operation-region) access to a
 *              previously installed handler, performing one-time region
 *              setup first if necessary. The interpreter lock is dropped
 *              around region setup and around non-default handlers, since
 *              both may block or execute control methods.
 *
 ******************************************************************************/

acpi_status
acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
			       u32 function,
			       acpi_physical_address address,
			       u32 bit_width, acpi_integer * value)
{
	acpi_status status;
	acpi_status status2;
	acpi_adr_space_handler handler;
	acpi_adr_space_setup region_setup;
	union acpi_operand_object *handler_desc;
	union acpi_operand_object *region_obj2;
	void *region_context = NULL;

	ACPI_FUNCTION_TRACE(ev_address_space_dispatch);

	region_obj2 = acpi_ns_get_secondary_object(region_obj);
	if (!region_obj2) {
		return_ACPI_STATUS(AE_NOT_EXIST);
	}

	/* Ensure that there is a handler associated with this region */

	handler_desc = region_obj->region.handler;
	if (!handler_desc) {
		ACPI_ERROR((AE_INFO,
			    "No handler for Region [%4.4s] (%p) [%s]",
			    acpi_ut_get_node_name(region_obj->region.node),
			    region_obj,
			    acpi_ut_get_region_name(region_obj->region.
						    space_id)));

		return_ACPI_STATUS(AE_NOT_EXIST);
	}

	/*
	 * It may be the case that the region has never been initialized
	 * Some types of regions require special init code
	 */
	if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) {
		/*
		 * This region has not been initialized yet, do it
		 */
		region_setup = handler_desc->address_space.setup;
		if (!region_setup) {

			/* No initialization routine, exit with error */

			ACPI_ERROR((AE_INFO,
				    "No init routine for region(%p) [%s]",
				    region_obj,
				    acpi_ut_get_region_name(region_obj->region.
							    space_id)));
			return_ACPI_STATUS(AE_NOT_EXIST);
		}

		/*
		 * We must exit the interpreter because the region
		 * setup will potentially execute control methods
		 * (e.g., _REG method for this region)
		 */
		acpi_ex_exit_interpreter();

		status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
				      handler_desc->address_space.context,
				      &region_context);

		/* Re-enter the interpreter */

		status2 = acpi_ex_enter_interpreter();
		if (ACPI_FAILURE(status2)) {
			return_ACPI_STATUS(status2);
		}

		/* Check for failure of the Region Setup */

		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"During region initialization: [%s]",
					acpi_ut_get_region_name(region_obj->
								region.
								space_id)));
			return_ACPI_STATUS(status);
		}

		/*
		 * Region initialization may have been completed by region_setup
		 */
		if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) {
			region_obj->region.flags |= AOPOBJ_SETUP_COMPLETE;

			if (region_obj2->extra.region_context) {

				/* The handler for this region was already installed */

				ACPI_FREE(region_context);
			} else {
				/*
				 * Save the returned context for use in all accesses to
				 * this particular region
				 */
				region_obj2->extra.region_context =
				    region_context;
			}
		}
	}

	/* We have everything we need, we can invoke the address space handler */

	handler = handler_desc->address_space.handler;

	ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
			  "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
			  &region_obj->region.handler->address_space, handler,
			  ACPI_FORMAT_UINT64(address),
			  acpi_ut_get_region_name(region_obj->region.
						  space_id)));

	if (!(handler_desc->address_space.handler_flags &
	      ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
		/*
		 * For handlers other than the default (supplied) handlers, we must
		 * exit the interpreter because the handler *might* block -- we don't
		 * know what it will do, so we can't hold the lock on the interpreter.
		 */
		acpi_ex_exit_interpreter();
	}

	/* Call the handler */

	status = handler(function, address, bit_width, value,
			 handler_desc->address_space.context,
			 region_obj2->extra.region_context);

	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]",
				acpi_ut_get_region_name(region_obj->region.
							space_id)));
	}

	if (!(handler_desc->address_space.handler_flags &
	      ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
		/*
		 * We just returned from a non-default handler, we must re-enter the
		 * interpreter
		 */
		status2 = acpi_ex_enter_interpreter();
		if (ACPI_FAILURE(status2)) {
			return_ACPI_STATUS(status2);
		}
	}

	return_ACPI_STATUS(status);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_execute_table
 *
 * PARAMETERS:  info            - Method (table) info block: obj_desc holds
 *                                the wrapped table AML, node is the load
 *                                location, pass_number selects the pass
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Execute a table wrapped in a temporary method object by
 *              parsing its AML under the interpreter lock.
 *
 ******************************************************************************/

acpi_status acpi_ps_execute_table(struct acpi_evaluate_info *info)
{
	acpi_status status;
	union acpi_parse_object *op = NULL;
	struct acpi_walk_state *walk_state = NULL;

	ACPI_FUNCTION_TRACE(ps_execute_table);

	/* Create and init a Root Node */

	op = acpi_ps_create_scope_op(info->obj_desc->method.aml_start);
	if (!op) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	/* Create and initialize a new walk state */

	walk_state =
	    acpi_ds_create_walk_state(info->obj_desc->method.owner_id, NULL,
				      NULL, NULL);
	if (!walk_state) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	status = acpi_ds_init_aml_walk(walk_state, op, info->node,
				       info->obj_desc->method.aml_start,
				       info->obj_desc->method.aml_length, info,
				       info->pass_number);
	if (ACPI_FAILURE(status)) {
		goto cleanup;
	}

	/* Propagate the module-level flag so the parser knows the context */

	if (info->obj_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL) {
		walk_state->parse_flags |= ACPI_PARSE_MODULE_LEVEL;
	}

	/* Info->Node is the default location to load the table */

	if (info->node && info->node != acpi_gbl_root_node) {
		status =
		    acpi_ds_scope_stack_push(info->node, ACPI_TYPE_METHOD,
					     walk_state);
		if (ACPI_FAILURE(status)) {
			goto cleanup;
		}
	}

	/*
	 * Parse the AML, walk_state will be deleted by parse_aml
	 * (so it must NOT be deleted again in the cleanup path below --
	 * hence the NULL assignment)
	 */
	acpi_ex_enter_interpreter();
	status = acpi_ps_parse_aml(walk_state);
	acpi_ex_exit_interpreter();
	walk_state = NULL;

cleanup:
	if (walk_state) {
		/* Only reached on the error paths before parse_aml ran */
		acpi_ds_delete_walk_state(walk_state);
	}
	if (op) {
		acpi_ps_delete_parse_tree(op);
	}
	return_ACPI_STATUS(status);
}