acpi_status acpi_ns_init_one_object ( acpi_handle obj_handle, u32 level, void *context, void **return_value) { acpi_object_type8 type; acpi_status status; acpi_init_walk_info *info = (acpi_init_walk_info *) context; acpi_namespace_node *node = (acpi_namespace_node *) obj_handle; acpi_operand_object *obj_desc; PROC_NAME ("Ns_init_one_object"); info->object_count++; /* And even then, we are only interested in a few object types */ type = acpi_ns_get_type (obj_handle); obj_desc = node->object; if (!obj_desc) { return (AE_OK); } if ((type != ACPI_TYPE_REGION) && (type != ACPI_TYPE_BUFFER_FIELD)) { return (AE_OK); } /* * Must lock the interpreter before executing AML code */ status = acpi_ex_enter_interpreter (); if (ACPI_FAILURE (status)) { return (status); } switch (type) { case ACPI_TYPE_REGION: info->op_region_count++; if (obj_desc->common.flags & AOPOBJ_DATA_VALID) { break; } info->op_region_init++; status = acpi_ds_get_region_arguments (obj_desc); if (ACPI_FAILURE (status)) { ACPI_DEBUG_PRINT_RAW ((ACPI_DB_ERROR, "\n")); ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "%s while getting region arguments [%4.4s]\n", acpi_format_exception (status), (char*)&node->name)); } if (!(acpi_dbg_level & ACPI_LV_INIT)) { ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, ".")); } break; case ACPI_TYPE_BUFFER_FIELD: info->field_count++; if (obj_desc->common.flags & AOPOBJ_DATA_VALID) { break; } info->field_init++; status = acpi_ds_get_buffer_field_arguments (obj_desc); if (ACPI_FAILURE (status)) { ACPI_DEBUG_PRINT_RAW ((ACPI_DB_ERROR, "\n")); ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "%s while getting buffer field arguments [%4.4s]\n", acpi_format_exception (status), (char*)&node->name)); } if (!(acpi_dbg_level & ACPI_LV_INIT)) { ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, ".")); } break; default: break; } /* * We ignore errors from above, and always return OK, since * we don't want to abort the walk on a single error. */ acpi_ex_exit_interpreter (); return (AE_OK); }
static ACPI_STATUS OptSearchToRoot ( ACPI_PARSE_OBJECT *Op, ACPI_WALK_STATE *WalkState, ACPI_NAMESPACE_NODE *CurrentNode, ACPI_NAMESPACE_NODE *TargetNode, ACPI_BUFFER *TargetPath, char **NewPath) { ACPI_NAMESPACE_NODE *Node; ACPI_GENERIC_STATE ScopeInfo; ACPI_STATUS Status; char *Path; ACPI_FUNCTION_NAME (OptSearchToRoot); /* * Check if search-to-root can be utilized. Use the last NameSeg of * the NamePath and 1) See if can be found and 2) If found, make * sure that it is the same node that we want. If there is another * name in the search path before the one we want, the nodes will * not match, and we cannot use this optimization. */ Path = &(((char *) TargetPath->Pointer)[ TargetPath->Length - ACPI_NAME_SIZE]), ScopeInfo.Scope.Node = CurrentNode; /* Lookup the NameSeg using SEARCH_PARENT (search-to-root) */ Status = AcpiNsLookup (&ScopeInfo, Path, ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT | ACPI_NS_DONT_OPEN_SCOPE, WalkState, &(Node)); if (ACPI_FAILURE (Status)) { return (Status); } /* * We found the name, but we must check to make sure that the node * matches. Otherwise, there is another identical name in the search * path that precludes the use of this optimization. */ if (Node != TargetNode) { /* * This means that another object with the same name was found first, * and we cannot use this optimization. */ return (AE_NOT_FOUND); } /* Found the node, we can use this optimization */ ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OPTIMIZATIONS, "NAMESEG: %-24s", Path)); /* We must allocate a new string for the name (TargetPath gets deleted) */ *NewPath = UtStringCacheCalloc (ACPI_NAME_SIZE + 1); strcpy (*NewPath, Path); if (strncmp (*NewPath, "_T_", 3)) { AslError (ASL_OPTIMIZATION, ASL_MSG_SINGLE_NAME_OPTIMIZATION, Op, *NewPath); } return (AE_OK); }
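/*
 * Illustrative sketch (not part of the compiler source): how the final
 * NameSeg is carved out of an already-built internal NamePath buffer by
 * pointing ACPI_NAME_SIZE (4) bytes back from its end, which is what
 * OptSearchToRoot does above before the single-segment lookup. The buffer
 * contents and names below are hypothetical.
 */
#include <stdio.h>
#include <string.h>

#define NAME_SIZE 4     /* mirrors ACPI_NAME_SIZE */

int
main (void)
{
    /* Pretend this is TargetPath->Pointer/Length for a path ending in LNKA */
    char    TargetPath[] = "_SB_PCI0LNKA";
    size_t  Length = strlen (TargetPath);
    char    *LastSeg;

    /* The last NameSeg starts NAME_SIZE bytes before the end of the path */
    LastSeg = &TargetPath[Length - NAME_SIZE];

    printf ("NAMESEG: %-24s\n", LastSeg);    /* prints "LNKA" */
    return (0);
}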
static ACPI_STATUS OptOptimizeNameDeclaration ( ACPI_PARSE_OBJECT *Op, ACPI_WALK_STATE *WalkState, ACPI_NAMESPACE_NODE *CurrentNode, ACPI_NAMESPACE_NODE *TargetNode, char *AmlNameString, char **NewPath) { ACPI_STATUS Status; char *NewPathExternal; ACPI_NAMESPACE_NODE *Node; ACPI_FUNCTION_TRACE (OptOptimizeNameDeclaration); if (((CurrentNode == AcpiGbl_RootNode) || (Op->Common.Parent->Asl.ParseOpcode == PARSEOP_DEFINITION_BLOCK)) && (ACPI_IS_ROOT_PREFIX (AmlNameString[0]))) { /* * The current scope is the root, and the namepath has a root prefix * that is therefore extraneous. Remove it. */ *NewPath = &AmlNameString[1]; /* Debug output */ Status = AcpiNsExternalizeName (ACPI_UINT32_MAX, *NewPath, NULL, &NewPathExternal); if (ACPI_FAILURE (Status)) { AslCoreSubsystemError (Op, Status, "Externalizing NamePath", ASL_NO_ABORT); return (Status); } /* * Check to make sure that the optimization finds the node we are * looking for. This is simply a sanity check on the new * path that has been created. * * We know that we are at the root, so NULL is used for the scope. */ Status = AcpiNsLookup (NULL, *NewPath, ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE, ACPI_NS_DONT_OPEN_SCOPE, WalkState, &(Node)); if (ACPI_SUCCESS (Status)) { /* Found the namepath, but make sure the node is correct */ if (Node == TargetNode) { /* The lookup matched the node, accept this optimization */ AslError (ASL_OPTIMIZATION, ASL_MSG_NAME_OPTIMIZATION, Op, NewPathExternal); ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OPTIMIZATIONS, "AT ROOT: %-24s", NewPathExternal)); } else { /* Node is not correct, do not use this optimization */ Status = AE_NOT_FOUND; ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OPTIMIZATIONS, " ***** WRONG NODE")); AslError (ASL_WARNING, ASL_MSG_COMPILER_INTERNAL, Op, "Not using optimized name - found wrong node"); } } else { /* The lookup failed, we obviously cannot use this optimization */ ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OPTIMIZATIONS, " ***** NOT FOUND")); AslError (ASL_WARNING, ASL_MSG_COMPILER_INTERNAL, Op, "Not using optimized name - did not find node"); } ACPI_FREE (NewPathExternal); return (Status); } /* Could not optimize */ return (AE_NOT_FOUND); }
ACPI_STATUS AcpiUtOsiImplementation ( ACPI_WALK_STATE *WalkState) { ACPI_OPERAND_OBJECT *StringDesc; ACPI_OPERAND_OBJECT *ReturnDesc; ACPI_INTERFACE_INFO *InterfaceInfo; ACPI_INTERFACE_HANDLER InterfaceHandler; ACPI_STATUS Status; UINT32 ReturnValue; ACPI_FUNCTION_TRACE (UtOsiImplementation); /* Validate the string input argument (from the AML caller) */ StringDesc = WalkState->Arguments[0].Object; if (!StringDesc || (StringDesc->Common.Type != ACPI_TYPE_STRING)) { return_ACPI_STATUS (AE_TYPE); } /* Create a return object */ ReturnDesc = AcpiUtCreateInternalObject (ACPI_TYPE_INTEGER); if (!ReturnDesc) { return_ACPI_STATUS (AE_NO_MEMORY); } /* Default return value is 0, NOT SUPPORTED */ ReturnValue = 0; Status = AcpiOsAcquireMutex (AcpiGbl_OsiMutex, ACPI_WAIT_FOREVER); if (ACPI_FAILURE (Status)) { AcpiUtRemoveReference (ReturnDesc); return_ACPI_STATUS (Status); } /* Lookup the interface in the global _OSI list */ InterfaceInfo = AcpiUtGetInterface (StringDesc->String.Pointer); if (InterfaceInfo && !(InterfaceInfo->Flags & ACPI_OSI_INVALID)) { /* * The interface is supported. * Update the OsiData if necessary. We keep track of the latest * version of Windows that has been requested by the BIOS. */ if (InterfaceInfo->Value > AcpiGbl_OsiData) { AcpiGbl_OsiData = InterfaceInfo->Value; } ReturnValue = ACPI_UINT32_MAX; } AcpiOsReleaseMutex (AcpiGbl_OsiMutex); /* * Invoke an optional _OSI interface handler. The host OS may wish * to do some interface-specific handling. For example, warn about * certain interfaces or override the true/false support value. */ InterfaceHandler = AcpiGbl_InterfaceHandler; if (InterfaceHandler) { ReturnValue = InterfaceHandler ( StringDesc->String.Pointer, ReturnValue); } ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "ACPI: BIOS _OSI(\"%s\") is %ssupported\n", StringDesc->String.Pointer, ReturnValue == 0 ? "not " : "")); /* Complete the return object */ ReturnDesc->Integer.Value = ReturnValue; WalkState->ReturnDesc = ReturnDesc; return_ACPI_STATUS (AE_OK); }
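/*
 * Host-side sketch (assumption, not from this file): the optional handler
 * invoked above through AcpiGbl_InterfaceHandler can be registered with the
 * public AcpiInstallInterfaceHandler interface. The handler receives the
 * requested _OSI string plus the true/false value ACPICA already computed,
 * and may override it. The handler name and the override policy below are
 * hypothetical examples.
 */
#include <string.h>
#include "acpi.h"

static UINT32
OsiPolicyHandler (
    ACPI_STRING             InterfaceName,
    UINT32                  Supported)
{
    /* Example policy: log every request, hide one interface string */

    AcpiOsPrintf ("_OSI(\"%s\") -> %s\n",
        InterfaceName, Supported ? "supported" : "not supported");

    if (!strcmp (InterfaceName, "FreeBSD"))     /* hypothetical string */
    {
        return (0);             /* report "not supported" for this one */
    }

    return (Supported);         /* otherwise keep ACPICA's answer */
}

/* Typically called once during host OS initialization (illustration) */
ACPI_STATUS
InstallOsiPolicy (void)
{
    return (AcpiInstallInterfaceHandler (OsiPolicyHandler));
}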
ACPI_STATUS AcpiEvCreateGpeBlock ( ACPI_NAMESPACE_NODE *GpeDevice, ACPI_GENERIC_ADDRESS *GpeBlockAddress, UINT32 RegisterCount, UINT8 GpeBlockBaseNumber, UINT32 InterruptNumber, ACPI_GPE_BLOCK_INFO **ReturnGpeBlock) { ACPI_STATUS Status; ACPI_GPE_BLOCK_INFO *GpeBlock; ACPI_GPE_WALK_INFO WalkInfo; ACPI_FUNCTION_TRACE (EvCreateGpeBlock); if (!RegisterCount) { return_ACPI_STATUS (AE_OK); } /* Allocate a new GPE block */ GpeBlock = ACPI_ALLOCATE_ZEROED (sizeof (ACPI_GPE_BLOCK_INFO)); if (!GpeBlock) { return_ACPI_STATUS (AE_NO_MEMORY); } /* Initialize the new GPE block */ GpeBlock->Node = GpeDevice; GpeBlock->GpeCount = (UINT16) (RegisterCount * ACPI_GPE_REGISTER_WIDTH); GpeBlock->Initialized = FALSE; GpeBlock->RegisterCount = RegisterCount; GpeBlock->BlockBaseNumber = GpeBlockBaseNumber; ACPI_MEMCPY (&GpeBlock->BlockAddress, GpeBlockAddress, sizeof (ACPI_GENERIC_ADDRESS)); /* * Create the RegisterInfo and EventInfo sub-structures * Note: disables and clears all GPEs in the block */ Status = AcpiEvCreateGpeInfoBlocks (GpeBlock); if (ACPI_FAILURE (Status)) { ACPI_FREE (GpeBlock); return_ACPI_STATUS (Status); } /* Install the new block in the global lists */ Status = AcpiEvInstallGpeBlock (GpeBlock, InterruptNumber); if (ACPI_FAILURE (Status)) { ACPI_FREE (GpeBlock->RegisterInfo); ACPI_FREE (GpeBlock->EventInfo); ACPI_FREE (GpeBlock); return_ACPI_STATUS (Status); } AcpiGbl_AllGpesInitialized = FALSE; /* Find all GPE methods (_Lxx or_Exx) for this block */ WalkInfo.GpeBlock = GpeBlock; WalkInfo.GpeDevice = GpeDevice; WalkInfo.ExecuteByOwnerId = FALSE; Status = AcpiNsWalkNamespace (ACPI_TYPE_METHOD, GpeDevice, ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK, AcpiEvMatchGpeMethod, NULL, &WalkInfo, NULL); /* Return the new block */ if (ReturnGpeBlock) { (*ReturnGpeBlock) = GpeBlock; } ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INIT, " Initialized GPE %02X to %02X [%4.4s] %u regs on interrupt 0x%X\n", (UINT32) GpeBlock->BlockBaseNumber, (UINT32) (GpeBlock->BlockBaseNumber + (GpeBlock->GpeCount - 1)), GpeDevice->Name.Ascii, GpeBlock->RegisterCount, InterruptNumber)); /* Update global count of currently available GPEs */ AcpiCurrentGpeCount += GpeBlock->GpeCount; return_ACPI_STATUS (AE_OK); }
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_initialize
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Initialize the GPE data structures and the FADT GPE 0/1 blocks
 *
 ******************************************************************************/

acpi_status acpi_ev_gpe_initialize(void)
{
	u32 register_count0 = 0;
	u32 register_count1 = 0;
	u32 gpe_number_max = 0;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_gpe_initialize);

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
			      "Initializing General Purpose Events (GPEs):\n"));

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * Initialize the GPE Block(s) defined in the FADT
	 *
	 * Why the GPE register block lengths are divided by 2: From the ACPI
	 * Spec, section "General-Purpose Event Registers", we have:
	 *
	 * "Each register block contains two registers of equal length
	 * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
	 * GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN
	 * The length of the GPE1_STS and GPE1_EN registers is equal to
	 * half the GPE1_LEN. If a generic register block is not supported
	 * then its respective block pointer and block length values in the
	 * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
	 * to be the same size."
	 */

	/*
	 * Determine the maximum GPE number for this machine.
	 *
	 * Note: both GPE0 and GPE1 are optional, and either can exist without
	 * the other.
	 *
	 * If EITHER the register length OR the block address are zero, then that
	 * particular block is not supported.
	 */
	if (acpi_gbl_FADT.gpe0_block_length && acpi_gbl_FADT.xgpe0_block.address) {

		/* GPE block 0 exists (has both length and address > 0) */

		register_count0 = (u16)(acpi_gbl_FADT.gpe0_block_length / 2);
		gpe_number_max = (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;

		/* Install GPE Block 0 */

		status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
						  acpi_gbl_FADT.xgpe0_block.address,
						  acpi_gbl_FADT.xgpe0_block.space_id,
						  register_count0, 0,
						  acpi_gbl_FADT.sci_interrupt,
						  &acpi_gbl_gpe_fadt_blocks[0]);

		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Could not create GPE Block 0"));
		}
	}

	if (acpi_gbl_FADT.gpe1_block_length && acpi_gbl_FADT.xgpe1_block.address) {

		/* GPE block 1 exists (has both length and address > 0) */

		register_count1 = (u16)(acpi_gbl_FADT.gpe1_block_length / 2);

		/* Check for GPE0/GPE1 overlap (if both banks exist) */

		if ((register_count0) &&
		    (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
			ACPI_ERROR((AE_INFO,
				    "GPE0 block (GPE 0 to %u) overlaps the GPE1 block "
				    "(GPE %u to %u) - Ignoring GPE1",
				    gpe_number_max, acpi_gbl_FADT.gpe1_base,
				    acpi_gbl_FADT.gpe1_base +
				    ((register_count1 *
				      ACPI_GPE_REGISTER_WIDTH) - 1)));

			/* Ignore GPE1 block by setting the register count to zero */

			register_count1 = 0;
		} else {
			/* Install GPE Block 1 */

			status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
							  acpi_gbl_FADT.xgpe1_block.address,
							  acpi_gbl_FADT.xgpe1_block.space_id,
							  register_count1,
							  acpi_gbl_FADT.gpe1_base,
							  acpi_gbl_FADT.sci_interrupt,
							  &acpi_gbl_gpe_fadt_blocks[1]);

			if (ACPI_FAILURE(status)) {
				ACPI_EXCEPTION((AE_INFO, status,
						"Could not create GPE Block 1"));
			}

			/*
			 * GPE0 and GPE1 do not have to be contiguous in the GPE number
			 * space. However, GPE0 always starts at GPE number zero.
			 */
			gpe_number_max = acpi_gbl_FADT.gpe1_base +
			    ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
		}
	}

	/* Exit if there are no GPE registers */

	if ((register_count0 + register_count1) == 0) {

		/* GPEs are not required by ACPI, this is OK */

		ACPI_DEBUG_PRINT((ACPI_DB_INIT,
				  "There are no GPE blocks defined in the FADT\n"));
		status = AE_OK;
		goto cleanup;
	}

cleanup:
	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return_ACPI_STATUS(AE_OK);
}
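/*
 * Worked example (illustration only, values made up): how the FADT GPE block
 * length maps to register and event counts in the code above. A GPE0 block
 * length of 8 bytes means 4 bytes of GPE0_STS plus 4 bytes of GPE0_EN, i.e.
 * 4 register pairs, each pair covering 8 GPEs.
 */
#include <stdio.h>

#define GPE_REGISTER_WIDTH	8	/* mirrors ACPI_GPE_REGISTER_WIDTH */

int main(void)
{
	unsigned int gpe0_block_length = 8;	/* hypothetical FADT GPE0_BLK_LEN */
	unsigned int register_count0;
	unsigned int gpe_number_max;

	/* Half of the block is GPE0_STS, the other half is GPE0_EN */
	register_count0 = gpe0_block_length / 2;

	/* Each register byte covers 8 GPEs; numbering starts at zero */
	gpe_number_max = (register_count0 * GPE_REGISTER_WIDTH) - 1;

	printf("GPE0: %u register pairs, GPE numbers 0x00-0x%02X\n",
	       register_count0, gpe_number_max);	/* 4 pairs, 0x00-0x1F */
	return 0;
}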
ACPI_STATUS AcpiDsScopeStackPush ( ACPI_NAMESPACE_NODE *Node, ACPI_OBJECT_TYPE Type, ACPI_WALK_STATE *WalkState) { ACPI_GENERIC_STATE *ScopeInfo; ACPI_GENERIC_STATE *OldScopeInfo; ACPI_FUNCTION_TRACE (DsScopeStackPush); if (!Node) { /* Invalid scope */ ACPI_ERROR ((AE_INFO, "Null scope parameter")); return_ACPI_STATUS (AE_BAD_PARAMETER); } /* Make sure object type is valid */ if (!AcpiUtValidObjectType (Type)) { ACPI_WARNING ((AE_INFO, "Invalid object type: 0x%X", Type)); } /* Allocate a new scope object */ ScopeInfo = AcpiUtCreateGenericState (); if (!ScopeInfo) { return_ACPI_STATUS (AE_NO_MEMORY); } /* Init new scope object */ ScopeInfo->Common.DescriptorType = ACPI_DESC_TYPE_STATE_WSCOPE; ScopeInfo->Scope.Node = Node; ScopeInfo->Common.Value = (UINT16) Type; WalkState->ScopeDepth++; ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[%.2d] Pushed scope ", (UINT32) WalkState->ScopeDepth)); OldScopeInfo = WalkState->ScopeInfo; if (OldScopeInfo) { ACPI_DEBUG_PRINT_RAW ((ACPI_DB_EXEC, "[%4.4s] (%s)", AcpiUtGetNodeName (OldScopeInfo->Scope.Node), AcpiUtGetTypeName (OldScopeInfo->Common.Value))); } else { ACPI_DEBUG_PRINT_RAW ((ACPI_DB_EXEC, "[\\___] (%s)", "ROOT")); } ACPI_DEBUG_PRINT_RAW ((ACPI_DB_EXEC, ", New scope -> [%4.4s] (%s)\n", AcpiUtGetNodeName (ScopeInfo->Scope.Node), AcpiUtGetTypeName (ScopeInfo->Common.Value))); /* Push new scope object onto stack */ AcpiUtPushGenericState (&WalkState->ScopeInfo, ScopeInfo); return_ACPI_STATUS (AE_OK); }
acpi_status acpi_ns_initialize_devices(void) { acpi_status status; struct acpi_device_walk_info info; ACPI_FUNCTION_TRACE(ns_initialize_devices); /* Init counters */ info.device_count = 0; info.num_STA = 0; info.num_INI = 0; ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "Initializing Device/Processor/Thermal objects " "and executing _INI/_STA methods:\n")); /* Tree analysis: find all subtrees that contain _INI methods */ status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, FALSE, acpi_ns_find_ini_methods, NULL, &info, NULL); if (ACPI_FAILURE(status)) { goto error_exit; } /* Allocate the evaluation information block */ info.evaluate_info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info)); if (!info.evaluate_info) { status = AE_NO_MEMORY; goto error_exit; } /* * Execute the "global" _INI method that may appear at the root. This * support is provided for Windows compatibility (Vista+) and is not * part of the ACPI specification. */ info.evaluate_info->prefix_node = acpi_gbl_root_node; info.evaluate_info->relative_pathname = METHOD_NAME__INI; info.evaluate_info->parameters = NULL; info.evaluate_info->flags = ACPI_IGNORE_RETURN_VALUE; status = acpi_ns_evaluate(info.evaluate_info); if (ACPI_SUCCESS(status)) { info.num_INI++; } /* Walk namespace to execute all _INIs on present devices */ status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, FALSE, acpi_ns_init_one_device, NULL, &info, NULL); /* * Any _OSI requests should be completed by now. If the BIOS has * requested any Windows OSI strings, we will always truncate * I/O addresses to 16 bits -- for Windows compatibility. */ if (acpi_gbl_osi_data >= ACPI_OSI_WIN_2000) { acpi_gbl_truncate_io_addresses = TRUE; } ACPI_FREE(info.evaluate_info); if (ACPI_FAILURE(status)) { goto error_exit; } ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, " Executed %u _INI methods requiring %u _STA executions " "(examined %u objects)\n", info.num_INI, info.num_STA, info.device_count)); return_ACPI_STATUS(status); error_exit: ACPI_EXCEPTION((AE_INFO, status, "During device initialization")); return_ACPI_STATUS(status); }
ACPI_STATUS AcpiDsInitializeObjects ( UINT32 TableIndex, ACPI_NAMESPACE_NODE *StartNode) { ACPI_STATUS Status; ACPI_INIT_WALK_INFO Info; ACPI_TABLE_HEADER *Table; ACPI_OWNER_ID OwnerId; ACPI_FUNCTION_TRACE (DsInitializeObjects); Status = AcpiTbGetOwnerId (TableIndex, &OwnerId); if (ACPI_FAILURE (Status)) { return_ACPI_STATUS (Status); } ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, "**** Starting initialization of namespace objects ****\n")); /* Set all init info to zero */ memset (&Info, 0, sizeof (ACPI_INIT_WALK_INFO)); Info.OwnerId = OwnerId; Info.TableIndex = TableIndex; /* Walk entire namespace from the supplied root */ /* * We don't use AcpiWalkNamespace since we do not want to acquire * the namespace reader lock. */ Status = AcpiNsWalkNamespace (ACPI_TYPE_ANY, StartNode, ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK, AcpiDsInitOneObject, NULL, &Info, NULL); if (ACPI_FAILURE (Status)) { ACPI_EXCEPTION ((AE_INFO, Status, "During WalkNamespace")); } Status = AcpiGetTableByIndex (TableIndex, &Table); if (ACPI_FAILURE (Status)) { return_ACPI_STATUS (Status); } /* DSDT is always the first AML table */ if (ACPI_COMPARE_NAME (Table->Signature, ACPI_SIG_DSDT)) { ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INIT, "\nInitializing Namespace objects:\n")); } /* Summary of objects initialized */ ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INIT, "Table [%4.4s: %-8.8s] (id %.2X) - %4u Objects with %3u Devices, " "%3u Regions, %4u Methods (%u/%u/%u Serial/Non/Cvt)\n", Table->Signature, Table->OemTableId, OwnerId, Info.ObjectCount, Info.DeviceCount,Info.OpRegionCount, Info.MethodCount, Info.SerialMethodCount, Info.NonSerialMethodCount, Info.SerializedMethodCount)); ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, "%u Methods, %u Regions\n", Info.MethodCount, Info.OpRegionCount)); return_ACPI_STATUS (AE_OK); }
acpi_status acpi_ex_access_region ( union acpi_operand_object *obj_desc, u32 field_datum_byte_offset, acpi_integer *value, u32 function) { acpi_status status; union acpi_operand_object *rgn_desc; acpi_physical_address address; ACPI_FUNCTION_TRACE ("ex_access_region"); /* * Ensure that the region operands are fully evaluated and verify * the validity of the request */ status = acpi_ex_setup_region (obj_desc, field_datum_byte_offset); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } /* * The physical address of this field datum is: * * 1) The base of the region, plus * 2) The base offset of the field, plus * 3) The current offset into the field */ rgn_desc = obj_desc->common_field.region_obj; address = rgn_desc->region.address + obj_desc->common_field.base_byte_offset + field_datum_byte_offset; if ((function & ACPI_IO_MASK) == ACPI_READ) { ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD, "[READ]")); } else { ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD, "[WRITE]")); } ACPI_DEBUG_PRINT_RAW ((ACPI_DB_BFIELD, " Region [%s:%X], Width %X, byte_base %X, Offset %X at %8.8X%8.8X\n", acpi_ut_get_region_name (rgn_desc->region.space_id), rgn_desc->region.space_id, obj_desc->common_field.access_byte_width, obj_desc->common_field.base_byte_offset, field_datum_byte_offset, ACPI_HIDWORD (address), ACPI_LODWORD (address))); /* Invoke the appropriate address_space/op_region handler */ status = acpi_ev_address_space_dispatch (rgn_desc, function, address, ACPI_MUL_8 (obj_desc->common_field.access_byte_width), value); if (ACPI_FAILURE (status)) { if (status == AE_NOT_IMPLEMENTED) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Region %s(%X) not implemented\n", acpi_ut_get_region_name (rgn_desc->region.space_id), rgn_desc->region.space_id)); } else if (status == AE_NOT_EXIST) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Region %s(%X) has no handler\n", acpi_ut_get_region_name (rgn_desc->region.space_id), rgn_desc->region.space_id)); } } return_ACPI_STATUS (status); }
acpi_status acpi_ds_call_control_method(struct acpi_thread_state *thread, struct acpi_walk_state *this_walk_state, union acpi_parse_object *op) { acpi_status status; struct acpi_namespace_node *method_node; struct acpi_walk_state *next_walk_state = NULL; union acpi_operand_object *obj_desc; struct acpi_evaluate_info *info; u32 i; ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state); ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Calling method %p, currentstate=%p\n", this_walk_state->prev_op, this_walk_state)); /* * Get the namespace entry for the control method we are about to call */ method_node = this_walk_state->method_call_node; if (!method_node) { return_ACPI_STATUS(AE_NULL_ENTRY); } obj_desc = acpi_ns_get_attached_object(method_node); if (!obj_desc) { return_ACPI_STATUS(AE_NULL_OBJECT); } /* Init for new method, possibly wait on method mutex */ status = acpi_ds_begin_method_execution(method_node, obj_desc, this_walk_state); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Begin method parse/execution. Create a new walk state */ next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id, NULL, obj_desc, thread); if (!next_walk_state) { status = AE_NO_MEMORY; goto cleanup; } /* * The resolved arguments were put on the previous walk state's operand * stack. Operands on the previous walk state stack always * start at index 0. Also, null terminate the list of arguments */ this_walk_state->operands[this_walk_state->num_operands] = NULL; /* * Allocate and initialize the evaluation information block * TBD: this is somewhat inefficient, should change interface to * ds_init_aml_walk. For now, keeps this struct off the CPU stack */ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info)); if (!info) { status = AE_NO_MEMORY; goto cleanup; } info->parameters = &this_walk_state->operands[0]; status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node, obj_desc->method.aml_start, obj_desc->method.aml_length, info, ACPI_IMODE_EXECUTE); ACPI_FREE(info); if (ACPI_FAILURE(status)) { goto cleanup; } next_walk_state->method_nesting_depth = this_walk_state->method_nesting_depth + 1; /* * Delete the operands on the previous walkstate operand stack * (they were copied to new objects) */ for (i = 0; i < obj_desc->method.param_count; i++) { acpi_ut_remove_reference(this_walk_state->operands[i]); this_walk_state->operands[i] = NULL; } /* Clear the operand stack */ this_walk_state->num_operands = 0; ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "**** Begin nested execution of [%4.4s] **** WalkState=%p\n", method_node->name.ascii, next_walk_state)); this_walk_state->method_pathname = acpi_ns_get_normalized_pathname(method_node, TRUE); this_walk_state->method_is_nested = TRUE; /* Optional object evaluation log */ ACPI_DEBUG_PRINT_RAW((ACPI_DB_EVALUATION, "%-26s: %*s%s\n", " Nested method call", next_walk_state->method_nesting_depth * 3, " ", &this_walk_state->method_pathname[1])); /* Invoke an internal method if necessary */ if (obj_desc->method.info_flags & ACPI_METHOD_INTERNAL_ONLY) { status = obj_desc->method.dispatch.implementation(next_walk_state); if (status == AE_OK) { status = AE_CTRL_TERMINATE; } } return_ACPI_STATUS(status); cleanup: /* On error, we must terminate the method properly */ acpi_ds_terminate_control_method(obj_desc, next_walk_state); acpi_ds_delete_walk_state(next_walk_state); return_ACPI_STATUS(status); }
void AcpiExDoDebugObject ( ACPI_OPERAND_OBJECT *SourceDesc, UINT32 Level, UINT32 Index) { UINT32 i; UINT32 Timer; ACPI_OPERAND_OBJECT *ObjectDesc; UINT32 Value; ACPI_FUNCTION_TRACE_PTR (ExDoDebugObject, SourceDesc); /* Output must be enabled via the DebugObject global or the DbgLevel */ if (!AcpiGbl_EnableAmlDebugObject && !(AcpiDbgLevel & ACPI_LV_DEBUG_OBJECT)) { return_VOID; } /* * We will emit the current timer value (in microseconds) with each * debug output. Only need the lower 26 bits. This allows for 67 * million microseconds or 67 seconds before rollover. */ Timer = ((UINT32) AcpiOsGetTimer () / 10); /* (100 nanoseconds to microseconds) */ Timer &= 0x03FFFFFF; /* * Print line header as long as we are not in the middle of an * object display */ if (!((Level > 0) && Index == 0)) { AcpiOsPrintf ("[ACPI Debug %.8u] %*s", Timer, Level, " "); } /* Display the index for package output only */ if (Index > 0) { AcpiOsPrintf ("(%.2u) ", Index-1); } if (!SourceDesc) { AcpiOsPrintf ("[Null Object]\n"); return_VOID; } if (ACPI_GET_DESCRIPTOR_TYPE (SourceDesc) == ACPI_DESC_TYPE_OPERAND) { AcpiOsPrintf ("%s ", AcpiUtGetObjectTypeName (SourceDesc)); if (!AcpiUtValidInternalObject (SourceDesc)) { AcpiOsPrintf ("%p, Invalid Internal Object!\n", SourceDesc); return_VOID; } } else if (ACPI_GET_DESCRIPTOR_TYPE (SourceDesc) == ACPI_DESC_TYPE_NAMED) { AcpiOsPrintf ("%s: %p\n", AcpiUtGetTypeName (((ACPI_NAMESPACE_NODE *) SourceDesc)->Type), SourceDesc); return_VOID; } else { return_VOID; } /* SourceDesc is of type ACPI_DESC_TYPE_OPERAND */ switch (SourceDesc->Common.Type) { case ACPI_TYPE_INTEGER: /* Output correct integer width */ if (AcpiGbl_IntegerByteWidth == 4) { AcpiOsPrintf ("0x%8.8X\n", (UINT32) SourceDesc->Integer.Value); } else { AcpiOsPrintf ("0x%8.8X%8.8X\n", ACPI_FORMAT_UINT64 (SourceDesc->Integer.Value)); } break; case ACPI_TYPE_BUFFER: AcpiOsPrintf ("[0x%.2X]\n", (UINT32) SourceDesc->Buffer.Length); AcpiUtDumpBuffer (SourceDesc->Buffer.Pointer, (SourceDesc->Buffer.Length < 256) ? 
SourceDesc->Buffer.Length : 256, DB_BYTE_DISPLAY, 0); break; case ACPI_TYPE_STRING: AcpiOsPrintf ("[0x%.2X] \"%s\"\n", SourceDesc->String.Length, SourceDesc->String.Pointer); break; case ACPI_TYPE_PACKAGE: AcpiOsPrintf ("[Contains 0x%.2X Elements]\n", SourceDesc->Package.Count); /* Output the entire contents of the package */ for (i = 0; i < SourceDesc->Package.Count; i++) { AcpiExDoDebugObject (SourceDesc->Package.Elements[i], Level+4, i+1); } break; case ACPI_TYPE_LOCAL_REFERENCE: AcpiOsPrintf ("[%s] ", AcpiUtGetReferenceName (SourceDesc)); /* Decode the reference */ switch (SourceDesc->Reference.Class) { case ACPI_REFCLASS_INDEX: AcpiOsPrintf ("0x%X\n", SourceDesc->Reference.Value); break; case ACPI_REFCLASS_TABLE: /* Case for DdbHandle */ AcpiOsPrintf ("Table Index 0x%X\n", SourceDesc->Reference.Value); return_VOID; default: break; } AcpiOsPrintf (" "); /* Check for valid node first, then valid object */ if (SourceDesc->Reference.Node) { if (ACPI_GET_DESCRIPTOR_TYPE (SourceDesc->Reference.Node) != ACPI_DESC_TYPE_NAMED) { AcpiOsPrintf (" %p - Not a valid namespace node\n", SourceDesc->Reference.Node); } else { AcpiOsPrintf ("Node %p [%4.4s] ", SourceDesc->Reference.Node, (SourceDesc->Reference.Node)->Name.Ascii); switch ((SourceDesc->Reference.Node)->Type) { /* These types have no attached object */ case ACPI_TYPE_DEVICE: AcpiOsPrintf ("Device\n"); break; case ACPI_TYPE_THERMAL: AcpiOsPrintf ("Thermal Zone\n"); break; default: AcpiExDoDebugObject ((SourceDesc->Reference.Node)->Object, Level+4, 0); break; } } } else if (SourceDesc->Reference.Object) { if (ACPI_GET_DESCRIPTOR_TYPE (SourceDesc->Reference.Object) == ACPI_DESC_TYPE_NAMED) { AcpiExDoDebugObject (((ACPI_NAMESPACE_NODE *) SourceDesc->Reference.Object)->Object, Level+4, 0); } else { ObjectDesc = SourceDesc->Reference.Object; Value = SourceDesc->Reference.Value; switch (ObjectDesc->Common.Type) { case ACPI_TYPE_BUFFER: AcpiOsPrintf ("Buffer[%u] = 0x%2.2X\n", Value, *SourceDesc->Reference.IndexPointer); break; case ACPI_TYPE_STRING: AcpiOsPrintf ("String[%u] = \"%c\" (0x%2.2X)\n", Value, *SourceDesc->Reference.IndexPointer, *SourceDesc->Reference.IndexPointer); break; case ACPI_TYPE_PACKAGE: AcpiOsPrintf ("Package[%u] = ", Value); AcpiExDoDebugObject (*SourceDesc->Reference.Where, Level+4, 0); break; default: AcpiOsPrintf ("Unknown Reference object type %X\n", ObjectDesc->Common.Type); break; } } } break; default: AcpiOsPrintf ("%p\n", SourceDesc); break; } ACPI_DEBUG_PRINT_RAW ((ACPI_DB_EXEC, "\n")); return_VOID; }
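/*
 * Illustration (not part of the interpreter): the line-header timer used by
 * AcpiExDoDebugObject above. AcpiOsGetTimer() returns 100-nanosecond ticks;
 * dividing by 10 gives microseconds, and keeping the low 26 bits allows about
 * 2^26 microseconds (~67 seconds) before the printed value wraps. The tick
 * value below is made up.
 */
#include <stdio.h>
#include <stdint.h>

int
main (void)
{
    uint64_t    RawTicks = 1234567890ULL;   /* hypothetical 100-ns tick count */
    uint32_t    Timer;

    Timer = ((uint32_t) RawTicks / 10);     /* 100 ns units -> microseconds */
    Timer &= 0x03FFFFFF;                    /* low 26 bits, ~67 s of range */

    printf ("[ACPI Debug %.8u]\n", Timer);
    return (0);
}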
acpi_status acpi_ds_initialize_objects(u32 table_index, struct acpi_namespace_node *start_node) { acpi_status status; struct acpi_init_walk_info info; struct acpi_table_header *table; acpi_owner_id owner_id; ACPI_FUNCTION_TRACE(ds_initialize_objects); status = acpi_tb_get_owner_id(table_index, &owner_id); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "**** Starting initialization of namespace objects ****\n")); /* Set all init info to zero */ memset(&info, 0, sizeof(struct acpi_init_walk_info)); info.owner_id = owner_id; info.table_index = table_index; /* Walk entire namespace from the supplied root */ /* * We don't use acpi_walk_namespace since we do not want to acquire * the namespace reader lock. */ status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK, acpi_ds_init_one_object, NULL, &info, NULL); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace")); } status = acpi_get_table_by_index(table_index, &table); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* DSDT is always the first AML table */ if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT)) { ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "\nInitializing Namespace objects:\n")); } /* Summary of objects initialized */ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "Table [%4.4s: %-8.8s] (id %.2X) - %4u Objects with %3u Devices, " "%3u Regions, %4u Methods (%u/%u/%u Serial/Non/Cvt)\n", table->signature, table->oem_table_id, owner_id, info.object_count, info.device_count, info.op_region_count, info.method_count, info.serial_method_count, info.non_serial_method_count, info.serialized_method_count)); ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "%u Methods, %u Regions\n", info.method_count, info.op_region_count)); return_ACPI_STATUS(AE_OK); }
acpi_status acpi_ns_init_one_device ( acpi_handle obj_handle, u32 nesting_level, void *context, void **return_value) { acpi_status status; acpi_namespace_node *node; u32 flags; acpi_device_walk_info *info = (acpi_device_walk_info *) context; FUNCTION_TRACE ("Ns_init_one_device"); if (!(acpi_dbg_level & ACPI_LV_INIT)) { ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, ".")); } info->device_count++; acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); node = acpi_ns_map_handle_to_node (obj_handle); if (!node) { acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); return (AE_BAD_PARAMETER); } acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); /* * Run _STA to determine if we can run _INI on the device. */ DEBUG_EXEC (acpi_ut_display_init_pathname (node, "_STA [Method]")); status = acpi_ut_execute_STA (node, &flags); if (ACPI_FAILURE (status)) { /* Ignore error and move on to next device */ return_ACPI_STATUS (AE_OK); } info->num_STA++; if (!(flags & 0x01)) { /* don't look at children of a not present device */ return_ACPI_STATUS(AE_CTRL_DEPTH); } /* * The device is present. Run _INI. */ DEBUG_EXEC (acpi_ut_display_init_pathname (obj_handle, "_INI [Method]")); status = acpi_ns_evaluate_relative (obj_handle, "_INI", NULL, NULL); if (AE_NOT_FOUND == status) { /* No _INI means device requires no initialization */ status = AE_OK; } else if (ACPI_FAILURE (status)) { /* Ignore error and move on to next device */ #ifdef ACPI_DEBUG NATIVE_CHAR *scope_name = acpi_ns_get_table_pathname (obj_handle); ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "%s._INI failed: %s\n", scope_name, acpi_format_exception (status))); ACPI_MEM_FREE (scope_name); #endif } else { /* Count of successful INIs */ info->num_INI++; } return_ACPI_STATUS (AE_OK); }
static void acpi_ex_do_debug_object(union acpi_operand_object *source_desc, u32 level, u32 index) { u32 i; ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc); ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[ACPI Debug] %*s", level, " ")); /* Display index for package output only */ if (index > 0) { ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "(%.2u) ", index - 1)); } if (!source_desc) { ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "<Null Object>\n")); return_VOID; } if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) { ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%s: ", acpi_ut_get_object_type_name (source_desc))); if (!acpi_ut_valid_internal_object(source_desc)) { ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%p, Invalid Internal Object!\n", source_desc)); return_VOID; } } else if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_NAMED) { ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%s: %p\n", acpi_ut_get_type_name(((struct acpi_namespace_node *)source_desc)-> type), source_desc)); return_VOID; } else { return_VOID; } switch (ACPI_GET_OBJECT_TYPE(source_desc)) { case ACPI_TYPE_INTEGER: /* Output correct integer width */ if (acpi_gbl_integer_byte_width == 4) { ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "0x%8.8X\n", (u32) source_desc->integer. value)); } else { ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "0x%8.8X%8.8X\n", ACPI_FORMAT_UINT64(source_desc-> integer. value))); } break; case ACPI_TYPE_BUFFER: ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[0x%.2X]\n", (u32) source_desc->buffer.length)); ACPI_DUMP_BUFFER(source_desc->buffer.pointer, (source_desc->buffer.length < 32) ? source_desc->buffer.length : 32); break; case ACPI_TYPE_STRING: ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[0x%.2X] \"%s\"\n", source_desc->string.length, source_desc->string.pointer)); break; case ACPI_TYPE_PACKAGE: ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[0x%.2X Elements]\n", source_desc->package.count)); /* Output the entire contents of the package */ for (i = 0; i < source_desc->package.count; i++) { acpi_ex_do_debug_object(source_desc->package. elements[i], level + 4, i + 1); } break; case ACPI_TYPE_LOCAL_REFERENCE: if (source_desc->reference.opcode == AML_INDEX_OP) { ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[%s, 0x%X]\n", acpi_ps_get_opcode_name (source_desc->reference.opcode), source_desc->reference.offset)); } else { ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[%s]\n", acpi_ps_get_opcode_name (source_desc->reference.opcode))); } if (source_desc->reference.object) { if (ACPI_GET_DESCRIPTOR_TYPE (source_desc->reference.object) == ACPI_DESC_TYPE_NAMED) { acpi_ex_do_debug_object(((struct acpi_namespace_node *) source_desc->reference. object)->object, level + 4, 0); } else { acpi_ex_do_debug_object(source_desc->reference. object, level + 4, 0); } } else if (source_desc->reference.node) { acpi_ex_do_debug_object((source_desc->reference.node)-> object, level + 4, 0); } break; default: ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%p %s\n", source_desc, acpi_ut_get_object_type_name (source_desc))); break; } ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "\n")); return_VOID; }
ACPI_STATUS AcpiTbLoadNamespace ( void) { ACPI_STATUS Status; UINT32 i; ACPI_TABLE_HEADER *NewDsdt; ACPI_TABLE_DESC *Table; UINT32 TablesLoaded = 0; UINT32 TablesFailed = 0; ACPI_FUNCTION_TRACE (TbLoadNamespace); (void) AcpiUtAcquireMutex (ACPI_MTX_TABLES); /* * Load the namespace. The DSDT is required, but any SSDT and * PSDT tables are optional. Verify the DSDT. */ Table = &AcpiGbl_RootTableList.Tables[AcpiGbl_DsdtIndex]; if (!AcpiGbl_RootTableList.CurrentTableCount || !ACPI_COMPARE_NAME (Table->Signature.Ascii, ACPI_SIG_DSDT) || ACPI_FAILURE (AcpiTbValidateTable (Table))) { Status = AE_NO_ACPI_TABLES; goto UnlockAndExit; } /* * Save the DSDT pointer for simple access. This is the mapped memory * address. We must take care here because the address of the .Tables * array can change dynamically as tables are loaded at run-time. Note: * .Pointer field is not validated until after call to AcpiTbValidateTable. */ AcpiGbl_DSDT = Table->Pointer; /* * Optionally copy the entire DSDT to local memory (instead of simply * mapping it.) There are some BIOSs that corrupt or replace the original * DSDT, creating the need for this option. Default is FALSE, do not copy * the DSDT. */ if (AcpiGbl_CopyDsdtLocally) { NewDsdt = AcpiTbCopyDsdt (AcpiGbl_DsdtIndex); if (NewDsdt) { AcpiGbl_DSDT = NewDsdt; } } /* * Save the original DSDT header for detection of table corruption * and/or replacement of the DSDT from outside the OS. */ memcpy (&AcpiGbl_OriginalDsdtHeader, AcpiGbl_DSDT, sizeof (ACPI_TABLE_HEADER)); (void) AcpiUtReleaseMutex (ACPI_MTX_TABLES); /* Load and parse tables */ Status = AcpiNsLoadTable (AcpiGbl_DsdtIndex, AcpiGbl_RootNode); if (ACPI_FAILURE (Status)) { ACPI_EXCEPTION ((AE_INFO, Status, "[DSDT] table load failed")); TablesFailed++; } else { TablesLoaded++; } /* Load any SSDT or PSDT tables. Note: Loop leaves tables locked */ (void) AcpiUtAcquireMutex (ACPI_MTX_TABLES); for (i = 0; i < AcpiGbl_RootTableList.CurrentTableCount; ++i) { Table = &AcpiGbl_RootTableList.Tables[i]; if (!AcpiGbl_RootTableList.Tables[i].Address || (!ACPI_COMPARE_NAME (Table->Signature.Ascii, ACPI_SIG_SSDT) && !ACPI_COMPARE_NAME (Table->Signature.Ascii, ACPI_SIG_PSDT) && !ACPI_COMPARE_NAME (Table->Signature.Ascii, ACPI_SIG_OSDT)) || ACPI_FAILURE (AcpiTbValidateTable (Table))) { continue; } /* Ignore errors while loading tables, get as many as possible */ (void) AcpiUtReleaseMutex (ACPI_MTX_TABLES); Status = AcpiNsLoadTable (i, AcpiGbl_RootNode); if (ACPI_FAILURE (Status)) { ACPI_EXCEPTION ((AE_INFO, Status, "(%4.4s:%8.8s) while loading table", Table->Signature.Ascii, Table->Pointer->OemTableId)); TablesFailed++; ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INIT, "Table [%4.4s:%8.8s] (id FF) - Table namespace load failed\n\n", Table->Signature.Ascii, Table->Pointer->OemTableId)); } else { TablesLoaded++; } (void) AcpiUtAcquireMutex (ACPI_MTX_TABLES); } if (!TablesFailed) { ACPI_INFO (( "%u ACPI AML tables successfully acquired and loaded\n", TablesLoaded)); } else { ACPI_ERROR ((AE_INFO, "%u table load failures, %u successful", TablesFailed, TablesLoaded)); /* Indicate at least one failure */ Status = AE_CTRL_TERMINATE; } UnlockAndExit: (void) AcpiUtReleaseMutex (ACPI_MTX_TABLES); return_ACPI_STATUS (Status); }
ACPI_STATUS AcpiDsInitOneObject ( ACPI_HANDLE ObjHandle, UINT32 Level, void *Context, void **ReturnValue) { ACPI_OBJECT_TYPE Type; ACPI_STATUS Status; ACPI_INIT_WALK_INFO *Info = (ACPI_INIT_WALK_INFO *) Context; ACPI_FUNCTION_NAME ("DsInitOneObject"); /* * We are only interested in objects owned by the table that * was just loaded */ if (((ACPI_NAMESPACE_NODE *) ObjHandle)->OwnerId != Info->TableDesc->TableId) { return (AE_OK); } Info->ObjectCount++; /* And even then, we are only interested in a few object types */ Type = AcpiNsGetType (ObjHandle); switch (Type) { case ACPI_TYPE_REGION: Status = AcpiDsInitializeRegion (ObjHandle); if (ACPI_FAILURE (Status)) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Region %p [%4.4s] - Init failure, %s\n", ObjHandle, ((ACPI_NAMESPACE_NODE *) ObjHandle)->Name.Ascii, AcpiFormatException (Status))); } Info->OpRegionCount++; break; case ACPI_TYPE_METHOD: Info->MethodCount++; /* Print a dot for each method unless we are going to print the entire pathname */ if (!(AcpiDbgLevel & ACPI_LV_INIT_NAMES)) { ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INIT, ".")); } /* * Set the execution data width (32 or 64) based upon the * revision number of the parent ACPI table. * TBD: This is really for possible future support of integer width * on a per-table basis. Currently, we just use a global for the width. */ if (Info->TableDesc->Pointer->Revision == 1) { ((ACPI_NAMESPACE_NODE *) ObjHandle)->Flags |= ANOBJ_DATA_WIDTH_32; } /* * Always parse methods to detect errors, we may delete * the parse tree below */ Status = AcpiDsParseMethod (ObjHandle); if (ACPI_FAILURE (Status)) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Method %p [%4.4s] - parse failure, %s\n", ObjHandle, ((ACPI_NAMESPACE_NODE *) ObjHandle)->Name.Ascii, AcpiFormatException (Status))); /* This parse failed, but we will continue parsing more methods */ break; } /* * Delete the parse tree. We simple re-parse the method * for every execution since there isn't much overhead */ AcpiNsDeleteNamespaceSubtree (ObjHandle); AcpiNsDeleteNamespaceByOwner (((ACPI_NAMESPACE_NODE *) ObjHandle)->Object->Method.OwningId); break; case ACPI_TYPE_DEVICE: Info->DeviceCount++; break; default: break; } /* * We ignore errors from above, and always return OK, since * we don't want to abort the walk on a single error. */ return (AE_OK); }
ACPI_STATUS AcpiNsInitializeDevices ( void) { ACPI_STATUS Status; ACPI_DEVICE_WALK_INFO Info; ACPI_FUNCTION_TRACE (NsInitializeDevices); /* Init counters */ Info.DeviceCount = 0; Info.Num_STA = 0; Info.Num_INI = 0; ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INIT, "Initializing Device/Processor/Thermal objects " "and executing _INI/_STA methods:\n")); /* Tree analysis: find all subtrees that contain _INI methods */ Status = AcpiNsWalkNamespace (ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, FALSE, AcpiNsFindIniMethods, NULL, &Info, NULL); if (ACPI_FAILURE (Status)) { goto ErrorExit; } /* Allocate the evaluation information block */ Info.EvaluateInfo = ACPI_ALLOCATE_ZEROED (sizeof (ACPI_EVALUATE_INFO)); if (!Info.EvaluateInfo) { Status = AE_NO_MEMORY; goto ErrorExit; } /* * Execute the "global" _INI method that may appear at the root. This * support is provided for Windows compatibility (Vista+) and is not * part of the ACPI specification. */ Info.EvaluateInfo->PrefixNode = AcpiGbl_RootNode; Info.EvaluateInfo->Pathname = METHOD_NAME__INI; Info.EvaluateInfo->Parameters = NULL; Info.EvaluateInfo->Flags = ACPI_IGNORE_RETURN_VALUE; Status = AcpiNsEvaluate (Info.EvaluateInfo); if (ACPI_SUCCESS (Status)) { Info.Num_INI++; } /* Walk namespace to execute all _INIs on present devices */ Status = AcpiNsWalkNamespace (ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, FALSE, AcpiNsInitOneDevice, NULL, &Info, NULL); /* * Any _OSI requests should be completed by now. If the BIOS has * requested any Windows OSI strings, we will always truncate * I/O addresses to 16 bits -- for Windows compatibility. */ if (AcpiGbl_OsiData >= ACPI_OSI_WIN_2000) { AcpiGbl_TruncateIoAddresses = TRUE; } ACPI_FREE (Info.EvaluateInfo); if (ACPI_FAILURE (Status)) { goto ErrorExit; } ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INIT, " Executed %u _INI methods requiring %u _STA executions " "(examined %u objects)\n", Info.Num_INI, Info.Num_STA, Info.DeviceCount)); return_ACPI_STATUS (Status); ErrorExit: ACPI_EXCEPTION ((AE_INFO, Status, "During device initialization")); return_ACPI_STATUS (Status); }
/******************************************************************************* * * FUNCTION: acpi_tb_load_namespace * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Load the namespace from the DSDT and all SSDTs/PSDTs found in * the RSDT/XSDT. * ******************************************************************************/ acpi_status acpi_tb_load_namespace(void) { acpi_status status; u32 i; struct acpi_table_header *new_dsdt; struct acpi_table_desc *table; u32 tables_loaded = 0; u32 tables_failed = 0; ACPI_FUNCTION_TRACE(tb_load_namespace); (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); /* * Load the namespace. The DSDT is required, but any SSDT and * PSDT tables are optional. Verify the DSDT. */ table = &acpi_gbl_root_table_list.tables[acpi_gbl_dsdt_index]; if (!acpi_gbl_root_table_list.current_table_count || !ACPI_COMPARE_NAMESEG(table->signature.ascii, ACPI_SIG_DSDT) || ACPI_FAILURE(acpi_tb_validate_table(table))) { status = AE_NO_ACPI_TABLES; goto unlock_and_exit; } /* * Save the DSDT pointer for simple access. This is the mapped memory * address. We must take care here because the address of the .Tables * array can change dynamically as tables are loaded at run-time. Note: * .Pointer field is not validated until after call to acpi_tb_validate_table. */ acpi_gbl_DSDT = table->pointer; /* * Optionally copy the entire DSDT to local memory (instead of simply * mapping it.) There are some BIOSs that corrupt or replace the original * DSDT, creating the need for this option. Default is FALSE, do not copy * the DSDT. */ if (acpi_gbl_copy_dsdt_locally) { new_dsdt = acpi_tb_copy_dsdt(acpi_gbl_dsdt_index); if (new_dsdt) { acpi_gbl_DSDT = new_dsdt; } } /* * Save the original DSDT header for detection of table corruption * and/or replacement of the DSDT from outside the OS. */ memcpy(&acpi_gbl_original_dsdt_header, acpi_gbl_DSDT, sizeof(struct acpi_table_header)); /* Load and parse tables */ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); status = acpi_ns_load_table(acpi_gbl_dsdt_index, acpi_gbl_root_node); (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "[DSDT] table load failed")); tables_failed++; } else { tables_loaded++; } /* Load any SSDT or PSDT tables. 
Note: Loop leaves tables locked */ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) { table = &acpi_gbl_root_table_list.tables[i]; if (!table->address || (!ACPI_COMPARE_NAMESEG (table->signature.ascii, ACPI_SIG_SSDT) && !ACPI_COMPARE_NAMESEG(table->signature.ascii, ACPI_SIG_PSDT) && !ACPI_COMPARE_NAMESEG(table->signature.ascii, ACPI_SIG_OSDT)) || ACPI_FAILURE(acpi_tb_validate_table(table))) { continue; } /* Ignore errors while loading tables, get as many as possible */ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); status = acpi_ns_load_table(i, acpi_gbl_root_node); (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "(%4.4s:%8.8s) while loading table", table->signature.ascii, table->pointer->oem_table_id)); tables_failed++; ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "Table [%4.4s:%8.8s] (id FF) - Table namespace load failed\n\n", table->signature.ascii, table->pointer->oem_table_id)); } else { tables_loaded++; } } if (!tables_failed) { ACPI_INFO(("%u ACPI AML tables successfully acquired and loaded", tables_loaded)); } else { ACPI_ERROR((AE_INFO, "%u table load failures, %u successful", tables_failed, tables_loaded)); /* Indicate at least one failure */ status = AE_CTRL_TERMINATE; } #ifdef ACPI_APPLICATION ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "\n")); #endif unlock_and_exit: (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); return_ACPI_STATUS(status); }
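/*
 * Illustration (not from this file): the 4-character signature filter applied
 * by the table-load loop above. After the DSDT, only SSDT, PSDT and OSDT
 * tables are loaded; everything else (FACP, APIC, ...) is skipped. The helper
 * name and the sample signatures are hypothetical; the real code uses
 * ACPI_COMPARE_NAMESEG.
 */
#include <stdio.h>
#include <string.h>

static int is_loadable_secondary_table(const char *sig)
{
	return !strncmp(sig, "SSDT", 4) ||
	       !strncmp(sig, "PSDT", 4) ||
	       !strncmp(sig, "OSDT", 4);
}

int main(void)
{
	const char *samples[] = { "SSDT", "APIC", "PSDT", "FACP" };
	unsigned int i;

	for (i = 0; i < 4; i++) {
		printf("%.4s: %s\n", samples[i],
		       is_loadable_secondary_table(samples[i]) ? "load" : "skip");
	}
	return 0;
}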
ACPI_STATUS AcpiDsInitializeObjects ( UINT32 TableIndex, ACPI_NAMESPACE_NODE *StartNode) { ACPI_STATUS Status; ACPI_INIT_WALK_INFO Info; ACPI_TABLE_HEADER *Table; ACPI_OWNER_ID OwnerId; ACPI_FUNCTION_TRACE (DsInitializeObjects); Status = AcpiTbGetOwnerId (TableIndex, &OwnerId); if (ACPI_FAILURE (Status)) { return_ACPI_STATUS (Status); } ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, "**** Starting initialization of namespace objects ****\n")); ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INIT, "Parsing all Control Methods:")); /* Set all init info to zero */ ACPI_MEMSET (&Info, 0, sizeof (ACPI_INIT_WALK_INFO)); Info.OwnerId = OwnerId; Info.TableIndex = TableIndex; /* Walk entire namespace from the supplied root */ Status = AcpiUtAcquireMutex (ACPI_MTX_NAMESPACE); if (ACPI_FAILURE (Status)) { return_ACPI_STATUS (Status); } /* * We don't use AcpiWalkNamespace since we do not want to acquire * the namespace reader lock. */ Status = AcpiNsWalkNamespace (ACPI_TYPE_ANY, StartNode, ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK, AcpiDsInitOneObject, NULL, &Info, NULL); if (ACPI_FAILURE (Status)) { ACPI_EXCEPTION ((AE_INFO, Status, "During WalkNamespace")); } (void) AcpiUtReleaseMutex (ACPI_MTX_NAMESPACE); Status = AcpiGetTableByIndex (TableIndex, &Table); if (ACPI_FAILURE (Status)) { return_ACPI_STATUS (Status); } ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INIT, "\nTable [%4.4s](id %4.4X) - %u Objects with %u Devices %u Methods %u Regions\n", Table->Signature, OwnerId, Info.ObjectCount, Info.DeviceCount, Info.MethodCount, Info.OpRegionCount)); ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, "%u Methods, %u Regions\n", Info.MethodCount, Info.OpRegionCount)); return_ACPI_STATUS (AE_OK); }
ACPI_STATUS AcpiExAccessRegion ( ACPI_OPERAND_OBJECT *ObjDesc, UINT32 FieldDatumByteOffset, UINT64 *Value, UINT32 Function) { ACPI_STATUS Status; ACPI_OPERAND_OBJECT *RgnDesc; UINT32 RegionOffset; ACPI_FUNCTION_TRACE (ExAccessRegion); /* * Ensure that the region operands are fully evaluated and verify * the validity of the request */ Status = AcpiExSetupRegion (ObjDesc, FieldDatumByteOffset); if (ACPI_FAILURE (Status)) { return_ACPI_STATUS (Status); } /* * The physical address of this field datum is: * * 1) The base of the region, plus * 2) The base offset of the field, plus * 3) The current offset into the field */ RgnDesc = ObjDesc->CommonField.RegionObj; RegionOffset = ObjDesc->CommonField.BaseByteOffset + FieldDatumByteOffset; if ((Function & ACPI_IO_MASK) == ACPI_READ) { ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD, "[READ]")); } else { ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD, "[WRITE]")); } ACPI_DEBUG_PRINT_RAW ((ACPI_DB_BFIELD, " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %8.8X%8.8X\n", AcpiUtGetRegionName (RgnDesc->Region.SpaceId), RgnDesc->Region.SpaceId, ObjDesc->CommonField.AccessByteWidth, ObjDesc->CommonField.BaseByteOffset, FieldDatumByteOffset, ACPI_FORMAT_UINT64 (RgnDesc->Region.Address + RegionOffset))); /* Invoke the appropriate AddressSpace/OpRegion handler */ Status = AcpiEvAddressSpaceDispatch (RgnDesc, ObjDesc, Function, RegionOffset, ACPI_MUL_8 (ObjDesc->CommonField.AccessByteWidth), Value); if (ACPI_FAILURE (Status)) { if (Status == AE_NOT_IMPLEMENTED) { ACPI_ERROR ((AE_INFO, "Region %s (ID=%u) not implemented", AcpiUtGetRegionName (RgnDesc->Region.SpaceId), RgnDesc->Region.SpaceId)); } else if (Status == AE_NOT_EXIST) { ACPI_ERROR ((AE_INFO, "Region %s (ID=%u) has no handler", AcpiUtGetRegionName (RgnDesc->Region.SpaceId), RgnDesc->Region.SpaceId)); } } return_ACPI_STATUS (Status); }
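/*
 * Worked example (illustration only): the address arithmetic described in the
 * comment above -- region base, plus the field's base byte offset, plus the
 * current offset into the field. The region base and offsets below are
 * invented values.
 */
#include <stdio.h>
#include <stdint.h>

int
main (void)
{
    uint64_t    RegionBase          = 0xFED40000;   /* hypothetical OpRegion base */
    uint32_t    FieldBaseByteOffset = 0x10;         /* field start within region */
    uint32_t    FieldDatumOffset    = 0x04;         /* current datum within field */
    uint64_t    PhysicalAddress;

    /* 1) region base + 2) field base offset + 3) offset into the field */
    PhysicalAddress = RegionBase + FieldBaseByteOffset + FieldDatumOffset;

    printf ("Datum at %8.8X%8.8X\n",
        (uint32_t) (PhysicalAddress >> 32), (uint32_t) PhysicalAddress);
    return (0);
}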
static acpi_status acpi_ns_init_one_object(acpi_handle obj_handle, u32 level, void *context, void **return_value) { acpi_object_type type; acpi_status status = AE_OK; struct acpi_init_walk_info *info = (struct acpi_init_walk_info *)context; struct acpi_namespace_node *node = (struct acpi_namespace_node *)obj_handle; union acpi_operand_object *obj_desc; ACPI_FUNCTION_NAME(ns_init_one_object); info->object_count++; /* And even then, we are only interested in a few object types */ type = acpi_ns_get_type(obj_handle); obj_desc = acpi_ns_get_attached_object(node); if (!obj_desc) { return (AE_OK); } /* Increment counters for object types we are looking for */ switch (type) { case ACPI_TYPE_REGION: info->op_region_count++; break; case ACPI_TYPE_BUFFER_FIELD: info->field_count++; break; case ACPI_TYPE_LOCAL_BANK_FIELD: info->field_count++; break; case ACPI_TYPE_BUFFER: info->buffer_count++; break; case ACPI_TYPE_PACKAGE: info->package_count++; break; default: /* No init required, just exit now */ return (AE_OK); } /* * If the object is already initialized, nothing else to do */ if (obj_desc->common.flags & AOPOBJ_DATA_VALID) { return (AE_OK); } /* * Must lock the interpreter before executing AML code */ acpi_ex_enter_interpreter(); /* * Each of these types can contain executable AML code within the * declaration. */ switch (type) { case ACPI_TYPE_REGION: info->op_region_init++; status = acpi_ds_get_region_arguments(obj_desc); break; case ACPI_TYPE_BUFFER_FIELD: info->field_init++; status = acpi_ds_get_buffer_field_arguments(obj_desc); break; case ACPI_TYPE_LOCAL_BANK_FIELD: info->field_init++; status = acpi_ds_get_bank_field_arguments(obj_desc); break; case ACPI_TYPE_BUFFER: info->buffer_init++; status = acpi_ds_get_buffer_arguments(obj_desc); break; case ACPI_TYPE_PACKAGE: info->package_init++; status = acpi_ds_get_package_arguments(obj_desc); break; default: /* No other types can get here */ break; } if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Could not execute arguments for [%4.4s] (%s)", acpi_ut_get_node_name(node), acpi_ut_get_type_name(type))); } /* * Print a dot for each object unless we are going to print the entire * pathname */ if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) { ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, ".")); } /* * We ignore errors from above, and always return OK, since we don't want * to abort the walk on any single error. */ acpi_ex_exit_interpreter(); return (AE_OK); }
ACPI_STATUS
AcpiNsInitializeDevices (
    UINT32                  Flags)
{
    ACPI_STATUS             Status = AE_OK;
    ACPI_DEVICE_WALK_INFO   Info;
    ACPI_HANDLE             Handle;


    ACPI_FUNCTION_TRACE (NsInitializeDevices);


    if (!(Flags & ACPI_NO_DEVICE_INIT))
    {
        ACPI_DEBUG_PRINT ((ACPI_DB_EXEC,
            "[Init] Initializing ACPI Devices\n"));

        /* Init counters */

        Info.DeviceCount = 0;
        Info.Num_STA = 0;
        Info.Num_INI = 0;

        ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INIT,
            "Initializing Device/Processor/Thermal objects "
            "and executing _INI/_STA methods:\n"));

        /* Tree analysis: find all subtrees that contain _INI methods */

        Status = AcpiNsWalkNamespace (ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
            ACPI_UINT32_MAX, FALSE, AcpiNsFindIniMethods, NULL, &Info, NULL);
        if (ACPI_FAILURE (Status))
        {
            goto ErrorExit;
        }

        /* Allocate the evaluation information block */

        Info.EvaluateInfo = ACPI_ALLOCATE_ZEROED (sizeof (ACPI_EVALUATE_INFO));
        if (!Info.EvaluateInfo)
        {
            Status = AE_NO_MEMORY;
            goto ErrorExit;
        }

        /*
         * Execute the "global" _INI method that may appear at the root.
         * This support is provided for Windows compatibility (Vista+) and
         * is not part of the ACPI specification.
         */
        Info.EvaluateInfo->PrefixNode = AcpiGbl_RootNode;
        Info.EvaluateInfo->RelativePathname = METHOD_NAME__INI;
        Info.EvaluateInfo->Parameters = NULL;
        Info.EvaluateInfo->Flags = ACPI_IGNORE_RETURN_VALUE;

        Status = AcpiNsEvaluate (Info.EvaluateInfo);
        if (ACPI_SUCCESS (Status))
        {
            Info.Num_INI++;
        }

        /*
         * Execute \_SB._INI.
         * There appears to be a strict order requirement for \_SB._INI,
         * which should be evaluated before any _REG evaluations.
         */
        Status = AcpiGetHandle (NULL, "\\_SB", &Handle);
        if (ACPI_SUCCESS (Status))
        {
            memset (Info.EvaluateInfo, 0, sizeof (ACPI_EVALUATE_INFO));
            Info.EvaluateInfo->PrefixNode = Handle;
            Info.EvaluateInfo->RelativePathname = METHOD_NAME__INI;
            Info.EvaluateInfo->Parameters = NULL;
            Info.EvaluateInfo->Flags = ACPI_IGNORE_RETURN_VALUE;

            Status = AcpiNsEvaluate (Info.EvaluateInfo);
            if (ACPI_SUCCESS (Status))
            {
                Info.Num_INI++;
            }
        }
    }

    /*
     * Run all _REG methods
     *
     * Note: Any objects accessed by the _REG methods will be automatically
     * initialized, even if they contain executable AML (see the call to
     * AcpiNsInitializeObjects below).
     *
     * Note: According to the ACPI specification, we actually needn't execute
     * _REG for SystemMemory/SystemIo operation regions, but for PCI_Config
     * operation regions, it is required to evaluate _REG for those on a PCI
     * root bus that doesn't contain _BBN object. So this code is kept here
     * in order not to break things.
     */
    if (!(Flags & ACPI_NO_ADDRESS_SPACE_INIT))
    {
        ACPI_DEBUG_PRINT ((ACPI_DB_EXEC,
            "[Init] Executing _REG OpRegion methods\n"));

        Status = AcpiEvInitializeOpRegions ();
        if (ACPI_FAILURE (Status))
        {
            goto ErrorExit;
        }
    }

    if (!(Flags & ACPI_NO_DEVICE_INIT))
    {
        /* Walk namespace to execute all _INIs on present devices */

        Status = AcpiNsWalkNamespace (ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
            ACPI_UINT32_MAX, FALSE, AcpiNsInitOneDevice, NULL, &Info, NULL);

        /*
         * Any _OSI requests should be completed by now. If the BIOS has
         * requested any Windows OSI strings, we will always truncate
         * I/O addresses to 16 bits -- for Windows compatibility.
         */
        if (AcpiGbl_OsiData >= ACPI_OSI_WIN_2000)
        {
            AcpiGbl_TruncateIoAddresses = TRUE;
        }

        ACPI_FREE (Info.EvaluateInfo);
        if (ACPI_FAILURE (Status))
        {
            goto ErrorExit;
        }

        ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INIT,
            " Executed %u _INI methods requiring %u _STA executions "
            "(examined %u objects)\n",
            Info.Num_INI, Info.Num_STA, Info.DeviceCount));
    }

    return_ACPI_STATUS (Status);


ErrorExit:
    ACPI_EXCEPTION ((AE_INFO, Status, "During device initialization"));
    return_ACPI_STATUS (Status);
}
static acpi_status acpi_ns_init_one_device(acpi_handle obj_handle, u32 nesting_level, void *context, void **return_value) { struct acpi_device_walk_info *walk_info = ACPI_CAST_PTR(struct acpi_device_walk_info, context); struct acpi_evaluate_info *info = walk_info->evaluate_info; u32 flags; acpi_status status; struct acpi_namespace_node *device_node; ACPI_FUNCTION_TRACE(ns_init_one_device); /* We are interested in Devices, Processors and thermal_zones only */ device_node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle); if ((device_node->type != ACPI_TYPE_DEVICE) && (device_node->type != ACPI_TYPE_PROCESSOR) && (device_node->type != ACPI_TYPE_THERMAL)) { return_ACPI_STATUS(AE_OK); } /* * Because of an earlier namespace analysis, all subtrees that contain an * _INI method are tagged. * * If this device subtree does not contain any _INI methods, we * can exit now and stop traversing this entire subtree. */ if (!(device_node->flags & ANOBJ_SUBTREE_HAS_INI)) { return_ACPI_STATUS(AE_CTRL_DEPTH); } /* * Run _STA to determine if this device is present and functioning. We * must know this information for two important reasons (from ACPI spec): * * 1) We can only run _INI if the device is present. * 2) We must abort the device tree walk on this subtree if the device is * not present and is not functional (we will not examine the children) * * The _STA method is not required to be present under the device, we * assume the device is present if _STA does not exist. */ ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname (ACPI_TYPE_METHOD, device_node, METHOD_NAME__STA)); status = acpi_ut_execute_STA(device_node, &flags); if (ACPI_FAILURE(status)) { /* Ignore error and move on to next device */ return_ACPI_STATUS(AE_OK); } /* * Flags == -1 means that _STA was not found. In this case, we assume that * the device is both present and functional. * * From the ACPI spec, description of _STA: * * "If a device object (including the processor object) does not have an * _STA object, then OSPM assumes that all of the above bits are set (in * other words, the device is present, ..., and functioning)" */ if (flags != ACPI_UINT32_MAX) { walk_info->num_STA++; } /* * Examine the PRESENT and FUNCTIONING status bits * * Note: ACPI spec does not seem to specify behavior for the present but * not functioning case, so we assume functioning if present. */ if (!(flags & ACPI_STA_DEVICE_PRESENT)) { /* Device is not present, we must examine the Functioning bit */ if (flags & ACPI_STA_DEVICE_FUNCTIONING) { /* * Device is not present but is "functioning". In this case, * we will not run _INI, but we continue to examine the children * of this device. * * From the ACPI spec, description of _STA: (Note - no mention * of whether to run _INI or not on the device in question) * * "_STA may return bit 0 clear (not present) with bit 3 set * (device is functional). This case is used to indicate a valid * device for which no device driver should be loaded (for example, * a bridge device.) Children of this device may be present and * valid. OSPM should continue enumeration below a device whose * _STA returns this bit combination" */ return_ACPI_STATUS(AE_OK); } else { /* * Device is not present and is not functioning. We must abort the * walk of this subtree immediately -- don't look at the children * of such a device. 
* * From the ACPI spec, description of _INI: * * "If the _STA method indicates that the device is not present, * OSPM will not run the _INI and will not examine the children * of the device for _INI methods" */ return_ACPI_STATUS(AE_CTRL_DEPTH); } } /* * The device is present or is assumed present if no _STA exists. * Run the _INI if it exists (not required to exist) * * Note: We know there is an _INI within this subtree, but it may not be * under this particular device, it may be lower in the branch. */ ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname (ACPI_TYPE_METHOD, device_node, METHOD_NAME__INI)); info->prefix_node = device_node; info->pathname = METHOD_NAME__INI; info->parameters = NULL; info->parameter_type = ACPI_PARAM_ARGS; info->flags = ACPI_IGNORE_RETURN_VALUE; /* * Some hardware relies on this being executed as atomically * as possible (without an NMI being received in the middle of * this) - so disable NMIs and initialize the device: */ //acpi_nmi_disable(); status = acpi_ns_evaluate(info); //acpi_nmi_enable(); if (ACPI_SUCCESS(status)) { walk_info->num_INI++; if ((acpi_dbg_level <= ACPI_LV_ALL_EXCEPTIONS) && (!(acpi_dbg_level & ACPI_LV_INFO))) { ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, ".")); } } #ifdef ACPI_DEBUG_OUTPUT else if (status != AE_NOT_FOUND) { /* Ignore error and move on to next device */ char *scope_name = acpi_ns_get_external_pathname(info->resolved_node); ACPI_EXCEPTION((AE_INFO, status, "during %s._INI execution", scope_name)); ACPI_FREE(scope_name); } #endif /* Ignore errors from above */ status = AE_OK; /* * The _INI method has been run if present; call the Global Initialization * Handler for this device. */ if (acpi_gbl_init_handler) { status = acpi_gbl_init_handler(device_node, ACPI_INIT_DEVICE_INI); } return_ACPI_STATUS(status); }
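The _STA handling above reduces to a small decision table: present means run _INI and descend into the children; not present but functioning (the bridge case) means skip _INI but still descend; neither means prune the whole subtree. Here is a minimal sketch of that decision using the bit values defined by the ACPI specification (bit 0 = present, bit 3 = functioning); the enum and function names are illustrative only, not ACPICA symbols.

#include <stdint.h>
#include <stdio.h>

#define STA_DEVICE_PRESENT      0x01    /* _STA bit 0 */
#define STA_DEVICE_FUNCTIONING  0x08    /* _STA bit 3 */

enum init_action {
    RUN_INI_AND_DESCEND,    /* device present: run _INI, walk children */
    SKIP_INI_BUT_DESCEND,   /* not present but functional (e.g. a bridge) */
    PRUNE_SUBTREE           /* not present, not functional: stop here */
};

/* sta_flags is the value returned by _STA, or 0xFFFFFFFF if _STA is absent
 * (absence means "assume all bits set", i.e. present and functioning). */
static enum init_action classify_device(uint32_t sta_flags)
{
    if (sta_flags & STA_DEVICE_PRESENT) {
        return RUN_INI_AND_DESCEND;
    }
    if (sta_flags & STA_DEVICE_FUNCTIONING) {
        return SKIP_INI_BUT_DESCEND;
    }
    return PRUNE_SUBTREE;
}

int main(void)
{
    printf("%d %d %d %d\n",
        classify_device(0xFFFFFFFF),             /* no _STA: run _INI */
        classify_device(0x0F),                   /* present and functioning */
        classify_device(STA_DEVICE_FUNCTIONING), /* bridge-like case */
        classify_device(0x00));                  /* prune the subtree */
    return 0;
}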
ACPI_STATUS AcpiEvGpeInitialize ( void) { UINT32 RegisterCount0 = 0; UINT32 RegisterCount1 = 0; UINT32 GpeNumberMax = 0; ACPI_STATUS Status; ACPI_FUNCTION_TRACE (EvGpeInitialize); ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INIT, "Initializing General Purpose Events (GPEs):\n")); Status = AcpiUtAcquireMutex (ACPI_MTX_NAMESPACE); if (ACPI_FAILURE (Status)) { return_ACPI_STATUS (Status); } /* * Initialize the GPE Block(s) defined in the FADT * * Why the GPE register block lengths are divided by 2: From the ACPI * Spec, section "General-Purpose Event Registers", we have: * * "Each register block contains two registers of equal length * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the * GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN * The length of the GPE1_STS and GPE1_EN registers is equal to * half the GPE1_LEN. If a generic register block is not supported * then its respective block pointer and block length values in the * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need * to be the same size." */ /* * Determine the maximum GPE number for this machine. * * Note: both GPE0 and GPE1 are optional, and either can exist without * the other. * * If EITHER the register length OR the block address are zero, then that * particular block is not supported. */ if (AcpiGbl_FADT.Gpe0BlockLength && AcpiGbl_FADT.XGpe0Block.Address) { /* GPE block 0 exists (has both length and address > 0) */ RegisterCount0 = (UINT16) (AcpiGbl_FADT.Gpe0BlockLength / 2); GpeNumberMax = (RegisterCount0 * ACPI_GPE_REGISTER_WIDTH) - 1; /* Install GPE Block 0 */ Status = AcpiEvCreateGpeBlock (AcpiGbl_FadtGpeDevice, AcpiGbl_FADT.XGpe0Block.Address, AcpiGbl_FADT.XGpe0Block.SpaceId, RegisterCount0, 0, AcpiGbl_FADT.SciInterrupt, &AcpiGbl_GpeFadtBlocks[0]); if (ACPI_FAILURE (Status)) { ACPI_EXCEPTION ((AE_INFO, Status, "Could not create GPE Block 0")); } } if (AcpiGbl_FADT.Gpe1BlockLength && AcpiGbl_FADT.XGpe1Block.Address) { /* GPE block 1 exists (has both length and address > 0) */ RegisterCount1 = (UINT16) (AcpiGbl_FADT.Gpe1BlockLength / 2); /* Check for GPE0/GPE1 overlap (if both banks exist) */ if ((RegisterCount0) && (GpeNumberMax >= AcpiGbl_FADT.Gpe1Base)) { ACPI_ERROR ((AE_INFO, "GPE0 block (GPE 0 to %u) overlaps the GPE1 block " "(GPE %u to %u) - Ignoring GPE1", GpeNumberMax, AcpiGbl_FADT.Gpe1Base, AcpiGbl_FADT.Gpe1Base + ((RegisterCount1 * ACPI_GPE_REGISTER_WIDTH) - 1))); /* Ignore GPE1 block by setting the register count to zero */ RegisterCount1 = 0; } else { /* Install GPE Block 1 */ Status = AcpiEvCreateGpeBlock (AcpiGbl_FadtGpeDevice, AcpiGbl_FADT.XGpe1Block.Address, AcpiGbl_FADT.XGpe1Block.SpaceId, RegisterCount1, AcpiGbl_FADT.Gpe1Base, AcpiGbl_FADT.SciInterrupt, &AcpiGbl_GpeFadtBlocks[1]); if (ACPI_FAILURE (Status)) { ACPI_EXCEPTION ((AE_INFO, Status, "Could not create GPE Block 1")); } /* * GPE0 and GPE1 do not have to be contiguous in the GPE number * space. However, GPE0 always starts at GPE number zero. */ GpeNumberMax = AcpiGbl_FADT.Gpe1Base + ((RegisterCount1 * ACPI_GPE_REGISTER_WIDTH) - 1); } } /* Exit if there are no GPE registers */ if ((RegisterCount0 + RegisterCount1) == 0) { /* GPEs are not required by ACPI, this is OK */ ACPI_DEBUG_PRINT ((ACPI_DB_INIT, "There are no GPE blocks defined in the FADT\n")); Status = AE_OK; goto Cleanup; } Cleanup: (void) AcpiUtReleaseMutex (ACPI_MTX_NAMESPACE); return_ACPI_STATUS (AE_OK); }
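The GPE sizing in AcpiEvGpeInitialize is pure arithmetic: each FADT GPE block length covers a status register and an enable register of equal size, so the register count is the block length divided by two, and each register byte covers eight GPE numbers. The standalone sketch below reproduces that calculation (the value 8 matches ACPI_GPE_REGISTER_WIDTH; the structure and function names here are invented for the example).

#include <stdint.h>
#include <stdio.h>

#define GPE_REGISTER_WIDTH  8   /* GPEs covered by one STS/EN register byte */

struct gpe_block_geometry {
    uint16_t register_count;    /* number of STS/EN register pairs */
    uint32_t gpe_number_max;    /* highest GPE number in this block */
};

/* block_length: FADT GPEx_LEN (covers both GPEx_STS and GPEx_EN)
 * block_base:   FADT GPE1_BASE (zero for GPE block 0)            */
static struct gpe_block_geometry size_gpe_block(uint8_t block_length,
                                                uint32_t block_base)
{
    struct gpe_block_geometry geo;

    geo.register_count = (uint16_t)(block_length / 2);
    geo.gpe_number_max =
        block_base + (geo.register_count * GPE_REGISTER_WIDTH) - 1;
    return geo;
}

int main(void)
{
    /* Typical GPE0 block: 8-byte block -> 4 register pairs -> GPE 0..31 */
    struct gpe_block_geometry gpe0 = size_gpe_block(8, 0);

    printf("registers=%u, max GPE=%u\n",
        gpe0.register_count, gpe0.gpe_number_max);
    return 0;
}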
static void acpi_ex_do_debug_object(union acpi_operand_object *source_desc, u32 level, u32 index) { u32 i; ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc); /* Print line header as long as we are not in the middle of an object display */ if (!((level > 0) && index == 0)) { ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[ACPI Debug] %*s", level, " ")); } /* Display index for package output only */ if (index > 0) { ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "(%.2u) ", index - 1)); } if (!source_desc) { ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[Null Object]\n")); return_VOID; } if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) { ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%s ", acpi_ut_get_object_type_name (source_desc))); if (!acpi_ut_valid_internal_object(source_desc)) { ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%p, Invalid Internal Object!\n", source_desc)); return_VOID; } } else if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_NAMED) { ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%s: %p\n", acpi_ut_get_type_name(((struct acpi_namespace_node *)source_desc)-> type), source_desc)); return_VOID; } else { return_VOID; } /* source_desc is of type ACPI_DESC_TYPE_OPERAND */ switch (ACPI_GET_OBJECT_TYPE(source_desc)) { case ACPI_TYPE_INTEGER: /* Output correct integer width */ if (acpi_gbl_integer_byte_width == 4) { ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "0x%8.8X\n", (u32) source_desc->integer. value)); } else { ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "0x%8.8X%8.8X\n", ACPI_FORMAT_UINT64(source_desc-> integer. value))); } break; case ACPI_TYPE_BUFFER: ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[0x%.2X]\n", (u32) source_desc->buffer.length)); ACPI_DUMP_BUFFER(source_desc->buffer.pointer, (source_desc->buffer.length < 256) ? source_desc->buffer.length : 256); break; case ACPI_TYPE_STRING: ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[0x%.2X] \"%s\"\n", source_desc->string.length, source_desc->string.pointer)); break; case ACPI_TYPE_PACKAGE: ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[Contains 0x%.2X Elements]\n", source_desc->package.count)); /* Output the entire contents of the package */ for (i = 0; i < source_desc->package.count; i++) { acpi_ex_do_debug_object(source_desc->package. elements[i], level + 4, i + 1); } break; case ACPI_TYPE_LOCAL_REFERENCE: if (source_desc->reference.opcode == AML_INDEX_OP) { ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[%s, 0x%X]\n", acpi_ps_get_opcode_name (source_desc->reference.opcode), source_desc->reference.offset)); } else { ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[%s]", acpi_ps_get_opcode_name (source_desc->reference.opcode))); } if (source_desc->reference.opcode == AML_LOAD_OP) { /* Load and load_table */ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, " Table OwnerId %p\n", source_desc->reference.object)); break; } ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, " ")); /* Check for valid node first, then valid object */ if (source_desc->reference.node) { if (ACPI_GET_DESCRIPTOR_TYPE (source_desc->reference.node) != ACPI_DESC_TYPE_NAMED) { ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, " %p - Not a valid namespace node\n", source_desc->reference. node)); } else { ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "Node %p [%4.4s] ", source_desc->reference. node, (source_desc->reference. 
node)->name.ascii)); switch ((source_desc->reference.node)->type) { /* These types have no attached object */ case ACPI_TYPE_DEVICE: acpi_os_printf("Device\n"); break; case ACPI_TYPE_THERMAL: acpi_os_printf("Thermal Zone\n"); break; default: acpi_ex_do_debug_object((source_desc-> reference. node)->object, level + 4, 0); break; } } } else if (source_desc->reference.object) { if (ACPI_GET_DESCRIPTOR_TYPE (source_desc->reference.object) == ACPI_DESC_TYPE_NAMED) { acpi_ex_do_debug_object(((struct acpi_namespace_node *) source_desc->reference. object)->object, level + 4, 0); } else { acpi_ex_do_debug_object(source_desc->reference. object, level + 4, 0); } } break; default: ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%p\n", source_desc)); break; } ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "\n")); return_VOID; }
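For packages, the debug-object routine above calls itself for each element with the indentation level incremented by 4 and the index set to element+1, which produces the nested, numbered listing seen in the debug output. The following stand-alone sketch shows the same recursion pattern over a toy object type (all names are illustrative, not ACPICA types).

#include <stdio.h>

/* Toy stand-in for an operand object: either an integer or a package */
struct toy_obj {
    int is_package;
    long value;                     /* valid when is_package == 0 */
    const struct toy_obj *elements; /* valid when is_package == 1 */
    int count;
};

static void dump_obj(const struct toy_obj *obj, unsigned level, unsigned index)
{
    /* Same header rule: indent unless continuing a parent's output line */
    if (!((level > 0) && index == 0)) {
        printf("[Debug] %*s", level, " ");
    }

    /* Display the index for package elements only */
    if (index > 0) {
        printf("(%.2u) ", index - 1);
    }

    if (obj->is_package) {
        printf("[Contains %u Elements]\n", (unsigned)obj->count);
        for (int i = 0; i < obj->count; i++) {
            dump_obj(&obj->elements[i], level + 4, (unsigned)i + 1);
        }
    } else {
        printf("0x%lX\n", obj->value);
    }
}

int main(void)
{
    const struct toy_obj ints[2] = {
        { 0, 0x12, NULL, 0 }, { 0, 0x34, NULL, 0 }
    };
    const struct toy_obj pkg = { 1, 0, ints, 2 };

    dump_obj(&pkg, 0, 0);
    return 0;
}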
static ACPI_STATUS OptBuildShortestPath ( ACPI_PARSE_OBJECT *Op, ACPI_WALK_STATE *WalkState, ACPI_NAMESPACE_NODE *CurrentNode, ACPI_NAMESPACE_NODE *TargetNode, ACPI_BUFFER *CurrentPath, ACPI_BUFFER *TargetPath, ACPI_SIZE AmlNameStringLength, UINT8 IsDeclaration, char **ReturnNewPath) { UINT32 NumCommonSegments; UINT32 MaxCommonSegments; UINT32 Index; UINT32 NumCarats; UINT32 i; char *NewPath; char *NewPathExternal; ACPI_NAMESPACE_NODE *Node; ACPI_GENERIC_STATE ScopeInfo; ACPI_STATUS Status; BOOLEAN SubPath = FALSE; ACPI_FUNCTION_NAME (OptBuildShortestPath); ScopeInfo.Scope.Node = CurrentNode; /* * Determine the maximum number of NameSegs that the Target and Current paths * can possibly have in common. (To optimize, we have to have at least 1) * * Note: The external NamePath string lengths are always a multiple of 5 * (ACPI_NAME_SIZE + separator) */ MaxCommonSegments = TargetPath->Length / ACPI_PATH_SEGMENT_LENGTH; if (CurrentPath->Length < TargetPath->Length) { MaxCommonSegments = CurrentPath->Length / ACPI_PATH_SEGMENT_LENGTH; } /* * Determine how many NameSegs the two paths have in common. * (Starting from the root) */ for (NumCommonSegments = 0; NumCommonSegments < MaxCommonSegments; NumCommonSegments++) { /* Compare two single NameSegs */ if (!ACPI_COMPARE_NAME ( &((char *) TargetPath->Pointer)[ (NumCommonSegments * ACPI_PATH_SEGMENT_LENGTH) + 1], &((char *) CurrentPath->Pointer)[ (NumCommonSegments * ACPI_PATH_SEGMENT_LENGTH) + 1])) { /* Mismatch */ break; } } ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OPTIMIZATIONS, " COMMON: %u", NumCommonSegments)); /* There must be at least 1 common NameSeg in order to optimize */ if (NumCommonSegments == 0) { return (AE_NOT_FOUND); } if (NumCommonSegments == MaxCommonSegments) { if (CurrentPath->Length == TargetPath->Length) { ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OPTIMIZATIONS, " SAME PATH")); return (AE_NOT_FOUND); } else { ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OPTIMIZATIONS, " SUBPATH")); SubPath = TRUE; } } /* Determine how many prefix Carats are required */ NumCarats = (CurrentPath->Length / ACPI_PATH_SEGMENT_LENGTH) - NumCommonSegments; /* * Construct a new target string */ NewPathExternal = ACPI_ALLOCATE_ZEROED ( TargetPath->Length + NumCarats + 1); /* Insert the Carats into the Target string */ for (i = 0; i < NumCarats; i++) { NewPathExternal[i] = AML_PARENT_PREFIX; } /* * Copy only the necessary (optimal) segments from the original * target string */ Index = (NumCommonSegments * ACPI_PATH_SEGMENT_LENGTH) + 1; /* Special handling for exact subpath in a name declaration */ if (IsDeclaration && SubPath && (CurrentPath->Length > TargetPath->Length)) { /* * The current path is longer than the target, and the target is a * subpath of the current path. We must include one more NameSeg of * the target path */ Index -= ACPI_PATH_SEGMENT_LENGTH; /* Special handling for Scope() operator */ if (Op->Asl.AmlOpcode == AML_SCOPE_OP) { NewPathExternal[i] = AML_PARENT_PREFIX; i++; ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OPTIMIZATIONS, "(EXTRA ^)")); } } /* Make sure we haven't gone off the end of the target path */ if (Index > TargetPath->Length) { Index = TargetPath->Length; } strcpy (&NewPathExternal[i], &((char *) TargetPath->Pointer)[Index]); ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OPTIMIZATIONS, " %-24s", NewPathExternal)); /* * Internalize the new target string and check it against the original * string to make sure that this is in fact an optimization. If the * original string is already optimal, there is no point in continuing. 
*/ Status = AcpiNsInternalizeName (NewPathExternal, &NewPath); if (ACPI_FAILURE (Status)) { AslCoreSubsystemError (Op, Status, "Internalizing new NamePath", ASL_NO_ABORT); ACPI_FREE (NewPathExternal); return (Status); } if (strlen (NewPath) >= AmlNameStringLength) { ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OPTIMIZATIONS, " NOT SHORTER (New %u old %u)", (UINT32) strlen (NewPath), (UINT32) AmlNameStringLength)); ACPI_FREE (NewPathExternal); return (AE_NOT_FOUND); } /* * Check to make sure that the optimization finds the node we are * looking for. This is simply a sanity check on the new * path that has been created. */ Status = AcpiNsLookup (&ScopeInfo, NewPath, ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE, ACPI_NS_DONT_OPEN_SCOPE, WalkState, &(Node)); if (ACPI_SUCCESS (Status)) { /* Found the namepath, but make sure the node is correct */ if (Node == TargetNode) { /* The lookup matched the node, accept this optimization */ AslError (ASL_OPTIMIZATION, ASL_MSG_NAME_OPTIMIZATION, Op, NewPathExternal); *ReturnNewPath = NewPath; } else { /* Node is not correct, do not use this optimization */ Status = AE_NOT_FOUND; ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OPTIMIZATIONS, " ***** WRONG NODE")); AslError (ASL_WARNING, ASL_MSG_COMPILER_INTERNAL, Op, "Not using optimized name - found wrong node"); } } else { /* The lookup failed, we obviously cannot use this optimization */ ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OPTIMIZATIONS, " ***** NOT FOUND")); AslError (ASL_WARNING, ASL_MSG_COMPILER_INTERNAL, Op, "Not using optimized name - did not find node"); } ACPI_FREE (NewPathExternal); return (Status); }
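The caret optimization in OptBuildShortestPath comes down to counting: find how many leading NameSegs the current scope and the target share, emit one '^' for every remaining segment of the current scope, then append the unshared tail of the target path. The sketch below works on external paths in the fixed "\AAAA.BBBB.CCCC" form (segment stride of 5, matching ACPI_PATH_SEGMENT_LENGTH); the names and the simplified string handling are for illustration only.

#include <stdio.h>
#include <string.h>

#define SEGMENT_STRIDE 5   /* 4-char NameSeg plus one '.' or the leading '\' */
#define NAME_SIZE      4

/* Build a caret-prefixed relative path from CurrentScope to Target.
 * Both inputs must be fully qualified external paths such as
 * "\_SB_.PCI0.ISA_", where every NameSeg is exactly 4 characters.   */
static void build_relative_path(const char *current, const char *target,
                                char *out, size_t out_size)
{
    size_t cur_segs = strlen(current) / SEGMENT_STRIDE;
    size_t tgt_segs = strlen(target) / SEGMENT_STRIDE;
    size_t max_common = (cur_segs < tgt_segs) ? cur_segs : tgt_segs;
    size_t common = 0;
    size_t pos = 0;

    /* Count leading NameSegs that match (skip the '\' / '.' separators) */
    while (common < max_common &&
           strncmp(&current[common * SEGMENT_STRIDE + 1],
                   &target[common * SEGMENT_STRIDE + 1], NAME_SIZE) == 0) {
        common++;
    }

    /* One '^' for every current-scope segment below the common part */
    for (size_t i = 0; i < cur_segs - common && pos + 1 < out_size; i++) {
        out[pos++] = '^';
    }

    /* Append the unshared tail of the target (skip its leading separator) */
    snprintf(&out[pos], out_size - pos, "%s",
             &target[common * SEGMENT_STRIDE + 1]);
}

int main(void)
{
    char result[64];

    /* Scope \_SB_.PCI0.ISA_ referencing \_SB_.PCI0.UAR1:
     * two common segments, one caret, expected result "^UAR1" */
    build_relative_path("\\_SB_.PCI0.ISA_", "\\_SB_.PCI0.UAR1",
                        result, sizeof(result));
    printf("%s\n", result);
    return 0;
}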
acpi_status acpi_ds_scope_stack_push(struct acpi_namespace_node *node, acpi_object_type type, struct acpi_walk_state *walk_state) { union acpi_generic_state *scope_info; union acpi_generic_state *old_scope_info; ACPI_FUNCTION_TRACE(ds_scope_stack_push); if (!node) { /* Invalid scope */ ACPI_ERROR((AE_INFO, "Null scope parameter")); return_ACPI_STATUS(AE_BAD_PARAMETER); } /* Make sure object type is valid */ if (!acpi_ut_valid_object_type(type)) { ACPI_WARNING((AE_INFO, "Invalid object type: 0x%X", type)); } /* Allocate a new scope object */ scope_info = acpi_ut_create_generic_state(); if (!scope_info) { return_ACPI_STATUS(AE_NO_MEMORY); } /* Init new scope object */ scope_info->common.descriptor_type = ACPI_DESC_TYPE_STATE_WSCOPE; scope_info->scope.node = node; scope_info->common.value = (u16) type; walk_state->scope_depth++; ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%.2d] Pushed scope ", (u32) walk_state->scope_depth)); old_scope_info = walk_state->scope_info; if (old_scope_info) { ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "[%4.4s] (%s)", acpi_ut_get_node_name(old_scope_info-> scope.node), acpi_ut_get_type_name(old_scope_info-> common.value))); } else { ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "[\\___] (%s)", "ROOT")); } ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, ", New scope -> [%4.4s] (%s)\n", acpi_ut_get_node_name(scope_info->scope.node), acpi_ut_get_type_name(scope_info->common.value))); /* Push new scope object onto stack */ acpi_ut_push_generic_state(&walk_state->scope_info, scope_info); return_ACPI_STATUS(AE_OK); }
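Conceptually, the scope stack managed above is a singly linked list used as a stack: allocate a state object, record the namespace node and its type, bump the walk-state depth, and link the new entry at the head. A stripped-down sketch of that push operation with plain C types follows (the struct and function names are invented for the example).

#include <stdio.h>
#include <stdlib.h>

struct scope_state {
    const char *node_name;      /* stand-in for the namespace node */
    int object_type;            /* stand-in for acpi_object_type */
    struct scope_state *next;   /* link to the enclosing scope */
};

struct walk_state {
    struct scope_state *scope_info; /* top of the scope stack */
    unsigned scope_depth;
};

static int scope_stack_push(struct walk_state *ws,
                            const char *node_name, int type)
{
    struct scope_state *scope = calloc(1, sizeof(*scope));

    if (!scope) {
        return -1;              /* AE_NO_MEMORY analogue */
    }
    scope->node_name = node_name;
    scope->object_type = type;

    /* Push onto the stack kept in the walk state */
    scope->next = ws->scope_info;
    ws->scope_info = scope;
    ws->scope_depth++;
    return 0;
}

int main(void)
{
    struct walk_state ws = { NULL, 0 };

    scope_stack_push(&ws, "\\", 0);     /* root scope */
    scope_stack_push(&ws, "_SB_", 6);   /* toy type code for this example */

    printf("depth=%u top=%s\n", ws.scope_depth, ws.scope_info->node_name);
    return 0;
}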
void OptOptimizeNamePath ( ACPI_PARSE_OBJECT *Op, UINT32 Flags, ACPI_WALK_STATE *WalkState, char *AmlNameString, ACPI_NAMESPACE_NODE *TargetNode) { ACPI_STATUS Status; ACPI_BUFFER TargetPath; ACPI_BUFFER CurrentPath; ACPI_SIZE AmlNameStringLength; ACPI_NAMESPACE_NODE *CurrentNode; char *ExternalNameString; char *NewPath = NULL; ACPI_SIZE HowMuchShorter; ACPI_PARSE_OBJECT *NextOp; ACPI_FUNCTION_TRACE (OptOptimizeNamePath); /* This is an optional optimization */ if (!Gbl_ReferenceOptimizationFlag) { return_VOID; } /* Various required items */ if (!TargetNode || !WalkState || !AmlNameString || !Op->Common.Parent) { return_VOID; } ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OPTIMIZATIONS, "PATH OPTIMIZE: Line %5d ParentOp [%12.12s] ThisOp [%12.12s] ", Op->Asl.LogicalLineNumber, AcpiPsGetOpcodeName (Op->Common.Parent->Common.AmlOpcode), AcpiPsGetOpcodeName (Op->Common.AmlOpcode))); if (!(Flags & (AML_NAMED | AML_CREATE))) { if (Op->Asl.CompileFlags & NODE_IS_NAME_DECLARATION) { /* We don't want to fuss with actual name declaration nodes here */ ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OPTIMIZATIONS, "******* NAME DECLARATION\n")); return_VOID; } } /* * The original path must be longer than one NameSeg (4 chars) for there * to be any possibility that it can be optimized to a shorter string */ AmlNameStringLength = strlen (AmlNameString); if (AmlNameStringLength <= ACPI_NAME_SIZE) { ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OPTIMIZATIONS, "NAMESEG %4.4s\n", AmlNameString)); return_VOID; } /* * We need to obtain the node that represents the current scope -- where * we are right now in the namespace. We will compare this path * against the Namepath, looking for commonality. */ CurrentNode = AcpiGbl_RootNode; if (WalkState->ScopeInfo) { CurrentNode = WalkState->ScopeInfo->Scope.Node; } if (Flags & (AML_NAMED | AML_CREATE)) { /* This is the declaration of a new name */ ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OPTIMIZATIONS, "NAME\n")); /* * The node of interest is the parent of this node (the containing * scope). The actual namespace node may be up more than one level * of parse op or it may not exist at all (if we traverse back * up to the root.) 
*/ NextOp = Op->Asl.Parent; while (NextOp && (!NextOp->Asl.Node)) { NextOp = NextOp->Asl.Parent; } if (NextOp && NextOp->Asl.Node) { CurrentNode = NextOp->Asl.Node; } else { CurrentNode = AcpiGbl_RootNode; } } else { /* This is a reference to an existing named object */ ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OPTIMIZATIONS, "REFERENCE\n")); } /* * Obtain the full paths to the two nodes that we are interested in * (Target and current namespace location) in external * format -- something we can easily manipulate */ TargetPath.Length = ACPI_ALLOCATE_LOCAL_BUFFER; Status = AcpiNsHandleToPathname (TargetNode, &TargetPath, FALSE); if (ACPI_FAILURE (Status)) { AslCoreSubsystemError (Op, Status, "Getting Target NamePath", ASL_NO_ABORT); return_VOID; } TargetPath.Length--; /* Subtract one for null terminator */ /* CurrentPath is the path to this scope (where we are in the namespace) */ CurrentPath.Length = ACPI_ALLOCATE_LOCAL_BUFFER; Status = AcpiNsHandleToPathname (CurrentNode, &CurrentPath, FALSE); if (ACPI_FAILURE (Status)) { AslCoreSubsystemError (Op, Status, "Getting Current NamePath", ASL_NO_ABORT); return_VOID; } CurrentPath.Length--; /* Subtract one for null terminator */ /* Debug output only */ Status = AcpiNsExternalizeName (ACPI_UINT32_MAX, AmlNameString, NULL, &ExternalNameString); if (ACPI_FAILURE (Status)) { AslCoreSubsystemError (Op, Status, "Externalizing NamePath", ASL_NO_ABORT); return_VOID; } ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OPTIMIZATIONS, "CURRENT SCOPE: (%2u) %-37s FULL PATH TO NAME: (%2u) %-32s ACTUAL AML:%-32s\n", (UINT32) CurrentPath.Length, (char *) CurrentPath.Pointer, (UINT32) TargetPath.Length, (char *) TargetPath.Pointer, ExternalNameString)); ACPI_FREE (ExternalNameString); /* * Attempt an optimization depending on the type of namepath */ if (Flags & (AML_NAMED | AML_CREATE)) { /* * This is a named opcode and the namepath is a name declaration, not * a reference. */ Status = OptOptimizeNameDeclaration (Op, WalkState, CurrentNode, TargetNode, AmlNameString, &NewPath); if (ACPI_FAILURE (Status)) { /* * 2) The declaration could not be optimized directly, now attempt to * optimize the namestring with carats (up-arrow) */ Status = OptBuildShortestPath (Op, WalkState, CurrentNode, TargetNode, &CurrentPath, &TargetPath, AmlNameStringLength, 1, &NewPath); } } else { /* * This is a reference to an existing named object * * 1) Check if search-to-root can be utilized using the last * NameSeg of the NamePath */ Status = OptSearchToRoot (Op, WalkState, CurrentNode, TargetNode, &TargetPath, &NewPath); if (ACPI_FAILURE (Status)) { /* * 2) Search-to-root could not be used, now attempt to * optimize the namestring with carats (up-arrow) */ Status = OptBuildShortestPath (Op, WalkState, CurrentNode, TargetNode, &CurrentPath, &TargetPath, AmlNameStringLength, 0, &NewPath); } } /* * Success from above indicates that the NamePath was successfully * optimized.
We need to update the parse op with the new name */ if (ACPI_SUCCESS (Status)) { HowMuchShorter = (AmlNameStringLength - strlen (NewPath)); OptTotal += HowMuchShorter; ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OPTIMIZATIONS, " REDUCED BY %2u (TOTAL SAVED %2u)", (UINT32) HowMuchShorter, OptTotal)); if (Flags & AML_NAMED) { if (Op->Asl.AmlOpcode == AML_ALIAS_OP) { /* * ALIAS is the only oddball opcode, the name declaration * (alias name) is the second operand */ Op->Asl.Child->Asl.Next->Asl.Value.String = NewPath; Op->Asl.Child->Asl.Next->Asl.AmlLength = strlen (NewPath); } else { Op->Asl.Child->Asl.Value.String = NewPath; Op->Asl.Child->Asl.AmlLength = strlen (NewPath); } } else if (Flags & AML_CREATE) { /* Name must appear as the last parameter */ NextOp = Op->Asl.Child; while (!(NextOp->Asl.CompileFlags & NODE_IS_NAME_DECLARATION)) { NextOp = NextOp->Asl.Next; } /* Update the parse node with the new NamePath */ NextOp->Asl.Value.String = NewPath; NextOp->Asl.AmlLength = strlen (NewPath); } else { /* Update the parse node with the new NamePath */ Op->Asl.Value.String = NewPath; Op->Asl.AmlLength = strlen (NewPath); } } else { ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OPTIMIZATIONS, " ALREADY OPTIMAL")); } /* Cleanup path buffers */ ACPI_FREE (TargetPath.Pointer); ACPI_FREE (CurrentPath.Pointer); ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OPTIMIZATIONS, "\n")); return_VOID; }
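The savings tracked in HowMuchShorter follow directly from how a NamePath is encoded in AML: each NameSeg costs 4 bytes, a two-segment path adds a DualName prefix byte, three or more segments add a MultiName prefix byte plus a segment-count byte, and every leading '\' or '^' costs one byte. The sketch below computes the encoded length of a path from its prefix count and segment count so the effect of dropping segments or adding carets can be compared; it is a simplified model of the AML grammar, not the compiler's own length routine.

#include <stdio.h>

/* Encoded size in bytes of an AML NameString built from:
 *   prefixes - number of leading '\' or '^' characters (1 byte each)
 *   segs     - number of 4-character NameSegs
 * Per the AML grammar: 1 seg is bare, 2 segs use the DualName prefix,
 * 3 or more use the MultiName prefix plus a segment-count byte.      */
static unsigned aml_namestring_length(unsigned prefixes, unsigned segs)
{
    unsigned body;

    if (segs == 0) {
        body = 1;               /* NullName (a single 0x00 byte) */
    } else if (segs == 1) {
        body = 4;
    } else if (segs == 2) {
        body = 1 + 8;           /* DualNamePrefix + two NameSegs */
    } else {
        body = 2 + (4 * segs);  /* MultiNamePrefix + SegCount + NameSegs */
    }
    return prefixes + body;
}

int main(void)
{
    /* \_SB_.PCI0.UAR1 fully qualified vs. the optimized ^UAR1 */
    unsigned full = aml_namestring_length(1, 3);   /* 1 + 2 + 12 = 15 */
    unsigned opt  = aml_namestring_length(1, 1);   /* 1 + 4       = 5  */

    printf("full=%u optimized=%u saved=%u\n", full, opt, full - opt);
    return 0;
}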
/******************************************************************************* * * FUNCTION: acpi_ex_do_debug_object * * PARAMETERS: source_desc - Object to be output to "Debug Object" * level - Indentation level (used for packages) * index - Current package element, zero if not pkg * * RETURN: None * * DESCRIPTION: Handles stores to the AML Debug Object. For example: * Store(INT1, Debug) * * This function is not compiled if ACPI_NO_ERROR_MESSAGES is set. * * This function is only enabled if acpi_gbl_enable_aml_debug_object is set, or * if ACPI_LV_DEBUG_OBJECT is set in the acpi_dbg_level. Thus, in the normal * operational case, stores to the debug object are ignored but can be easily * enabled if necessary. * ******************************************************************************/ void acpi_ex_do_debug_object(union acpi_operand_object *source_desc, u32 level, u32 index) { u32 i; u32 timer; union acpi_operand_object *object_desc; u32 value; ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc); /* Output must be enabled via the debug_object global or the dbg_level */ if (!acpi_gbl_enable_aml_debug_object && !(acpi_dbg_level & ACPI_LV_DEBUG_OBJECT)) { return_VOID; } /* * We will emit the current timer value (in microseconds) with each * debug output. Only need the lower 26 bits. This allows for 67 * million microseconds or 67 seconds before rollover. */ timer = ((u32)acpi_os_get_timer() / 10); /* (100 nanoseconds to microseconds) */ timer &= 0x03FFFFFF; /* * Print line header as long as we are not in the middle of an * object display */ if (!((level > 0) && index == 0)) { acpi_os_printf("[ACPI Debug %.8u] %*s", timer, level, " "); } /* Display the index for package output only */ if (index > 0) { acpi_os_printf("(%.2u) ", index - 1); } if (!source_desc) { acpi_os_printf("[Null Object]\n"); return_VOID; } if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) { acpi_os_printf("%s ", acpi_ut_get_object_type_name(source_desc)); if (!acpi_ut_valid_internal_object(source_desc)) { acpi_os_printf("%p, Invalid Internal Object!\n", source_desc); return_VOID; } } else if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_NAMED) { acpi_os_printf("%s: %p\n", acpi_ut_get_type_name(((struct acpi_namespace_node *) source_desc)->type), source_desc); return_VOID; } else { return_VOID; } /* source_desc is of type ACPI_DESC_TYPE_OPERAND */ switch (source_desc->common.type) { case ACPI_TYPE_INTEGER: /* Output correct integer width */ if (acpi_gbl_integer_byte_width == 4) { acpi_os_printf("0x%8.8X\n", (u32)source_desc->integer.value); } else { acpi_os_printf("0x%8.8X%8.8X\n", ACPI_FORMAT_UINT64(source_desc->integer. value)); } break; case ACPI_TYPE_BUFFER: acpi_os_printf("[0x%.2X]\n", (u32)source_desc->buffer.length); acpi_ut_dump_buffer(source_desc->buffer.pointer, (source_desc->buffer.length < 256) ? source_desc->buffer.length : 256, DB_BYTE_DISPLAY, 0); break; case ACPI_TYPE_STRING: acpi_os_printf("[0x%.2X] \"%s\"\n", source_desc->string.length, source_desc->string.pointer); break; case ACPI_TYPE_PACKAGE: acpi_os_printf("[Contains 0x%.2X Elements]\n", source_desc->package.count); /* Output the entire contents of the package */ for (i = 0; i < source_desc->package.count; i++) { acpi_ex_do_debug_object(source_desc->package. 
elements[i], level + 4, i + 1); } break; case ACPI_TYPE_LOCAL_REFERENCE: acpi_os_printf("[%s] ", acpi_ut_get_reference_name(source_desc)); /* Decode the reference */ switch (source_desc->reference.class) { case ACPI_REFCLASS_INDEX: acpi_os_printf("0x%X\n", source_desc->reference.value); break; case ACPI_REFCLASS_TABLE: /* Case for ddb_handle */ acpi_os_printf("Table Index 0x%X\n", source_desc->reference.value); return_VOID; default: break; } acpi_os_printf(" "); /* Check for valid node first, then valid object */ if (source_desc->reference.node) { if (ACPI_GET_DESCRIPTOR_TYPE (source_desc->reference.node) != ACPI_DESC_TYPE_NAMED) { acpi_os_printf (" %p - Not a valid namespace node\n", source_desc->reference.node); } else { acpi_os_printf("Node %p [%4.4s] ", source_desc->reference.node, (source_desc->reference.node)-> name.ascii); switch ((source_desc->reference.node)->type) { /* These types have no attached object */ case ACPI_TYPE_DEVICE: acpi_os_printf("Device\n"); break; case ACPI_TYPE_THERMAL: acpi_os_printf("Thermal Zone\n"); break; default: acpi_ex_do_debug_object((source_desc-> reference. node)->object, level + 4, 0); break; } } } else if (source_desc->reference.object) { if (ACPI_GET_DESCRIPTOR_TYPE (source_desc->reference.object) == ACPI_DESC_TYPE_NAMED) { acpi_ex_do_debug_object(((struct acpi_namespace_node *) source_desc->reference. object)->object, level + 4, 0); } else { object_desc = source_desc->reference.object; value = source_desc->reference.value; switch (object_desc->common.type) { case ACPI_TYPE_BUFFER: acpi_os_printf("Buffer[%u] = 0x%2.2X\n", value, *source_desc->reference. index_pointer); break; case ACPI_TYPE_STRING: acpi_os_printf ("String[%u] = \"%c\" (0x%2.2X)\n", value, *source_desc->reference. index_pointer, *source_desc->reference. index_pointer); break; case ACPI_TYPE_PACKAGE: acpi_os_printf("Package[%u] = ", value); acpi_ex_do_debug_object(*source_desc-> reference.where, level + 4, 0); break; default: acpi_os_printf ("Unknown Reference object type %X\n", object_desc->common.type); break; } } } break; default: acpi_os_printf("%p\n", source_desc); break; } ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "\n")); return_VOID; }
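The timestamp prefix in this newer version is worth a closer look: the OSL timer ticks in 100-nanosecond units, dividing by 10 yields microseconds, and masking to the low 26 bits keeps the printed value short at the cost of rolling over roughly every 67 seconds (2^26 microseconds is about 67.1 s). A tiny self-contained check of that arithmetic follows; the tick value is made up for the example.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Pretend the OSL timer returned this many 100 ns ticks */
    uint64_t ticks_100ns = 987654321ULL;

    /* 100 ns units -> microseconds, then keep only the low 26 bits */
    uint32_t us        = (uint32_t)(ticks_100ns / 10);
    uint32_t truncated = us & 0x03FFFFFF;

    /* Rollover period of a 26-bit microsecond counter */
    double rollover_seconds = (double)(1UL << 26) / 1000000.0;

    printf("us=%u truncated=%u rollover=%.1f s\n",
        us, truncated, rollover_seconds);
    return 0;
}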