/*
 * acpi_ns_initialize_devices - Execute _INI methods for Device/Processor/
 * Thermal objects throughout the namespace.
 *
 * Performs two namespace walks: the first (acpi_ns_find_ini_methods) marks
 * subtrees containing _INI methods, the second (acpi_ns_init_one_device)
 * executes the _INI methods, tracked by the num_STA/num_INI counters.
 * A shared evaluate_info block is allocated for the second walk and freed
 * on both the success and failure paths.
 *
 * RETURN: Status from the namespace walks; AE_NO_MEMORY on allocation
 *         failure. All failures are logged via ACPI_EXCEPTION.
 */
acpi_status acpi_ns_initialize_devices(void)
{
	acpi_status status;
	struct acpi_device_walk_info info;

	ACPI_FUNCTION_TRACE(ns_initialize_devices);

	/* Init counters */

	info.device_count = 0;
	info.num_STA = 0;
	info.num_INI = 0;

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
			      "Initializing Device/Processor/Thermal objects by executing _INI methods:"));

	/* Tree analysis: find all subtrees that contain _INI methods */

	status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
					ACPI_UINT32_MAX, FALSE,
					acpi_ns_find_ini_methods, &info, NULL);
	if (ACPI_FAILURE(status)) {
		goto error_exit;
	}

	/* Allocate the evaluation information block */

	info.evaluate_info =
	    ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
	if (!info.evaluate_info) {
		status = AE_NO_MEMORY;
		goto error_exit;
	}

	/* Walk namespace to execute all _INIs on present devices */

	status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
					ACPI_UINT32_MAX, FALSE,
					acpi_ns_init_one_device, &info, NULL);

	/* Free the shared evaluation block regardless of walk outcome */

	ACPI_FREE(info.evaluate_info);
	if (ACPI_FAILURE(status)) {
		goto error_exit;
	}

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
			      "\nExecuted %hd _INI methods requiring %hd _STA executions (examined %hd objects)\n",
			      info.num_INI, info.num_STA, info.device_count));

	return_ACPI_STATUS(status);

error_exit:
	ACPI_EXCEPTION((AE_INFO, status, "During device initialization"));
	return_ACPI_STATUS(status);
}
/*
 * acpi_ns_dump_objects - Display namespace objects of the given type
 * owned by owner_id, beneath start_handle, down to max_depth levels.
 * display_type selects the output format used by the per-object callback.
 */
void acpi_ns_dump_objects(acpi_object_type type, u8 display_type, u32 max_depth, acpi_owner_id owner_id, acpi_handle start_handle)
{
	acpi_status status;
	struct acpi_walk_info walk_info;

	ACPI_FUNCTION_ENTRY();

	/*
	 * Hold the namespace mutex for the entire dump so that nothing can
	 * modify the tree while it is being printed -- in particular the
	 * temporary nodes, since those are displayed as well.
	 */
	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		acpi_os_printf("Could not acquire namespace mutex\n");
		return;
	}

	/* Package the display parameters for acpi_ns_dump_one_object */

	walk_info.display_type = display_type;
	walk_info.owner_id = owner_id;
	walk_info.debug_level = ACPI_LV_TABLES;

	(void)acpi_ns_walk_namespace(type, start_handle, max_depth,
				     ACPI_NS_WALK_NO_UNLOCK |
				     ACPI_NS_WALK_TEMP_NODES,
				     acpi_ns_dump_one_object, NULL,
				     (void *)&walk_info, NULL);

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
}
void acpi_ns_dump_root_devices(void) { acpi_handle sys_bus_handle; acpi_status status; ACPI_FUNCTION_NAME(ns_dump_root_devices); /* Only dump the table if tracing is enabled */ if (!(ACPI_LV_TABLES & acpi_dbg_level)) { return; } status = acpi_get_handle(NULL, ACPI_NS_SYSTEM_BUS, &sys_bus_handle); if (ACPI_FAILURE(status)) { return; } ACPI_DEBUG_PRINT((ACPI_DB_TABLES, "Display of all devices in the namespace:\n")); status = acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, sys_bus_handle, ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK, acpi_ns_dump_one_device, NULL, NULL, NULL); }
acpi_status acpi_ns_initialize_devices ( void) { acpi_status status; acpi_device_walk_info info; FUNCTION_TRACE ("Ns_initialize_devices"); info.device_count = 0; info.num_STA = 0; info.num_INI = 0; ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, "Executing device _INI methods:")); status = acpi_ns_walk_namespace (ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, FALSE, acpi_ns_init_one_device, &info, NULL); if (ACPI_FAILURE (status)) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Walk_namespace failed! %x\n", status)); } ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, "\n%d Devices found: %d _STA, %d _INI\n", info.device_count, info.num_STA, info.num_INI)); return_ACPI_STATUS (status); }
/*
 * acpi_ev_execute_reg_methods - Execute all _REG methods for Operation
 * Regions of the given space_id beneath 'node'.
 */
acpi_status
acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
			    acpi_adr_space_type space_id)
{
	acpi_status walk_status;

	ACPI_FUNCTION_TRACE(ev_execute_reg_methods);

	/*
	 * Execute every _REG method belonging to an Operation Region of the
	 * given address space. Doing this as its own namespace walk resolves
	 * interdependencies between regions and _REG methods: handlers for
	 * all regions of this space ID are in place before any _REG runs.
	 */
	walk_status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node,
					     ACPI_UINT32_MAX,
					     ACPI_NS_WALK_UNLOCK,
					     acpi_ev_reg_run, NULL,
					     &space_id, NULL);

	/* EC special case: run "orphan" _REG methods that have no region */

	if (space_id == ACPI_ADR_SPACE_EC) {
		acpi_ev_orphan_ec_reg_method(node);
	}

	return_ACPI_STATUS(walk_status);
}
/*
 * acpi_ns_dump_objects - Display namespace objects of the given type
 * owned by owner_id, beneath start_handle, down to max_depth levels.
 * display_type selects the output format used by acpi_ns_dump_one_object.
 */
void acpi_ns_dump_objects(acpi_object_type type, u8 display_type, u32 max_depth, acpi_owner_id owner_id, acpi_handle start_handle)
{
	struct acpi_walk_info info;
	acpi_status status;

	ACPI_FUNCTION_ENTRY();

	/*
	 * Hold the namespace mutex for the duration of the dump so the tree
	 * cannot change while being displayed -- including the temporary
	 * nodes, which are walked here via ACPI_NS_WALK_TEMP_NODES.
	 */
	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		acpi_os_printf("Could not acquire namespace mutex\n");
		return;
	}

	info.debug_level = ACPI_LV_TABLES;
	info.owner_id = owner_id;
	info.display_type = display_type;

	(void)acpi_ns_walk_namespace(type, start_handle, max_depth,
				     ACPI_NS_WALK_NO_UNLOCK |
				     ACPI_NS_WALK_TEMP_NODES,
				     acpi_ns_dump_one_object, NULL,
				     (void *)&info, NULL);

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_walk_namespace
 *
 * PARAMETERS:  Type            - acpi_object_type to search for
 *              start_object    - Handle in namespace where search begins
 *              max_depth       - Depth to which search is to reach
 *              user_function   - Called when an object of "Type" is found
 *              Context         - Passed to user function
 *              return_value    - Location where return value of
 *                                user_function is put if terminated early
 *
 * RETURN:      Return value from the user_function if terminated early.
 *              Otherwise, returns NULL.
 *
 * DESCRIPTION: Performs a modified depth-first walk of the namespace tree,
 *              starting (and ending) at the object specified by start_handle.
 *              The user_function is called whenever an object that matches
 *              the type parameter is found. If the user function returns
 *              a non-zero value, the search is terminated immediately and
 *              this value is returned to the caller.
 *
 *              The point of this procedure is to provide a generic namespace
 *              walk routine that can be called from multiple places to
 *              provide multiple services; the User Function can be tailored
 *              to each task, whether it is a print function, a compare
 *              function, etc.
 *
 ******************************************************************************/
acpi_status
acpi_walk_namespace(acpi_object_type type,
		    acpi_handle start_object,
		    u32 max_depth,
		    acpi_walk_callback user_function,
		    void *context, void **return_value)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_walk_namespace);

	/* Parameter validation */

	if ((type > ACPI_TYPE_LOCAL_MAX) || (!max_depth) || (!user_function)) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/*
	 * Lock the namespace around the walk.
	 * The namespace will be unlocked/locked around each call
	 * to the user function - since this function
	 * must be allowed to make Acpi calls itself.
	 */
	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	status = acpi_ns_walk_namespace(type, start_object, max_depth,
					ACPI_NS_WALK_UNLOCK, user_function,
					context, return_value);

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return_ACPI_STATUS(status);
}
/*
 * acpi_walk_namespace - Perform a depth-first walk of the namespace,
 * invoking pre_order_visit and/or post_order_visit for each matching
 * object of the given type down to max_depth levels below start_object.
 * At least one of the two callbacks must be supplied; *return_value
 * receives the callback's value if the walk is terminated early.
 */
acpi_status
acpi_walk_namespace(acpi_object_type type,
		    acpi_handle start_object,
		    u32 max_depth,
		    acpi_walk_callback pre_order_visit,
		    acpi_walk_callback post_order_visit,
		    void *context, void **return_value)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_walk_namespace);

	/* Parameter validation */

	if ((type > ACPI_TYPE_LOCAL_MAX) ||
	    (!max_depth) || (!pre_order_visit && !post_order_visit)) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/*
	 * Need to acquire the namespace reader lock to prevent interference
	 * with any concurrent table unloads (which causes the deletion of
	 * namespace objects). We cannot allow the deletion of a namespace node
	 * while the user function is using it. The exception to this are the
	 * nodes created and deleted during control method execution -- these
	 * nodes are marked as temporary nodes and are ignored by the namespace
	 * walk. Thus, control methods can be executed while holding the
	 * namespace deletion lock (and the user function can execute control
	 * methods.)
	 */
	status = acpi_ut_acquire_read_lock(&acpi_gbl_namespace_rw_lock);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * Lock the namespace around the walk. The namespace will be
	 * unlocked/locked around each call to the user function - since the user
	 * function must be allowed to make ACPICA calls itself (for example, it
	 * will typically execute control methods during device enumeration.)
	 */
	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		goto unlock_and_exit;
	}

	status = acpi_ns_walk_namespace(type, start_object, max_depth,
					ACPI_NS_WALK_UNLOCK, pre_order_visit,
					post_order_visit, context,
					return_value);

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);

unlock_and_exit:
	/* Release the reader lock acquired above, on all paths */

	(void)acpi_ut_release_read_lock(&acpi_gbl_namespace_rw_lock);
	return_ACPI_STATUS(status);
}
/*
 * acpi_ns_dump_object_paths - Print the full pathname of every matching
 * namespace object beneath start_handle. Two passes are made: the first
 * measures the maximum tree depth (for output formatting), the second
 * prints the paths. display_type and owner_id are currently unused here.
 */
void
acpi_ns_dump_object_paths(acpi_object_type type,
			  u8 display_type,
			  u32 max_depth,
			  acpi_owner_id owner_id, acpi_handle start_handle)
{
	u32 deepest_level = 0;
	acpi_status status;

	ACPI_FUNCTION_ENTRY();

	/*
	 * Hold the namespace mutex across both walks so the tree (including
	 * the temporary nodes, which are displayed too) cannot change while
	 * it is being printed.
	 */
	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		acpi_os_printf("Could not acquire namespace mutex\n");
		return;
	}

	/* Pass 1: measure the namespace tree depth for formatting below */

	(void)acpi_ns_walk_namespace(type, start_handle, max_depth,
				     ACPI_NS_WALK_NO_UNLOCK |
				     ACPI_NS_WALK_TEMP_NODES,
				     acpi_ns_get_max_depth, NULL,
				     (void *)&deepest_level, NULL);

	/* Pass 2: dump the entire namespace */

	(void)acpi_ns_walk_namespace(type, start_handle, max_depth,
				     ACPI_NS_WALK_NO_UNLOCK |
				     ACPI_NS_WALK_TEMP_NODES,
				     acpi_ns_dump_one_object_path, NULL,
				     (void *)&deepest_level, NULL);

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
}
/*
 * acpi_ev_update_gpes - Match GPE methods (_Lxx/_Exx) owned by the table
 * identified by table_owner_id against every GPE block on every GPE
 * interrupt level, and report how many GPEs were newly enabled.
 * Holds ACPI_MTX_EVENTS for the duration of the scan.
 */
void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
{
	struct acpi_gpe_xrupt_info *gpe_xrupt_info;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_walk_info walk_info;
	acpi_status status = AE_OK;

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return;
	}

	/* Restrict the method match to objects owned by this table */

	walk_info.count = 0;
	walk_info.owner_id = table_owner_id;
	walk_info.execute_by_owner_id = TRUE;

	/* Outer loop: each GPE interrupt level descriptor */

	gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
	while (gpe_xrupt_info) {

		/* Inner loop: each GPE block attached to this level */

		gpe_block = gpe_xrupt_info->gpe_block_list_head;
		while (gpe_block) {
			walk_info.gpe_block = gpe_block;
			walk_info.gpe_device = gpe_block->node;

			/* Match _Lxx/_Exx methods below this block's device */

			status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD,
							walk_info.gpe_device,
							ACPI_UINT32_MAX,
							ACPI_NS_WALK_NO_UNLOCK,
							acpi_ev_match_gpe_method,
							NULL, &walk_info, NULL);
			if (ACPI_FAILURE(status)) {
				ACPI_EXCEPTION((AE_INFO, status,
						"While decoding _Lxx/_Exx methods"));
			}

			gpe_block = gpe_block->next;
		}

		gpe_xrupt_info = gpe_xrupt_info->next;
	}

	/* walk_info.count accumulates across all blocks; report once */

	if (walk_info.count) {
		ACPI_INFO((AE_INFO, "Enabled %u new GPEs", walk_info.count));
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return;
}
/*
 * acpi_get_devices - Walk all Device objects in the namespace, invoking
 * user_function for each (optionally filtered by hardware ID 'HID').
 * return_value receives the user function's value on early termination.
 */
acpi_status
acpi_get_devices(char *HID,
		 acpi_walk_callback user_function,
		 void *context, void **return_value)
{
	struct acpi_get_devices_info info;
	acpi_status status;

	ACPI_FUNCTION_TRACE("acpi_get_devices");

	/* A user callback is mandatory */

	if (!user_function) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/*
	 * Capture the caller's callback, context, and HID filter; our own
	 * internal callback (acpi_ns_get_device_callback) invokes the user
	 * function from within the walk.
	 */
	info.hid = HID;
	info.user_function = user_function;
	info.context = context;

	/*
	 * The namespace is locked for the walk and unlocked/relocked around
	 * each user-function invocation, since that function may itself make
	 * ACPI calls.
	 */
	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	status = acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
					ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
					acpi_ns_get_device_callback, &info,
					return_value);

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return_ACPI_STATUS(status);
}
static void acpi_db_count_namespace_objects(void) { u32 i; acpi_gbl_num_nodes = 0; acpi_gbl_num_objects = 0; acpi_gbl_obj_type_count_misc = 0; for (i = 0; i < (ACPI_TYPE_NS_NODE_MAX - 1); i++) { acpi_gbl_obj_type_count[i] = 0; acpi_gbl_node_type_count[i] = 0; } (void)acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, FALSE, acpi_db_classify_one_object, NULL, NULL, NULL); }
/*
 * acpi_ns_dump_objects - Display namespace objects of the given type
 * owned by owner_id, beneath start_handle, down to max_depth levels.
 */
void
acpi_ns_dump_objects(acpi_object_type type,
		     u8 display_type,
		     u32 max_depth,
		     acpi_owner_id owner_id, acpi_handle start_handle)
{
	struct acpi_walk_info walk_info;

	ACPI_FUNCTION_ENTRY();

	/* Package the display parameters for the per-object callback */

	walk_info.display_type = display_type;
	walk_info.owner_id = owner_id;
	walk_info.debug_level = ACPI_LV_TABLES;

	(void)acpi_ns_walk_namespace(type, start_handle, max_depth,
				     ACPI_NS_WALK_NO_UNLOCK,
				     acpi_ns_dump_one_object,
				     (void *)&walk_info, NULL);
}
/*
 * acpi_get_devices - Walk all Device objects in the namespace, invoking
 * user_function for each (optionally filtered by hardware ID 'HID').
 * return_value receives the user function's value on early termination.
 */
ACPI_STATUS
acpi_get_devices(NATIVE_CHAR *HID,
		 WALK_CALLBACK user_function,
		 void *context, void **return_value)
{
	ACPI_GET_DEVICES_INFO info;
	ACPI_STATUS status;

	/* A user callback is mandatory */

	if (!user_function) {
		return (AE_BAD_PARAMETER);
	}

	/*
	 * Capture the caller's callback, context, and HID filter; our own
	 * internal callback (acpi_ns_get_device_callback) invokes the user
	 * function from within the walk.
	 */
	info.hid = HID;
	info.user_function = user_function;
	info.context = context;

	/*
	 * The namespace is locked for the walk and unlocked/relocked around
	 * each user-function invocation, since that function may itself make
	 * ACPI calls.
	 */
	acpi_cm_acquire_mutex(ACPI_MTX_NAMESPACE);

	status = acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
					ACPI_UINT32_MAX, NS_WALK_UNLOCK,
					acpi_ns_get_device_callback, &info,
					return_value);

	acpi_cm_release_mutex(ACPI_MTX_NAMESPACE);
	return (status);
}
/*
 * acpi_ns_initialize_devices - Run acpi_ns_init_one_device on every
 * Device object in the namespace. 'flags' is passed through to the
 * callback via the walk info block; counters track the results.
 */
ACPI_STATUS acpi_ns_initialize_devices(u32 flags)
{
	ACPI_DEVICE_WALK_INFO info;
	ACPI_STATUS status;

	/* Record the caller's flags and clear the statistics counters */

	info.flags = flags;
	info.num_INI = 0;
	info.num_STA = 0;
	info.device_count = 0;

	/* Visit every Device object in the tree */

	status = acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
					ACPI_UINT32_MAX, FALSE,
					acpi_ns_init_one_device, &info, NULL);

	return (status);
}
/*
 * acpi_ev_execute_reg_methods - Execute all _REG methods for Operation
 * Regions of the given space_id beneath 'node'.
 */
acpi_status
acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
			    acpi_adr_space_type space_id)
{
	acpi_status walk_status;

	ACPI_FUNCTION_TRACE(ev_execute_reg_methods);

	/*
	 * Execute every _REG method for all Operation Regions of this space
	 * ID. A dedicated walk handles interdependencies between regions and
	 * _REG methods: handlers must exist for every region of the space
	 * before any _REG can run.
	 */
	walk_status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node,
					     ACPI_UINT32_MAX,
					     ACPI_NS_WALK_UNLOCK,
					     acpi_ev_reg_run, &space_id,
					     NULL);

	return_ACPI_STATUS(walk_status);
}
acpi_status acpi_ns_initialize_devices(void) { acpi_status status; struct acpi_device_walk_info info; ACPI_FUNCTION_TRACE("ns_initialize_devices"); /* Init counters */ info.device_count = 0; info.num_STA = 0; info.num_INI = 0; ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "Executing all Device _STA and_INI methods:")); status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Walk namespace for all objects */ status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, TRUE, acpi_ns_init_one_device, &info, NULL); (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "walk_namespace failed! %s\n", acpi_format_exception(status))); } ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "\n%hd Devices found containing: %hd _STA, %hd _INI methods\n", info.device_count, info.num_STA, info.num_INI)); return_ACPI_STATUS(status); }
/*
 * acpi_ev_execute_reg_methods - Execute all _REG methods for Operation
 * Regions of the given space_id beneath 'node', counting how many ran.
 */
acpi_status
acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
			    acpi_adr_space_type space_id)
{
	struct acpi_reg_walk_info reg_info;
	acpi_status walk_status;

	ACPI_FUNCTION_TRACE(ev_execute_reg_methods);

	reg_info.reg_run_count = 0;
	reg_info.space_id = space_id;

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_NAMES,
			      " Running _REG methods for SpaceId %s\n",
			      acpi_ut_get_region_name(reg_info.space_id)));

	/*
	 * Execute every _REG method belonging to an Operation Region of this
	 * space ID. A separate walk resolves interdependencies between
	 * regions and _REG methods: handlers for every region of the space
	 * are installed before any _REG executes.
	 */
	walk_status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node,
					     ACPI_UINT32_MAX,
					     ACPI_NS_WALK_UNLOCK,
					     acpi_ev_reg_run, NULL,
					     &reg_info, NULL);

	/* EC special case: run "orphan" _REG methods that have no region */

	if (space_id == ACPI_ADR_SPACE_EC) {
		acpi_ev_orphan_ec_reg_method(node);
	}

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_NAMES,
			      " Executed %u _REG methods for SpaceId %s\n",
			      reg_info.reg_run_count,
			      acpi_ut_get_region_name(reg_info.space_id)));

	return_ACPI_STATUS(walk_status);
}
/*
 * acpi_walk_namespace - Depth-first walk of the namespace, calling
 * user_function for each matching object of 'type' down to max_depth
 * levels below start_object. *return_value receives the callback's
 * value if the walk terminates early.
 */
ACPI_STATUS
acpi_walk_namespace(ACPI_OBJECT_TYPE type,
		    ACPI_HANDLE start_object,
		    u32 max_depth,
		    WALK_CALLBACK user_function,
		    void *context, void **return_value)
{
	ACPI_STATUS status;

	/* Validate all required parameters */

	if ((type > ACPI_TYPE_MAX) || (!max_depth) || (!user_function)) {
		return (AE_BAD_PARAMETER);
	}

	/*
	 * The namespace is locked for the walk and unlocked/relocked around
	 * each user-function invocation, since that function may itself make
	 * ACPI calls.
	 */
	acpi_cm_acquire_mutex(ACPI_MTX_NAMESPACE);

	status = acpi_ns_walk_namespace((OBJECT_TYPE_INTERNAL) type,
					start_object, max_depth,
					NS_WALK_UNLOCK, user_function,
					context, return_value);

	acpi_cm_release_mutex(ACPI_MTX_NAMESPACE);
	return (status);
}
/*
 * acpi_ev_update_gpes - Enable GPEs that gained _Lxx/_Exx methods from a
 * newly loaded table (identified by table_owner_id). Holds
 * ACPI_MTX_EVENTS while scanning every GPE block on every interrupt level.
 */
void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
{
	struct acpi_gpe_xrupt_info *gpe_xrupt_info;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_walk_info walk_info;
	acpi_status status = AE_OK;

	/*
	 * Find any _Lxx/_Exx GPE methods that have just been loaded.
	 *
	 * Any GPEs that correspond to new _Lxx/_Exx methods are immediately
	 * enabled.
	 *
	 * Examine the namespace underneath each gpe_device within the
	 * gpe_block lists.
	 */
	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return;
	}

	/* Restrict the method match to objects owned by this table */

	walk_info.count = 0;
	walk_info.owner_id = table_owner_id;
	walk_info.execute_by_owner_id = TRUE;

	/* Walk the interrupt level descriptor list */

	gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
	while (gpe_xrupt_info) {

		/* Walk all Gpe Blocks attached to this interrupt level */

		gpe_block = gpe_xrupt_info->gpe_block_list_head;
		while (gpe_block) {
			walk_info.gpe_block = gpe_block;
			walk_info.gpe_device = gpe_block->node;

			/* Match _Lxx/_Exx methods below this block's device */

			status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD,
							walk_info.gpe_device,
							ACPI_UINT32_MAX,
							ACPI_NS_WALK_NO_UNLOCK,
							acpi_ev_match_gpe_method,
							NULL, &walk_info, NULL);
			if (ACPI_FAILURE(status)) {
				ACPI_EXCEPTION((AE_INFO, status,
						"While decoding _Lxx/_Exx methods"));
			}

			gpe_block = gpe_block->next;
		}

		gpe_xrupt_info = gpe_xrupt_info->next;
	}

	/* walk_info.count accumulated across all blocks; report once */

	if (walk_info.count) {
		ACPI_INFO((AE_INFO, "Enabled %u new GPEs", walk_info.count));
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return;
}
/*
 * acpi_ev_initialize_gpe_block - Enable all runtime GPEs in the block
 * that have an associated _Lxx/_Exx dispatch method, counting wake and
 * enabled GPEs. A null gpe_block is silently accepted.
 */
acpi_status
acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
			     struct acpi_gpe_block_info *gpe_block)
{
	struct acpi_gpe_event_info *gpe_event_info;
	struct acpi_gpe_walk_info gpe_info;
	u32 wake_gpe_count;
	u32 gpe_enabled_count;
	u32 i;
	u32 j;

	ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);

	/* Ignore a null GPE block (e.g., if no GPE block 1 exists) */

	if (!gpe_block) {
		return_ACPI_STATUS(AE_OK);
	}

	/*
	 * Runtime option: Should wake GPEs be enabled at runtime? The default
	 * is no, they should only be enabled just as the machine goes to sleep.
	 */
	if (acpi_gbl_leave_wake_gpes_disabled) {
		/*
		 * Differentiate runtime vs wake GPEs, via the _PRW control methods.
		 * Each GPE that has one or more _PRWs that reference it is by
		 * definition a wake GPE and will not be enabled while the machine
		 * is running.
		 */
		gpe_info.gpe_block = gpe_block;
		gpe_info.gpe_device = gpe_device;

		/* Walk status is intentionally ignored here */

		acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
				       ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
				       acpi_ev_match_prw_and_gpe, NULL,
				       &gpe_info, NULL);
	}

	/*
	 * Enable all GPEs that have a corresponding method and aren't
	 * capable of generating wakeups. Any other GPEs within this block
	 * must be enabled via the acpi_enable_gpe() interface.
	 */
	wake_gpe_count = 0;
	gpe_enabled_count = 0;

	/*
	 * NOTE(review): a NULL gpe_device appears to select the FADT-defined
	 * GPE block inside acpi_enable_gpe() -- confirm against that API.
	 */
	if (gpe_device == acpi_gbl_fadt_gpe_device)
		gpe_device = NULL;

	for (i = 0; i < gpe_block->register_count; i++) {
		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
			acpi_status status;
			acpi_size gpe_index;
			int gpe_number;

			/* Get the info block for this particular GPE */

			gpe_index = (acpi_size)i * ACPI_GPE_REGISTER_WIDTH + j;
			gpe_event_info = &gpe_block->event_info[gpe_index];

			/* Wake-capable GPEs are counted but may stay disabled */

			if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) {
				wake_gpe_count++;
				if (acpi_gbl_leave_wake_gpes_disabled)
					continue;
			}

			/* Only GPEs with a dispatch method are enabled */

			if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD))
				continue;

			gpe_number = gpe_index + gpe_block->block_base_number;
			status = acpi_enable_gpe(gpe_device, gpe_number,
						 ACPI_GPE_TYPE_RUNTIME);
			if (ACPI_FAILURE(status))
				ACPI_ERROR((AE_INFO,
					    "Failed to enable GPE %02X\n",
					    gpe_number));
			else
				gpe_enabled_count++;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INIT,
			  "Found %u Wake, Enabled %u Runtime GPEs in this block\n",
			  wake_gpe_count, gpe_enabled_count));

	return_ACPI_STATUS(AE_OK);
}
/*
 * acpi_ns_initialize_devices - Execute _INI methods (with _STA checks)
 * for Device/Processor/Thermal objects throughout the namespace.
 *
 * Sequence: (1) mark subtrees containing _INI methods, (2) allocate the
 * shared evaluate_info block, (3) run the root-level "global" _INI (a
 * Windows compatibility behavior, per the comment below), (4) walk the
 * namespace executing _INI on present devices. The evaluate_info block
 * is freed on both the success and failure paths.
 */
acpi_status acpi_ns_initialize_devices(void)
{
	acpi_status status;
	struct acpi_device_walk_info info;

	ACPI_FUNCTION_TRACE(ns_initialize_devices);

	/* Init counters */

	info.device_count = 0;
	info.num_STA = 0;
	info.num_INI = 0;

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
			      "Initializing Device/Processor/Thermal objects "
			      "and executing _INI/_STA methods:\n"));

	/* Tree analysis: find all subtrees that contain _INI methods */

	status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
					ACPI_UINT32_MAX, FALSE,
					acpi_ns_find_ini_methods, NULL,
					&info, NULL);
	if (ACPI_FAILURE(status)) {
		goto error_exit;
	}

	/* Allocate the evaluation information block */

	info.evaluate_info =
	    ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
	if (!info.evaluate_info) {
		status = AE_NO_MEMORY;
		goto error_exit;
	}

	/*
	 * Execute the "global" _INI method that may appear at the root. This
	 * support is provided for Windows compatibility (Vista+) and is not
	 * part of the ACPI specification.
	 */
	info.evaluate_info->prefix_node = acpi_gbl_root_node;
	info.evaluate_info->relative_pathname = METHOD_NAME__INI;
	info.evaluate_info->parameters = NULL;
	info.evaluate_info->flags = ACPI_IGNORE_RETURN_VALUE;

	/* A missing root _INI is not an error; count it only on success */

	status = acpi_ns_evaluate(info.evaluate_info);
	if (ACPI_SUCCESS(status)) {
		info.num_INI++;
	}

	/* Walk namespace to execute all _INIs on present devices */

	status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
					ACPI_UINT32_MAX, FALSE,
					acpi_ns_init_one_device, NULL,
					&info, NULL);

	/*
	 * Any _OSI requests should be completed by now. If the BIOS has
	 * requested any Windows OSI strings, we will always truncate
	 * I/O addresses to 16 bits -- for Windows compatibility.
	 */
	if (acpi_gbl_osi_data >= ACPI_OSI_WIN_2000) {
		acpi_gbl_truncate_io_addresses = TRUE;
	}

	ACPI_FREE(info.evaluate_info);
	if (ACPI_FAILURE(status)) {
		goto error_exit;
	}

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
			      " Executed %u _INI methods requiring %u _STA executions "
			      "(examined %u objects)\n",
			      info.num_INI, info.num_STA, info.device_count));

	return_ACPI_STATUS(status);

error_exit:
	ACPI_EXCEPTION((AE_INFO, status, "During device initialization"));
	return_ACPI_STATUS(status);
}
/*
 * acpi_ev_create_gpe_block - Allocate and initialize a new GPE block,
 * install it in the global lists, and find all _Lxx/_Exx methods below
 * gpe_device for it. On success *return_gpe_block (if supplied) receives
 * the new block and the global GPE count is updated.
 *
 * RETURN: AE_OK (a zero register_count is accepted and ignored);
 *         AE_NO_MEMORY or an install failure status otherwise.
 */
acpi_status
acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
			 struct acpi_generic_address *gpe_block_address,
			 u32 register_count,
			 u8 gpe_block_base_number,
			 u32 interrupt_number,
			 struct acpi_gpe_block_info **return_gpe_block)
{
	acpi_status status;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_walk_info walk_info;

	ACPI_FUNCTION_TRACE(ev_create_gpe_block);

	if (!register_count) {
		return_ACPI_STATUS(AE_OK);
	}

	/* Allocate a new GPE block */

	gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info));
	if (!gpe_block) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Initialize the new GPE block */

	gpe_block->node = gpe_device;
	gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH);
	gpe_block->initialized = FALSE;
	gpe_block->register_count = register_count;
	gpe_block->block_base_number = gpe_block_base_number;

	ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address,
		    sizeof(struct acpi_generic_address));

	/*
	 * Create the register_info and event_info sub-structures
	 * Note: disables and clears all GPEs in the block
	 */
	status = acpi_ev_create_gpe_info_blocks(gpe_block);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(gpe_block);
		return_ACPI_STATUS(status);
	}

	/* Install the new block in the global lists */

	status = acpi_ev_install_gpe_block(gpe_block, interrupt_number);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(gpe_block);
		return_ACPI_STATUS(status);
	}

	/* A new block means GPE initialization must run again */

	acpi_gbl_all_gpes_initialized = FALSE;

	/* Find all GPE methods (_Lxx or _Exx) for this block */

	walk_info.gpe_block = gpe_block;
	walk_info.gpe_device = gpe_device;
	walk_info.execute_by_owner_id = FALSE;

	/*
	 * Walk status is intentionally ignored; this function returns AE_OK
	 * even if the method scan fails.
	 */
	status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
					ACPI_UINT32_MAX,
					ACPI_NS_WALK_NO_UNLOCK,
					acpi_ev_match_gpe_method, NULL,
					&walk_info, NULL);

	/* Return the new block */

	if (return_gpe_block) {
		(*return_gpe_block) = gpe_block;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INIT,
			  "GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n",
			  (u32) gpe_block->block_base_number,
			  (u32) (gpe_block->block_base_number +
				 (gpe_block->gpe_count - 1)),
			  gpe_device->name.ascii, gpe_block->register_count,
			  interrupt_number));

	/* Update global count of currently available GPEs */

	acpi_current_gpe_count += gpe_block->gpe_count;
	return_ACPI_STATUS(AE_OK);
}
/*
 * acpi_ev_initialize_gpe_block - Count runtime/wake GPEs in the block
 * and enable all valid runtime GPEs via the hardware layer. A null
 * gpe_block is silently accepted.
 */
acpi_status
acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
			     struct acpi_gpe_block_info *gpe_block)
{
	acpi_status status;
	struct acpi_gpe_event_info *gpe_event_info;
	struct acpi_gpe_walk_info gpe_info;
	u32 wake_gpe_count;
	u32 gpe_enabled_count;
	acpi_native_uint i;
	acpi_native_uint j;

	ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);

	/* Ignore a null GPE block (e.g., if no GPE block 1 exists) */

	if (!gpe_block) {
		return_ACPI_STATUS(AE_OK);
	}

	/*
	 * Runtime option: Should wake GPEs be enabled at runtime? The default
	 * is no, they should only be enabled just as the machine goes to sleep.
	 */
	if (acpi_gbl_leave_wake_gpes_disabled) {
		/*
		 * Differentiate runtime vs wake GPEs, via the _PRW control methods.
		 * Each GPE that has one or more _PRWs that reference it is by
		 * definition a wake GPE and will not be enabled while the machine
		 * is running.
		 */
		gpe_info.gpe_block = gpe_block;
		gpe_info.gpe_device = gpe_device;

		/* Note: this walk status is overwritten before use below */

		status =
		    acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
					   ACPI_UINT32_MAX,
					   ACPI_NS_WALK_UNLOCK,
					   acpi_ev_match_prw_and_gpe,
					   &gpe_info, NULL);
	}

	/*
	 * Enable all GPEs in this block that have these attributes:
	 * 1) are "runtime" or "run/wake" GPEs, and
	 * 2) have a corresponding _Lxx or _Exx method
	 *
	 * Any other GPEs within this block must be enabled via the acpi_enable_gpe()
	 * external interface.
	 */
	wake_gpe_count = 0;
	gpe_enabled_count = 0;

	for (i = 0; i < gpe_block->register_count; i++) {
		/*
		 * NOTE(review): the literal 8 here presumably equals
		 * ACPI_GPE_REGISTER_WIDTH (used in the index expression
		 * below) -- confirm.
		 */
		for (j = 0; j < 8; j++) {

			/* Get the info block for this particular GPE */

			gpe_event_info = &gpe_block->
			    event_info[(i * ACPI_GPE_REGISTER_WIDTH) + j];

			/* Count method-dispatched runtime GPEs */

			if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
			     ACPI_GPE_DISPATCH_METHOD)
			    && (gpe_event_info->flags & ACPI_GPE_TYPE_RUNTIME)) {
				gpe_enabled_count++;
			}

			if (gpe_event_info->flags & ACPI_GPE_TYPE_WAKE) {
				wake_gpe_count++;
			}
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INIT,
			  "Found %u Wake, Enabled %u Runtime GPEs in this block\n",
			  wake_gpe_count, gpe_enabled_count));

	/* Enable all valid runtime GPEs found above */

	status = acpi_hw_enable_runtime_gpe_block(NULL, gpe_block);
	if (ACPI_FAILURE(status)) {
		ACPI_ERROR((AE_INFO, "Could not enable GPEs in GpeBlock %p",
			    gpe_block));
	}

	return_ACPI_STATUS(status);
}
/*
 * acpi_ev_create_gpe_block - Allocate and initialize a new GPE block,
 * install it in the global list(s), and scan below gpe_device for its
 * _Lxx/_Exx methods. On success *return_gpe_block (if supplied) receives
 * the new block.
 *
 * RETURN: AE_OK (a zero register_count is accepted and ignored);
 *         AE_NO_MEMORY or an install failure status otherwise.
 */
acpi_status
acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
			 struct acpi_generic_address *gpe_block_address,
			 u32 register_count,
			 u8 gpe_block_base_number,
			 u32 interrupt_level,
			 struct acpi_gpe_block_info **return_gpe_block)
{
	struct acpi_gpe_block_info *gpe_block;
	acpi_status status;

	ACPI_FUNCTION_TRACE("ev_create_gpe_block");

	if (!register_count) {
		return_ACPI_STATUS(AE_OK);
	}

	/* Allocate a new GPE block (zero-initialized) */

	gpe_block = ACPI_MEM_CALLOCATE(sizeof(struct acpi_gpe_block_info));
	if (!gpe_block) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Initialize the new GPE block */

	gpe_block->register_count = register_count;
	gpe_block->block_base_number = gpe_block_base_number;

	ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address,
		    sizeof(struct acpi_generic_address));

	/* Create the register_info and event_info sub-structures */

	status = acpi_ev_create_gpe_info_blocks(gpe_block);
	if (ACPI_FAILURE(status)) {
		ACPI_MEM_FREE(gpe_block);
		return_ACPI_STATUS(status);
	}

	/* Install the new block in the global list(s) */

	status = acpi_ev_install_gpe_block(gpe_block, interrupt_level);
	if (ACPI_FAILURE(status)) {
		ACPI_MEM_FREE(gpe_block);
		return_ACPI_STATUS(status);
	}

	/* Dump info about this GPE block */

	ACPI_DEBUG_PRINT((ACPI_DB_INIT,
			  "GPE %02d to %02d [%4.4s] %d regs at %8.8X%8.8X on int %d\n",
			  gpe_block->block_base_number,
			  (u32) (gpe_block->block_base_number +
				 ((gpe_block->register_count *
				   ACPI_GPE_REGISTER_WIDTH) - 1)),
			  gpe_device->name.ascii, gpe_block->register_count,
			  ACPI_HIDWORD(gpe_block->block_address.address),
			  ACPI_LODWORD(gpe_block->block_address.address),
			  interrupt_level));

	/* Find all GPE methods (_Lxx, _Exx) for this block */

	/*
	 * Walk status is intentionally ignored; this function returns AE_OK
	 * even if the method scan fails.
	 */
	status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
					ACPI_UINT32_MAX,
					ACPI_NS_WALK_NO_UNLOCK,
					acpi_ev_save_method_info, gpe_block,
					NULL);

	/* Return the new block */

	if (return_gpe_block) {
		(*return_gpe_block) = gpe_block;
	}

	return_ACPI_STATUS(AE_OK);
}
/*
 * acpi_ev_install_space_handler - Install an address-space handler (and
 * optional region setup routine) for the given space_id on 'node', then
 * walk the subtree so all Operation Regions of that space are attached
 * to the new handler. ACPI_DEFAULT_HANDLER selects the built-in handler
 * for the space. Valid only on Device/Processor/Thermal nodes or the
 * namespace root.
 *
 * RETURN: AE_BAD_PARAMETER for invalid node/space; AE_SAME_HANDLER if
 * this exact handler is already installed; AE_ALREADY_EXISTS if another
 * handler owns the space; AE_NO_MEMORY on allocation failure; otherwise
 * the status of the region-attach walk.
 */
acpi_status
acpi_ev_install_space_handler(struct acpi_namespace_node * node,
			      acpi_adr_space_type space_id,
			      acpi_adr_space_handler handler,
			      acpi_adr_space_setup setup, void *context)
{
	union acpi_operand_object *obj_desc;
	union acpi_operand_object *handler_obj;
	acpi_status status;
	acpi_object_type type;
	u8 flags = 0;

	ACPI_FUNCTION_TRACE(ev_install_space_handler);

	/*
	 * This registration is valid for only the types below
	 * and the root. This is where the default handlers
	 * get placed.
	 */
	if ((node->type != ACPI_TYPE_DEVICE) &&
	    (node->type != ACPI_TYPE_PROCESSOR) &&
	    (node->type != ACPI_TYPE_THERMAL) && (node != acpi_gbl_root_node)) {
		status = AE_BAD_PARAMETER;
		goto unlock_and_exit;
	}

	/* Map ACPI_DEFAULT_HANDLER to the built-in handler for the space */

	if (handler == ACPI_DEFAULT_HANDLER) {
		flags = ACPI_ADDR_HANDLER_DEFAULT_INSTALLED;

		switch (space_id) {
		case ACPI_ADR_SPACE_SYSTEM_MEMORY:
			handler = acpi_ex_system_memory_space_handler;
			setup = acpi_ev_system_memory_region_setup;
			break;

		case ACPI_ADR_SPACE_SYSTEM_IO:
			handler = acpi_ex_system_io_space_handler;
			setup = acpi_ev_io_space_region_setup;
			break;

		case ACPI_ADR_SPACE_PCI_CONFIG:
			handler = acpi_ex_pci_config_space_handler;
			setup = acpi_ev_pci_config_region_setup;
			break;

		case ACPI_ADR_SPACE_CMOS:
			handler = acpi_ex_cmos_space_handler;
			setup = acpi_ev_cmos_region_setup;
			break;

		case ACPI_ADR_SPACE_PCI_BAR_TARGET:
			handler = acpi_ex_pci_bar_space_handler;
			setup = acpi_ev_pci_bar_region_setup;
			break;

		case ACPI_ADR_SPACE_DATA_TABLE:
			handler = acpi_ex_data_table_space_handler;
			setup = NULL;
			break;

		default:
			status = AE_BAD_PARAMETER;
			goto unlock_and_exit;
		}
	}

	/* If the caller hasn't specified a setup routine, use the default */

	if (!setup) {
		setup = acpi_ev_default_region_setup;
	}

	/* Check for an existing internal object */

	obj_desc = acpi_ns_get_attached_object(node);
	if (obj_desc) {
		/*
		 * The attached device object already exists.
		 * Make sure the handler is not already installed.
		 */
		handler_obj = obj_desc->device.handler;

		/* Walk the handler list for this device */

		while (handler_obj) {

			/* Same space_id indicates a handler already installed */

			if (handler_obj->address_space.space_id == space_id) {
				if (handler_obj->address_space.handler ==
				    handler) {
					/*
					 * It is (relatively) OK to attempt to install the SAME
					 * handler twice. This can easily happen
					 * with PCI_Config space.
					 */
					status = AE_SAME_HANDLER;
					goto unlock_and_exit;
				} else {
					/* A handler is already installed */

					status = AE_ALREADY_EXISTS;
				}
				goto unlock_and_exit;
			}

			/* Walk the linked list of handlers */

			handler_obj = handler_obj->address_space.next;
		}
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
				  "Creating object on Device %p while installing handler\n",
				  node));

		/* obj_desc does not exist, create one */

		if (node->type == ACPI_TYPE_ANY) {
			type = ACPI_TYPE_DEVICE;
		} else {
			type = node->type;
		}

		obj_desc = acpi_ut_create_internal_object(type);
		if (!obj_desc) {
			status = AE_NO_MEMORY;
			goto unlock_and_exit;
		}

		/* Init new descriptor */

		obj_desc->common.type = (u8) type;

		/* Attach the new object to the Node */

		status = acpi_ns_attach_object(node, obj_desc, type);

		/* Remove local reference to the object */

		acpi_ut_remove_reference(obj_desc);

		if (ACPI_FAILURE(status)) {
			goto unlock_and_exit;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
			  "Installing address handler for region %s(%X) on Device %4.4s %p(%p)\n",
			  acpi_ut_get_region_name(space_id), space_id,
			  acpi_ut_get_node_name(node), node, obj_desc));

	/*
	 * Install the handler
	 *
	 * At this point there is no existing handler.
	 * Just allocate the object for the handler and link it
	 * into the list.
	 */
	handler_obj =
	    acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_ADDRESS_HANDLER);
	if (!handler_obj) {
		status = AE_NO_MEMORY;
		goto unlock_and_exit;
	}

	/* Init handler obj */

	handler_obj->address_space.space_id = (u8) space_id;
	handler_obj->address_space.handler_flags = flags;
	handler_obj->address_space.region_list = NULL;
	handler_obj->address_space.node = node;
	handler_obj->address_space.handler = handler;
	handler_obj->address_space.context = context;
	handler_obj->address_space.setup = setup;

	/* Install at head of Device.address_space list */

	handler_obj->address_space.next = obj_desc->device.handler;

	/*
	 * The Device object is the first reference on the handler_obj.
	 * Each region that uses the handler adds a reference.
	 */
	obj_desc->device.handler = handler_obj;

	/*
	 * Walk the namespace finding all of the regions this
	 * handler will manage.
	 *
	 * Start at the device and search the branch toward
	 * the leaf nodes until either the leaf is encountered or
	 * a device is detected that has an address handler of the
	 * same type.
	 *
	 * In either case, back up and search down the remainder
	 * of the branch
	 */
	status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
					ACPI_NS_WALK_UNLOCK,
					acpi_ev_install_handler, handler_obj,
					NULL);

	/*
	 * NOTE(review): no lock is acquired within this function; the label
	 * name suggests the namespace lock is held by the caller -- confirm.
	 */
unlock_and_exit:
	return_ACPI_STATUS(status);
}
/*
 * acpi_ns_initialize_devices - run the device-initialization (_STA/_INI)
 * and address-space (_REG) passes over the entire namespace.
 *
 * flags - ACPI_NO_DEVICE_INIT suppresses the _INI/_STA work;
 *         ACPI_NO_ADDRESS_SPACE_INIT suppresses the _REG pass.
 *
 * Execution order (deliberate): locate _INI subtrees, evaluate the
 * root \_INI and \_SB._INI, run all _REG methods, then walk the tree
 * executing _INI on present devices.
 *
 * Returns: AE_OK, or the first fatal failure status (failures of the
 * optional \_INI and \_SB._INI evaluations are intentionally ignored).
 */
acpi_status acpi_ns_initialize_devices(u32 flags)
{
	acpi_status status = AE_OK;
	struct acpi_device_walk_info info;
	acpi_handle handle;

	ACPI_FUNCTION_TRACE(ns_initialize_devices);

	if (!(flags & ACPI_NO_DEVICE_INIT)) {
		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "[Init] Initializing ACPI Devices\n"));

		/* Init counters */

		info.device_count = 0;
		info.num_STA = 0;
		info.num_INI = 0;

		ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
				      "Initializing Device/Processor/Thermal objects "
				      "and executing _INI/_STA methods:\n"));

		/* Tree analysis: find all subtrees that contain _INI methods */

		status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
						ACPI_UINT32_MAX, FALSE,
						acpi_ns_find_ini_methods, NULL,
						&info, NULL);
		if (ACPI_FAILURE(status)) {
			goto error_exit;
		}

		/* Allocate the evaluation information block */

		info.evaluate_info =
		    ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
		if (!info.evaluate_info) {
			status = AE_NO_MEMORY;
			goto error_exit;
		}

		/*
		 * Execute the "global" _INI method that may appear at the root.
		 * This support is provided for Windows compatibility (Vista+) and
		 * is not part of the ACPI specification.
		 */
		info.evaluate_info->prefix_node = acpi_gbl_root_node;
		info.evaluate_info->relative_pathname = METHOD_NAME__INI;
		info.evaluate_info->parameters = NULL;
		info.evaluate_info->flags = ACPI_IGNORE_RETURN_VALUE;

		/* \_INI is optional -- a failure here is ignored on purpose */

		status = acpi_ns_evaluate(info.evaluate_info);
		if (ACPI_SUCCESS(status)) {
			info.num_INI++;
		}

		/*
		 * Execute \_SB._INI.
		 * There appears to be a strict order requirement for \_SB._INI,
		 * which should be evaluated before any _REG evaluations.
		 */
		status = acpi_get_handle(NULL, "\\_SB", &handle);
		if (ACPI_SUCCESS(status)) {
			/* Reuse the evaluation block; clear prior state first */

			memset(info.evaluate_info, 0,
			       sizeof(struct acpi_evaluate_info));
			info.evaluate_info->prefix_node = handle;
			info.evaluate_info->relative_pathname =
			    METHOD_NAME__INI;
			info.evaluate_info->parameters = NULL;
			info.evaluate_info->flags = ACPI_IGNORE_RETURN_VALUE;

			status = acpi_ns_evaluate(info.evaluate_info);
			if (ACPI_SUCCESS(status)) {
				info.num_INI++;
			}
		}
	}

	/*
	 * Run all _REG methods
	 *
	 * Note: Any objects accessed by the _REG methods will be automatically
	 * initialized, even if they contain executable AML (see the call to
	 * acpi_ns_initialize_objects below).
	 *
	 * Note: According to the ACPI specification, we actually needn't execute
	 * _REG for system_memory/system_io operation regions, but for PCI_Config
	 * operation regions, it is required to evaluate _REG for those on a PCI
	 * root bus that doesn't contain _BBN object. So this code is kept here
	 * in order not to break things.
	 */
	if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "[Init] Executing _REG OpRegion methods\n"));

		status = acpi_ev_initialize_op_regions();
		if (ACPI_FAILURE(status)) {
			goto error_exit;
		}
	}

	if (!(flags & ACPI_NO_DEVICE_INIT)) {

		/* Walk namespace to execute all _INIs on present devices */

		status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
						ACPI_UINT32_MAX, FALSE,
						acpi_ns_init_one_device, NULL,
						&info, NULL);

		/*
		 * Any _OSI requests should be completed by now. If the BIOS has
		 * requested any Windows OSI strings, we will always truncate
		 * I/O addresses to 16 bits -- for Windows compatibility.
		 */
		if (acpi_gbl_osi_data >= ACPI_OSI_WIN_2000) {
			acpi_gbl_truncate_io_addresses = TRUE;
		}

		/* The evaluation block is no longer needed */

		ACPI_FREE(info.evaluate_info);
		if (ACPI_FAILURE(status)) {
			goto error_exit;
		}

		/* Final statistics for the device-init pass */

		ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
				      "    Executed %u _INI methods requiring %u _STA executions "
				      "(examined %u objects)\n",
				      info.num_INI, info.num_STA,
				      info.device_count));
	}

	return_ACPI_STATUS(status);

error_exit:
	ACPI_EXCEPTION((AE_INFO, status, "During device initialization"));
	return_ACPI_STATUS(status);
}
acpi_status acpi_ns_initialize_devices(void) { acpi_status status; struct acpi_device_walk_info info; ACPI_FUNCTION_TRACE(ns_initialize_devices); info.device_count = 0; info.num_STA = 0; info.num_INI = 0; ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "Initializing Device/Processor/Thermal objects " "by executing _INI methods:")); status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, FALSE, acpi_ns_find_ini_methods, NULL, &info, NULL); if (ACPI_FAILURE(status)) { goto error_exit; } info.evaluate_info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info)); if (!info.evaluate_info) { status = AE_NO_MEMORY; goto error_exit; } info.evaluate_info->prefix_node = acpi_gbl_root_node; info.evaluate_info->pathname = METHOD_NAME__INI; info.evaluate_info->parameters = NULL; info.evaluate_info->flags = ACPI_IGNORE_RETURN_VALUE; status = acpi_ns_evaluate(info.evaluate_info); if (ACPI_SUCCESS(status)) { info.num_INI++; } status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, FALSE, acpi_ns_init_one_device, NULL, &info, NULL); if (acpi_gbl_osi_data >= ACPI_OSI_WIN_2000) { acpi_gbl_truncate_io_addresses = TRUE; } ACPI_FREE(info.evaluate_info); if (ACPI_FAILURE(status)) { goto error_exit; } ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "\nExecuted %u _INI methods requiring %u _STA executions " "(examined %u objects)\n", info.num_INI, info.num_STA, info.device_count)); return_ACPI_STATUS(status); error_exit: ACPI_EXCEPTION((AE_INFO, status, "During device initialization")); return_ACPI_STATUS(status); }
/*
 * Walk the entire namespace under start_node and perform the one-time
 * initialization of the named objects created while loading the table
 * identified by table_index, counting objects/devices/methods/regions
 * along the way and printing a per-table summary.
 */
acpi_status
acpi_ds_initialize_objects(u32 table_index,
			   struct acpi_namespace_node * start_node)
{
	struct acpi_init_walk_info walk_info;
	struct acpi_table_header *table_header;
	acpi_owner_id owner;
	acpi_status rv;

	ACPI_FUNCTION_TRACE(ds_initialize_objects);

	rv = acpi_tb_get_owner_id(table_index, &owner);
	if (ACPI_FAILURE(rv)) {
		return_ACPI_STATUS(rv);
	}

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "**** Starting initialization of namespace objects ****\n"));
	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "Parsing all Control Methods:"));

	/* Begin with a fully-zeroed walk descriptor */

	ACPI_MEMSET(&walk_info, 0, sizeof(struct acpi_init_walk_info));
	walk_info.table_index = table_index;
	walk_info.owner_id = owner;

	/* Hold the namespace mutex ourselves for the duration of the walk */

	rv = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(rv)) {
		return_ACPI_STATUS(rv);
	}

	/*
	 * acpi_ns_walk_namespace is used directly (rather than
	 * acpi_walk_namespace) so the namespace reader lock is not taken.
	 */
	rv = acpi_ns_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX,
				    ACPI_NS_WALK_UNLOCK,
				    acpi_ds_init_one_object, NULL, &walk_info,
				    NULL);
	if (ACPI_FAILURE(rv)) {
		ACPI_EXCEPTION((AE_INFO, rv, "During WalkNamespace"));
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);

	rv = acpi_get_table_by_index(table_index, &table_header);
	if (ACPI_FAILURE(rv)) {
		return_ACPI_STATUS(rv);
	}

	/* Emit the per-table object statistics */

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
			      "\nTable [%4.4s](id %4.4X) - %u Objects with %u Devices %u Methods %u Regions\n",
			      table_header->signature, owner,
			      walk_info.object_count, walk_info.device_count,
			      walk_info.method_count,
			      walk_info.op_region_count));

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "%u Methods, %u Regions\n",
			  walk_info.method_count, walk_info.op_region_count));

	return_ACPI_STATUS(AE_OK);
}
/*
 * Perform the one-time initialization of all namespace objects owned by
 * the given table, then print a per-table summary (object, device, and
 * region counts plus methods broken down by serialization class).
 */
acpi_status
acpi_ds_initialize_objects(u32 table_index,
			   struct acpi_namespace_node *start_node)
{
	struct acpi_init_walk_info walk_info;
	struct acpi_table_header *table_header;
	acpi_owner_id owner;
	acpi_status rv;

	ACPI_FUNCTION_TRACE(ds_initialize_objects);

	rv = acpi_tb_get_owner_id(table_index, &owner);
	if (ACPI_FAILURE(rv)) {
		return_ACPI_STATUS(rv);
	}

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "**** Starting initialization of namespace objects ****\n"));

	/* Begin with a fully-zeroed walk descriptor */

	memset(&walk_info, 0, sizeof(struct acpi_init_walk_info));
	walk_info.table_index = table_index;
	walk_info.owner_id = owner;

	/*
	 * Walk the whole namespace from the supplied root.
	 * acpi_ns_walk_namespace is called directly (not acpi_walk_namespace)
	 * so that the namespace reader lock is not acquired.
	 */
	rv = acpi_ns_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX,
				    ACPI_NS_WALK_NO_UNLOCK,
				    acpi_ds_init_one_object, NULL, &walk_info,
				    NULL);
	if (ACPI_FAILURE(rv)) {
		ACPI_EXCEPTION((AE_INFO, rv, "During WalkNamespace"));
	}

	rv = acpi_get_table_by_index(table_index, &table_header);
	if (ACPI_FAILURE(rv)) {
		return_ACPI_STATUS(rv);
	}

	/* DSDT is always the first AML table */

	if (ACPI_COMPARE_NAME(table_header->signature, ACPI_SIG_DSDT)) {
		ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
				      "\nInitializing Namespace objects:\n"));
	}

	/* Summary of objects initialized */

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
			      "Table [%4.4s: %-8.8s] (id %.2X) - %4u Objects with %3u Devices, "
			      "%3u Regions, %4u Methods (%u/%u/%u Serial/Non/Cvt)\n",
			      table_header->signature,
			      table_header->oem_table_id, owner,
			      walk_info.object_count, walk_info.device_count,
			      walk_info.op_region_count,
			      walk_info.method_count,
			      walk_info.serial_method_count,
			      walk_info.non_serial_method_count,
			      walk_info.serialized_method_count));

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "%u Methods, %u Regions\n",
			  walk_info.method_count, walk_info.op_region_count));

	return_ACPI_STATUS(AE_OK);
}