/*
 * Resolve the physical CPU id for the processor object @handle: try the
 * per-device _MAT entry first and fall back to scanning the MADT.
 */
phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
{
	phys_cpuid_t phys_id;

	phys_id = map_mat_entry(handle, type, acpi_id);
	if (invalid_phys_cpuid(phys_id))
		phys_id = map_madt_entry(get_madt_table(), type, acpi_id);

	return phys_id;
}
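/*
 * Map a physical CPU id (and the ACPI id it was derived from) to a logical
 * CPU number, falling back to CPU 0 on uniprocessor configurations where
 * no _MAT/MADT entry exists.
 */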
int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id)
{
#ifdef CONFIG_SMP
	int i;
#endif

	if (invalid_phys_cpuid(phys_id)) {
		/*
		 * On a UP processor there is no _MAT or MADT table, so the
		 * phys_id computed above is always PHYS_CPUID_INVALID.
		 *
		 * The BIOS may define multiple CPU handles even for a UP
		 * processor, for example:
		 *
		 * Scope (_PR)
		 * {
		 *     Processor (CPU0, 0x00, 0x00000410, 0x06) {}
		 *     Processor (CPU1, 0x01, 0x00000410, 0x06) {}
		 *     Processor (CPU2, 0x02, 0x00000410, 0x06) {}
		 *     Processor (CPU3, 0x03, 0x00000410, 0x06) {}
		 * }
		 *
		 * Ignore phys_id and always return 0 for the processor
		 * handle with acpi id 0 if nr_cpu_ids is 1.  This should be
		 * the case if SMP tables are not found.  Return -EINVAL for
		 * any other CPU's handle.
		 */
		if (nr_cpu_ids <= 1 && acpi_id == 0)
			return acpi_id;
		else
			return -EINVAL;
	}

#ifdef CONFIG_SMP
	for_each_possible_cpu(i) {
		if (cpu_physical_id(i) == phys_id)
			return i;
	}
#else
	/* In UP kernel, only processor 0 is valid */
	if (phys_id == 0)
		return phys_id;
#endif
	return -ENODEV;
}
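/*
 * acpi_processor_hotadd_init() handles processor objects that are
 * physically present (per _STA) but have not been mapped to a logical CPU
 * yet: it maps the CPU and registers it with the arch code under the CPU
 * hotplug locks.
 */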
static int acpi_processor_hotadd_init(struct acpi_processor *pr)
{
	unsigned long long sta;
	acpi_status status;
	int ret;

	if (invalid_phys_cpuid(pr->phys_id))
		return -ENODEV;

	status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
	if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
		return -ENODEV;

	cpu_maps_update_begin();
	cpu_hotplug_begin();

	ret = acpi_map_cpu(pr->handle, pr->phys_id, &pr->id);
	if (ret)
		goto out;

	ret = arch_register_cpu(pr->id);
	if (ret) {
		acpi_unmap_cpu(pr->id);
		goto out;
	}

	/*
	 * CPU got hot-added, but cpu_data is not initialized yet.  Set a
	 * flag to delay cpu_idle/throttling initialization and do it when
	 * the CPU gets online for the first time.
	 */
	pr_info("CPU%d has been hot-added\n", pr->id);
	pr->flags.need_hotplug_init = 1;

out:
	cpu_hotplug_done();
	cpu_maps_update_done();
	return ret;
}
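/*
 * acpi_processor_get_info() extracts the ACPI id from the Processor
 * statement (or from _UID for Device-declared processors), resolves the
 * physical and logical CPU ids, and falls back to the hot-add path for
 * processors that are not present yet.
 */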
static int acpi_processor_get_info(struct acpi_device *device)
{
	union acpi_object object = { 0 };
	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
	struct acpi_processor *pr = acpi_driver_data(device);
	int device_declaration = 0;
	acpi_status status = AE_OK;
	static int cpu0_initialized;
	unsigned long long value;

	acpi_processor_errata();

	/*
	 * Check to see if we have bus mastering arbitration control.  This
	 * is required for proper C3 usage (to maintain cache coherency).
	 */
	if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
		pr->flags.bm_control = 1;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Bus mastering arbitration control present\n"));
	} else
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "No bus mastering arbitration control\n"));

	if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) {
		/* Declared with "Processor" statement; match ProcessorID */
		status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
		if (ACPI_FAILURE(status)) {
			dev_err(&device->dev,
				"Failed to evaluate processor object (0x%x)\n",
				status);
			return -ENODEV;
		}

		pr->acpi_id = object.processor.proc_id;
	} else {
		/*
		 * Declared with "Device" statement; match _UID.
		 * Note that we don't handle string _UIDs yet.
		 */
		status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
					       NULL, &value);
		if (ACPI_FAILURE(status)) {
			dev_err(&device->dev,
				"Failed to evaluate processor _UID (0x%x)\n",
				status);
			return -ENODEV;
		}
		device_declaration = 1;
		pr->acpi_id = value;
	}

	if (acpi_duplicate_processor_id(pr->acpi_id)) {
		dev_err(&device->dev,
			"Failed to get unique processor _UID (0x%x)\n",
			pr->acpi_id);
		return -ENODEV;
	}

	pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration,
				       pr->acpi_id);
	if (invalid_phys_cpuid(pr->phys_id))
		acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n");

	pr->id = acpi_map_cpuid(pr->phys_id, pr->acpi_id);
	if (!cpu0_initialized && !acpi_has_cpu_in_madt()) {
		cpu0_initialized = 1;
		/*
		 * Handle a UP system running an SMP kernel, with no CPU
		 * entry in the MADT.
		 */
		if (invalid_logical_cpuid(pr->id) && (num_online_cpus() == 1))
			pr->id = 0;
	}

	/*
	 * Extra Processor objects may be enumerated on MP systems with
	 * less than the max # of CPUs.  They should be ignored _iff
	 * they are physically not present.
	 *
	 * NOTE: Even if the processor has a cpuid, it may not be present,
	 * because the cpuid <-> apicid mapping is persistent now.
	 */
	if (invalid_logical_cpuid(pr->id) || !cpu_present(pr->id)) {
		int ret = acpi_processor_hotadd_init(pr);

		if (ret)
			return ret;
	}

	/*
	 * On some boxes several processors use the same processor bus id
	 * but are located in different scopes, for example:
	 *     \_SB.SCK0.CPU0
	 *     \_SB.SCK1.CPU0
	 * Rename the processor device bus id; the new bus id is generated
	 * in the format "CPU" + CPU ID.
	 */
	sprintf(acpi_device_bid(device), "CPU%X", pr->id);
	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
			  pr->acpi_id));

	if (!object.processor.pblk_address)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
	else if (object.processor.pblk_length != 6)
		dev_err(&device->dev, "Invalid PBLK length [%d]\n",
			object.processor.pblk_length);
	else {
		pr->throttling.address = object.processor.pblk_address;
		pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
		pr->throttling.duty_width = acpi_gbl_FADT.duty_width;

		pr->pblk = object.processor.pblk_address;
	}

	/*
	 * If ACPI describes a slot number for this CPU, we can use it to
	 * ensure we get the right value in the "physical id" field
	 * of /proc/cpuinfo.
	 */
	status = acpi_evaluate_integer(pr->handle, "_SUN", NULL, &value);
	if (ACPI_SUCCESS(status))
		arch_fix_phys_package_id(pr->id, value);

	return 0;
}