Example #1
/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}
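For context, here is a minimal sketch of a consumer of the chain that _cpu_up() drives; the callback and its bookkeeping are hypothetical, but register_cpu_notifier() and the NOTIFY_* return codes are the era's real API:

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

/* Hypothetical callback: _cpu_up() publishes CPU_UP_PREPARE and
 * CPU_ONLINE (optionally ORed with CPU_TASKS_FROZEN) to handlers
 * like this one. */
static int my_cpu_callback(struct notifier_block *nb,
			   unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		/* Allocate per-cpu state here; returning
		 * notifier_from_errno(-ENOMEM) would make _cpu_up()
		 * unwind through CPU_UP_CANCELED. */
		pr_debug("preparing cpu%u\n", cpu);
		return NOTIFY_OK;
	case CPU_ONLINE:
		pr_debug("cpu%u is online\n", cpu);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block my_cpu_nb = {
	.notifier_call = my_cpu_callback,
};
/* Registered once at init time: register_cpu_notifier(&my_cpu_nb); */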
Example #2
static int kexec_get_cpu(xen_kexec_range_t *range)
{
    int nr = range->nr;
    int nr_bytes = 0;

    if ( nr < 0 || nr >= NR_CPUS || !cpu_present(nr) )
        return -EINVAL;

    nr_bytes += sizeof_note("CORE", sizeof(ELF_Prstatus));
    nr_bytes += sizeof_note("Xen", sizeof(crash_xen_core_t));

    /* The Xen info note is included in CPU0's range. */
    if ( nr == 0 )
        nr_bytes += sizeof_note("Xen", sizeof(crash_xen_info_t));

    if ( per_cpu(crash_notes, nr) == NULL )
    {
        Elf_Note *note;

        note = per_cpu(crash_notes, nr) = xmalloc_bytes(nr_bytes);

        if ( note == NULL )
            return -ENOMEM;

        /* Setup CORE note. */
        setup_note(note, "CORE", NT_PRSTATUS, sizeof(ELF_Prstatus));

        /* Setup Xen CORE note. */
        note = ELFNOTE_NEXT(note);
        setup_note(note, "Xen", XEN_ELFNOTE_CRASH_REGS, sizeof(crash_xen_core_t));

        if (nr == 0)
        {
            /* Setup system wide Xen info note. */
            xen_crash_note = note = ELFNOTE_NEXT(note);
            setup_note(note, "Xen", XEN_ELFNOTE_CRASH_INFO, sizeof(crash_xen_info_t));
        }
    }

    range->start = __pa((unsigned long)per_cpu(crash_notes, nr));
    range->size = nr_bytes;
    return 0;
}
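The nr_bytes arithmetic above depends on the ELF note layout: a fixed header followed by the name and descriptor, each padded to 4 bytes. A self-contained sketch of what sizeof_note() plausibly computes (the helper body is an assumption; the padding rule is the ELF spec's):

#include <stddef.h>
#include <string.h>

typedef struct {
    unsigned int namesz;  /* length of the name, including the NUL */
    unsigned int descsz;  /* length of the descriptor */
    unsigned int type;    /* note type, e.g. NT_PRSTATUS */
    /* name (padded to 4 bytes) and desc (padded to 4 bytes) follow */
} Elf_Note_hdr;

#define ELFNOTE_ALIGN(x) (((x) + 3) & ~3U)

/* Total bytes one note occupies, matching the per-note sums that
 * kexec_get_cpu() accumulates into nr_bytes. */
static size_t sizeof_note(const char *name, size_t descsz)
{
    return sizeof(Elf_Note_hdr)
           + ELFNOTE_ALIGN(strlen(name) + 1)
           + ELFNOTE_ALIGN(descsz);
}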
Example #3
static int __init topology_init(void)
{
	int cpu;
	struct node *parent = NULL;

	register_nodes();

	register_cpu_notifier(&sysfs_cpu_nb);

	for_each_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);

#ifdef CONFIG_NUMA
		/* The node to which a cpu belongs can't be known
		 * until the cpu is made present.
		 */
		parent = NULL;
		if (cpu_present(cpu))
			parent = &node_devices[cpu_to_node(cpu)];
#endif
		/*
		 * For now, we just see if the system supports making
		 * the RTAS calls for CPU hotplug.  But, there may be a
		 * more comprehensive way to do this for an individual
		 * CPU.  For instance, the boot cpu might never be valid
		 * for hotplugging.
		 */
		if (!ppc_md.cpu_die)
			c->no_control = 1;

		if (cpu_online(cpu) || (c->no_control == 0)) {
			register_cpu(c, cpu, parent);

			sysdev_create_file(&c->sysdev, &attr_physical_id);
		}

		if (cpu_online(cpu))
			register_cpu_online(cpu);
	}

	return 0;
}
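attr_physical_id above is a per-CPU sysdev attribute; a plausible definition looks like the following sketch (the old sysdev API is assumed, and its show() signature varied across kernel versions):

static ssize_t show_physical_id(struct sys_device *dev, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, sysdev);

	/* Hardware CPU id, surfaced under /sys/devices/system/cpu/cpuN. */
	return sprintf(buf, "%d\n",
		       get_hard_smp_processor_id(cpu->sysdev.id));
}
static SYSDEV_ATTR(physical_id, 0444, show_physical_id, NULL);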
Example #4
/*
 * cpudl_find - find the best (later-dl) CPU in the system
 * @cp: the cpudl max-heap context
 * @p: the task
 * @later_mask: a mask to fill in with the selected CPUs (or NULL)
 *
 * Returns: int - best CPU (heap maximum if suitable)
 */
int cpudl_find(struct cpudl *cp, struct task_struct *p,
	       struct cpumask *later_mask)
{
	int best_cpu = -1;
	const struct sched_dl_entity *dl_se = &p->dl;

	if (later_mask && cpumask_and(later_mask, later_mask, cp->free_cpus)) {
		best_cpu = cpumask_any(later_mask);
		goto out;
	} else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
			dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
		best_cpu = cpudl_maximum(cp);
		if (later_mask)
			cpumask_set_cpu(best_cpu, later_mask);
	}

out:
	WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));

	return best_cpu;
}
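A sketch of the call site: the deadline scheduler's push path preloads the mask with the task's affinity and asks cpudl_find() for a CPU with a later deadline (simplified and hypothetical; the real find_later_rq() in kernel/sched/deadline.c uses a per-cpu scratch cpumask):

/* Simplified, hypothetical caller of cpudl_find(). */
static int find_later_rq_sketch(struct task_struct *task,
				struct cpumask *scratch_mask)
{
	if (task->nr_cpus_allowed == 1)
		return -1; /* pinned task: nothing to push */

	/* Seed the mask with the task's affinity; cpudl_find() then
	 * intersects it with cp->free_cpus or falls back to the heap
	 * maximum. */
	cpumask_copy(scratch_mask, &task->cpus_allowed);
	return cpudl_find(&task_rq(task)->rd->cpudl, task, scratch_mask);
}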
Example #5
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		goto out_notify;
	}

	
	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	
	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

	#ifdef CONFIG_HTC_ACPU_DEBUG
	{
		unsigned int status = 1;
		msm_proc_comm(PCOM_BACKUP_CPU_STATUS, (unsigned*)&cpu, (unsigned*) &status);
	}
	#endif
out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
	cpu_hotplug_done();

	return ret;
}
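One detail worth calling out: the cpu index travels through the notifier chain disguised as a pointer. A standalone round-trip of that encoding (plain C, for illustration only):

#include <assert.h>

/* hcpu encoding as used above: cpu -> (void *)(long)cpu on the
 * publishing side, (unsigned long)hcpu in the callbacks. */
static void hcpu_roundtrip(unsigned int cpu)
{
	void *hcpu = (void *)(long)cpu;             /* _cpu_up() side */
	unsigned int decoded = (unsigned long)hcpu; /* callback side */

	assert(decoded == cpu);
}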
Example #6
int cpu_up(unsigned int cpu)
{
    int notifier_rc, err = 0;
    void *hcpu = (void *)(long)cpu;
    struct notifier_block *nb = NULL;

    if ( !cpu_hotplug_begin() )
        return -EBUSY;

    if ( (cpu >= NR_CPUS) || cpu_online(cpu) || !cpu_present(cpu) )
    {
        cpu_hotplug_done();
        return -EINVAL;
    }

    notifier_rc = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu, &nb);
    if ( notifier_rc != NOTIFY_DONE )
    {
        err = notifier_to_errno(notifier_rc);
        goto fail;
    }

    err = __cpu_up(cpu);
    if ( err < 0 )
        goto fail;

    notifier_rc = notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu, NULL);
    BUG_ON(notifier_rc != NOTIFY_DONE);

    send_guest_global_virq(dom0, VIRQ_PCPU_STATE);

    cpu_hotplug_done();
    return 0;

 fail:
    notifier_rc = notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu, &nb);
    BUG_ON(notifier_rc != NOTIFY_DONE);
    cpu_hotplug_done();
    return err;
}
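Xen's notifier contract differs from Linux's: cpu_chain handlers must return NOTIFY_DONE on success and encode failures with notifier_from_errno(), which cpu_up() above unpacks via notifier_to_errno(). A sketch of a conforming callback (the handler and its helpers are hypothetical):

static int my_cpu_callback(struct notifier_block *nfb,
                           unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;
    int rc = 0;

    switch ( action )
    {
    case CPU_UP_PREPARE:
        rc = prepare_percpu_area(cpu);   /* hypothetical helper */
        break;
    case CPU_UP_CANCELED:
        free_percpu_area(cpu);           /* hypothetical helper */
        break;
    }

    return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
}

static struct notifier_block my_cpu_nfb = {
    .notifier_call = my_cpu_callback
};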
Example #7
static int __cpuinit apic_cluster_num(void)
{
    int i, clusters, zeros;
    unsigned id;
    u16 *bios_cpu_apicid;
    DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);

    bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
    bitmap_zero(clustermap, NUM_APIC_CLUSTERS);

    for (i = 0; i < nr_cpu_ids; i++) {

        if (bios_cpu_apicid) {
            id = bios_cpu_apicid[i];
        } else if (i < nr_cpu_ids) {
            if (cpu_present(i))
                id = per_cpu(x86_bios_cpu_apicid, i);
            else
                continue;
        } else
            break;

        if (id != BAD_APICID)
            __set_bit(APIC_CLUSTERID(id), clustermap);
    }

    /* Clusters are allocated sequentially, so count zero runs only when
     * they are bounded by set bits; see the fuller comment in Example #14. */
    clusters = 0;
    zeros = 0;
    for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
        if (test_bit(i, clustermap)) {
            clusters += 1 + zeros;
            zeros = 0;
        } else
            ++zeros;
    }

    return clusters;
}
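The second loop counts runs of zeros only when a later set bit bounds them, since clusters are allocated sequentially. A standalone worked example of the same counting:

#include <stdio.h>

/* Same counting as above: interior zeros are allocated-but-empty
 * clusters, trailing zeros are not. */
static int count_clusters(const int *bits, int n)
{
    int i, clusters = 0, zeros = 0;

    for (i = 0; i < n; i++) {
        if (bits[i]) {
            clusters += 1 + zeros;
            zeros = 0;
        } else
            ++zeros;
    }
    return clusters;
}

int main(void)
{
    int a[] = { 1, 0, 0, 1 };  /* interior zeros counted -> 4 */
    int b[] = { 1, 0, 0, 0 };  /* trailing zeros ignored -> 1 */

    printf("%d %d\n", count_clusters(a, 4), count_clusters(b, 4));
    return 0;
}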
Example #8
/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
#ifdef CONFIG_DEBUG_PRINTK
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
#else
		;
#endif
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
	cpu_hotplug_done();

	return ret;
}
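The nr_calls bookkeeping is what makes the rollback partial: on failure, CPU_UP_CANCELED is delivered only to the callbacks that already saw CPU_UP_PREPARE, minus the one that failed (hence the nr_calls--). A sketch of the helper, assumed to match kernel/cpu.c of the same era:

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	/* The raw chain records in *nr_calls how many callbacks ran,
	 * so a failure can be unwound by re-notifying exactly the
	 * callbacks that succeeded. */
	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);
	return notifier_to_errno(ret);
}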
Example #9
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun up any cpus yet, but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		/*
		 * numa_node_id() works after this.
		 */
		if (cpu_present(cpu)) {
			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
			set_cpu_numa_mem(cpu,
				local_memory_node(numa_cpu_lookup_table[cpu]));
		}
	}

	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}
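cpu_sibling_mask()/cpu_core_mask() are thin accessors over the per-cpu masks zalloc'ed in the loop above; on powerpc of this era they look roughly like this (a sketch, assuming the usual asm/smp.h definitions):

static inline struct cpumask *cpu_sibling_mask(int cpu)
{
	return per_cpu(cpu_sibling_map, cpu);
}

static inline struct cpumask *cpu_core_mask(int cpu)
{
	return per_cpu(cpu_core_map, cpu);
}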
Example #10
/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
	int nid = -1;
	struct device_node *cpu;

	/*
	 * If a valid cpu-to-node mapping is already available, use it
	 * directly instead of querying the firmware, since it represents
	 * the most recent mapping notified to us by the platform (eg: VPHN).
	 */
	if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
		map_cpu_to_node(lcpu, nid);
		return nid;
	}

	cpu = of_get_cpu_node(lcpu, NULL);

	if (!cpu) {
		WARN_ON(1);
		if (cpu_present(lcpu))
			goto out_present;
		else
			goto out;
	}

	nid = of_node_to_nid_single(cpu);

out_present:
	if (nid < 0 || !node_online(nid))
		nid = first_online_node;

	map_cpu_to_node(lcpu, nid);
	of_node_put(cpu);
out:
	return nid;
}
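map_cpu_to_node() records the mapping in both directions: the cpu-to-node table consulted at the top of this function, and the node's cpumask. A sketch, assuming the usual arch/powerpc/mm/numa.c definition:

static void map_cpu_to_node(int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;

	/* Keep the reverse node -> cpus mask in sync. */
	if (!cpumask_test_cpu(cpu, node_to_cpumask_map[node]))
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}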
Example #11
/*
 * cpudl_set - update the cpudl max-heap
 * @cp: the cpudl max-heap context
 * @cpu: the target cpu
 * @dl: the new earliest deadline for this cpu
 *
 * Notes: assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpudl_set(struct cpudl *cp, int cpu, u64 dl)
{
	int old_idx;
	unsigned long flags;

	WARN_ON(!cpu_present(cpu));

	raw_spin_lock_irqsave(&cp->lock, flags);

	old_idx = cp->elements[cpu].idx;
	if (old_idx == IDX_INVALID) {
		int new_idx = cp->size++;
		cp->elements[new_idx].dl = dl;
		cp->elements[new_idx].cpu = cpu;
		cp->elements[cpu].idx = new_idx;
		cpudl_heapify_up(cp, new_idx);
		cpumask_clear_cpu(cpu, cp->free_cpus);
	} else {
		cp->elements[old_idx].dl = dl;
		cpudl_heapify(cp, old_idx);
	}

	raw_spin_unlock_irqrestore(&cp->lock, flags);
}
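cpudl_heapify_up() restores the max-heap property after the insertion. A minimal sketch of the sift-up (the in-tree version is more optimized); note that elements[] doubles as heap storage (.cpu, .dl) and as a cpu-indexed back-pointer table (.idx), so each slot swap must also swap the two cpus' back-pointers:

#define parent(i) (((i) - 1) >> 1)

static void cpudl_heapify_up(struct cpudl *cp, int idx)
{
	/* Bubble the new element up while its deadline is later than
	 * its parent's (max-heap keyed on absolute deadline). */
	while (idx > 0 && dl_time_before(cp->elements[parent(idx)].dl,
					 cp->elements[idx].dl)) {
		int p = parent(idx);
		int cpu_a = cp->elements[idx].cpu;
		int cpu_b = cp->elements[p].cpu;

		swap(cp->elements[idx].cpu, cp->elements[p].cpu);
		swap(cp->elements[idx].dl, cp->elements[p].dl);
		swap(cp->elements[cpu_a].idx, cp->elements[cpu_b].idx);

		idx = p;
	}
}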
Example #12
static void
acpi_processor_hotplug_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_processor *pr;
	struct acpi_device *device = NULL;
	int result;

	ACPI_FUNCTION_TRACE("acpi_processor_hotplug_notify");

	switch (event) {
	case ACPI_NOTIFY_BUS_CHECK:
	case ACPI_NOTIFY_DEVICE_CHECK:
		printk("Processor driver received %s event\n",
		       (event == ACPI_NOTIFY_BUS_CHECK) ?
		       "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK");

		if (!is_processor_present(handle))
			break;

		if (acpi_bus_get_device(handle, &device)) {
			result = acpi_processor_device_add(handle, &device);
			if (result)
				ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
						  "Unable to add the device\n"));
			break;
		}

		pr = acpi_driver_data(device);
		if (!pr) {
			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
					  "Driver data is NULL\n"));
			break;
		}

		if (pr->id >= 0 && (pr->id < NR_CPUS)) {
			kobject_hotplug(&device->kobj, KOBJ_OFFLINE);
			break;
		}

		result = acpi_processor_start(device);
		if ((!result) && ((pr->id >= 0) && (pr->id < NR_CPUS))) {
			kobject_hotplug(&device->kobj, KOBJ_ONLINE);
		} else {
			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
					  "Device [%s] failed to start\n",
					  acpi_device_bid(device)));
		}
		break;
	case ACPI_NOTIFY_EJECT_REQUEST:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "received ACPI_NOTIFY_EJECT_REQUEST\n"));

		if (acpi_bus_get_device(handle, &device)) {
			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
					  "Device don't exist, dropping EJECT\n"));
			break;
		}
		pr = acpi_driver_data(device);
		if (!pr) {
			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
					  "Driver data is NULL, dropping EJECT\n"));
			return_VOID;
		}

		if ((pr->id < NR_CPUS) && (cpu_present(pr->id)))
			kobject_hotplug(&device->kobj, KOBJ_OFFLINE);
		break;
	default:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Unsupported event [0x%x]\n", event));
		break;
	}

	return_VOID;
}
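This handler reaches the kernel through ACPI's notify registration. A sketch of the installation call (wrapping it in a helper is an assumption; acpi_install_notify_handler() itself is the standard ACPICA API, and ACPI_SYSTEM_NOTIFY selects the range that BUS_CHECK/DEVICE_CHECK/EJECT_REQUEST fall into):

static acpi_status install_hotplug_notify(acpi_handle handle)
{
	/* Typically called while walking the namespace for processor
	 * objects. */
	return acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
					   acpi_processor_hotplug_notify,
					   NULL);
}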
Example #13
static int acpi_processor_get_info(struct acpi_device *device)
{
	union acpi_object object = { 0 };
	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
	struct acpi_processor *pr = acpi_driver_data(device);
	int device_declaration = 0;
	acpi_status status = AE_OK;
	static int cpu0_initialized;
	unsigned long long value;

	acpi_processor_errata();

	/*
	 * Check to see if we have bus mastering arbitration control.  This
	 * is required for proper C3 usage (to maintain cache coherency).
	 */
	if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
		pr->flags.bm_control = 1;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Bus mastering arbitration control present\n"));
	} else
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "No bus mastering arbitration control\n"));

	if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) {
		/* Declared with "Processor" statement; match ProcessorID */
		status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
		if (ACPI_FAILURE(status)) {
			dev_err(&device->dev,
				"Failed to evaluate processor object (0x%x)\n",
				status);
			return -ENODEV;
		}

		pr->acpi_id = object.processor.proc_id;
	} else {
		/*
		 * Declared with "Device" statement; match _UID.
		 * Note that we don't handle string _UIDs yet.
		 */
		status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
						NULL, &value);
		if (ACPI_FAILURE(status)) {
			dev_err(&device->dev,
				"Failed to evaluate processor _UID (0x%x)\n",
				status);
			return -ENODEV;
		}
		device_declaration = 1;
		pr->acpi_id = value;
	}

	if (acpi_duplicate_processor_id(pr->acpi_id)) {
		dev_err(&device->dev,
			"Failed to get unique processor _UID (0x%x)\n",
			pr->acpi_id);
		return -ENODEV;
	}

	pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration,
					pr->acpi_id);
	if (invalid_phys_cpuid(pr->phys_id))
		acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n");

	pr->id = acpi_map_cpuid(pr->phys_id, pr->acpi_id);
	if (!cpu0_initialized && !acpi_has_cpu_in_madt()) {
		cpu0_initialized = 1;
		/*
		 * Handle UP system running SMP kernel, with no CPU
		 * entry in MADT
		 */
		if (invalid_logical_cpuid(pr->id) && (num_online_cpus() == 1))
			pr->id = 0;
	}

	/*
	 *  Extra Processor objects may be enumerated on MP systems with
	 *  less than the max # of CPUs. They should be ignored _iff
	 *  they are physically not present.
	 *
	 *  NOTE: Even if the processor has a cpuid, it may not be present
	 *  because cpuid <-> apicid mapping is persistent now.
	 */
	if (invalid_logical_cpuid(pr->id) || !cpu_present(pr->id)) {
		int ret = acpi_processor_hotadd_init(pr);
		if (ret)
			return ret;
	}

	/*
	 * On some boxes several processors use the same processor bus id.
	 * But they are located in different scope. For example:
	 * \_SB.SCK0.CPU0
	 * \_SB.SCK1.CPU0
	 * Rename the processor device bus id. And the new bus id will be
	 * generated as the following format:
	 * CPU+CPU ID.
	 */
	sprintf(acpi_device_bid(device), "CPU%X", pr->id);
	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
			  pr->acpi_id));

	if (!object.processor.pblk_address)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
	else if (object.processor.pblk_length != 6)
		dev_err(&device->dev, "Invalid PBLK length [%d]\n",
			    object.processor.pblk_length);
	else {
		pr->throttling.address = object.processor.pblk_address;
		pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
		pr->throttling.duty_width = acpi_gbl_FADT.duty_width;

		pr->pblk = object.processor.pblk_address;
	}

	/*
	 * If ACPI describes a slot number for this CPU, we can use it to
	 * ensure we get the right value in the "physical id" field
	 * of /proc/cpuinfo
	 */
	status = acpi_evaluate_integer(pr->handle, "_SUN", NULL, &value);
	if (ACPI_SUCCESS(status))
		arch_fix_phys_package_id(pr->id, value);

	return 0;
}
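invalid_logical_cpuid() and invalid_phys_cpuid() are small predicates over sentinel values; sketches matching their usual include/linux/acpi.h definitions (an assumption for this exact kernel version):

/* Logical cpu ids are invalid when negative; physical ids use an
 * explicit sentinel. */
static inline bool invalid_logical_cpuid(u32 cpuid)
{
	return (int)cpuid < 0;
}

static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id)
{
	return phys_id == PHYS_CPUID_INVALID;
}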
Example #14
/*
 * apic_is_clustered_box() -- Check if we can expect good TSC
 *
 * Thus far, the major user of this is IBM's Summit2 series:
 *
 * Clustered boxes may have unsynced TSC problems if they are
 * multi-chassis. Use available data to take a good guess.
 * If in doubt, go HPET.
 */
__cpuinit int apic_is_clustered_box(void)
{
    int i, clusters, zeros;
    unsigned id;
    u16 *bios_cpu_apicid;
    DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);

    /*
     * There is no such clustered box with AMD CPUs yet. Some AMD boxes
     * with quad-core CPUs and 8 sockets (apicids [4, 0x23] or [8, 0x27])
     * could be mistaken for vSMP boxes; this still needs checking...
     */
    if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box())
        return 0;

    bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
    bitmap_zero(clustermap, NUM_APIC_CLUSTERS);

    for (i = 0; i < NR_CPUS; i++) {
        /* are we being called early in kernel startup? */
        if (bios_cpu_apicid) {
            id = bios_cpu_apicid[i];
        }
        else if (i < nr_cpu_ids) {
            if (cpu_present(i))
                id = per_cpu(x86_bios_cpu_apicid, i);
            else
                continue;
        }
        else
            break;

        if (id != BAD_APICID)
            __set_bit(APIC_CLUSTERID(id), clustermap);
    }

    /* Problem:  Partially populated chassis may not have CPUs in some of
     * the APIC clusters they have been allocated.  Only present CPUs have
     * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.
     * Since clusters are allocated sequentially, count zeros only if
     * they are bounded by ones.
     */
    clusters = 0;
    zeros = 0;
    for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
        if (test_bit(i, clustermap)) {
            clusters += 1 + zeros;
            zeros = 0;
        } else
            ++zeros;
    }

    /* ScaleMP vSMPowered boxes have one cluster per board and TSCs are
     * not guaranteed to be synced between boards
     */
    if (is_vsmp_box() && clusters > 1)
        return 1;

    /*
     * If clusters > 2, then should be multi-chassis.
     * May have to revisit this when multi-core + hyperthreaded CPUs come
     * out, but AFAIK this will work even for them.
     */
    return (clusters > 2);
}