Example #1
void __cpuinit platform_secondary_init(unsigned int cpu)
{
	trace_hardirqs_off();
	gic_cpu_init(0, IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x100);
	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
#ifdef CONFIG_HOTPLUG_CPU
	cpu_set(cpu, cpu_init_map);
	INIT_COMPLETION(per_cpu(cpu_killed, cpu));
#endif
	spin_unlock(&boot_lock);
}
Example #2
static void __init smp_online(void)
{
	int cpu_id = smp_processor_id();

	local_irq_enable();

	/* Get our bogomips. */
	calibrate_delay();

	/* Save our processor parameters */
 	smp_store_cpu_info(cpu_id);

	cpu_set(cpu_id, cpu_online_map);
}
Example #3
void generic_mach_cpu_die(void)
{
	unsigned int cpu;

	local_irq_disable();
	cpu = smp_processor_id();
	printk(KERN_DEBUG "CPU%d offline\n", cpu);
	__get_cpu_var(cpu_state) = CPU_DEAD;
	smp_wmb();
	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
		cpu_relax();
	cpu_set(cpu, cpu_online_map);
	local_irq_enable();
}
Example #4
static int __devinit profile_cpu_callback(struct notifier_block *info,
					unsigned long action, void *__cpu)
{
	int node, cpu = (unsigned long)__cpu;
	struct page *page;

	switch (action) {
	case CPU_UP_PREPARE:
		node = cpu_to_node(cpu);
		per_cpu(cpu_profile_flip, cpu) = 0;
		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
			page = alloc_pages_node(node, GFP_KERNEL, 0);
			if (!page)
				return NOTIFY_BAD;
			clear_highpage(page);
			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
		}
		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
			page = alloc_pages_node(node, GFP_KERNEL, 0);
			if (!page)
				goto out_free;
			clear_highpage(page);
			per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
		}
		break;
	out_free:
		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
		per_cpu(cpu_profile_hits, cpu)[1] = NULL;
		__free_page(page);
		return NOTIFY_BAD;
	case CPU_ONLINE:
		cpu_set(cpu, prof_cpu_mask);
		break;
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		cpu_clear(cpu, prof_cpu_mask);
		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
		break;
	}
	return NOTIFY_OK;
}
Example #5
int acpi_map_lsapic(acpi_handle handle, int *pcpu)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	struct acpi_madt_local_sapic *lsapic;
	cpumask_t tmp_map;
	long physid;
	int cpu;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
		return -EINVAL;

	if (!buffer.length || !buffer.pointer)
		return -EINVAL;

	obj = buffer.pointer;
	if (obj->type != ACPI_TYPE_BUFFER)
	{
		kfree(buffer.pointer);
		return -EINVAL;
	}

	lsapic = (struct acpi_madt_local_sapic *)obj->buffer.pointer;

	if ((lsapic->header.type != ACPI_MADT_TYPE_LOCAL_SAPIC) ||
	    (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))) {
		kfree(buffer.pointer);
		return -EINVAL;
	}

	physid = ((lsapic->id << 8) | (lsapic->eid));

	kfree(buffer.pointer);
	buffer.length = ACPI_ALLOCATE_BUFFER;
	buffer.pointer = NULL;

	cpus_complement(tmp_map, cpu_present_map);
	cpu = first_cpu(tmp_map);
	if (cpu >= NR_CPUS)
		return -EINVAL;

	acpi_map_cpu2node(handle, cpu, physid);

	cpu_set(cpu, cpu_present_map);
	ia64_cpu_to_sapicid[cpu] = physid;

	*pcpu = cpu;
	return (0);
}
Example #6
void linsched_run_sim(int sim_ticks)
{
	/* Run a simulation for some number of ticks. Each tick,
	 * scheduling and load balancing decisions are made. The
	 * order in which CPUs make their scheduler_tick calls
	 * is randomized. Obviously, we could create tasks,
	 * change priorities, etc., at certain ticks if we desired,
	 * rather than just running a simple simulation.
	 * (Tasks can also be removed by having them exit.)
	 */
	/* NOTE: The per-CPU "tick" is never disabled, like it might be in a
	 * real system, when a CPU goes idle. Since even the most current
	 * version of Linux maintains a periodic tick when there is
	 * actual work to do, and disabling the tick when idle would
	 * not change anything about how the scheduler behaves
	 * (it only conserves energy, which is not going to help us here),
	 * there is no need.
	 */
	

//	printf("Yeah-first_run\n");
	int initial_jiffies = jiffies;
	for (jiffies = initial_jiffies;
	     jiffies < initial_jiffies + sim_ticks;
	     jiffies++) {
		cpumask_t cpu_processed_map = CPU_MASK_NONE;
		while (!cpus_full(cpu_processed_map)) {
			int active_cpu;
			
			/* Determine next active CPU, and set as processed. */ 
			do {
				active_cpu = linsched_random() % NR_CPUS;
				//active_cpu = 1;

			} while (cpu_isset(active_cpu, cpu_processed_map));
			cpu_set(active_cpu, cpu_processed_map);

			/* Call scheduler_tick for that CPU. */
			linsched_change_cpu(active_cpu);
//			printf("Mainsimulation\n");
			scheduler_tick(); /* may trigger a schedule() call */

			/* First time executing a task? Do not need to
			 * call schedule_tail, since we are not actually
			 * performing a "real" context switch.
			 */
		}
	}
}
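For reference, a minimal user-space sketch of the same selection pattern, assuming a plain bitmask in place of cpumask_t and an illustrative NR_CPUS of 4 (neither is part of the original simulator): each tick, random CPU numbers are drawn until one not yet marked in the "processed" mask is found, then its bit is set, mirroring the cpu_isset()/cpu_set() pair above.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define NR_CPUS 4

int main(void)
{
	srand(12345);

	for (int tick = 0; tick < 3; tick++) {
		uint32_t processed = 0;			/* plays the role of cpu_processed_map */
		uint32_t full = (1u << NR_CPUS) - 1;

		while (processed != full) {		/* cpus_full() analogue */
			int cpu;

			/* Draw random CPUs until an unprocessed one is found
			 * (cpu_isset() analogue), then mark it (cpu_set()). */
			do {
				cpu = rand() % NR_CPUS;
			} while (processed & (1u << cpu));
			processed |= 1u << cpu;

			printf("tick %d: scheduler_tick() on cpu %d\n", tick, cpu);
		}
	}
	return 0;
}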
Example #7
/*
 * Initialize the logical CPU number to SAPICID mapping
 */
void __init
smp_build_cpu_map (void)
{
	int sapicid, cpu, i;
	int boot_cpu_id = hard_smp_processor_id();

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		ia64_cpu_to_sapicid[cpu] = -1;
	}

	ia64_cpu_to_sapicid[0] = boot_cpu_id;
	cpus_clear(cpu_present_map);
	cpu_set(0, cpu_present_map);
	cpu_set(0, cpu_possible_map);
	for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
		sapicid = smp_boot_data.cpu_phys_id[i];
		if (sapicid == boot_cpu_id)
			continue;
		cpu_set(cpu, cpu_present_map);
		cpu_set(cpu, cpu_possible_map);
		ia64_cpu_to_sapicid[cpu] = sapicid;
		cpu++;
	}
}
Example #8
/**
 * percpu_populate_mask - populate per-cpu data for more cpu's
 * @__pdata: per-cpu data to populate further
 * @size: size of per-cpu object
 * @gfp: may sleep or not etc.
 * @mask: populate per-cpu data for cpu's selected through mask bits
 *
 * Per-cpu objects are populated with zeroed buffers.
 */
int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
			   cpumask_t *mask)
{
	cpumask_t populated;
	int cpu;

	cpus_clear(populated);
	for_each_cpu_mask(cpu, *mask)
		if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
			__percpu_depopulate_mask(__pdata, &populated);
			return -ENOMEM;
		} else
			cpu_set(cpu, populated);
	return 0;
}
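For reference, a minimal user-space sketch of the same allocate-or-roll-back pattern, assuming an ordinary array of pointers in place of real per-cpu data and a plain bitmask in place of cpumask_t: zeroed buffers are allocated for every CPU selected by the mask, and the first failure releases everything populated so far.

#include <stdlib.h>
#include <stdint.h>

#define NR_CPUS 8

static void *pdata[NR_CPUS];

static void depopulate_mask(uint32_t mask)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (mask & (1u << cpu)) {
			free(pdata[cpu]);
			pdata[cpu] = NULL;
		}
}

/* Returns 0 on success, -1 after rolling back on allocation failure. */
static int populate_mask(size_t size, uint32_t mask)
{
	uint32_t populated = 0;			/* plays the role of the "populated" cpumask */

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!(mask & (1u << cpu)))
			continue;
		pdata[cpu] = calloc(1, size);	/* zeroed buffer, like percpu_populate() */
		if (!pdata[cpu]) {
			depopulate_mask(populated);
			return -1;
		}
		populated |= 1u << cpu;		/* cpu_set(cpu, populated) analogue */
	}
	return 0;
}

int main(void)
{
	return populate_mask(64, 0x0f) ? 1 : 0;	/* populate CPUs 0..3 */
}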
Example #9
int __cpu_up(unsigned int cpu)
{
	struct task_struct *tsk;

	tsk = fork_idle(cpu);

	if (IS_ERR(tsk))
		panic("Failed forking idle task for cpu %d\n", cpu);
	
	task_thread_info(tsk)->cpu = cpu;

	cpu_set(cpu, cpu_online_map);

	return 0;
}
Example #10
void __init setup_replication_mask(void)
{
	/* Set only the master cnode's bit.  The master cnode is always 0. */
	cpus_clear(ktext_repmask);
	cpu_set(0, ktext_repmask);

#ifdef CONFIG_REPLICATE_KTEXT
#ifndef CONFIG_MAPPED_KERNEL
#error Kernel replication works with mapped kernel support. No calias support.
#endif
	{
		cnodeid_t	cnode;

		for_each_online_node(cnode) {
			if (cnode == 0)
				continue;
			/* Advertise that we have a copy of the kernel */
			cpu_set(cnode, ktext_repmask);
		}
	}
#endif
	/* Set up a GDA pointer to the replication mask. */
	GDA->g_ktext_repmask = &ktext_repmask;
}
Example #11
static void tegra_cache_smc(bool enable, u32 arg)
{
	void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
	bool need_affinity_switch;
	bool can_switch_affinity;
	bool l2x0_enabled;
	cpumask_t local_cpu_mask;
	cpumask_t saved_cpu_mask;
	unsigned long flags;
	long ret;

	/*
	 * ISSUE : Some registers of PL310 controler must be written
	 *              from Secure context (and from CPU0)!
	 *
	 * When called form Normal we obtain an abort or do nothing.
	 * Instructions that must be called in Secure:
	 *      - Write to Control register (L2X0_CTRL==0x100)
	 *      - Write in Auxiliary controler (L2X0_AUX_CTRL==0x104)
	 *      - Invalidate all entries (L2X0_INV_WAY==0x77C),
	 *              mandatory at boot time.
	 *      - Tag and Data RAM Latency Control Registers
	 *              (0x108 & 0x10C) must be written in Secure.
	 */
	need_affinity_switch = (smp_processor_id() != 0);
	can_switch_affinity = !irqs_disabled();

	WARN_ON(need_affinity_switch && !can_switch_affinity);
	if (need_affinity_switch && can_switch_affinity) {
		cpu_set(0, local_cpu_mask);
		sched_getaffinity(0, &saved_cpu_mask);
		ret = sched_setaffinity(0, &local_cpu_mask);
		WARN_ON(ret != 0);
	}

	local_irq_save(flags);
	l2x0_enabled = readl_relaxed(p + L2X0_CTRL) & 1;
	if (enable && !l2x0_enabled)
		tegra_generic_smc(0xFFFFF100, 0x00000001, arg);
	else if (!enable && l2x0_enabled)
		tegra_generic_smc(0xFFFFF100, 0x00000002, arg);
	local_irq_restore(flags);

	if (need_affinity_switch && can_switch_affinity) {
		ret = sched_setaffinity(0, &saved_cpu_mask);
		WARN_ON(ret != 0);
	}
}
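For reference, a minimal user-space sketch of the "pin to CPU 0, do the work, restore affinity" pattern used above, built on the glibc affinity calls (sched_getaffinity/sched_setaffinity with cpu_set_t); the critical section itself is only a placeholder.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t saved, cpu0;

	CPU_ZERO(&cpu0);
	CPU_SET(0, &cpu0);			/* the local_cpu_mask analogue */

	if (sched_getaffinity(0, sizeof(saved), &saved) ||
	    sched_setaffinity(0, sizeof(cpu0), &cpu0)) {
		perror("affinity");
		return 1;
	}

	/* ... work that must run on CPU 0 goes here ... */
	printf("running on cpu %d\n", sched_getcpu());

	/* Restore the original affinity mask. */
	sched_setaffinity(0, sizeof(saved), &saved);
	return 0;
}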
Example #12
/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(void *why)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags, *reason = why;
	int cpu;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device in broadcast mode forever or is it not
	 * affected by the powerstate ?
	 */
	if (!dev || !tick_device_is_functional(dev) ||
	    !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_ON) {
		if (!cpu_isset(cpu, tick_broadcast_mask)) {
			cpu_set(cpu, tick_broadcast_mask);
			if (td->mode == TICKDEV_MODE_PERIODIC)
				clockevents_set_mode(dev,
						     CLOCK_EVT_MODE_SHUTDOWN);
		}
	} else {
		if (cpu_isset(cpu, tick_broadcast_mask)) {
			cpu_clear(cpu, tick_broadcast_mask);
			if (td->mode == TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
	}

	if (cpus_empty(tick_broadcast_mask))
		clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
	else {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
Example #13
int __cpu_up(unsigned int cpu)
{
	struct task_struct *p;
	char buf[32];
	int c;

	/* create a process for the processor */
	/* only regs.msr is actually used, and 0 is OK for it */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	secondary_ti = p->thread_info;
	p->thread_info->cpu = cpu;

	/*
	 * There was a cache flush loop here to flush the cache
	 * to memory for the first 8MB of RAM.  The cache flush
	 * has been pushed into the kick_cpu function for those
	 * platforms that need it.
	 */

	/* wake up cpu */
	smp_ops->kick_cpu(cpu);
	
	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	for (c = 1000; c && !cpu_callin_map[cpu]; c--)
		udelay(100);

	if (!cpu_callin_map[cpu]) {
		sprintf(buf, "didn't find cpu %u", cpu);
		if (ppc_md.progress) ppc_md.progress(buf, 0x360+cpu);
		printk("Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	sprintf(buf, "found cpu %u", cpu);
	if (ppc_md.progress) ppc_md.progress(buf, 0x350+cpu);
	printk("Processor %d found.\n", cpu);

	smp_ops->give_timebase();
	cpu_set(cpu, cpu_online_map);
	return 0;
}
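For reference, a minimal pthread-based sketch of the "kick the CPU, then poll for its callin with a bounded retry count" pattern above; the thread, the atomic flag and the timing constants are illustrative stand-ins rather than the kernel's own structures.

#include <stdio.h>
#include <unistd.h>
#include <pthread.h>
#include <stdatomic.h>

static atomic_int cpu_callin;			/* plays the role of cpu_callin_map[cpu] */

static void *secondary(void *arg)
{
	(void)arg;
	usleep(20000);				/* pretend the secondary CPU is coming up */
	atomic_store(&cpu_callin, 1);		/* "callin": announce that we are alive */
	return NULL;
}

int main(void)
{
	pthread_t t;
	int c;

	pthread_create(&t, NULL, secondary, NULL);	/* smp_ops->kick_cpu() analogue */

	for (c = 1000; c && !atomic_load(&cpu_callin); c--)
		usleep(100);

	printf(atomic_load(&cpu_callin) ? "Processor found.\n"
					: "Processor is stuck.\n");
	pthread_join(t, NULL);
	return 0;
}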
Example #14
/* This is called very early */
static void __init smp_init_pseries(void)
{
	int i;

	DBG(" -> smp_init_pSeries()\n");

	/* Mark threads which are still spinning in hold loops. */
	if (cpu_has_feature(CPU_FTR_SMT)) {
		for_each_present_cpu(i) { 
			if (i % 2 == 0)
				/*
				 * Even-numbered logical cpus correspond to
				 * primary threads.
				 */
				cpu_set(i, of_spin_map);
		}
	} else {
Example #15
/* This is called very early */
void __init smp_init_pSeries(void)
{
	int i;

	DBG(" -> smp_init_pSeries()\n");

	switch (ppc64_interrupt_controller) {
#ifdef CONFIG_MPIC
	case IC_OPEN_PIC:
		smp_ops = &pSeries_mpic_smp_ops;
		break;
#endif
#ifdef CONFIG_XICS
	case IC_PPC_XIC:
		smp_ops = &pSeries_xics_smp_ops;
		break;
#endif
#ifdef CONFIG_BPA_IIC
	case IC_BPA_IIC:
		smp_ops = &bpa_iic_smp_ops;
		break;
#endif
	default:
		panic("Invalid interrupt controller");
	}

#ifdef CONFIG_HOTPLUG_CPU
	smp_ops->cpu_disable = pSeries_cpu_disable;
	smp_ops->cpu_die = pSeries_cpu_die;

	/* Processors can be added/removed only on LPAR */
	if (systemcfg->platform == PLATFORM_PSERIES_LPAR)
		pSeries_reconfig_notifier_register(&pSeries_smp_nb);
#endif

	/* Mark threads which are still spinning in hold loops. */
	if (cpu_has_feature(CPU_FTR_SMT)) {
		for_each_present_cpu(i) { 
			if (i % 2 == 0)
				/*
				 * Even-numbered logical cpus correspond to
				 * primary threads.
				 */
				cpu_set(i, of_spin_map);
		}
	} else {
Example #16
static inline void fixup_cpu_present_map(void)
{
#ifdef CONFIG_SMP
    int i;

    /*
     * If arch is not hotplug ready and did not populate
     * cpu_present_map, just make cpu_present_map same as cpu_possible_map
     * for other cpu bringup code to function as normal. e.g smp_init() etc.
     */
    if (cpus_empty(cpu_present_map)) {
        for_each_cpu(i) {
            cpu_set(i, cpu_present_map);
        }
    }
#endif
}
Example #17
void crash_ipi_callback(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	if (!cpu_online(cpu))
		return;

	hard_irq_disable();
	if (!cpu_isset(cpu, cpus_in_crash))
		crash_save_cpu(regs, cpu);
	cpu_set(cpu, cpus_in_crash);

	/*
	 * Entered via soft-reset - could be the kdump
	 * process is invoked using soft-reset or user activated
	 * it if some CPU did not respond to an IPI.
	 * For soft-reset, the secondary CPU can enter this func
	 * twice. 1 - using IPI, and 2. soft-reset.
	 * Tell the kexec CPU that entered via soft-reset and ready
	 * to go down.
	 */
	if (cpu_isset(cpu, cpus_in_sr)) {
		cpu_clear(cpu, cpus_in_sr);
		atomic_inc(&enter_on_soft_reset);
	}

	/*
	 * Starting the kdump boot.
	 * This barrier is needed to make sure that all CPUs are stopped.
	 * If not, soft-reset will be invoked to bring other CPUs.
	 */
	while (!cpu_isset(crashing_cpu, cpus_in_crash))
		cpu_relax();

	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 1);

#ifdef CONFIG_PPC64
	kexec_smp_wait();
#else
	for (;;);	/* FIXME */
#endif

	/* NOTREACHED */
}
Example #18
int __cpuinit __cpu_up(unsigned int cpu)
{
	extern int __cpuinit smp4m_boot_one_cpu(int);
	extern int __cpuinit smp4d_boot_one_cpu(int);
	int ret=0;

	switch(sparc_cpu_model) {
	case sun4:
		printk("SUN4\n");
		BUG();
		break;
	case sun4c:
		printk("SUN4C\n");
		BUG();
		break;
	case sun4m:
		ret = smp4m_boot_one_cpu(cpu);
		break;
	case sun4d:
		ret = smp4d_boot_one_cpu(cpu);
		break;
	case sparc_leon:
		ret = leon_boot_one_cpu(cpu);
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	};

	if (!ret) {
		cpu_set(cpu, smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
	}
	return ret;
}
Example #19
/**
 * Runs a function on the specified CPU.
 */
static void 
palacios_xcall(
	int			cpu_id, 
	void			(*fn)(void *arg),
	void *			arg
)
{
	cpumask_t cpu_mask;

	cpus_clear(cpu_mask);
	cpu_set(cpu_id, cpu_mask);

	printk(KERN_DEBUG
		"Palacios making xcall to cpu %d from cpu %d.\n",
		cpu_id, current->cpu_id);

	xcall_function(cpu_mask, fn, arg, 1);
}
Example #20
int cpu_up_check(unsigned int cpu)
{
	int rc = 0;

	if (local_cpu_hotplug_request()) {
		cpu_set(cpu, local_allowed_cpumask);
		if (!cpu_isset(cpu, xenbus_allowed_cpumask)) {
			printk("%s: attempt to bring up CPU %u disallowed by "
			       "remote admin.\n", __FUNCTION__, cpu);
			rc = -EBUSY;
		}
	} else if (!cpu_isset(cpu, local_allowed_cpumask) ||
		   !cpu_isset(cpu, xenbus_allowed_cpumask)) {
		rc = -EBUSY;
	}

	return rc;
}
Example #21
void switch_APIC_timer_to_ipi(void *cpumask)
{
	cpumask_t mask = *(cpumask_t *)cpumask;
	int cpu = smp_processor_id();

	if (cpu_isset(cpu, mask) &&
	    !cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) {
		disable_APIC_timer();
		cpu_set(cpu, timer_interrupt_broadcast_ipi_mask);
#ifdef CONFIG_HIGH_RES_TIMERS
		printk("Disabling NO_HZ and high resolution timers "
			"due to timer broadcasting\n");
		for_each_possible_cpu(cpu)
			per_cpu(lapic_events, cpu).features &=
				~CLOCK_EVT_FEAT_ONESHOT;
#endif
	}
}
Example #22
void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;

	for_each_possible_cpu(cpu) {
		cpus_clear(per_cpu(cpu_sibling_map, cpu));
		/*
		 * cpu_core_ map will be zeroed when the per
		 * cpu area is allocated.
		 *
		 * cpus_clear(per_cpu(cpu_core_map, cpu));
		 */
	}

	smp_store_cpu_info(0);
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	cpu_initialized_map = cpumask_of_cpu(0);

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = NR_CPUS-1; !cpu_isset(cpu, cpu_possible_map); cpu--)
			continue;
		cpu_clear(cpu, cpu_possible_map);
	}

	for_each_possible_cpu (cpu) {
		struct task_struct *idle;

		if (cpu == 0)
			continue;

		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);

		cpu_set(cpu, cpu_present_map);
	}

	//init_xenbus_allowed_cpumask();
}
Example #23
/*
 * Activate a secondary processor.
 */
void __init start_secondary(void)
{
	/*
	 * Dont put anything before smp_callin(), SMP
	 * booting is too fragile that we want to limit the
	 * things done here to the most necessary things.
	 */
	cpu_init();
	smp_callin();

	/* otherwise gcc will move up the smp_processor_id before the cpu_init */
	barrier();

	Dprintk("cpu %d: waiting for commence\n", smp_processor_id()); 
	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
		rep_nop();

	Dprintk("cpu %d: setting up apic clock\n", smp_processor_id()); 	
	setup_secondary_APIC_clock();

	Dprintk("cpu %d: enabling apic timer\n", smp_processor_id()); 

	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0(NULL);
		enable_8259A_irq(0);
	}


	enable_APIC_timer(); 

	/*
	 * low-memory mappings have been cleared, flush them from
	 * the local TLBs too.
	 */
	local_flush_tlb();

	Dprintk("cpu %d eSetting cpu_online_map\n", smp_processor_id()); 
	cpu_set(smp_processor_id(), cpu_online_map);
	wmb();
	
	cpu_idle();
}
Example #24
/**
 * build_cpu_to_node_map - setup cpu to node and node to cpumask arrays
 *
 * Build cpu to node mapping and initialize the per node cpu masks using
 * info from the node_cpuid array handed to us by ACPI.
 */
void __init build_cpu_to_node_map(void)
{
	int cpu, i, node;

	for(node=0; node < MAX_NUMNODES; node++)
		cpus_clear(node_to_cpu_mask[node]);

	for(cpu = 0; cpu < NR_CPUS; ++cpu) {
		node = -1;
		for (i = 0; i < NR_CPUS; ++i)
			if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
				node = node_cpuid[i].nid;
				break;
			}
		cpu_to_node_map[cpu] = (node >= 0) ? node : 0;
		if (node >= 0)
			cpu_set(cpu, node_to_cpu_mask[node]);
	}
}
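For reference, a minimal user-space sketch of the same mapping step, assuming a small hypothetical node_of_cpu[] table in place of the ACPI-provided node_cpuid[] array: each CPU's node id is recorded, and the CPU's bit is set in that node's mask, mirroring cpu_set(cpu, node_to_cpu_mask[node]).

#include <stdio.h>
#include <stdint.h>

#define NR_CPUS		4
#define MAX_NUMNODES	2

static int	cpu_to_node_map[NR_CPUS];
static uint32_t	node_to_cpu_mask[MAX_NUMNODES];

/* Hypothetical firmware-provided table: which node each CPU lives on. */
static const int node_of_cpu[NR_CPUS] = { 0, 0, 1, 1 };

int main(void)
{
	for (int node = 0; node < MAX_NUMNODES; node++)
		node_to_cpu_mask[node] = 0;		/* cpus_clear() analogue */

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		int node = node_of_cpu[cpu];

		cpu_to_node_map[cpu] = node;
		node_to_cpu_mask[node] |= 1u << cpu;	/* cpu_set() analogue */
	}

	for (int node = 0; node < MAX_NUMNODES; node++)
		printf("node %d: cpu mask 0x%x\n", node, node_to_cpu_mask[node]);
	return 0;
}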
Example #25
int __devinit __cpu_up(unsigned int cpu_id)
{
	int timeout;

	cpu_set(cpu_id, smp_commenced_mask);

	/*
	 * Wait 5s total for a response
	 */
	for (timeout = 0; timeout < 5000; timeout++) {
		if (cpu_isset(cpu_id, cpu_online_map))
			break;
		udelay(1000);
	}
	if (!cpu_isset(cpu_id, cpu_online_map))
		BUG();

	return 0;
}
Example #26
void pc_reset()
{
    cpu_set();
    resetx86();
    mem_updatecache();
    //timer_reset();
    dma_reset();
    fdc_reset();
    pic_reset();
    pit_reset();
    serial_reset();

    setpitclock(models[model].cpu[cpu_manufacturer].cpus[cpu].rspeed);

//        sb_reset();

    ali1429_reset();
//        video_init();
}
Example #27
int __cpuinit xen_cpu_up(unsigned int cpu)
{
	struct task_struct *idle = idle_task(cpu);
	int rc;

#if 0
	rc = cpu_up_check(cpu);
	if (rc)
		return rc;
#endif

	init_gdt(cpu);
	per_cpu(current_task, cpu) = idle;
	irq_ctx_init(cpu);
	xen_setup_timer(cpu);

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		alternatives_smp_switch(1);

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	smp_store_cpu_info(cpu);
	set_cpu_sibling_map(cpu);
	/* This must be done before setting cpu_online_map */
	wmb();

	cpu_set(cpu, cpu_online_map);

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	return 0;
}
Example #28
/* This is the first C code that secondary processors invoke.  */
void secondary_cpu_init(int cpuid, unsigned long r4)
{
    struct vcpu *vcpu;

    cpu_initialize(cpuid);
    smp_generic_take_timebase();

    /* If we are online, we must be able to ACK IPIs.  */
    mpic_setup_this_cpu();
    cpu_set(cpuid, cpu_online_map);

    vcpu = alloc_vcpu(idle_domain, cpuid, cpuid);
    BUG_ON(vcpu == NULL);

    set_current(idle_domain->vcpu[cpuid]);
    idle_vcpu[cpuid] = current;
    startup_cpu_idle_loop();

    panic("should never get here\n");
}
Example #29
void __cpuinit map_cpu_to_node(int cpu, int nid)
{
	int oldnid;
	if (nid < 0) { /* just initialize by zero */
		cpu_to_node_map[cpu] = 0;
		return;
	}
	/* sanity check first */
	oldnid = cpu_to_node_map[cpu];
	if (cpu_isset(cpu, node_to_cpu_mask[oldnid])) {
		return; /* nothing to do */
	}
	/* we don't have cpu-driven node hot add yet...
	   In usual case, node is created from SRAT at boot time. */
	if (!node_online(nid))
		nid = first_online_node;
	cpu_to_node_map[cpu] = nid;
	cpu_set(cpu, node_to_cpu_mask[nid]);
	return;
}
Example #30
void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
	int pxm;

	if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
		return;

	pxm = get_processor_proximity_domain(pa);

	/* record this node in proximity bitmap */
	pxm_bit_set(pxm);

	node_cpuid[srat_num_cpus].phys_id =
	    (pa->apic_id << 8) | (pa->local_sapic_eid);
	/* nid should be overridden as logical node id later */
	node_cpuid[srat_num_cpus].nid = pxm;
	cpu_set(srat_num_cpus, early_cpu_possible_map);
	srat_num_cpus++;
}