Example No. 1
0
/*
 * The idle thread. There's no useful work to be done, so just try to conserve
 * power and have a low exit latency (i.e. sit in a loop waiting for somebody to
 * say that they'd like to reschedule)
 */
void __noreturn cpu_idle(void)
{
	int cpu;

	/* CPU is going idle. */
	cpu = smp_processor_id();

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched() && cpu_online(cpu)) {
#ifdef CONFIG_MIPS_MT_SMTC
			extern void smtc_idle_loop_hook(void);

			smtc_idle_loop_hook();
#endif
			if (cpu_wait)
				(*cpu_wait)();
		}
#ifdef CONFIG_HOTPLUG_CPU
		if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) &&
		    (system_state == SYSTEM_RUNNING ||
		     system_state == SYSTEM_BOOTING))
			play_dead();
#endif
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Example No. 2
0
int __cpuinit
__cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int ret;
	int sapicid;

	sapicid = ia64_cpu_to_sapicid[cpu];
	if (sapicid == -1)
		return -EINVAL;

	/*
	 * Already booted cpu?  Not valid anymore since we don't
	 * do the idle loop tightspin anymore.
	 */
	if (cpu_isset(cpu, cpu_callin_map))
		return -EINVAL;

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	/* Processor goes to start_secondary(), sets online flag */
	ret = do_boot_cpu(sapicid, cpu, tidle);
	if (ret < 0)
		return ret;

	if (cpu_data(cpu)->threads_per_core == 1 &&
	    cpu_data(cpu)->cores_per_socket == 1) {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
		cpu_set(cpu, cpu_core_map[cpu]);
		return 0;
	}

	set_cpu_sibling_map(cpu);

	return 0;
}
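Nearly every example in this listing manipulates a cpumask with the legacy accessors cpu_isset(), cpu_set() and cpu_clear(), which boil down to bit operations on a fixed-size bitmap. The following is only a rough, self-contained userspace sketch of that idea, using hypothetical sketch_* names and none of the kernel's NR_CPUS or atomicity plumbing:

/* Hypothetical userspace sketch of cpumask-style bit helpers; not the
 * kernel implementation (which lives in <linux/cpumask.h>). */
#include <stdio.h>
#include <string.h>

#define SKETCH_NR_CPUS	64
#define SKETCH_BPL	(8 * sizeof(unsigned long))
#define SKETCH_LONGS	((SKETCH_NR_CPUS + SKETCH_BPL - 1) / SKETCH_BPL)

typedef struct { unsigned long bits[SKETCH_LONGS]; } sketch_cpumask_t;

static void sketch_cpu_set(int cpu, sketch_cpumask_t *m)
{
	m->bits[cpu / SKETCH_BPL] |= 1UL << (cpu % SKETCH_BPL);
}

static void sketch_cpu_clear(int cpu, sketch_cpumask_t *m)
{
	m->bits[cpu / SKETCH_BPL] &= ~(1UL << (cpu % SKETCH_BPL));
}

static int sketch_cpu_isset(int cpu, const sketch_cpumask_t *m)
{
	return (m->bits[cpu / SKETCH_BPL] >> (cpu % SKETCH_BPL)) & 1;
}

int main(void)
{
	sketch_cpumask_t map;

	memset(&map, 0, sizeof(map));
	sketch_cpu_set(3, &map);
	printf("cpu 3: %d, cpu 5: %d\n",
	       sketch_cpu_isset(3, &map), sketch_cpu_isset(5, &map));
	sketch_cpu_clear(3, &map);
	printf("cpu 3 after clear: %d\n", sketch_cpu_isset(3, &map));
	return 0;
}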
Example No. 3
0
void cpu_idle_wait(void)
{
    unsigned int cpu, this_cpu = get_cpu();
    cpumask_t map, tmp = current->cpus_allowed;

    set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
    put_cpu();

    cpus_clear(map);
    for_each_online_cpu(cpu) {
        per_cpu(cpu_idle_state, cpu) = 1;
        cpu_set(cpu, map);
    }

    __get_cpu_var(cpu_idle_state) = 0;

    wmb();
    do {
        ssleep(1);
        for_each_online_cpu(cpu) {
            if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
                cpu_clear(cpu, map);
        }
        cpus_and(map, map, cpu_online_map);
    } while (!cpus_empty(map));

    set_cpus_allowed(current, tmp);
}
Example No. 4
0
static void __smp_call_function_many(cpumask_t *mask, void (*func) (void *info),
				     void *info, int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus;
	int cpu = smp_processor_id();

	if (cpu_isset(cpu, *mask))
		cpu_clear(cpu, *mask);
	cpus = cpus_weight(*mask);
	if (!cpus)
		return;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	wmb();
	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_mask(*mask, CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (!wait)
		return;

	while (atomic_read(&data.finished) != cpus)
		cpu_relax();
}
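The started/finished counters above form a rendezvous: every target CPU increments started once it has picked up call_data and increments finished when the callback returns, while the sender spins until both counters reach the number of targets. Below is a rough userspace sketch of the same hand-shake, assuming C11 atomics, with POSIX threads standing in for the IPI and all names hypothetical:

/* Hypothetical userspace sketch of the started/finished rendezvous;
 * pthreads stand in for the IPI that kicks the other CPUs. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NWORKERS 4

struct call_data {
	void (*func)(void *info);
	void *info;
	atomic_int started;
	atomic_int finished;
};

static struct call_data data;

static void say_hello(void *info)
{
	printf("callback: %s\n", (const char *)info);
}

static void *worker(void *arg)
{
	(void)arg;
	atomic_fetch_add(&data.started, 1);	/* "I have seen call_data" */
	data.func(data.info);
	atomic_fetch_add(&data.finished, 1);	/* "my callback is done" */
	return NULL;
}

int main(void)
{
	pthread_t tid[NWORKERS];
	int i;

	data.func = say_hello;
	data.info = "hello from the sender";
	atomic_store(&data.started, 0);
	atomic_store(&data.finished, 0);

	for (i = 0; i < NWORKERS; i++)
		pthread_create(&tid[i], NULL, worker, NULL);

	/* Sender: wait until every target has started ... */
	while (atomic_load(&data.started) != NWORKERS)
		;
	/* ... and, since we "wait", until every target has finished. */
	while (atomic_load(&data.finished) != NWORKERS)
		;

	for (i = 0; i < NWORKERS; i++)
		pthread_join(tid[i], NULL);
	printf("all %d workers started and finished\n", NWORKERS);
	return 0;
}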
Example No. 5
0
/*
 * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
 * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
 * physical, not logical.
 */
int __devinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;

	/*
	 * Processor goes to start_secondary(), sets online flag
	 * The following code is purely to make sure
	 * Linux can schedule processes on this slave.
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		panic(KERN_ERR "Fork failed for CPU %d", cpu);

	prom_boot_secondary(cpu, idle);

	/*
	 * Trust is futile.  We should really have timeouts ...
	 */
	while (!cpu_isset(cpu, cpu_callin_map))
		udelay(100);

	cpu_set(cpu, cpu_online_map);

	return 0;
}
Example No. 6
0
static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
{
	int me = smp_processor_id();
	ia64_vector vector;
	unsigned long flags;

	for (vector = IA64_FIRST_DEVICE_VECTOR;
	     vector < IA64_LAST_DEVICE_VECTOR; vector++) {
		int irq;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __get_cpu_var(vector_irq)[vector];
		if (irq < 0)
			continue;

		desc = irq_to_desc(irq);
		cfg = irq_cfg + irq;
		raw_spin_lock(&desc->lock);
		if (!cfg->move_cleanup_count)
			goto unlock;

		if (!cpu_isset(me, cfg->old_domain))
			goto unlock;

		spin_lock_irqsave(&vector_lock, flags);
		__get_cpu_var(vector_irq)[vector] = -1;
		cpu_clear(me, vector_table[vector]);
		spin_unlock_irqrestore(&vector_lock, flags);
		cfg->move_cleanup_count--;
	unlock:
		raw_spin_unlock(&desc->lock);
	}
	return IRQ_HANDLED;
}
Example No. 7
0
asmlinkage void smp_invalidate_interrupt (void)
{
	unsigned long cpu;

	cpu = get_cpu();

	if (!cpu_isset(cpu, flush_cpumask))
		goto out;
		/*
		 * This was a BUG(), but until someone can quote me the
		 * line from the Intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs,
		 * it's staying as a return.
		 *
		 * BUG();
		 */
		 
	if (flush_mm == read_pda(active_mm)) {
		if (read_pda(mmu_state) == TLBSTATE_OK) {
			if (flush_va == FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else
			leave_mm(cpu);
	}
	ack_APIC_irq();
	cpu_clear(cpu, flush_cpumask);

out:
	put_cpu_no_resched();
}
Example No. 8
0
/*==========================================================================*
 * Name:         smp_invalidate_interrupt
 *
 * Description:  This routine executes on the CPU which received
 *               'INVALIDATE_TLB_IPI'.
 *               1. Flush the local TLB.
 *               2. Report that the TLB flush has finished.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_invalidate_interrupt(void)
{
	int cpu_id = smp_processor_id();
	unsigned long *mmc = &flush_mm->context[cpu_id];

	if (!cpu_isset(cpu_id, flush_cpumask))
		return;

	if (flush_va == FLUSH_ALL) {
		*mmc = NO_CONTEXT;
		if (flush_mm == current->active_mm)
			activate_context(flush_mm);
		else
			cpu_clear(cpu_id, flush_mm->cpu_vm_mask);
	} else {
		unsigned long va = flush_va;

		if (*mmc != NO_CONTEXT) {
			va &= PAGE_MASK;
			va |= (*mmc & MMU_CONTEXT_ASID_MASK);
			__flush_tlb_page(va);
		}
	}
	cpu_clear(cpu_id, flush_cpumask);
}
Example No. 9
0
/**
 * smp_startup_cpu() - start the given cpu
 *
 * At boot time, there is nothing to do for primary threads which were
 * started from Open Firmware.  For anything else, call RTAS with the
 * appropriate start location.
 *
 * Returns:
 *	0	- failure
 *	1	- success
 */
static inline int __devinit smp_startup_cpu(unsigned int lcpu)
{
	int status;
	unsigned long start_here = __pa((u32)*((unsigned long *)
					       generic_secondary_smp_init));
	unsigned int pcpu;
	int start_cpu;

	if (cpu_isset(lcpu, of_spin_map))
		/* Already started by OF and sitting in spin loop */
		return 1;

	pcpu = get_hard_smp_processor_id(lcpu);

	/* Fixup atomic count: it exited inside IRQ handler. */
	task_thread_info(paca[lcpu].__current)->preempt_count	= 0;

	/* 
	 * If the RTAS start-cpu token does not exist then presume the
	 * cpu is already spinning.
	 */
	start_cpu = rtas_token("start-cpu");
	if (start_cpu == RTAS_UNKNOWN_SERVICE)
		return 1;

	status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, pcpu);
	if (status != 0) {
		printk(KERN_ERR "start-cpu failed: %i\n", status);
		return 0;
	}

	return 1;
}
Example No. 10
0
static int
salinfo_log_clear(struct salinfo_data *data, int cpu)
{
	sal_log_record_header_t *rh;
	unsigned long flags;
	spin_lock_irqsave(&data_saved_lock, flags);
	data->state = STATE_NO_DATA;
	if (!cpu_isset(cpu, data->cpu_event)) {
		spin_unlock_irqrestore(&data_saved_lock, flags);
		return 0;
	}
	cpu_clear(cpu, data->cpu_event);
	if (data->saved_num) {
		shift1_data_saved(data, data->saved_num - 1);
		data->saved_num = 0;
	}
	spin_unlock_irqrestore(&data_saved_lock, flags);
	rh = (sal_log_record_header_t *)(data->log_buffer);
	/* Corrected errors have already been cleared from SAL */
	if (rh->severity != sal_log_severity_corrected)
		call_on_cpu(cpu, salinfo_log_clear_cpu, data);
	/* clearing a record may make a new record visible */
	salinfo_log_new_read(cpu, data);
	if (data->state == STATE_LOG_RECORD) {
		spin_lock_irqsave(&data_saved_lock, flags);
		cpu_set(cpu, data->cpu_event);
		salinfo_work_to_do(data);
		spin_unlock_irqrestore(&data_saved_lock, flags);
	}
	return 0;
}
Example No. 11
0
/*
 * Broadcast the event to the cpus, which are set in the mask
 */
int tick_do_broadcast(cpumask_t mask)
{
	int ret = 0, cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check, if the current cpu is in the mask
	 */
	if (cpu_isset(cpu, mask)) {
		cpu_clear(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
		ret = 1;
	}

	if (!cpus_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		cpu = first_cpu(mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->broadcast(mask);
		ret = 1;
	}
	return ret;
}
Example No. 12
0
int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if(!cpus_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			broadcast = cpu_isset(smp_processor_id(),
					      tick_broadcast_mask);
			break;
		case TICKDEV_MODE_ONESHOT:
			broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}
Example No. 13
static int show_cpuinfo(struct seq_file *m, void *v)
{
	unsigned int version;
	unsigned int fp_vers;
	unsigned long n = (unsigned long) v - 1;
	char fmt [64];

	preempt_disable();
	version = current_cpu_data.processor_id;
	fp_vers = current_cpu_data.fpu_id;

#ifdef CONFIG_SMP
	if (!cpu_isset(n, cpu_online_map))
	{
		preempt_enable();
		return 0;
	}
#endif

	/*
	 * For the first processor also print the system type
	 */
	if (n == 0)
		seq_printf(m, "system type\t\t: %s\n", get_system_type());

	seq_printf(m, "processor\t\t: %ld\n", n);
	sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n",
	        cpu_has_fpu ? "  FPU V%d.%d" : "");
	seq_printf(m, fmt, cpu_name[current_cpu_data.cputype <= CPU_LAST ?
	                            current_cpu_data.cputype : CPU_UNKNOWN],
	                           (version >> 4) & 0x0f, version & 0x0f,
	                           (fp_vers >> 4) & 0x0f, fp_vers & 0x0f);
	seq_printf(m, "BogoMIPS\t\t: %lu.%02lu\n",
	              cpu_data[n].udelay_val / (500000/HZ),
	              (cpu_data[n].udelay_val / (5000/HZ)) % 100);
	seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no");
	seq_printf(m, "microsecond timers\t: %s\n",
	              cpu_has_counter ? "yes" : "no");
	seq_printf(m, "tlb_entries\t\t: %d\n", current_cpu_data.tlbsize);
	seq_printf(m, "extra interrupt vector\t: %s\n",
	              cpu_has_divec ? "yes" : "no");
	seq_printf(m, "hardware watchpoint\t: %s\n",
	              cpu_has_watch ? "yes" : "no");
	seq_printf(m, "ASEs implemented\t:%s%s%s%s%s%s\n",
		      cpu_has_mips16 ? " mips16" : "",
		      cpu_has_mdmx ? " mdmx" : "",
		      cpu_has_mips3d ? " mips3d" : "",
		      cpu_has_smartmips ? " smartmips" : "",
		      cpu_has_dsp ? " dsp" : "",
		      cpu_has_mipsmt ? " mt" : ""
		);

	sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
	        cpu_has_vce ? "%u" : "not available");
	seq_printf(m, fmt, 'D', vced_count);
	seq_printf(m, fmt, 'I', vcei_count);

	preempt_enable();
	return 0;
}
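Note how show_cpuinfo() above builds its format strings at run time: sprintf() first produces a format that contains either a '%u' conversion or literal text, and seq_printf() then expands it once per value. A rough userspace sketch of that two-stage format construction, with printf() standing in for seq_printf() and all values hypothetical:

/* Hypothetical sketch of the two-stage format construction used for the
 * VCE lines: sprintf() builds the format, printf() expands it. */
#include <stdio.h>

int main(void)
{
	char fmt[64];
	int has_vce = 1;			/* pretend the CPU reports VCE counters */
	unsigned int vced_count = 7, vcei_count = 2;

	sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
		has_vce ? "%u" : "not available");
	printf(fmt, 'D', vced_count);	/* extra args are simply ignored   */
	printf(fmt, 'I', vcei_count);	/* when the format has no %u left  */
	return 0;
}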
Example No. 14
0
void unmap_cpu_from_node(int cpu, int nid)
{
	WARN_ON(!cpu_isset(cpu, node_to_cpu_mask[nid]));
	WARN_ON(cpu_to_node_map[cpu] != nid);
	cpu_to_node_map[cpu] = 0;
	cpu_clear(cpu, node_to_cpu_mask[nid]);
}
Example No. 15
0
void __init replicate_kernel_text()
{
    cnodeid_t cnode;
    nasid_t client_nasid;
    nasid_t server_nasid;

    server_nasid = master_nasid;

    /* Record where the master node should get its kernel text */
    set_ktext_source(master_nasid, master_nasid);

    for_each_online_node(cnode) {
        if (cnode == 0)
            continue;
        client_nasid = COMPACT_TO_NASID_NODEID(cnode);

        /* Check if this node should get a copy of the kernel */
        if (cpu_isset(cnode, ktext_repmask)) {
            server_nasid = client_nasid;
            copy_kernel(server_nasid);
        }

        /* Record where this node should get its kernel text */
        set_ktext_source(client_nasid, server_nasid);
    }
}
Example No. 16
0
fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned long cpu;

	cpu = get_cpu();

	if (!cpu_isset(cpu, flush_cpumask))
		goto out;
		/*
		 * This was a BUG(), but until someone can quote me the
		 * line from the Intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs,
		 * it's staying as a return.
		 *
		 * BUG();
		 */
		 
	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
			if (flush_va == FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else
			leave_mm(cpu);
	}
	ack_APIC_irq();
	smp_mb__before_clear_bit();
	cpu_clear(cpu, flush_cpumask);
	smp_mb__after_clear_bit();
out:
	put_cpu_no_resched();
}
Example No. 17
0
static void crash_kexec_prepare_cpus(int cpu)
{
    unsigned int msecs;

    unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */

    crash_send_ipi(crash_ipi_callback);
    smp_wmb();

    printk(KERN_EMERG "Sending IPI to other cpus...\n");
    msecs = 10000;
    while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
        cpu_relax();
        mdelay(1);
    }

    /* Would it be better to replace the trap vector here? */

    if (cpus_weight(cpus_in_crash) < ncpus) {
        printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n",
               ncpus - cpus_weight(cpus_in_crash));
        printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n");
        cpus_in_sr = CPU_MASK_NONE;
        atomic_set(&enter_on_soft_reset, 0);
        while (cpus_weight(cpus_in_crash) < ncpus)
            cpu_relax();
    }
    /*
     * Make sure all CPUs are entered via soft-reset if the kdump is
     * invoked using soft-reset.
     */
    if (cpu_isset(cpu, cpus_in_sr))
        crash_soft_reset_check(cpu);
    /* Leave the IPI callback set */
}
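The wait above is bounded: the panic CPU polls the count of CPUs that have checked in, giving up after roughly 10 seconds of 1 ms delays before falling back to soft-reset. A rough userspace sketch of that bounded-poll pattern, with hypothetical names and nanosleep() standing in for mdelay():

/* Hypothetical sketch of a bounded poll: wait for a condition, but give
 * up after a millisecond budget. */
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static atomic_int cpus_checked_in;

static void sleep_1ms(void)
{
	struct timespec ts = { 0, 1000000 };	/* 1 ms */

	nanosleep(&ts, NULL);
}

/* Return 1 if all expected responders arrived within budget_ms, else 0. */
static int wait_for_responders(int expected, unsigned int budget_ms)
{
	while (atomic_load(&cpus_checked_in) < expected && budget_ms-- > 0)
		sleep_1ms();
	return atomic_load(&cpus_checked_in) >= expected;
}

int main(void)
{
	atomic_store(&cpus_checked_in, 0);	/* nobody will check in here */

	if (!wait_for_responders(1, 50))
		printf("timed out; would fall back to soft-reset\n");
	return 0;
}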
Example No. 18
0
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
						unsigned long va)
{
	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	BUG_ON(cpus_empty(cpumask));
	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
	BUG_ON(!mm);

	/* If a CPU which we ran on has gone down, OK. */
	cpus_and(cpumask, cpumask, cpu_online_map);
	if (cpus_empty(cpumask))
		return;

	/*
	 * I'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * This temporarily turns IRQs off, so that lockups are
	 * detected by the NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);
	
	flush_mm = mm;
	flush_va = va;
#if NR_CPUS <= BITS_PER_LONG
	atomic_set_mask(cpumask, &flush_cpumask);
#else
	{
		int k;
		unsigned long *flush_mask = (unsigned long *)&flush_cpumask;
		unsigned long *cpu_mask = (unsigned long *)&cpumask;
		for (k = 0; k < BITS_TO_LONGS(NR_CPUS); ++k)
			atomic_set_mask(cpu_mask[k], &flush_mask[k]);
	}
#endif

	/*
	 * Make the above memory operations globally visible before
	 * sending the IPI.
	 */
	smp_mb();
	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(flush_cpumask, INVALIDATE_TLB_VECTOR);

	while (!cpus_empty(flush_cpumask))
		/* nothing. lockup detection does not belong here */
		mb();

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
Example No. 19
0
/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast(int cpu)
{
	if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);

		clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
	}
}
Example No. 20
0
/*==========================================================================*
 * Name:         flush_tlb_others
 *
 * Description:  This routine requests the other CPUs to execute a TLB flush.
 *               1. Set up the parameters.
 *               2. Send 'INVALIDATE_TLB_IPI' to the other CPUs, requesting
 *                  them to execute 'smp_invalidate_interrupt()'.
 *               3. Wait until the other CPUs' flushes have finished.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpumask - bitmap of target CPUs
 *               *mm - a pointer to the mm struct whose TLB entries are flushed
 *               *vma - a pointer to the vma struct that contains va
 *               va - virtual address to flush from the TLB
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
	struct vm_area_struct *vma, unsigned long va)
{
	unsigned long *mask;
#ifdef DEBUG_SMP
	unsigned long flags;
	__save_flags(flags);
	if (!(flags & 0x0040))	/* Interrupt Disable NONONO */
		BUG();
#endif /* DEBUG_SMP */

	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - we do not send IPIs to not-yet booted CPUs.
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	BUG_ON(cpus_empty(cpumask));

	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
	BUG_ON(!mm);

	/* If a CPU which we ran on has gone down, OK. */
	cpus_and(cpumask, cpumask, cpu_online_map);
	if (cpus_empty(cpumask))
		return;

	/*
	 * I'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * This temporarily turns IRQs off, so that lockups are
	 * detected by the NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_vma = vma;
	flush_va = va;
	mask=cpus_addr(cpumask);
	atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);

	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);

	while (!cpus_empty(flush_cpumask)) {
		/* nothing. lockup detection does not belong here */
		mb();
	}

	flush_mm = NULL;
	flush_vma = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
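Together with smp_invalidate_interrupt() (Example No. 8), this is a request/acknowledge protocol over a shared cpumask: the initiator sets one bit per target CPU, sends the IPI and spins until the mask is empty, and each target clears its own bit once its local flush is done. Below is a rough userspace sketch of that hand-shake using a single atomic word as the mask, with threads standing in for the IPI and all names hypothetical:

/* Hypothetical sketch of the request/ack protocol behind flush_tlb_others():
 * the initiator sets one bit per target and spins until the mask empties;
 * each responder clears its own bit when its work is done. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTARGETS 4

static atomic_ulong flush_mask;

static void *responder(void *arg)
{
	long cpu = (long)arg;

	/* ... the local "TLB flush" work would happen here ... */
	atomic_fetch_and(&flush_mask, ~(1UL << cpu));	/* acknowledge */
	return NULL;
}

int main(void)
{
	pthread_t tid[NTARGETS];
	long cpu;

	/* Initiator: mark every target as pending before "sending the IPI". */
	atomic_store(&flush_mask, (1UL << NTARGETS) - 1);

	for (cpu = 0; cpu < NTARGETS; cpu++)
		pthread_create(&tid[cpu], NULL, responder, (void *)cpu);

	/* Spin until every target has cleared its bit. */
	while (atomic_load(&flush_mask) != 0)
		;

	for (cpu = 0; cpu < NTARGETS; cpu++)
		pthread_join(tid[cpu], NULL);
	printf("all targets acknowledged the flush\n");
	return 0;
}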
Example No. 21
0
void crash_ipi_callback(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	if (!cpu_online(cpu))
		return;

	hard_irq_disable();
	if (!cpu_isset(cpu, cpus_in_crash))
		crash_save_cpu(regs, cpu);
	cpu_set(cpu, cpus_in_crash);

	/*
	 * Entered via soft-reset - either the kdump process was
	 * invoked using soft-reset, or the user activated it because
	 * some CPU did not respond to an IPI.
	 * With soft-reset a secondary CPU can enter this function
	 * twice: once via the IPI and once via soft-reset.
	 * Tell the kexec CPU that this CPU entered via soft-reset
	 * and is ready to go down.
	 */
	if (cpu_isset(cpu, cpus_in_sr)) {
		cpu_clear(cpu, cpus_in_sr);
		atomic_inc(&enter_on_soft_reset);
	}

	/*
	 * Starting the kdump boot.
	 * This barrier is needed to make sure that all CPUs are stopped.
	 * If not, soft-reset will be invoked to bring in the other CPUs.
	 */
	while (!cpu_isset(crashing_cpu, cpus_in_crash))
		cpu_relax();

	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 1);

#ifdef CONFIG_PPC64
	kexec_smp_wait();
#else
	for (;;);	/* FIXME */
#endif

	/* NOTREACHED */
}
Example No. 22
/*
 * Powerstate information: the system enters/leaves a state where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	if (!cpu_isset(*oncpu, cpu_online_map))
		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
		       "offline CPU #%d\n", *oncpu);
	else
		smp_call_function_single(*oncpu, tick_do_broadcast_on_off,
					 &reason, 1);
}
Example No. 23
0
int cpu_up_check(unsigned int cpu)
{
	int rc = 0;

	if (local_cpu_hotplug_request()) {
		cpu_set(cpu, local_allowed_cpumask);
		if (!cpu_isset(cpu, xenbus_allowed_cpumask)) {
			printk("%s: attempt to bring up CPU %u disallowed by "
			       "remote admin.\n", __FUNCTION__, cpu);
			rc = -EBUSY;
		}
	} else if (!cpu_isset(cpu, local_allowed_cpumask) ||
		   !cpu_isset(cpu, xenbus_allowed_cpumask)) {
		rc = -EBUSY;
	}

	return rc;
}
Example No. 24
0
static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
	while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
		++*pos;
#endif
	return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
}
Example No. 25
0
static inline void map_cpu_to_node(int cpu, int node)
{
	dbg("cpu %d maps to domain %d\n", cpu, node);
	numa_cpu_lookup_table[cpu] = node;
	if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node]))) {
		cpu_set(cpu, numa_cpumask_lookup_table[node]);
		nr_cpus_in_node[node]++;
	}
}
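The mapping above keeps two views consistent: a per-CPU array giving each CPU's node, and a per-node mask of its CPUs with a count alongside it (the ia64 unmap side is Example No. 14). Below is only a rough userspace sketch of keeping such a pair in sync, with hypothetical sk_* names, a single word per node mask, and no locking:

/* Hypothetical sketch of the paired lookup structures kept in sync by
 * the map/unmap helpers; sizes and names are illustrative only. */
#include <stdio.h>

#define SK_NR_CPUS	8
#define SK_NR_NODES	2

static int cpu_to_node[SK_NR_CPUS];
static unsigned long node_to_cpu_mask[SK_NR_NODES];
static int nr_cpus_in_node[SK_NR_NODES];

static void sk_map_cpu_to_node(int cpu, int node)
{
	cpu_to_node[cpu] = node;
	if (!(node_to_cpu_mask[node] & (1UL << cpu))) {
		node_to_cpu_mask[node] |= 1UL << cpu;
		nr_cpus_in_node[node]++;
	}
}

static void sk_unmap_cpu_from_node(int cpu, int node)
{
	if (cpu_to_node[cpu] != node)
		fprintf(stderr, "cpu %d is not mapped to node %d\n", cpu, node);
	cpu_to_node[cpu] = 0;
	if (node_to_cpu_mask[node] & (1UL << cpu)) {
		node_to_cpu_mask[node] &= ~(1UL << cpu);
		nr_cpus_in_node[node]--;
	}
}

int main(void)
{
	sk_map_cpu_to_node(3, 1);
	sk_map_cpu_to_node(4, 1);
	printf("node 1: %d cpu(s), mask 0x%lx\n",
	       nr_cpus_in_node[1], node_to_cpu_mask[1]);
	sk_unmap_cpu_from_node(3, 1);
	printf("node 1: %d cpu(s), mask 0x%lx\n",
	       nr_cpus_in_node[1], node_to_cpu_mask[1]);
	return 0;
}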
Example No. 26
0
int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

	if(!cpus_empty(tick_broadcast_oneshot_mask))
		tick_broadcast_set_event(ktime_get(), 1);

	return cpu_isset(smp_processor_id(), tick_broadcast_oneshot_mask);
}
Example No. 27
0
void switch_APIC_timer_to_ipi(void *cpumask)
{
	cpumask_t mask = *(cpumask_t *)cpumask;
	int cpu = smp_processor_id();

	if (cpu_isset(cpu, mask) &&
	    !cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) {
		disable_APIC_timer();
		cpu_set(cpu, timer_interrupt_broadcast_ipi_mask);
#ifdef CONFIG_HIGH_RES_TIMERS
		printk("Disabling NO_HZ and high resolution timers "
			"due to timer broadcasting\n");
		for_each_possible_cpu(cpu)
			per_cpu(lapic_events, cpu).features &=
				~CLOCK_EVT_FEAT_ONESHOT;
#endif
	}
}
Example No. 28
0
static void __cpuinit topology_remove_dev(unsigned int cpu)
{
	struct sys_device *sys_dev = get_cpu_sysdev(cpu);

	if (!cpu_isset(cpu, topology_dev_map))
		return;
	cpu_clear(cpu, topology_dev_map);
	sysfs_remove_group(&sys_dev->kobj, &topology_attr_group);
}
Example No. 29
void profile_tick(int type)
{
	struct pt_regs *regs = get_irq_regs();

	if (type == CPU_PROFILING && timer_hook)
		timer_hook(regs);
	if (!user_mode(regs) && cpu_isset(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}
Example No. 30
0
int __devinit __cpu_up(unsigned int cpu_id)
{
	int timeout;

	cpu_set(cpu_id, smp_commenced_mask);

	/*
	 * Wait 5s total for a response
	 */
	for (timeout = 0; timeout < 5000; timeout++) {
		if (cpu_isset(cpu_id, cpu_online_map))
			break;
		udelay(1000);
	}
	if (!cpu_isset(cpu_id, cpu_online_map))
		BUG();

	return 0;
}