Example #1
/**
 * padata_do_parallel - padata parallelization function
 *
 * @pinst: padata instance
 * @padata: object to be parallelized
 * @cb_cpu: cpu the serialization callback function will run on,
 *          must be in the serial cpumask of padata (i.e. cpumask.cbcpu).
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 */
int padata_do_parallel(struct padata_instance *pinst,
		       struct padata_priv *padata, int cb_cpu)
{
	int target_cpu, err;
	struct padata_parallel_queue *queue;
	struct parallel_data *pd;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(pinst->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu))
		goto out;

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
		goto out;

	err = 0;
	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = cb_cpu;

	target_cpu = padata_cpu_hash(pd);
	padata->cpu = target_cpu;
	queue = per_cpu_ptr(pd->pqueue, target_cpu);

	spin_lock(&queue->parallel.lock);
	list_add_tail(&padata->list, &queue->parallel.list);
	spin_unlock(&queue->parallel.lock);

	queue_work_on(target_cpu, pinst->wq, &queue->work);

out:
	rcu_read_unlock_bh();

	return err;
}
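The kernel-doc above spells out the contract: cb_cpu must lie in the serial cpumask, and every object handed to padata_do_parallel() must eventually be passed to padata_do_serial(). A minimal caller sketch of that contract follows; my_request, my_parallel, my_serial and my_submit are hypothetical names, and it assumes a padata_instance was already created elsewhere.

#include <linux/kernel.h>
#include <linux/padata.h>
#include <linux/slab.h>

/* Hypothetical unit of work embedding the padata control structure. */
struct my_request {
	struct padata_priv padata;
	/* ... payload ... */
};

/* Runs in parallel with BHs off; must hand the object back for serialization. */
static void my_parallel(struct padata_priv *padata)
{
	/* heavy per-object work goes here */
	padata_do_serial(padata);
}

/* Runs on cb_cpu, in the order the objects were submitted. */
static void my_serial(struct padata_priv *padata)
{
	struct my_request *req = container_of(padata, struct my_request, padata);

	kfree(req);
}

/* Submission path: pinst is a padata instance created elsewhere. */
static int my_submit(struct padata_instance *pinst, struct my_request *req,
		     int cb_cpu)
{
	req->padata.parallel = my_parallel;
	req->padata.serial   = my_serial;

	return padata_do_parallel(pinst, &req->padata, cb_cpu);
}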
/*
 * Check whether the newly registered device should be used. Called with
 * clockevents_lock held and interrupts disabled.
 */
void tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu;

	cpu = smp_processor_id();
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		goto out_bc;

	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;

	/* cpu local device ? */
	if (!tick_check_percpu(curdev, newdev, cpu))
		goto out_bc;

	/* Preference decision */
	if (!tick_check_preferred(curdev, newdev))
		goto out_bc;

	if (!try_module_get(newdev->owner))
		return;

	/*
	 * Replace any existing device with the new one. If the
	 * current device is the broadcast device, do not give it
	 * back to the clockevents layer!
	 */
	if (tick_is_broadcast_device(curdev)) {
		clockevents_shutdown(curdev);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();
	return;

out_bc:
	/*
	 * Can the new device be used as a broadcast device?
	 */
	tick_install_broadcast_device(newdev);
}
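tick_check_new_device() is reached through clockevents_register_device(), so the decision above is driven by whatever a timer driver fills into its struct clock_event_device. A sketch of that driver side, with hypothetical names (my_timer_evt, my_timer_set_next_event) and an assumed per-cpu one-shot timer; the min/max delta values are placeholders.

#include <linux/clockchips.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

/* Hypothetical hardware hook: arm the per-cpu comparator 'delta' ticks ahead. */
static int my_timer_set_next_event(unsigned long delta,
				   struct clock_event_device *evt)
{
	/* program the timer hardware here */
	return 0;
}

static DEFINE_PER_CPU(struct clock_event_device, my_timer_evt) = {
	.name		= "my-timer",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 300,
	.set_next_event	= my_timer_set_next_event,
};

/* Called on each CPU during bring-up; registration funnels into
 * tick_check_new_device() above, which decides whether to adopt the device. */
static void my_timer_setup(unsigned int cpu, u32 freq_hz)
{
	struct clock_event_device *evt = &per_cpu(my_timer_evt, cpu);

	evt->cpumask = cpumask_of(cpu);
	clockevents_config_and_register(evt, freq_hz, 0xf, 0x7fffffff);
}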
Example #3
/**
 * smp_startup_cpu() - start the given cpu
 *
 * At boot time, there is nothing to do for primary threads which were
 * started from Open Firmware.  For anything else, call RTAS with the
 * appropriate start location.
 *
 * Returns:
 *	0	- failure
 *	1	- success
 */
static inline int smp_startup_cpu(unsigned int lcpu)
{
	int status;
	unsigned long start_here =
			__pa(ppc_function_entry(generic_secondary_smp_init));
	unsigned int pcpu;
	int start_cpu;

	if (cpumask_test_cpu(lcpu, of_spin_mask))
		/* Already started by OF and sitting in spin loop */
		return 1;

	pcpu = get_hard_smp_processor_id(lcpu);

	/* Check to see if the CPU is out of FW already for kexec */
	if (smp_query_cpu_stopped(pcpu) == QCSS_NOT_STOPPED) {
		cpumask_set_cpu(lcpu, of_spin_mask);
		return 1;
	}

	/* Fixup atomic count: it exited inside IRQ handler. */
	task_thread_info(paca[lcpu].__current)->preempt_count	= 0;
#ifdef CONFIG_HOTPLUG_CPU
	if (get_cpu_current_state(lcpu) == CPU_STATE_INACTIVE)
		goto out;
#endif
	/* 
	 * If the RTAS start-cpu token does not exist then presume the
	 * cpu is already spinning.
	 */
	start_cpu = rtas_token("start-cpu");
	if (start_cpu == RTAS_UNKNOWN_SERVICE)
		return 1;

	status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, pcpu);
	if (status != 0) {
		printk(KERN_ERR "start-cpu failed: %i\n", status);
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
out:
#endif
	return 1;
}
Example #4
void irq_complete_move(unsigned irq)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	cpumask_t cleanup_mask;
	int i;

	if (likely(!cfg->move_in_progress))
		return;

	if (unlikely(cpumask_test_cpu(smp_processor_id(), &cfg->old_domain)))
		return;

	cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
	cfg->move_cleanup_count = cpumask_weight(&cleanup_mask);
	for_each_cpu(i, &cleanup_mask)
		platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
	cfg->move_in_progress = 0;
}
Example #5
static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			      const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	}
	if (cpu < nr_cpu_ids)
		return bigsmp_cpu_to_logical_apicid(cpu);

	return BAD_APICID;
}
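The loop above is the common idiom for "first online CPU in the intersection of two masks". The same lookup written with explicit cpumask operations, as a sketch; first_online_cpu_in_both is a hypothetical helper, and the on-stack cpumask_t is fine for illustration but avoided in real kernel code when NR_CPUS is large.

#include <linux/cpumask.h>

static int first_online_cpu_in_both(const struct cpumask *a,
				    const struct cpumask *b)
{
	cpumask_t tmp;

	cpumask_and(&tmp, a, b);			/* a & b */
	cpumask_and(&tmp, &tmp, cpu_online_mask);	/* ... & online */

	return cpumask_first(&tmp);	/* >= nr_cpu_ids when the result is empty */
}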
Example #6
static void tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/* If the current cpu is in the mask, handle the tick locally */
	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpumask_empty(mask)) {
		/* Relay the event to the remaining cpus via the first device in the mask */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
}
Example #7
int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
			    const struct cpumask *online_mask)
{
	unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
	cpumask_var_t cpus;

	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
		return 1;

	cpumask_clear(cpus);
	nr_cpus = nr_uniq_cpus = 0;
	for_each_cpu(i, online_mask) {
		nr_cpus++;
		first_sibling = get_first_sibling(i);
		if (!cpumask_test_cpu(first_sibling, cpus))
			nr_uniq_cpus++;
		cpumask_set_cpu(i, cpus);
	}
/**
 * flush_tlb_others - Tell the specified CPUs to invalidate their TLBs
 * @cpumask: The list of CPUs to target.
 * @mm: The VM context to flush from (if va!=FLUSH_ALL).
 * @va: Virtual address to flush or FLUSH_ALL to flush everything.
 */
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
			     unsigned long va)
{
	cpumask_t tmp;

	/* A couple of sanity checks (to be removed):
	 * - mask must not be empty
	 * - current CPU must not be in mask
	 * - we do not send IPIs to as-yet unbooted CPUs.
	 */
	BUG_ON(!mm);
	BUG_ON(cpumask_empty(&cpumask));
	BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));

	cpumask_and(&tmp, &cpumask, cpu_online_mask);
	BUG_ON(!cpumask_equal(&cpumask, &tmp));

	/* I'm not happy about this global shared spinlock in the MM hot path,
	 * but we'll see how contended it is.
	 *
	 * Temporarily this turns IRQs off, so that lockups are detected by the
	 * NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_va = va;
#if NR_CPUS <= BITS_PER_LONG
	atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
#else
#error Not supported.
#endif

	/* FIXME: if NR_CPUS>=3, change send_IPI_mask */
	smp_call_function(smp_flush_tlb, NULL, 1);

	while (!cpumask_empty(&flush_cpumask))
		/* Lockup detection does not belong here */
		smp_mb();

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
Example #9
/*==========================================================================*
 * Name:         start_secondary
 *
 * Description:  This routine activates a secondary processor.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *unused - currently unused.
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 * 2003-06-24 hy  modify for linux-2.5.69
 *
 *==========================================================================*/
int __init start_secondary(void *unused)
{
	cpu_init();
	preempt_disable();
	smp_callin();
	while (!cpumask_test_cpu(smp_processor_id(), &smp_commenced_mask))
		cpu_relax();

	smp_online();

	/*
	 * low-memory mappings have been cleared, flush them from
	 * the local TLBs too.
	 */
	local_flush_tlb_all();

	cpu_idle();
	return 0;
}
void _intc_enable(struct irq_data *data, unsigned long handle)
{
	unsigned int irq = data->irq;
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long addr;
	unsigned int cpu;

	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
#ifdef CONFIG_SMP
		if (!cpumask_test_cpu(cpu, data->affinity))
			continue;
#endif
		addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
		intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\
						    [_INTC_FN(handle)], irq);
	}

	intc_balancing_enable(irq);
}
/* Parse the boot-time nohz CPU list from the kernel parameters. */
static int __init tick_nohz_full_setup(char *str)
{
	int cpu;

	alloc_bootmem_cpumask_var(&nohz_full_mask);
	if (cpulist_parse(str, nohz_full_mask) < 0) {
		pr_warning("NOHZ: Incorrect nohz_full cpumask\n");
		return 1;
	}

	cpu = smp_processor_id();
	if (cpumask_test_cpu(cpu, nohz_full_mask)) {
		pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu);
		cpumask_clear_cpu(cpu, nohz_full_mask);
	}
	have_nohz_full_mask = true;

	return 1;
}
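In the kernel source this parser is registered as an early boot parameter with __setup(); booting with something like nohz_full=1-7 then marks CPUs 1-7 as full-dynticks, while the CPU doing the boot is cleared from the mask for timekeeping, exactly as the function above does. A sketch of that wiring:

/* Register the "nohz_full=" command-line parameter; tick_nohz_full_setup()
 * above is invoked with the value string (e.g. "1-7") during early boot. */
__setup("nohz_full=", tick_nohz_full_setup);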
Example #12
asmlinkage
#endif
void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned int cpu;
	unsigned int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_rax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &flush_state[sender];

	if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
		goto out;
		/*
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * it's staying as a return
		 *
		 * BUG();
		 */

	if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
		if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
			if (f->flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
	smp_mb__after_clear_bit();
	inc_irq_stat(irq_tlb_count);
}
Example #13
/**
 * Xen scheduler callback function to select a CPU for the VCPU to run on
 *
 * @param ops       Pointer to this instance of the scheduler structure
 * @param v         Pointer to the VCPU structure for the current domain
 *
 * @return          Number of selected physical CPU
 */
static int
a653sched_pick_cpu(const struct scheduler *ops, struct vcpu *vc)
{
    cpumask_t *online;
    unsigned int cpu;

    /* 
     * If present, prefer vc's current processor, else
     * just find the first valid vcpu .
     */
    online = cpupool_scheduler_cpumask(vc->domain->cpupool);

    cpu = cpumask_first(online);

    if ( cpumask_test_cpu(vc->processor, online)
         || (cpu >= nr_cpu_ids) )
        cpu = vc->processor;

    return cpu;
}
Example #14
void flush_area_mask(const cpumask_t *mask, const void *va, unsigned int flags)
{
    ASSERT(local_irq_is_enabled());

    if ( cpumask_test_cpu(smp_processor_id(), mask) )
        flush_area_local(va, flags);

    if ( !cpumask_subset(mask, cpumask_of(smp_processor_id())) )
    {
        spin_lock(&flush_lock);
        cpumask_and(&flush_cpumask, mask, &cpu_online_map);
        cpumask_clear_cpu(smp_processor_id(), &flush_cpumask);
        flush_va      = va;
        flush_flags   = flags;
        send_IPI_mask(&flush_cpumask, INVALIDATE_TLB_VECTOR);
        while ( !cpumask_empty(&flush_cpumask) )
            cpu_relax();
        spin_unlock(&flush_lock);
    }
}
Example #15
void leave_mm(int cpu)
{
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);

	/*
	 * It's plausible that we're in lazy TLB mode while our mm is init_mm.
	 * If so, our callers still expect us to flush the TLB, but there
	 * aren't any user TLB entries in init_mm to worry about.
	 *
	 * This needs to happen before any other sanity checks due to
	 * intel_idle's shenanigans.
	 */
	if (loaded_mm == &init_mm)
		return;

	/* Warn if we're not lazy. */
	WARN_ON(cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm)));

	switch_mm(NULL, &init_mm, NULL);
}
Example #16
static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
			    struct msr *msrs,
			    void (*msr_func) (void *info))
{
	struct msr_info rv;
	int this_cpu;

	memset(&rv, 0, sizeof(rv));

	rv.msrs	  = msrs;
	rv.msr_no = msr_no;

	this_cpu = get_cpu();

	if (cpumask_test_cpu(this_cpu, mask))
		msr_func(&rv);

	smp_call_function_many(mask, msr_func, &rv, 1);
	put_cpu();
}
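__rwmsr_on_cpus() backs the exported rdmsr_on_cpus()/wrmsr_on_cpus() helpers, which take a per-cpu struct msr buffer allocated with msrs_alloc(). A usage sketch; dump_aperf_all_cpus is a hypothetical function and MSR_IA32_APERF is only an example register.

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <asm/msr.h>

static void dump_aperf_all_cpus(void)
{
	struct msr *msrs;
	int cpu;

	msrs = msrs_alloc();		/* per-cpu result buffer */
	if (!msrs)
		return;

	/* read the MSR on every online CPU */
	rdmsr_on_cpus(cpu_online_mask, MSR_IA32_APERF, msrs);

	for_each_online_cpu(cpu)
		pr_info("cpu%d: APERF=%llu\n", cpu,
			(unsigned long long)per_cpu_ptr(msrs, cpu)->q);

	msrs_free(msrs);
}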
Example #17
static int __kprobes
arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
{
	int cpu;

	cpu = smp_processor_id();

	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
		static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;

		arch_spin_lock(&lock);
		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
		show_regs(regs);
		arch_spin_unlock(&lock);
		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
		return NMI_HANDLED;
	}

	return NMI_DONE;
}
/* This is the actual function which stops the CPU. It runs
 * in the context of a dedicated stopmachine workqueue. */
static void stop_cpu(struct work_struct *unused)
{
	enum stopmachine_state curstate = STOPMACHINE_NONE;
	struct stop_machine_data *smdata = &idle;
	int cpu = smp_processor_id();
	int err;

	if (!active_cpus) {
		if (cpu == cpumask_first(cpu_online_mask))
			smdata = &active;
	} else {
		if (cpumask_test_cpu(cpu, active_cpus))
			smdata = &active;
	}
	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read stopmachine_state. */
		cpu_relax();
		if (state != curstate) {
			curstate = state;
			switch (curstate) {
			case STOPMACHINE_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case STOPMACHINE_RUN:
				/* On multiple CPUs only a single error code
				 * is needed to tell that something failed. */
				err = smdata->fn(smdata->data);
				if (err)
					smdata->fnret = err;
				break;
			default:
				break;
			}
			ack_state();
		}
	} while (curstate != STOPMACHINE_EXIT);

	local_irq_enable();
}
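stop_cpu() is the per-CPU worker behind the stop_machine() API: the caller's function runs on the "active" CPU(s) while every other online CPU spins in the state machine above with IRQs disabled. A caller sketch under those semantics; my_patch_fn and apply_patch are hypothetical names.

#include <linux/stop_machine.h>

/* Runs with the whole machine quiesced (all other CPUs spinning, IRQs off),
 * so it is safe to do otherwise racy work such as patching kernel text. */
static int my_patch_fn(void *data)
{
	/* do the critical update here */
	return 0;
}

static int apply_patch(void)
{
	/* cpus == NULL: only the first online CPU runs my_patch_fn(),
	 * matching the "!active_cpus" branch in stop_cpu() above. */
	return stop_machine(my_patch_fn, NULL, NULL);
}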
/*
 * cpudl_find - find the best (later-dl) CPU in the system
 * @cp: the cpudl max-heap context
 * @p: the task
 * @later_mask: a mask to fill in with the selected CPUs (or NULL)
 *
 * Returns: int - best CPU (heap maximum if suitable)
 */
int cpudl_find(struct cpudl *cp, struct task_struct *p,
	       struct cpumask *later_mask)
{
	int best_cpu = -1;
	const struct sched_dl_entity *dl_se = &p->dl;

	if (later_mask && cpumask_and(later_mask, later_mask, cp->free_cpus)) {
		best_cpu = cpumask_any(later_mask);
		goto out;
	} else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
			dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
		best_cpu = cpudl_maximum(cp);
		if (later_mask)
			cpumask_set_cpu(best_cpu, later_mask);
	}

out:
	WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));

	return best_cpu;
}
/**
 * cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
 * @nb:	struct notifier_block * with callback info.
 * @event: value showing cpufreq event for which this function invoked.
 * @data: callback-specific data
 *
 * Callback to hijack the notification on cpufreq policy transition.
 * Every time there is a change in policy, we will intercept and
 * update the cpufreq policy with thermal constraints.
 *
 * Return: 0 (success)
 */
static int cpufreq_thermal_notifier(struct notifier_block *nb,
				    unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned long max_freq = 0;

	if (event != CPUFREQ_ADJUST || notify_device == NOTIFY_INVALID)
		return 0;

	if (cpumask_test_cpu(policy->cpu, &notify_device->allowed_cpus))
		max_freq = notify_device->cpufreq_val;

	/* Never exceed user_policy.max */
	if (max_freq > policy->user_policy.max)
		max_freq = policy->user_policy.max;

	if (policy->max != max_freq)
		cpufreq_verify_within_limits(policy, 0, max_freq);

	return 0;
}
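The callback above only takes effect once it is registered on the cpufreq policy notifier chain; every subsequent CPUFREQ_ADJUST pass then flows through it. A registration sketch; thermal_cpufreq_notifier_block and thermal_cpufreq_init are hypothetical names.

#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/notifier.h>

static struct notifier_block thermal_cpufreq_notifier_block = {
	.notifier_call = cpufreq_thermal_notifier,
};

static int __init thermal_cpufreq_init(void)
{
	/* hook into every cpufreq policy change (CPUFREQ_ADJUST events) */
	return cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
					 CPUFREQ_POLICY_NOTIFIER);
}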
Example #21
bool nmi_cpu_backtrace(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
		if (regs && cpu_in_idle(instruction_pointer(regs))) {
			pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n",
				cpu, instruction_pointer(regs));
		} else {
			pr_warn("NMI backtrace for cpu %d\n", cpu);
			if (regs)
				show_regs(regs);
			else
				dump_stack();
		}
		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
		return true;
	}

	return false;
}
/*
 * Broadcast the event to the cpus, which are set in the mask (mangled).
 */
static bool tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;
	bool local = false;

	/*
	 * Check whether the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		struct clock_event_device *bc = tick_broadcast_device.evtdev;

		cpumask_clear_cpu(cpu, mask);
		/*
		 * We only run the local handler, if the broadcast
		 * device is not hrtimer based. Otherwise we run into
		 * a hrtimer recursion.
		 *
		 * local timer_interrupt()
		 *   local_handler()
		 *     expire_hrtimers()
		 *       bc_handler()
		 *         local_handler()
		 *	     expire_hrtimers()
		 */
		local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
	return local;
}
Example #23
static void unmask_cic_irq(struct irq_data *d)
{
	volatile u32   *cic_msk_reg = CIC_VPE0_MSK_REG;
	int vpe;
#ifdef CONFIG_SMP
	unsigned int mtflags;
	unsigned long  flags;

	/*
	* Make sure we have IRQ affinity.  It may have changed while
	* we were processing the IRQ.
	*/
	if (!cpumask_test_cpu(smp_processor_id(), d->affinity))
		return;
#endif

	vpe = get_current_vpe();
	LOCK_VPE(flags, mtflags);
	cic_msk_reg[vpe] |= (1 << (d->irq - MSP_CIC_INTBASE));
	UNLOCK_VPE(flags, mtflags);
	cic_wmb();
}
Example #24
static u32 get_cur_val(const cpumask_t *mask)
{
    struct cpufreq_policy *policy;
    struct processor_performance *perf;
    struct drv_cmd cmd;
    unsigned int cpu = smp_processor_id();

    if (unlikely(cpumask_empty(mask)))
        return 0;

    if (!cpumask_test_cpu(cpu, mask))
        cpu = cpumask_first(mask);
    if (cpu >= nr_cpu_ids || !cpu_online(cpu))
        return 0;

    policy = per_cpu(cpufreq_cpu_policy, cpu);
    if (!policy || !cpufreq_drv_data[policy->cpu])
        return 0;    

    switch (cpufreq_drv_data[policy->cpu]->arch_cpu_flags) {
    case SYSTEM_INTEL_MSR_CAPABLE:
        cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
        cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
        break;
    case SYSTEM_IO_CAPABLE:
        cmd.type = SYSTEM_IO_CAPABLE;
        perf = cpufreq_drv_data[policy->cpu]->acpi_data;
        cmd.addr.io.port = perf->control_register.address;
        cmd.addr.io.bit_width = perf->control_register.bit_width;
        break;
    default:
        return 0;
    }

    cmd.mask = cpumask_of(cpu);

    drv_read(&cmd);
    return cmd.val;
}
Example #25
/**
 * ipi_get_hwirq - Get the hwirq associated with an IPI to a cpu
 * @irq:	linux irq number
 * @cpu:	the target cpu
 *
 * When dealing with coprocessor IPIs, we need to inform the coprocessor of
 * the hwirq it needs to use to receive and send IPIs.
 *
 * Returns hwirq value on success and INVALID_HWIRQ on failure.
 */
irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;

	if (!data || !ipimask || cpu >= nr_cpu_ids)
		return INVALID_HWIRQ;

	if (!cpumask_test_cpu(cpu, ipimask))
		return INVALID_HWIRQ;

	/*
	 * Get the real hardware irq number if the underlying implementation
	 * uses a separate irq per cpu. If the underlying implementation uses
	 * a single hardware irq for all cpus then the IPI send mechanism
	 * needs to take care of the cpu destinations.
	 */
	if (irq_domain_is_ipi_per_cpu(data->domain))
		data = irq_get_irq_data(irq + cpu - data->common->ipi_offset);

	return data ? irqd_to_hwirq(data) : INVALID_HWIRQ;
}
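A sketch of the caller side described in the kernel-doc: reserve an IPI from an IPI-capable irqdomain, translate it with ipi_get_hwirq(), and hand the hardware number to the coprocessor. export_ipi_to_coproc and ipi_domain are hypothetical; in a real driver the printed hwirq would be passed to the coprocessor's firmware instead.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/printk.h>

static int export_ipi_to_coproc(struct irq_domain *ipi_domain, unsigned int cpu)
{
	irq_hw_number_t hwirq;
	int virq;

	/* reserve an IPI that can target 'cpu' */
	virq = irq_reserve_ipi(ipi_domain, cpumask_of(cpu));
	if (virq <= 0)
		return -ENODEV;

	hwirq = ipi_get_hwirq(virq, cpu);
	if (hwirq == INVALID_HWIRQ)
		return -EINVAL;

	pr_info("IPI for cpu%u uses hwirq %lu\n", cpu, hwirq);
	return virq;
}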
static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = d->affinity;
	bool ret = false;

	/*
	 * If this is a per-CPU interrupt, or the affinity does not
	 * include this CPU, then we have nothing to do.
	 */
	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
		return false;

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		affinity = cpu_online_mask;
		ret = true;
	}

	__irq_set_affinity_locked(d, affinity);

	return ret;
}
static void cpufreq_idle(void)
{
	struct timer_list *t;
	u64 *cpu_time_in_idle;
	u64 *cpu_idle_exit_time;

	pm_idle_old();

	if (!cpumask_test_cpu(smp_processor_id(), policy->cpus))
		return;

	/* Timer to fire in 1-2 ticks, jiffie aligned. */
	t = &per_cpu(cpu_timer, smp_processor_id());
	cpu_idle_exit_time = &per_cpu(idle_exit_time, smp_processor_id());
	cpu_time_in_idle = &per_cpu(time_in_idle, smp_processor_id());

	if (timer_pending(t) == 0) {
		*cpu_time_in_idle = get_cpu_idle_time_us(
				smp_processor_id(), cpu_idle_exit_time);
		mod_timer(t, jiffies + 2);
	}
}
Example #28
static void __cpuinit jzsoc_boot_secondary(int cpu, struct task_struct *idle)
{
	int err;
	unsigned long flags, ctrl;


	printk("jzsoc_boot_secondary start\n");
	local_irq_save(flags);

	/* set reset bit! */
	ctrl = get_smp_ctrl();
	ctrl |= (1 << cpu);
	set_smp_ctrl(ctrl);

	/* blast all cache before booting secondary cpu */
	blast_dcache_jz();
	blast_icache_jz();

	cpm_clear_bit(15,CPM_CLKGR1);
	cpm_pwc_enable(scpu_pwc);
	preempt_disable();
	preempt_enable_no_resched();

	/* clear reset bit! */
	ctrl = get_smp_ctrl();
	ctrl &= ~(1 << cpu);
	set_smp_ctrl(ctrl);
wait:
	if (!cpumask_test_cpu(cpu, cpu_ready))
		goto wait;
	pr_info("[SMP] Booting CPU%d ...\n", cpu);
//	pr_debug("[SMP] Booting CPU%d ...\n", cpu);
	err = cpu_boot(cpu_logical_map(cpu), __KSTK_TOS(idle),
			(unsigned long)task_thread_info(idle));
	if (err != 0)
		pr_err("start_cpu(%i) returned %i\n", cpu, err);
	local_irq_restore(flags);
}
Example #29
static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
{
	struct nlm_soc_info *nodep;
	uint64_t syspcibase;
	uint32_t syscoremask;
	int core, n, cpu;

	for (n = 0; n < NLM_NR_NODES; n++) {
		syspcibase = nlm_get_sys_pcibase(n);
		if (nlm_read_reg(syspcibase, 0) == 0xffffffff)
			break;

		/* read cores in reset from SYS and account for boot cpu */
		nlm_node_init(n);
		nodep = nlm_get_node(n);
		syscoremask = nlm_read_sys_reg(nodep->sysbase, SYS_CPU_RESET);
		if (n == 0)
			syscoremask |= 1;

		for (core = 0; core < NLM_CORES_PER_NODE; core++) {
			/* see if the core exists */
			if ((syscoremask & (1 << core)) == 0)
				continue;

			/* see if at least the first thread is enabled */
			cpu = (n * NLM_CORES_PER_NODE + core)
						* NLM_THREADS_PER_CORE;
			if (!cpumask_test_cpu(cpu, wakeup_mask))
				continue;

			/* wake up the core */
			if (xlp_wakeup_core(nodep->sysbase, core))
				nodep->coremask |= 1u << core;
			else
				pr_err("Failed to enable core %d\n", core);
		}
	}
}
Example #30
/*
 * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void amd_e400_idle(void)
{
	if (!amd_e400_c1e_detected) {
		u32 lo, hi;

		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
			amd_e400_c1e_detected = true;
			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halt in AMD C1E");
			pr_info("System has AMD C1E enabled\n");
		}
	}

	if (amd_e400_c1e_detected) {
		int cpu = smp_processor_id();

		if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
			cpumask_set_cpu(cpu, amd_e400_c1e_mask);
			/* Force broadcast so ACPI can not interfere. */
			tick_broadcast_force();
			pr_info("Switch to broadcast mode on CPU%d\n", cpu);
		}
		tick_broadcast_enter();

		default_idle();

		/*
		 * The switch back from broadcast mode needs to be
		 * called with interrupts disabled.
		 */
		local_irq_disable();
		tick_broadcast_exit();
		local_irq_enable();
	} else
		default_idle();
}