Example #1
static u32 get_cur_val(cpumask_t mask)
{
    struct processor_performance *perf;
    struct drv_cmd cmd;

    if (unlikely(cpus_empty(mask)))
        return 0;

    switch (drv_data[first_cpu(mask)]->cpu_feature) {
    case SYSTEM_INTEL_MSR_CAPABLE:
        cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
        cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
        break;
    case SYSTEM_IO_CAPABLE:
        cmd.type = SYSTEM_IO_CAPABLE;
        perf = drv_data[first_cpu(mask)]->acpi_data;
        cmd.addr.io.port = perf->control_register.address;
        cmd.addr.io.bit_width = perf->control_register.bit_width;
        break;
    default:
        return 0;
    }

    cmd.mask = mask;

    drv_read(&cmd);
    return cmd.val;
}
Example #2
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = d->irq - _irqbase;
	cpumask_t	tmp = CPU_MASK_NONE;
	unsigned long	flags;
	int		i;

	pr_debug("%s(%d) called\n", __func__, irq);
	cpumask_and(&tmp, cpumask, cpu_online_mask);
	if (cpus_empty(tmp))
		return -1;

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);
	/* Re-route this IRQ */
	GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));

	/* Update the pcpu_masks */
	for (i = 0; i < NR_CPUS; i++)
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
	set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);

	cpumask_copy(d->affinity, cpumask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK_NOCOPY;
}
Example #3
static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	cpumask_t	tmp = CPU_MASK_NONE;
	unsigned long	flags;
	int		i;

	irq -= _irqbase;
	pr_debug("%s(%d) called\n", __func__, irq);
	cpumask_and(&tmp, cpumask, cpu_online_mask);
	if (cpus_empty(tmp))
		return -1;

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);
#ifndef CONFIG_RALINK_SOC
	for (;;) {
#endif
		/* Re-route this IRQ */
		GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));

		/* Update the pcpu_masks */
		for (i = 0; i < NR_CPUS; i++)
			clear_bit(irq, pcpu_masks[i].pcpu_mask);
		set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
#ifndef CONFIG_RALINK_SOC
	}
#endif
	cpumask_copy(irq_desc[irq].affinity, cpumask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
Example #4
static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
{
	struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
	nasid_t nasid;
	int slice;

	nasid = cpuid_to_nasid(first_cpu(mask));
	slice = cpuid_to_slice(first_cpu(mask));

	list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
				 sn_irq_lh[irq], list)
		(void)sn_retarget_vector(sn_irq_info, nasid, slice);
}
Example #5
static int ia64_set_msi_irq_affinity(struct irq_data *idata,
				     const cpumask_t *cpu_mask, bool force)
{
	struct msi_msg msg;
	u32 addr, data;
	int cpu = first_cpu(*cpu_mask);
	unsigned int irq = idata->irq;

	if (!cpu_online(cpu))
		return -1;

	if (irq_prepare_move(irq, cpu))
		return -1;

	get_cached_msi_msg(irq, &msg);

	addr = msg.address_lo;
	addr &= MSI_ADDR_DEST_ID_MASK;
	addr |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
	msg.address_lo = addr;

	data = msg.data;
	data &= MSI_DATA_VECTOR_MASK;
	data |= MSI_DATA_VECTOR(irq_to_vector(irq));
	msg.data = data;

	write_msi_msg(irq, &msg);
	cpumask_copy(idata->affinity, cpumask_of(cpu));

	return 0;
}
Example #6
/*
 * Shutdown an event device on a given cpu:
 *
 * This is called on a live CPU when a CPU is dead, so we cannot
 * access the hardware device itself.
 * We just set the mode and remove it from the lists.
 */
static void tick_shutdown(unsigned int *cpup)
{
	struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
	struct clock_event_device *dev = td->evtdev;
	unsigned long flags;

	spin_lock_irqsave(&tick_device_lock, flags);
	td->mode = TICKDEV_MODE_PERIODIC;
	if (dev) {
		/*
		 * Prevent the clock events layer from trying to call
		 * the set mode function!
		 */
		dev->mode = CLOCK_EVT_MODE_UNUSED;
		clockevents_exchange_device(dev, NULL);
		td->evtdev = NULL;
	}
	/* Transfer the do_timer job away from this cpu */
	if (*cpup == tick_do_timer_cpu) {
		int cpu = first_cpu(cpu_online_map);

		tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : -1;
	}
	spin_unlock_irqrestore(&tick_device_lock, flags);
}
Example #7
static void rtas_event_scan(struct work_struct *w)
{
	unsigned int cpu;

	do_event_scan();

	get_online_cpus();

	cpu = next_cpu(smp_processor_id(), cpu_online_map);
	if (cpu == NR_CPUS) {
		cpu = first_cpu(cpu_online_map);

		if (first_pass) {
			first_pass = 0;
			event_scan_delay = 30*HZ/rtas_event_scan_rate;

			if (surveillance_timeout != -1) {
				pr_debug("rtasd: enabling surveillance\n");
				enable_surveillance(surveillance_timeout);
				pr_debug("rtasd: surveillance enabled\n");
			}
		}
	}

	schedule_delayed_work_on(cpu, &event_scan_work,
		__round_jiffies_relative(event_scan_delay, cpu));

	put_online_cpus();
}
Example #8
/*
 * Broadcast the event to the CPUs which are set in the mask
 */
static void tick_do_broadcast(cpumask_t mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check if the current cpu is in the mask
	 */
	if (cpu_isset(cpu, mask)) {
		cpu_clear(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpus_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		cpu = first_cpu(mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->broadcast(mask);
	}
}
Example #9
static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
				   unsigned long count, void *data)
{
	unsigned int irq = (int)(long)data, full_count = count, err;
	cpumask_t new_value, tmp;

	if (!irq_desc[irq].handler->set_affinity || no_irq_affinity)
		return -EIO;

	err = cpumask_parse(buffer, count, new_value);
	if (err)
		return err;

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	cpus_and(tmp, new_value, cpu_online_map);
	if (cpus_empty(tmp))
		return -EINVAL;

	irq_affinity[irq] = new_value;
	irq_desc[irq].handler->set_affinity(irq,
					cpumask_of_cpu(first_cpu(new_value)));

	return full_count;
}
Example #10
static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
{
	struct msi_msg msg;
	u32 addr, data;
	int cpu = first_cpu(cpu_mask);

	if (!cpu_online(cpu))
		return;

	if (irq_prepare_move(irq, cpu))
		return;

	read_msi_msg(irq, &msg);

	addr = msg.address_lo;
	addr &= MSI_ADDR_DESTID_MASK;
	addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
	msg.address_lo = addr;

	data = msg.data;
	data &= MSI_DATA_VECTOR_MASK;
	data |= MSI_DATA_VECTOR(irq_to_vector(irq));
	msg.data = data;

	write_msi_msg(irq, &msg);
	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
}
Example #11
File: msi_ia64.c Project: ivucica/linux
int ia64_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
{
	struct msi_msg	msg;
	unsigned long	dest_phys_id;
	unsigned int	vector;

	dest_phys_id = cpu_physical_id(first_cpu(cpu_online_map));
	vector = irq;

	msg.address_hi = 0;
	msg.address_lo =
		MSI_ADDR_HEADER |
		MSI_ADDR_DESTMODE_PHYS |
		MSI_ADDR_REDIRECTION_CPU |
		MSI_ADDR_DESTID_CPU(dest_phys_id);

	msg.data =
		MSI_DATA_TRIGGER_EDGE |
		MSI_DATA_LEVEL_ASSERT |
		MSI_DATA_DELIVERY_FIXED |
		MSI_DATA_VECTOR(vector);

	write_msi_msg(irq, &msg);
	set_irq_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);

	return 0;
}
Example #12
int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	struct msi_msg	msg;
	unsigned long	dest_phys_id;
	int	irq, vector;
	cpumask_t mask;

	irq = create_irq();
	if (irq < 0)
		return irq;

	set_irq_msi(irq, desc);
	cpus_and(mask, irq_to_domain(irq), cpu_online_map);
	dest_phys_id = cpu_physical_id(first_cpu(mask));
	vector = irq_to_vector(irq);

	msg.address_hi = 0;
	msg.address_lo =
		MSI_ADDR_HEADER |
		MSI_ADDR_DESTMODE_PHYS |
		MSI_ADDR_REDIRECTION_CPU |
		MSI_ADDR_DESTID_CPU(dest_phys_id);

	msg.data =
		MSI_DATA_TRIGGER_EDGE |
		MSI_DATA_LEVEL_ASSERT |
		MSI_DATA_DELIVERY_FIXED |
		MSI_DATA_VECTOR(vector);

	write_msi_msg(irq, &msg);
	set_irq_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);

	return 0;
}
Example #13
File: xics.c Project: matgnt/linux-2.6
static int get_irq_server(unsigned int virq, cpumask_t cpumask,
			  unsigned int strict_check)
{
	int server;
	/* For the moment only implement delivery to all cpus or one cpu */
	cpumask_t tmp = CPU_MASK_NONE;

	if (!distribute_irqs)
		return default_server;

	if (!cpus_equal(cpumask, CPU_MASK_ALL)) {
		cpus_and(tmp, cpu_online_map, cpumask);

		server = first_cpu(tmp);

		if (server < NR_CPUS)
			return get_hard_smp_processor_id(server);

		if (strict_check)
			return -1;
	}

	if (cpus_equal(cpu_online_map, cpu_present_map))
		return default_distrib_server;

	return default_server;
}
Example #14
/* this assigns a "stagger" to the current CPU, which is used throughout
   the code in this module as an extra array offset, to select the "even"
   or "odd" part of all the divided resources. */
static unsigned int get_stagger(void)
{
#ifdef CONFIG_SMP
	int cpu = smp_processor_id();
	return (cpu != first_cpu(cpu_sibling_map[cpu]));
#endif	
	return 0;
}
Example #15
static unsigned int next_bind_cpu(cpumask_t map)
{
	static unsigned int bind_cpu;
	bind_cpu = next_cpu(bind_cpu, map);
	if (bind_cpu >= NR_CPUS)
		bind_cpu = first_cpu(map);
	return bind_cpu;
}
Example #16
File: smp.c Project: ColinIanKing/m576
void flush_all_cpu_caches(void)
{
	unsigned int cpu, cluster, target_cpu;

	preempt_disable();
	cpu = smp_processor_id();
	cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);

	if (!cluster)
		target_cpu = first_cpu(hmp_slow_cpu_mask);
	else
		target_cpu = first_cpu(hmp_fast_cpu_mask);

	smp_call_function(flush_all_cpu_cache, NULL, 1);
	smp_call_function_single(target_cpu, flush_all_cluster_cache, NULL, 1);
	flush_cache_all();

	preempt_enable();
}
Example #17
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)	/* just in case, cpu 0 is not the first */
		*pos = first_cpu(cpu_online_map);
	else
		*pos = next_cpu_nr(*pos - 1, cpu_online_map);
	if ((*pos) < nr_cpu_ids)
		return &cpu_data(*pos);
	return NULL;
}
Example #18
static void start_event_scan(void)
{
	printk(KERN_DEBUG "RTAS daemon started\n");
	pr_debug("rtasd: will sleep for %d milliseconds\n",
		 (30000 / rtas_event_scan_rate));

	/* Retrieve errors from NVRAM, if any */
	retreive_nvram_error_log();

	schedule_delayed_work_on(first_cpu(cpu_online_map), &event_scan_work,
				 event_scan_delay);
}
Example #19
int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
{
	int cpu_dest;

	/* timer and ipi have to always be received on all CPUs */
	if (irqd_is_per_cpu(d))
		return -EINVAL;

	/* whatever mask they set, we just allow one CPU */
	cpu_dest = first_cpu(*dest);

	return cpu_dest;
}
Example #20
File: irq.c Project: 4oh4ed/linux
int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
{
	int cpu_dest;

	/* timer and ipi have to always be received on all CPUs */
	if (irqd_is_per_cpu(d))
		return -EINVAL;

	/* whatever mask they set, we just allow one CPU */
	cpu_dest = first_cpu(*dest);

	return cpu_dest;
}
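For context, a hedged sketch of how such a check is typically consumed by the matching set_affinity handler; the handler below is an illustrative assumption, not code from the project above.

static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
				bool force)
{
	int cpu_dest;

	/* reject per-CPU interrupts, otherwise pick one target CPU */
	cpu_dest = cpu_check_affinity(d, dest);
	if (cpu_dest < 0)
		return -1;

	cpumask_copy(d->affinity, dest);

	return 0;
}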
Example #21
static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
{
	int cpu;
	cpumask_t tmp_map;
	char *bootup_cpu = "";

	if (!(m->mpc_cpuflag & CPU_ENABLED)) {
		disabled_cpus++;
		return;
	}
	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
		bootup_cpu = " (Bootup-CPU)";
		boot_cpu_id = m->mpc_apicid;
	}

	printk(KERN_INFO "Processor #%d%s\n", m->mpc_apicid, bootup_cpu);

	if (num_processors >= NR_CPUS) {
		printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
			" Processor ignored.\n", NR_CPUS);
		return;
	}

	num_processors++;
	cpus_complement(tmp_map, cpu_present_map);
	cpu = first_cpu(tmp_map);

	physid_set(m->mpc_apicid, phys_cpu_present_map);
	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
		/*
		 * x86_bios_cpu_apicid is required to have processors listed
		 * in same order as logical cpu numbers. Hence the first
		 * entry is BSP, and so on.
		 */
		cpu = 0;
	}
	/* are we being called early in kernel startup? */
	if (x86_cpu_to_apicid_early_ptr) {
		u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr;
		u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;

		cpu_to_apicid[cpu] = m->mpc_apicid;
		bios_cpu_apicid[cpu] = m->mpc_apicid;
	} else {
		per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid;
		per_cpu(x86_bios_cpu_apicid, cpu) = m->mpc_apicid;
	}

	cpu_set(cpu, cpu_possible_map);
	cpu_set(cpu, cpu_present_map);
}
Example #22
int
acpi_map_lsapic(acpi_handle handle, int *pcpu)
{
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *obj;
	struct acpi_table_lsapic *lsapic;
	cpumask_t tmp_map;
	long physid;
	int cpu;
 
	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
		return -EINVAL;

	if (!buffer.length ||  !buffer.pointer)
		return -EINVAL;
 
	obj = buffer.pointer;
	if (obj->type != ACPI_TYPE_BUFFER ||
	    obj->buffer.length < sizeof(*lsapic)) {
		acpi_os_free(buffer.pointer);
		return -EINVAL;
	}

	lsapic = (struct acpi_table_lsapic *)obj->buffer.pointer;

	if ((lsapic->header.type != ACPI_MADT_LSAPIC) ||
	    (!lsapic->flags.enabled)) {
		acpi_os_free(buffer.pointer);
		return -EINVAL;
	}

	physid = ((lsapic->id <<8) | (lsapic->eid));

	acpi_os_free(buffer.pointer);
	buffer.length = ACPI_ALLOCATE_BUFFER;
	buffer.pointer = NULL;

	cpus_complement(tmp_map, cpu_present_map);
	cpu = first_cpu(tmp_map);
	if (cpu >= NR_CPUS)
		return -EINVAL;

	acpi_map_cpu2node(handle, cpu, physid);

	cpu_set(cpu, cpu_present_map);
	ia64_cpu_to_sapicid[cpu] = physid;
	ia64_acpiid_to_sapicid[lsapic->acpi_id] = ia64_cpu_to_sapicid[cpu];

	*pcpu = cpu;
	return(0);
}
Example #23
static void gic_set_cpu(unsigned int irq, cpumask_t mask_val)
{
    void __iomem *reg = gic_dist_base + GIC_DIST_TARGET + (irq & ~3);
    unsigned int shift = (irq % 4) * 8;
    unsigned int cpu = first_cpu(mask_val);
    u32 val;

    spin_lock(&irq_controller_lock);
    irq_desc[irq].cpu = cpu;
    val = readl(reg) & ~(0xff << shift);
    val |= 1 << (cpu + shift);
    writel(val, reg);
    spin_unlock(&irq_controller_lock);
}
Example #24
File: irq.c Project: ryos36/xen-arm
void gic_set_cpu(unsigned int irq, cpumask_t mask_val)
{
        void *reg = gic_dist_base(irq) + ICDIPTR + (gic_irq(irq) & ~3);
        unsigned int shift = (irq % 4) * 8;
        unsigned int cpu = first_cpu(mask_val);
        u32 val;

        spin_lock(&irq_controller_lock);
        irq_desc[irq].cpu = cpu;
        val = mmio_readl(reg) & ~(0xff << shift);
        val |= 1 << (cpu + shift);
        mmio_writel(val, reg);
        spin_unlock(&irq_controller_lock);
}
Example #25
File: irq.c Project: danhamilt1/linux
/* ONLY called from entry.S:intr_extint() */
void do_cpu_irq_mask(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	unsigned long eirr_val;
	int irq, cpu = smp_processor_id();
#ifdef CONFIG_SMP
	struct irq_desc *desc;
	cpumask_t dest;
#endif

	old_regs = set_irq_regs(regs);
	local_irq_disable();
	irq_enter();

	eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
	if (!eirr_val)
		goto set_out;
	irq = eirr_to_irq(eirr_val);

#ifdef CONFIG_SMP
	desc = irq_to_desc(irq);
	cpumask_copy(&dest, desc->irq_data.affinity);
	if (irqd_is_per_cpu(&desc->irq_data) &&
	    !cpu_isset(smp_processor_id(), dest)) {
		int cpu = first_cpu(dest);

		printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
		       irq, smp_processor_id(), cpu);
		gsc_writel(irq + CPU_IRQ_BASE,
			   per_cpu(cpu_data, cpu).hpa);
		goto set_out;
	}
#endif
	stack_overflow_check(regs);

#ifdef CONFIG_IRQSTACKS
	execute_on_irq_stack(&generic_handle_irq, irq);
#else
	generic_handle_irq(irq);
#endif /* CONFIG_IRQSTACKS */

 out:
	irq_exit();
	set_irq_regs(old_regs);
	return;

 set_out:
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
	goto out;
}
Example #26
static unsigned int cluster_cpu_mask_to_apicid(cpumask_t cpumask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	cpu = first_cpu(cpumask);
	if ((unsigned)cpu < NR_CPUS)
		return x86_cpu_to_apicid[cpu];
	else
		return BAD_APICID;
}
Example #27
void __xntimer_init(struct xntimer *timer,
		    struct xnclock *clock,
		    void (*handler)(struct xntimer *timer),
		    struct xnsched *sched,
		    int flags)
{
	spl_t s __maybe_unused;
	int cpu;

#ifdef CONFIG_XENO_OPT_EXTCLOCK
	timer->clock = clock;
#endif
	xntimerh_init(&timer->aplink);
	xntimerh_date(&timer->aplink) = XN_INFINITE;
	xntimer_set_priority(timer, XNTIMER_STDPRIO);
	timer->status = (XNTIMER_DEQUEUED|(flags & XNTIMER_INIT_MASK));
	timer->handler = handler;
	timer->interval_ns = 0;
	/*
	 * Timers are affine to a scheduler slot, which is in turn
	 * bound to a real-time CPU. If no scheduler affinity was
	 * given, assign the timer to the scheduler slot of the
	 * current CPU if real-time, otherwise default to the
	 * scheduler slot of the first real-time CPU.
	 */
	if (sched)
		timer->sched = sched;
	else {
		cpu = ipipe_processor_id();
		if (!xnsched_supported_cpu(cpu))
			cpu = first_cpu(xnsched_realtime_cpus);

		timer->sched = xnsched_struct(cpu);
	}

#ifdef CONFIG_XENO_OPT_STATS
#ifdef CONFIG_XENO_OPT_EXTCLOCK
	timer->tracker = clock;
#endif
	ksformat(timer->name, XNOBJECT_NAME_LEN, "%d/%s",
		 current->pid, current->comm);
	xntimer_reset_stats(timer);
	xnlock_get_irqsave(&nklock, s);
	list_add_tail(&timer->next_stat, &clock->timerq);
	clock->nrtimers++;
	xnvfile_touch(&clock->timer_vfile);
	xnlock_put_irqrestore(&nklock, s);
#endif /* CONFIG_XENO_OPT_STATS */
}
Example #28
int acpi_map_lsapic(acpi_handle handle, int *pcpu)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	struct acpi_madt_local_sapic *lsapic;
	cpumask_t tmp_map;
	long physid;
	int cpu;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
		return -EINVAL;

	if (!buffer.length || !buffer.pointer)
		return -EINVAL;

	obj = buffer.pointer;
	if (obj->type != ACPI_TYPE_BUFFER) {
		kfree(buffer.pointer);
		return -EINVAL;
	}

	lsapic = (struct acpi_madt_local_sapic *)obj->buffer.pointer;

	if ((lsapic->header.type != ACPI_MADT_TYPE_LOCAL_SAPIC) ||
	    (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))) {
		kfree(buffer.pointer);
		return -EINVAL;
	}

	physid = ((lsapic->id << 8) | (lsapic->eid));

	kfree(buffer.pointer);
	buffer.length = ACPI_ALLOCATE_BUFFER;
	buffer.pointer = NULL;

	cpus_complement(tmp_map, cpu_present_map);
	cpu = first_cpu(tmp_map);
	if (cpu >= NR_CPUS)
		return -EINVAL;

	acpi_map_cpu2node(handle, cpu, physid);

	cpu_set(cpu, cpu_present_map);
	ia64_cpu_to_sapicid[cpu] = physid;

	*pcpu = cpu;
	return (0);
}
Example #29
File: irq-gic.c Project: 274914765/C
static void gic_set_affinity(unsigned int irq, cpumask_t cpumask)
{
    cpumask_t    tmp = CPU_MASK_NONE;
    unsigned long    flags;
    int        i;

    pr_debug("%s called\n", __func__);
    irq -= _irqbase;

    cpus_and(tmp, cpumask, cpu_online_map);
    if (cpus_empty(tmp))
        return;

    /* Assumption : cpumask refers to a single CPU */
    spin_lock_irqsave(&gic_lock, flags);
    /* Re-route this IRQ */
    GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));

    /*
     * FIXME: assumption that _intrmap is ordered and has no holes
     */

    /* Update the intr_map */
    _intrmap[irq].cpunum = first_cpu(tmp);

    /* Update the pcpu_masks */
    for (i = 0; i < NR_CPUS; i++)
        clear_bit(irq, pcpu_masks[i].pcpu_mask);
    set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);

    irq_desc[irq].affinity = cpumask;
    spin_unlock_irqrestore(&gic_lock, flags);

}
Example #30
File: msi_ia64.c Project: ivucica/linux
static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
{
	struct msi_msg msg;
	u32 addr;

	read_msi_msg(irq, &msg);

	addr = msg.address_lo;
	addr &= MSI_ADDR_DESTID_MASK;
	addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(first_cpu(cpu_mask)));
	msg.address_lo = addr;

	write_msi_msg(irq, &msg);
	set_native_irq_info(irq, cpu_mask);
}