Example #1
File: msi.c Project: rcplay/snake-os
static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
{
    struct msi_desc *entry;
    struct msg_address address;
    unsigned int irq = vector;
    unsigned int dest_cpu = first_cpu(cpu_mask);

    entry = (struct msi_desc *)msi_desc[vector];
    if (!entry || !entry->dev)
        return;

    switch (entry->msi_attrib.type) {
    case PCI_CAP_ID_MSI:
    {
        int pos;

        if (!(pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI)))
            return;

        pci_read_config_dword(entry->dev, msi_lower_address_reg(pos),
                              &address.lo_address.value);
        address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
        address.lo_address.value |= (cpu_physical_id(dest_cpu) <<
                                     MSI_TARGET_CPU_SHIFT);
        entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu);
        pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
                               address.lo_address.value);
        set_native_irq_info(irq, cpu_mask);
        break;
    }
    case PCI_CAP_ID_MSIX:
    {
        int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
                     PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET;

        address.lo_address.value = readl(entry->mask_base + offset);
        address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
        address.lo_address.value |= (cpu_physical_id(dest_cpu) <<
                                     MSI_TARGET_CPU_SHIFT);
        entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu);
        writel(address.lo_address.value, entry->mask_base + offset);
        set_native_irq_info(irq, cpu_mask);
        break;
    }
    default:
        break;
    }
}
Example #2
static int dmar_msi_set_affinity(struct irq_data *data,
				 const struct cpumask *mask, bool force)
{
	unsigned int irq = data->irq;
	struct irq_cfg *cfg = irq_cfg + irq;
	struct msi_msg msg;
	int cpu = cpumask_first(mask);

	if (!cpu_online(cpu))
		return -1;

	if (irq_prepare_move(irq, cpu))
		return -1;

	dmar_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));

	dmar_msi_write(irq, &msg);
	cpumask_copy(data->affinity, mask);

	return 0;
}
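
Both handlers above share the same retargeting step: clear the destination-ID
field in the low MSI address word, then OR in the physical APIC ID of the
chosen CPU. A minimal standalone sketch of that bit manipulation (the EX_*
mask and shift are illustrative assumptions, not the kernel's MSI_ADDR_*
definitions):

#include <stdint.h>

/* Assumed layout: destination APIC ID in bits 12..19 of address_lo, as in
 * the standard PCI MSI address encoding. */
#define EX_MSI_ADDR_DEST_ID_MASK	0x000ff000u
#define EX_MSI_ADDR_DEST_ID(id)		(((uint32_t)(id) & 0xffu) << 12)

static uint32_t ex_retarget_msi(uint32_t address_lo, uint32_t apic_id)
{
	address_lo &= ~EX_MSI_ADDR_DEST_ID_MASK;	/* drop the old destination */
	address_lo |= EX_MSI_ADDR_DEST_ID(apic_id);	/* install the new target */
	return address_lo;
}
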
Example #3
/*
 * Build cpu to node mapping and initialize the per node cpu masks.
 */
void __init
build_cpu_to_node_map (void)
{
	int cpu, i, node;

	for(node=0; node<MAX_NUMNODES; node++)
		cpus_clear(node_to_cpu_mask[node]);
	for(cpu = 0; cpu < NR_CPUS; ++cpu) {
		/*
		 * All Itanium NUMA platforms I know use ACPI, so maybe we
		 * can drop this ifdef completely.                    [EF]
		 */
#ifdef CONFIG_ACPI_NUMA
		node = -1;
		for (i = 0; i < NR_CPUS; ++i)
			if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
				node = node_cpuid[i].nid;
				break;
			}
#else
#		error Fixme: Dunno how to build CPU-to-node map.
#endif
		cpu_to_node_map[cpu] = (node >= 0) ? node : 0;
		if (node >= 0)
			cpu_set(cpu, node_to_cpu_mask[node]);
	}
}
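
build_cpu_to_node_map() fills two structures: a forward cpu-to-node array and
a per-node CPU mask. A toy user-space model of what gets built (plain C with a
hypothetical 4-CPU/2-node topology, not kernel code):

#include <stdio.h>

enum { EX_NR_CPUS = 4, EX_MAX_NODES = 2 };

static int ex_cpu_to_node[EX_NR_CPUS];
static unsigned long ex_node_to_cpu_mask[EX_MAX_NODES];

static void ex_map_cpu_to_node(int cpu, int node)
{
	ex_cpu_to_node[cpu] = node;			/* forward map  */
	ex_node_to_cpu_mask[node] |= 1UL << cpu;	/* reverse mask */
}

int main(void)
{
	int cpu;

	/* CPUs 0 and 1 on node 0; CPUs 2 and 3 on node 1. */
	for (cpu = 0; cpu < EX_NR_CPUS; cpu++)
		ex_map_cpu_to_node(cpu, cpu / 2);

	/* Prints: cpu 2 -> node 1, node 1 mask 0xc */
	printf("cpu 2 -> node %d, node 1 mask 0x%lx\n",
	       ex_cpu_to_node[2], ex_node_to_cpu_mask[1]);
	return 0;
}
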
Example #4
File: msi_ia64.c Project: ivucica/linux
int ia64_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
{
	struct msi_msg	msg;
	unsigned long	dest_phys_id;
	unsigned int	vector;

	dest_phys_id = cpu_physical_id(first_cpu(cpu_online_map));
	vector = irq;

	msg.address_hi = 0;
	msg.address_lo =
		MSI_ADDR_HEADER |
		MSI_ADDR_DESTMODE_PHYS |
		MSI_ADDR_REDIRECTION_CPU |
		MSI_ADDR_DESTID_CPU(dest_phys_id);

	msg.data =
		MSI_DATA_TRIGGER_EDGE |
		MSI_DATA_LEVEL_ASSERT |
		MSI_DATA_DELIVERY_FIXED |
		MSI_DATA_VECTOR(vector);

	write_msi_msg(irq, &msg);
	set_irq_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);

	return 0;
}
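
The msg.data word packs the delivery attributes and the vector into 32 bits.
A sketch of the standard PCI MSI data encoding that the MSI_DATA_* macros
above presumably expand to (treat the exact shifts as assumptions):

#include <stdint.h>

static uint32_t ex_msi_data(uint8_t vector)
{
	return (0u << 15) |	/* trigger mode: 0 = edge       */
	       (1u << 14) |	/* level: assert                */
	       (0u << 8)  |	/* delivery mode: 000 = fixed   */
	       vector;		/* vector number in bits 0..7   */
}
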
Example #5
/* Setup timer on comparator RTC1 */
static void mmtimer_setup_int_0(int cpu, u64 expires)
{
	u64 val;

	/* Disable interrupt */
	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), 0UL);

	/* Initialize comparator value */
	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPB), -1L);

	/* Clear pending bit */
	mmtimer_clr_int_pending(0);

	val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC1_INT_CONFIG_IDX_SHFT) |
		((u64)cpu_physical_id(cpu) <<
			SH_RTC1_INT_CONFIG_PID_SHFT);

	/* Set configuration */
	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_CONFIG), val);

	/* Enable RTC interrupts */
	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), 1UL);

	/* Arm the comparator with the expiry time */
	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPB), expires);
}
Example #6
int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
{
#ifdef CONFIG_SMP
	int i;
#endif
	int apic_id = -1;

	apic_id = map_mat_entry(handle, type, acpi_id);
	if (apic_id == -1)
		apic_id = map_madt_entry(type, acpi_id);
	if (apic_id == -1) {
		if (acpi_id == 0)
			return acpi_id;
		else
			return apic_id;
	}

#ifdef CONFIG_SMP
	for_each_possible_cpu(i) {
		if (cpu_physical_id(i) == apic_id)
			return i;
	}
#else
	/* In UP kernel, only processor 0 is valid */
	if (apic_id == 0)
		return apic_id;
#endif
	return -1;
}
Example #7
static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
{
	struct msi_msg msg;
	u32 addr, data;
	int cpu = first_cpu(cpu_mask);

	if (!cpu_online(cpu))
		return;

	if (irq_prepare_move(irq, cpu))
		return;

	read_msi_msg(irq, &msg);

	addr = msg.address_lo;
	addr &= MSI_ADDR_DESTID_MASK;
	addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
	msg.address_lo = addr;

	data = msg.data;
	data &= MSI_DATA_VECTOR_MASK;
	data |= MSI_DATA_VECTOR(irq_to_vector(irq));
	msg.data = data;

	write_msi_msg(irq, &msg);
	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
}
Example #8
static void
sn_set_affinity_irq(unsigned int irq, unsigned long cpu)
{
	int redir = 0;
	struct pcibr_intr_list_t *p = pcibr_intr_list[irq];
	pcibr_intr_t intr;
	extern void sn_shub_redirect_intr(pcibr_intr_t intr, unsigned long cpu);
	extern void sn_tio_redirect_intr(pcibr_intr_t intr, unsigned long cpu);

	if (p == NULL)
		return;

	intr = p->intr;

	if (intr == NULL)
		return;

	if (IS_PIC_SOFT(intr->bi_soft)) {
		sn_shub_redirect_intr(intr, cpu);
	// Defer TIO for now.
	// } else if (IS_TIO_SOFT(intr->bi_soft)) {
		// sn_tio_redirect_intr(intr, cpu);
	} else {
		return;
	}
	(void) set_irq_affinity_info(irq, cpu_physical_id(cpu), redir);
}
Example #9
void send_IPI_mask_x2apic(const cpumask_t *cpumask, int vector)
{
    unsigned int cpu, cfg;
    unsigned long flags;

    /*
     * Ensure that any synchronisation data written in program order by this
     * CPU is seen by notified remote CPUs. The WRMSR contained within
     * apic_icr_write() can otherwise be executed early.
     * 
     * The reason mb() is sufficient here is subtle: the register arguments
     * to WRMSR must depend on a memory read executed after the barrier. This
     * is guaranteed by cpu_physical_id(), which reads from a global array (and
     * so cannot be hoisted above the barrier even by a clever compiler).
     */
    mb();

    local_irq_save(flags);

    cfg = APIC_DM_FIXED | 0 /* no shorthand */ | APIC_DEST_PHYSICAL | vector;
    for_each_cpu_mask ( cpu, *cpumask )
        if ( cpu != smp_processor_id() )
            apic_wrmsr(APIC_ICR, cfg, cpu_physical_id(cpu));

    local_irq_restore(flags);
}
Example #10
int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	struct msi_msg	msg;
	unsigned long	dest_phys_id;
	int	irq, vector;
	cpumask_t mask;

	irq = create_irq();
	if (irq < 0)
		return irq;

	set_irq_msi(irq, desc);
	cpus_and(mask, irq_to_domain(irq), cpu_online_map);
	dest_phys_id = cpu_physical_id(first_cpu(mask));
	vector = irq_to_vector(irq);

	msg.address_hi = 0;
	msg.address_lo =
		MSI_ADDR_HEADER |
		MSI_ADDR_DESTMODE_PHYS |
		MSI_ADDR_REDIRECTION_CPU |
		MSI_ADDR_DESTID_CPU(dest_phys_id);

	msg.data =
		MSI_DATA_TRIGGER_EDGE |
		MSI_DATA_LEVEL_ASSERT |
		MSI_DATA_DELIVERY_FIXED |
		MSI_DATA_VECTOR(vector);

	write_msi_msg(irq, &msg);
	set_irq_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);

	return 0;
}
Example #11
File: xpc_uv.c Project: 020gzh/linux
static int
xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
{
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
			UV_AFFINITY_CPU);
	if (mq->irq < 0)
		return mq->irq;

	mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_ACTIVATE;
	else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_NOTIFY;
	else
		return -EINVAL;

	mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
#else
	#error not a supported configuration
#endif

	return 0;
}
Example #12
File: x2apic.c Project: 0day-ci/xen
static void send_IPI_mask_x2apic_phys(const cpumask_t *cpumask, int vector)
{
    unsigned int cpu;
    unsigned long flags;
    uint64_t msr_content;

    /*
     * Ensure that any synchronisation data written in program order by this
     * CPU is seen by notified remote CPUs. The WRMSR contained within
     * apic_icr_write() can otherwise be executed early.
     * 
     * The reason mb() is sufficient here is subtle: the register arguments
     * to WRMSR must depend on a memory read executed after the barrier. This
     * is guaranteed by cpu_physical_id(), which reads from a global array (and
     * so cannot be hoisted above the barrier even by a clever compiler).
     */
    mb();

    local_irq_save(flags);

    for_each_cpu ( cpu, cpumask )
    {
        if ( !cpu_online(cpu) || (cpu == smp_processor_id()) )
            continue;
        msr_content = cpu_physical_id(cpu);
        msr_content = (msr_content << 32) | APIC_DM_FIXED |
                      APIC_DEST_PHYSICAL | vector;
        apic_wrmsr(APIC_ICR, msr_content);
    }

    local_irq_restore(flags);
}
Example #13
static u8 ioat_dca_get_tag(struct dca_provider *dca,
			    struct device *dev,
			    int cpu)
{
	u8 tag;

	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	int i, apic_id, bit, value;
	u8 entry;

	tag = 0;
	apic_id = cpu_physical_id(cpu);

	for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
		entry = ioatdca->tag_map[i];
		if (entry & DCA3_TAG_MAP_BIT_TO_SEL) {
			bit = entry &
				~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV);
			value = (apic_id & (1 << bit)) ? 1 : 0;
		} else if (entry & DCA3_TAG_MAP_BIT_TO_INV) {
			bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV;
			value = (apic_id & (1 << bit)) ? 0 : 1;
		} else {
			value = (entry & DCA3_TAG_MAP_LITERAL_VAL) ? 1 : 0;
		}
		tag |= (value << i);
	}

	return tag;
}
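
Each tag_map entry either selects one APIC-ID bit (optionally inverted) or
contributes a literal bit to the DCA tag. A hand-worked sketch with a
hypothetical three-entry map (the EX_* flag values are assumptions mirroring
the DCA3_TAG_MAP_* flags, and the map contents are invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define EX_BIT_TO_INV	0x80
#define EX_BIT_TO_SEL	0x40
#define EX_LITERAL_VAL	0x01

int main(void)
{
	/* Entry 0: select APIC-ID bit 3; entry 1: inverted bit 1;
	 * entry 2: literal 1. */
	uint8_t map[] = { EX_BIT_TO_SEL | 3, EX_BIT_TO_INV | 1, EX_LITERAL_VAL };
	int apic_id = 0xA;	/* binary 1010 */
	uint8_t tag = 0;
	int i;

	for (i = 0; i < 3; i++) {
		uint8_t entry = map[i];
		int value;

		if (entry & EX_BIT_TO_SEL)
			value = (apic_id >> (entry & ~(EX_BIT_TO_SEL | EX_BIT_TO_INV))) & 1;
		else if (entry & EX_BIT_TO_INV)
			value = !((apic_id >> (entry & ~EX_BIT_TO_INV)) & 1);
		else
			value = entry & EX_LITERAL_VAL;
		tag |= value << i;
	}
	printf("tag = 0x%x\n", tag);	/* 1 | 0<<1 | 1<<2 = 0x5 */
	return 0;
}
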
Example #14
File: msi_ia64.c Project: 21cnbao/linux
static int ia64_set_msi_irq_affinity(struct irq_data *idata,
				     const cpumask_t *cpu_mask, bool force)
{
	struct msi_msg msg;
	u32 addr, data;
	int cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
	unsigned int irq = idata->irq;

	if (irq_prepare_move(irq, cpu))
		return -1;

	__get_cached_msi_msg(idata->msi_desc, &msg);

	addr = msg.address_lo;
	addr &= MSI_ADDR_DEST_ID_MASK;
	addr |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
	msg.address_lo = addr;

	data = msg.data;
	data &= MSI_DATA_VECTOR_MASK;
	data |= MSI_DATA_VECTOR(irq_to_vector(irq));
	msg.data = data;

	write_msi_msg(irq, &msg);
	cpumask_copy(idata->affinity, cpumask_of(cpu));

	return 0;
}
Example #15
File: smp.c Project: amodj/Utopia
void send_IPI_mask_phys(cpumask_t mask, int vector)
{
    unsigned long cfg, flags;
    unsigned int query_cpu;

    local_irq_save(flags);

    for_each_cpu_mask ( query_cpu, mask )
    {
        /*
         * Wait for idle.
         */
        apic_wait_icr_idle();

        /*
         * prepare target chip field
         */
        cfg = __prepare_ICR2(cpu_physical_id(query_cpu));
        apic_write_around(APIC_ICR2, cfg);

        /*
         * program the ICR
         */
        cfg = __prepare_ICR(0, vector) | APIC_DEST_PHYSICAL;

        /*
         * Send the IPI. The write to APIC_ICR fires this off.
         */
        apic_write_around(APIC_ICR, cfg);
    }

    local_irq_restore(flags);
}
Example #16
/* As we are using single CPU as destination, pick only one CPU here */
static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	int cpu = cpumask_first(cpumask);

	if (cpu < nr_cpu_ids)
		return cpu_physical_id(cpu);
	return BAD_APICID;
}
Example #17
static hub_intr_t
do_hub_intr_alloc(devfs_handle_t dev,
		device_desc_t dev_desc,
		devfs_handle_t owner_dev,
		int uncond_nothread)
{
	cpuid_t		cpu = 0;
	int		vector;
	hub_intr_t	intr_hdl;
	cnodeid_t	cnode;
	int		cpuphys, slice;
	int		nasid;
	iopaddr_t	xtalk_addr;
	struct xtalk_intr_s	*xtalk_info;
	xwidget_info_t	xwidget_info;
	ilvl_t		intr_swlevel = 0;

	cpu = intr_heuristic(dev, dev_desc, -1, 0, owner_dev, NULL, &vector);

	if (cpu == CPU_NONE) {
		printk("Unable to allocate interrupt for 0x%p\n", (void *)owner_dev);
		return(0);
	}

	cpuphys = cpu_physical_id(cpu);
	slice = cpu_physical_id_to_slice(cpuphys);
	nasid = cpu_physical_id_to_nasid(cpuphys);
	cnode = cpuid_to_cnodeid(cpu);

	if (slice) {
		xtalk_addr = SH_II_INT1 | GLOBAL_MMR_SPACE |
			((unsigned long)nasid << 36) | (1UL << 47);
	} else {
		xtalk_addr = SH_II_INT0 | GLOBAL_MMR_SPACE |
			((unsigned long)nasid << 36) | (1UL << 47);
	}

	intr_hdl = snia_kmem_alloc_node(sizeof(struct hub_intr_s), KM_NOSLEEP, cnode);
	ASSERT_ALWAYS(intr_hdl);

	xtalk_info = &intr_hdl->i_xtalk_info;
	xtalk_info->xi_dev = dev;
	xtalk_info->xi_vector = vector;
	xtalk_info->xi_addr = xtalk_addr;

	xwidget_info = xwidget_info_get(dev);
	if (xwidget_info) {
		xtalk_info->xi_target = xwidget_info_masterid_get(xwidget_info);
	}

	intr_hdl->i_swlevel = intr_swlevel;
	intr_hdl->i_cpuid = cpu;
	intr_hdl->i_bit = vector;
	intr_hdl->i_flags |= HUB_INTR_IS_ALLOCED;

	hub_device_desc_update(dev_desc, intr_swlevel, cpu);
	return(intr_hdl);
}
Example #18
File: irq.c Project: E-LLP/n900
void sn_set_err_irq_affinity(unsigned int irq)
{
        /*
         * On systems which support CPU disabling (SHub2), all error interrupts
         * are targeted at the boot CPU.
         */
        if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT))
                set_irq_affinity_info(irq, cpu_physical_id(0), 0);
}
Example #19
static void
msi_target_apic(unsigned int vector,
		unsigned int dest_cpu,
		u32 *address_hi,	/* in/out */
		u32 *address_lo)	/* in/out */
{
	u32 addr = *address_lo;

	addr &= MSI_ADDR_DESTID_MASK;
	addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(dest_cpu));

	*address_lo = addr;
}
Example #20
File: msi.c Project: rcplay/snake-os
static void msi_address_init(struct msg_address *msi_address)
{
    unsigned int	dest_id;
    unsigned long	dest_phys_id = cpu_physical_id(MSI_TARGET_CPU);

    memset(msi_address, 0, sizeof(struct msg_address));
    msi_address->hi_address = (u32)0;
    dest_id = (MSI_ADDRESS_HEADER << MSI_ADDRESS_HEADER_SHIFT);
    msi_address->lo_address.u.dest_mode = MSI_PHYSICAL_MODE;
    msi_address->lo_address.u.redirection_hint = MSI_REDIRECTION_HINT_MODE;
    msi_address->lo_address.u.dest_id = dest_id;
    msi_address->lo_address.value |= (dest_phys_id << MSI_TARGET_CPU_SHIFT);
}
Example #21
int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
{
#ifdef CONFIG_SMP
	int i;
#endif
	int apic_id = -1;

	apic_id = map_mat_entry(handle, type, acpi_id);
	if (apic_id == -1)
		apic_id = map_madt_entry(type, acpi_id);
	if (apic_id == -1) {
		/*
		 * On UP processor, there is no _MAT or MADT table.
		 * So above apic_id is always set to -1.
		 *
		 * BIOS may define multiple CPU handles even for UP processor.
		 * For example,
		 *
		 * Scope (_PR)
		 * {
		 *     Processor (CPU0, 0x00, 0x00000410, 0x06) {}
		 *     Processor (CPU1, 0x01, 0x00000410, 0x06) {}
		 *     Processor (CPU2, 0x02, 0x00000410, 0x06) {}
		 *     Processor (CPU3, 0x03, 0x00000410, 0x06) {}
		 * }
		 *
		 * Ignores apic_id and always returns 0 for the processor
		 * handle with acpi id 0 if nr_cpu_ids is 1.
		 * This should be the case if SMP tables are not found.
		 * Return -1 for other CPU's handle.
		 */
		if (nr_cpu_ids <= 1 && acpi_id == 0)
			return acpi_id;
		else
			return apic_id;
	}

#ifdef CONFIG_SMP
	for_each_possible_cpu(i) {
		if (cpu_physical_id(i) == apic_id)
			return i;
	}
#else
	/* In UP kernel, only processor 0 is valid */
	if (apic_id == 0)
		return apic_id;
#endif
	return -1;
}
Example #22
void
ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
{
	void __iomem *ipi_addr;
	unsigned long ipi_data;
	unsigned long phys_cpu_id;

	phys_cpu_id = cpu_physical_id(cpu);

	/* Delivery mode goes in bits 8+, vector in the low byte. */
	ipi_data = (delivery_mode << 8) | (vector & 0xff);
	/* Each physical CPU id has a 16-byte slot in the interrupt block;
	 * bit 3 carries the redirect hint. */
	ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));

	writeq(ipi_data, ipi_addr);
}
Example #23
File: msi_ia64.c Project: ivucica/linux
static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
{
	struct msi_msg msg;
	u32 addr;

	read_msi_msg(irq, &msg);

	addr = msg.address_lo;
	addr &= MSI_ADDR_DESTID_MASK;
	addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(first_cpu(cpu_mask)));
	msg.address_lo = addr;

	write_msi_msg(irq, &msg);
	set_native_irq_info(irq, cpu_mask);
}
Example #24
static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			      const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			return cpu_physical_id(cpu);
	}
	return BAD_APICID;
}
Example #25
static int get_cpu_id(acpi_handle handle, int type, u32 acpi_id)
{
	int i;
	int apic_id = -1;

	apic_id = map_mat_entry(handle, type, acpi_id);
	if (apic_id == -1)
		apic_id = map_madt_entry(type, acpi_id);
	if (apic_id == -1)
		return apic_id;

	for_each_possible_cpu(i) {
		if (cpu_physical_id(i) == apic_id)
			return i;
	}
	return -1;
}
Example #26
File: numa.c Project: nearffxx/xpenology
/**
 * build_cpu_to_node_map - setup cpu to node and node to cpumask arrays
 *
 * Build cpu to node mapping and initialize the per node cpu masks using
 * info from the node_cpuid array handed to us by ACPI.
 */
void __init build_cpu_to_node_map(void)
{
	int cpu, i, node;

	for(node=0; node < MAX_NUMNODES; node++)
		cpus_clear(node_to_cpu_mask[node]);

	for_each_possible_early_cpu(cpu) {
		node = -1;
		for (i = 0; i < NR_CPUS; ++i)
			if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
				node = node_cpuid[i].nid;
				break;
			}
		map_cpu_to_node(cpu, node);
	}
}
Example #27
int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id)
{
#ifdef CONFIG_SMP
	int i;
#endif

	if (invalid_phys_cpuid(phys_id)) {
		/*
		 * On UP processor, there is no _MAT or MADT table.
		 * So above phys_id is always set to PHYS_CPUID_INVALID.
		 *
		 * BIOS may define multiple CPU handles even for UP processor.
		 * For example,
		 *
		 * Scope (_PR)
		 * {
		 *     Processor (CPU0, 0x00, 0x00000410, 0x06) {}
		 *     Processor (CPU1, 0x01, 0x00000410, 0x06) {}
		 *     Processor (CPU2, 0x02, 0x00000410, 0x06) {}
		 *     Processor (CPU3, 0x03, 0x00000410, 0x06) {}
		 * }
		 *
		 * Ignores phys_id and always returns 0 for the processor
		 * handle with acpi id 0 if nr_cpu_ids is 1.
		 * This should be the case if SMP tables are not found.
		 * Return -EINVAL for other CPU's handle.
		 */
		if (nr_cpu_ids <= 1 && acpi_id == 0)
			return acpi_id;
		else
			return -EINVAL;
	}

#ifdef CONFIG_SMP
	for_each_possible_cpu(i) {
		if (cpu_physical_id(i) == phys_id)
			return i;
	}
#else
	/* In UP kernel, only processor 0 is valid */
	if (phys_id == 0)
		return phys_id;
#endif
	return -ENODEV;
}
Example #28
File: numa.c Project: a2k2/xen-unstable
/**
 * build_cpu_to_node_map - setup cpu to node and node to cpumask arrays
 *
 * Build cpu to node mapping and initialize the per node cpu masks using
 * info from the node_cpuid array handed to us by ACPI.
 */
void __init build_cpu_to_node_map(void)
{
	int cpu, i, node;

	for(node=0; node < MAX_NUMNODES; node++)
		cpus_clear(node_to_cpu_mask[node]);

	for(cpu = 0; cpu < NR_CPUS; ++cpu) {
		node = -1;
		for (i = 0; i < NR_CPUS; ++i)
			if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
				node = node_cpuid[i].nid;
				break;
			}
		cpu_to_node_map[cpu] = (node >= 0) ? node : 0;
		if (node >= 0)
			cpu_set(cpu, node_to_cpu_mask[node]);
	}
}
Example #29
/* Setup timer on comparator RTC3 */
static void mmtimer_setup_int_2(int cpu, u64 expires)
{
	u64 val;

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), 0UL);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPD), -1L);

	mmtimer_clr_int_pending(2);

	val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC3_INT_CONFIG_IDX_SHFT) |
		((u64)cpu_physical_id(cpu) <<
			SH_RTC3_INT_CONFIG_PID_SHFT);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_CONFIG), val);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), 1UL);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPD), expires);
}
Example #30
static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	struct msi_msg msg;
	int cpu = cpumask_first(mask);

	if (!cpu_online(cpu))
		return;

	if (irq_prepare_move(irq, cpu))
		return;

	dmar_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));

	dmar_msi_write(irq, &msg);
	cpumask_copy(irq_desc[irq].affinity, mask);
}