Example #1
static void summit_init_apic_ldr(void)
{
	unsigned long val, id;
	int count = 0;
	u8 my_id = (u8)hard_smp_processor_id();
	u8 my_cluster = APIC_CLUSTER(my_id);
#ifdef CONFIG_SMP
	u8 lid;
	int i;

	/* Create logical APIC IDs by counting CPUs already in cluster. */
	for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
		lid = cpu_2_logical_apicid[i];
		if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster)
			++count;
	}
#endif
	/* We only have a 4 wide bitmap in cluster mode.  If a deranged
	 * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
	BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
	id = my_cluster | (1UL << count);
	apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE);
	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	val |= SET_APIC_LOGICAL_ID(id);
	apic_write(APIC_LDR, val);
}
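For reference, the cluster/logical-ID helpers these examples rely on (APIC_CLUSTER, SET_APIC_LOGICAL_ID, APIC_LDR_MASK, XAPIC_DEST_CPUS_SHIFT) are not shown in the snippets. A minimal sketch of them, roughly as they appear in Linux-style <asm/apicdef.h> headers (exact names and values may differ per tree): the logical APIC ID lives in bits 31:24 of the LDR, the upper nibble of that ID names the cluster, and the lower nibble holds a one-hot CPU bit.

/* Sketch only: Linux-style xAPIC logical-ID helpers assumed by these examples. */
#define APIC_LDR_MASK		(0xFFu << 24)		/* logical ID in LDR bits 31:24 */
#define SET_APIC_LOGICAL_ID(x)	((x) << 24)
#define GET_APIC_LOGICAL_ID(x)	(((x) >> 24) & 0xFFu)

#define XAPIC_DEST_CPUS_SHIFT	4			/* 4 CPU bits per cluster */
#define XAPIC_DEST_CPUS_MASK	((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
#define XAPIC_DEST_CLUSTER_MASK	(XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
#define APIC_CLUSTER(apicid)	((apicid) & XAPIC_DEST_CLUSTER_MASK)	/* cluster in bits 7:4 */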
Example #2
/*
 * Set up the logical destination ID.
 *
 * Intel recommends to set DFR, LDR and TPR before enabling
 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116).  So here it goes...
 */
static void cluster_init_apic_ldr(void)
{
	unsigned long val, id;
	long i, count;
	u8 lid;
	u8 my_id = hard_smp_processor_id();
	u8 my_cluster = APIC_CLUSTER(my_id);

	/* Create logical APIC IDs by counting CPUs already in cluster. */
	for (count = 0, i = NR_CPUS; --i >= 0; ) {
		lid = x86_cpu_to_log_apicid[i];
		if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster)
			++count;
	}
	/*
	 * We only have a 4 wide bitmap in cluster mode.  There's no way
	 * to get above 60 CPUs and still give each one its own bit.
	 * But, we're using physical IRQ delivery, so we don't care.
	 * Use bit 3 for the 4th through Nth CPU in each cluster.
	 */
	if (count >= XAPIC_DEST_CPUS_SHIFT)
		count = 3;
	id = my_cluster | (1UL << count);
	x86_cpu_to_log_apicid[smp_processor_id()] = id;
	apic_write(APIC_DFR, APIC_DFR_CLUSTER);
	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	val |= SET_APIC_LOGICAL_ID(id);
	apic_write(APIC_LDR, val);
}
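To make the bit layout above concrete, here is a small self-contained sketch (the helper name is hypothetical) of the logical-ID construction used by cluster_init_apic_ldr(), including the bit-3 sharing rule for oversubscribed clusters:

/* Hypothetical illustration of the ID construction above: the cluster number
 * stays in bits 7:4, and the count of CPUs already in the cluster selects a
 * bit in the low nibble, saturating at bit 3 for the 4th and later CPUs. */
static unsigned long make_cluster_logical_id(u8 my_cluster, int count)
{
	if (count >= XAPIC_DEST_CPUS_SHIFT)
		count = 3;
	return my_cluster | (1UL << count);
}
/* e.g. cluster 0x20 with two CPUs already present -> 0x20 | (1UL << 2) = 0x24 */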
Example #3
static void summit_init_apic_ldr(void)
{
	unsigned long val, id;
	int count = 0;
	u8 my_id = (u8)hard_smp_processor_id();
	u8 my_cluster = APIC_CLUSTER(my_id);
#ifdef CONFIG_SMP
	u8 lid;
	int i;

	/* Create logical APIC IDs by counting CPUs already in cluster. */
	for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
		lid = cpu_2_logical_apicid[i];
		if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster)
			++count;
	}
#endif
	/* We only have a 4 wide bitmap in cluster mode.  If a deranged
	 * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
	BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
	id = my_cluster | (1UL << count);
	apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE);
	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	val |= SET_APIC_LOGICAL_ID(id);
	apic_write(APIC_LDR, val);
}
Example #4
static unsigned long calculate_ldr(unsigned long old)
{
	unsigned long id;
	if (clustered_apic_mode == CLUSTERED_APIC_XAPIC)
		id = physical_to_logical_apicid(hard_smp_processor_id());
	else
		id = 1UL << smp_processor_id();
	return (old & ~APIC_LDR_MASK) | SET_APIC_LOGICAL_ID(id);
}
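calculate_ldr() here only computes the new register contents from the old value; the caller is expected to read the LDR, pass it through, and write back the result. A hedged sketch of such a caller, reusing the apic_read()/apic_write() accessors seen in the other examples (the function name is illustrative):

static void init_apic_ldr_sketch(void)
{
	unsigned long val;

	/* read-modify-write of the LDR via the helper above */
	val = calculate_ldr(apic_read(APIC_LDR));
	apic_write(APIC_LDR, val);
}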
Example #5
void default_init_apic_ldr(void)
{
    unsigned long val;

    apic_write(APIC_DFR, APIC_DFR_VALUE);
    val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
    val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
    apic_write(APIC_LDR, val);
}
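In flat logical mode each CPU owns one bit of the 8-bit logical ID, so this scheme addresses at most eight CPUs. A quick worked example (a standalone, hypothetical program rather than kernel code): CPU 3 ends up with an LDR value of 0x08000000.

#include <stdio.h>

#define SET_APIC_LOGICAL_ID(x) ((x) << 24)    /* assumed layout: logical ID in bits 31:24 */

int main(void)
{
    unsigned long cpu = 3;
    unsigned long ldr = SET_APIC_LOGICAL_ID(1UL << cpu);

    /* prints: cpu 3 -> LDR 0x08000000 */
    printf("cpu %lu -> LDR 0x%08lx\n", cpu, ldr);
    return 0;
}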
Example #6
static inline unsigned long calculate_ldr(int cpu)
{
	unsigned long val, id;

	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	id = per_cpu(x86_bios_cpu_apicid, cpu);
	val |= SET_APIC_LOGICAL_ID(id);

	return val;
}
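Unlike the variant in Example #4, this calculate_ldr() takes a CPU number and reads the current LDR itself, so a caller only has to write the result back, typically after selecting flat destination format. A hedged sketch of such a caller; the function name and the APIC_DFR write are assumptions modeled on the surrounding examples:

static void init_apic_ldr_flat_sketch(void)
{
	unsigned long val;
	int cpu = smp_processor_id();

	apic_write(APIC_DFR, APIC_DFR_FLAT);	/* assumed: flat destination format */
	val = calculate_ldr(cpu);
	apic_write(APIC_LDR, val);
}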
Example #7
static void summit_init_apic_ldr(void)
{
	int cpu = smp_processor_id();
	unsigned long id = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
	unsigned long val;

	apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE);
	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	val |= SET_APIC_LOGICAL_ID(id);
	apic_write(APIC_LDR, val);
}
Example #8
/*
 * Set up the logical destination ID.
 *
 * Intel recommends to set DFR, LDR and TPR before enabling
 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116).  So here it goes...
 */
void flat_init_apic_ldr(void)
{
	unsigned long val;
	unsigned long num, id;

	num = smp_processor_id();
	id = 1UL << num;
	apic_write(APIC_DFR, APIC_DFR_FLAT);
	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	val |= SET_APIC_LOGICAL_ID(id);
	apic_write(APIC_LDR, val);
}
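Because flat mode gives every CPU its own bit of the logical ID, a single IPI can target several CPUs by OR-ing their bits into one destination. A hypothetical illustration (not the kernel's actual cpumask plumbing):

/* Hypothetical: a flat-mode logical destination covering CPUs 0, 2 and 5. */
static unsigned long flat_dest_example(void)
{
	return (1UL << 0) | (1UL << 2) | (1UL << 5);	/* 0x25, three targets in one IPI */
}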
Example #9
/*
 * Set up the logical destination ID.
 *
 * Intel recommends to set DFR, LDR and TPR before enabling
 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116).  So here it goes...
 */
static void flat_init_apic_ldr(void)
{
	unsigned long val;
	unsigned long num, id;

	num = smp_processor_id();
	id = 1UL << num;
	x86_cpu_to_log_apicid[num] = id;
	apic_write_around(APIC_DFR, APIC_DFR_FLAT);
	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	val |= SET_APIC_LOGICAL_ID(id);
	apic_write_around(APIC_LDR, val);
}
Example #10
static u32 xen_apic_read(u32 reg)
{
	struct xen_platform_op op = {
		.cmd = XENPF_get_cpuinfo,
		.interface_version = XENPF_INTERFACE_VERSION,
		.u.pcpu_info.xen_cpuid = 0,
	};
	int ret = 0;

	/* Shouldn't need this as APIC is turned off for PV, and we only
	 * get called on the bootup processor. But just in case. */
	if (!xen_initial_domain() || smp_processor_id())
		return 0;

	if (reg == APIC_LVR)
		return 0x10;
#ifdef CONFIG_X86_32
	if (reg == APIC_LDR)
		return SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
#endif
	if (reg != APIC_ID)
		return 0;

	ret = HYPERVISOR_dom0_op(&op);
	if (ret)
		op.u.pcpu_info.apic_id = BAD_APICID;

	return op.u.pcpu_info.apic_id << 24;
}

static void xen_apic_write(u32 reg, u32 val)
{
	if (reg == APIC_LVTPC) {
		(void)pmu_apic_update(reg);
		return;
	}

	/* Warn to see if there are any stray references */
	WARN(1, "register: %x, value: %x\n", reg, val);
}

static u64 xen_apic_icr_read(void)
{
	return 0;
}

static void xen_apic_icr_write(u32 low, u32 id)
{
	/* Warn to see if there are any stray references */
	WARN_ON(1);
}
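These stubs only take effect once they replace the native APIC register accessors. A hedged sketch of how they might be installed, assuming a Linux-style struct apic ops table with .read/.write/.icr_read/.icr_write hooks (the field and function names here are assumptions, not taken from the original source):

static void xen_install_apic_stubs(struct apic *ops)
{
	/* route generic APIC register accesses through the Xen-aware stubs */
	ops->read      = xen_apic_read;
	ops->write     = xen_apic_write;
	ops->icr_read  = xen_apic_icr_read;
	ops->icr_write = xen_apic_icr_write;
}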
Example #11
static unsigned long calculate_ldr(int cpu)
{
    unsigned long id = per_cpu(x86_bios_cpu_apicid, cpu);

    return SET_APIC_LOGICAL_ID(id);
}
Example #12
/**
 * Initializes the calling CPU's local APIC.
 */
void __init
lapic_init(void)
{
	uint32_t val;

	/*
	 * Initialize Destination Format Register.
	 * When using logical destination mode, we want to use the flat model.
	 */
	apic_write(APIC_DFR, APIC_DFR_FLAT);

	/*
	 * Initialize the Logical Destination Register.
	 * The LWK never uses logical destination mode, so just set it to the
	 * APIC's physical ID to avoid possible confusion.
	 */
	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	val |= SET_APIC_LOGICAL_ID( GET_APIC_ID(apic_read(APIC_ID)) );
	apic_write(APIC_LDR, val);

	/*
	 * Initialize the Task Priority Register.
	 * We set this to accept all (0) and never touch it again.
	 */
	val = apic_read(APIC_TASKPRI) & ~APIC_TPRI_MASK;
	apic_write(APIC_TASKPRI, val);

	/*
	 * Initialize the Spurious-Interrupt Vector Register.
	 * This also enables the local APIC.
	 */
	val = apic_read(APIC_SPIV) & ~APIC_VECTOR_MASK;
	val |= (APIC_SPIV_APIC_ENABLED | APIC_SPURIOUS_VECTOR);
	apic_write(APIC_SPIV, val);

	/* Setup LVT[0] = APIC Timer Interrupt */
	apic_write(APIC_LVTT, 0
	             | APIC_DM_FIXED       /* route to fixed IDT vector */
	             | APIC_TIMER_VECTOR   /* IDT vector to route to */
	             | APIC_LVT_MASKED     /* initially disable */
	);

	/* Setup LVT[1] = Thermal Sensor Interrupt */
	apic_write(APIC_LVTTHMR, 0
	             | APIC_DM_FIXED       /* route to fixed IDT vector */     
	             | APIC_THERMAL_VECTOR /* IDT vector to route to */
	);

	/* Setup LVT[2] = Performance Counter Interrupt */
	apic_write(APIC_LVTPC, 0
	             | APIC_DM_NMI         /* treat as non-maskable interrupt */
	                                   /* NMIs are routed to IDT vector 2 */
	             | APIC_LVT_MASKED     /* initially disable */
	);

	/* Setup LVT[3] = Local Interrupt Pin 0 */
	apic_write(APIC_LVT0, 0
	             | APIC_DM_EXTINT      /* hooked up to old 8259A PIC   */
	                                   /* IDT vector provided by 8259A */
	             | APIC_LVT_MASKED     /* disable */
	);

	/* Setup LVT[4] = Local Interrupt Pin 1 */
	apic_write(APIC_LVT1, 0
	             | APIC_DM_NMI         /* treat as non-maskable interrupt */
	                                   /* NMIs are routed to IDT vector 2 */
	             | ((this_cpu != 0)
	                 ? APIC_LVT_MASKED /* mask on all but bootstrap CPU */
	                 : 0)              /* bootstrap CPU (0) receives NMIs */
	);

	/* Setup LVT[5] = Internal APIC Error Detector Interrupt */
	apic_write(APIC_LVTERR, 0
	             | APIC_DM_FIXED       /* route to fixed IDT vector */
	             | APIC_ERROR_VECTOR   /* IDT vector to route to */
	);
	apic_write(APIC_ESR, 0); /* spec says to clear after enabling LVTERR */
}
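lapic_init() leaves LVT[0] (the timer entry) masked; arming the timer is a separate step that programs the divide configuration and initial count and then clears the mask bit. A rough sketch, assuming Linux-style register names (APIC_TDCR, APIC_TMICT, APIC_TDR_DIV_16); this tree's names may differ:

static void lapic_timer_arm_sketch(uint32_t initial_count)
{
	uint32_t lvtt;

	apic_write(APIC_TDCR, APIC_TDR_DIV_16);	/* divide the bus clock by 16 */

	lvtt = apic_read(APIC_LVTT);
	lvtt &= ~APIC_LVT_MASKED;		/* unmask the timer LVT entry set up above */
	apic_write(APIC_LVTT, lvtt);

	apic_write(APIC_TMICT, initial_count);	/* writing the initial count starts the timer */
}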
Example #13
void
apic_init (struct cpu * core)
{
    struct apic_dev * apic = NULL;
    ulong_t base_addr;
    uint32_t val;

    apic = (struct apic_dev*)malloc(sizeof(struct apic_dev));
    if (!apic) {
        panic("Could not allocate apic struct\n");
    }
    memset(apic, 0, sizeof(struct apic_dev));
    core->apic = apic;

    if (!check_apic_avail()) {
        panic("No APIC found on core %u, dying\n", core->id);
    } 

    /* In response to AMD erratum #663 
     * the damn thing may give us lint interrupts
     * even when we have them masked
     */
    if (nk_is_amd()  && cpuid_get_family() == 0x15) {
        APIC_DEBUG("Writing Bridge Ctrl MSR for AMD Errata #663\n");
        msr_write(AMD_MSR_NBRIDGE_CTL, 
                msr_read(AMD_MSR_NBRIDGE_CTL) | 
                (1ULL<<23) | 
                (1ULL<<54));
    }

    base_addr       = apic_get_base_addr();

    /* idempotent when not compiled as HRT */
    apic->base_addr = pa_to_va(base_addr);

#ifndef NAUT_CONFIG_HVM_HRT
    if (core->is_bsp) {
        /* map in the lapic as uncacheable */
        if (nk_map_page_nocache(apic->base_addr, PTE_PRESENT_BIT|PTE_WRITABLE_BIT, PS_4K) == -1) {
            panic("Could not map APIC\n");
        }
    }
#endif

    apic->version   = apic_get_version(apic);
    apic->id        = apic_get_id(apic);

#ifndef NAUT_CONFIG_XEON_PHI
    if (apic->version < 0x10 || apic->version > 0x15) {
        panic("Unsupported APIC version (0x%1x)\n", (unsigned)apic->version);
    }
#endif

    val = apic_read(apic, APIC_REG_LDR) & ~APIC_LDR_MASK;
    val |= SET_APIC_LOGICAL_ID(0);
    apic_write(apic, APIC_REG_LDR, val);

    apic_write(apic, APIC_REG_TPR, apic_read(apic, APIC_REG_TPR) & 0xffffff00);                       // accept all interrupts
    apic_write(apic, APIC_REG_LVTT,    APIC_DEL_MODE_FIXED | APIC_LVT_DISABLED);                      // disable timer interrupts initially
    apic_write(apic, APIC_REG_LVTPC,   APIC_DEL_MODE_FIXED | APIC_LVT_DISABLED | APIC_PC_INT_VEC);    // disable perf cntr interrupts
    apic_write(apic, APIC_REG_LVTTHMR, APIC_DEL_MODE_FIXED | APIC_LVT_DISABLED | APIC_THRML_INT_VEC); // disable thermal interrupts

    /* do we have AMD extended LVT entries to deal with */
    if (nk_is_amd() && amd_has_ext_lvt(apic)) {
        amd_setup_ext_lvt(apic);
    }
            

    /* mask 8259a interrupts */
    apic_write(apic, APIC_REG_LVT0, APIC_DEL_MODE_EXTINT  | APIC_LVT_DISABLED);

    /* only BSP takes NMI interrupts */
    apic_write(apic, APIC_REG_LVT1, 
            APIC_DEL_MODE_NMI | (core->is_bsp ? 0 : APIC_LVT_DISABLED));

    apic_write(apic, APIC_REG_LVTERR, APIC_DEL_MODE_FIXED | APIC_ERROR_INT_VEC); // allow error interrupts

    // clear the ESR
    apic_write(apic, APIC_REG_ESR, 0u);

    apic_global_enable();

    // assign interrupt handlers
    if (core->is_bsp) {

        if (register_int_handler(APIC_NULL_KICK_VEC, null_kick, apic) != 0) {
            panic("Could not register null kick interrupt handler\n");
        }

        if (register_int_handler(APIC_SPUR_INT_VEC, spur_int_handler, apic) != 0) {
            panic("Could not register spurious interrupt handler\n");
        }

        if (register_int_handler(APIC_ERROR_INT_VEC, error_int_handler, apic) != 0) {
            panic("Could not register error interrupt handler\n");
            return;
        }

        /* we shouldn't ever get these, but just in case */
        if (register_int_handler(APIC_PC_INT_VEC, pc_int_handler, apic) != 0) {
            panic("Could not register perf counter interrupt handler\n");
            return;
        }

        if (register_int_handler(APIC_THRML_INT_VEC, thermal_int_handler, apic) != 0) {
            panic("Could not register thermal interrupt handler\n");
            return;
        }

        if (register_int_handler(APIC_EXT_LVT_DUMMY_VEC, dummy_int_handler, apic) != 0) {
            panic("Could not register dummy ext lvt handler\n");
            return;
        }
    }

    apic_assign_spiv(apic, APIC_SPUR_INT_VEC);

    /* turn it on */
    apic_sw_enable(apic);

    /* pass in quantum as milliseconds */
#ifndef NAUT_CONFIG_XEON_PHI
    apic_timer_setup(apic, 1000/NAUT_CONFIG_HZ);
#endif

    apic_dump(apic);
}
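The bodies of apic_assign_spiv() and apic_sw_enable() are not shown above. A plausible minimal sketch of what they do, assuming the standard spurious-interrupt vector register layout (vector in bits 7:0, software-enable in bit 8); the register offset and constant names below follow the APIC_REG_* pattern of this example but are assumptions, not the real Nautilus definitions:

#define APIC_REG_SPIV_SKETCH   0xF0        /* assumed MMIO offset of the spurious vector register */
#define APIC_SW_ENABLE_SKETCH  (1u << 8)   /* assumed software-enable bit in that register */

static void apic_assign_spiv_sketch(struct apic_dev * apic, uint8_t spiv_vec)
{
    uint32_t val = apic_read(apic, APIC_REG_SPIV_SKETCH);
    val = (val & ~0xffu) | spiv_vec;       /* install the spurious interrupt vector */
    apic_write(apic, APIC_REG_SPIV_SKETCH, val);
}

static void apic_sw_enable_sketch(struct apic_dev * apic)
{
    /* setting the enable bit software-enables the local APIC */
    apic_write(apic, APIC_REG_SPIV_SKETCH,
               apic_read(apic, APIC_REG_SPIV_SKETCH) | APIC_SW_ENABLE_SKETCH);
}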