Example #1
/*
 * This is only used on smaller machines.
 */
void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
{
	unsigned long mask = cpus_addr(cpumask)[0];
	unsigned long cfg;
	unsigned long flags;

	local_irq_save(flags);
	WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();
		
	/*
	 * prepare target chip field
	 */
	cfg = __prepare_ICR2(mask);
	apic_write_around(APIC_ICR2, cfg);
		
	/*
	 * program the ICR 
	 */
	cfg = __prepare_ICR(0, vector);
			
	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write_around(APIC_ICR, cfg);

	local_irq_restore(flags);
}
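
Note: neither __prepare_ICR() nor __prepare_ICR2() is shown in these
listings. Below is a minimal sketch of how they are commonly defined in
the i386 trees these examples come from; treat the exact flag set as an
assumption, since some trees (see examples #4 and #6) OR the
destination-mode bit in at the call site instead.

/*
 * Sketch of the ICR helpers the examples assume (modeled on
 * arch/i386/kernel/smp.c; details vary by kernel version).
 */
static inline int __prepare_ICR(unsigned int shortcut, int vector)
{
	/* Fixed delivery mode plus an optional destination shorthand. */
	return APIC_DM_FIXED | shortcut | vector | APIC_DEST_LOGICAL;
}

static inline int __prepare_ICR2(unsigned int mask)
{
	/* The destination field occupies bits 24-31 of ICR2 (xAPIC). */
	return SET_APIC_DEST_FIELD(mask);
}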
Example #2
static inline void send_IPI_mask_bitmask(int mask, int vector)
{
	unsigned long cfg;
	unsigned long flags;

	__save_flags(flags);
	__cli();

		
	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();
		
	/*
	 * prepare target chip field
	 */
	cfg = __prepare_ICR2(mask);
	apic_write_around(APIC_ICR2, cfg);
		
	/*
	 * program the ICR 
	 */
	cfg = __prepare_ICR(0, vector);
			
	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write_around(APIC_ICR, cfg);

	__restore_flags(flags);
}
Example #3
void __send_IPI_shortcut(unsigned int shortcut, int vector)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe.  As we don't care
 * about the value read, we use an atomic rmw access to avoid costly
	 * cli/sti.  Otherwise we use an even cheaper single atomic write
	 * to the APIC.
	 */
	unsigned int cfg;

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	/*
	 * No need to touch the target chip field
	 */
	cfg = __prepare_ICR(shortcut, vector);

#ifdef	CONFIG_KDB
	if (vector == KDB_VECTOR) {
		/*
		 * Setup KDB IPI to be delivered as an NMI
		 */
		cfg = (cfg&~APIC_VECTOR_MASK)|APIC_DM_NMI;
	}
#endif	/* CONFIG_KDB */

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write_around(APIC_ICR, cfg);
}
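
A typical caller passes one of the ICR destination shorthands instead of
programming ICR2. A hedged sketch of the usual broadcast wrapper (the
function name and the num_online_cpus() guard follow the mainline
convention, but are assumptions here):

/*
 * Sketch of a broadcast wrapper over __send_IPI_shortcut().
 * APIC_DEST_ALLBUT targets every CPU except the sender.
 */
void send_IPI_allbutself(int vector)
{
	/* Nothing to do on a uniprocessor system. */
	if (num_online_cpus() <= 1)
		return;
	__send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
}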
Example #4
File: smp.c Project: amodj/Utopia
void send_IPI_mask_flat(cpumask_t cpumask, int vector)
{
    unsigned long mask = cpus_addr(cpumask)[0];
    unsigned long cfg;
    unsigned long flags;

    /* An IPI with no target generates a send accept error from P5/P6 APICs. */
    WARN_ON(mask == 0);

    local_irq_save(flags);

    /*
     * Wait for idle.
     */
    apic_wait_icr_idle();

    /*
     * prepare target chip field
     */
    cfg = __prepare_ICR2(mask);
    apic_write_around(APIC_ICR2, cfg);

    /*
     * program the ICR
     */
    cfg = __prepare_ICR(0, vector) | APIC_DEST_LOGICAL;

    /*
     * Send the IPI. The write to APIC_ICR fires this off.
     */
    apic_write_around(APIC_ICR, cfg);
    
    local_irq_restore(flags);
}
Example #5
static inline void __inquire_remote_apic(int apicid)
{
	int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
	int timeout, status;

	printk("Inquiring remote APIC #%d...\n", apicid);

	for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
		printk("... APIC #%d %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		apic_wait_icr_idle();

		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
		apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk("%08x\n", status);
			break;
		default:
			printk("failed\n");
		}
	}
}
Example #6
File: smp.c Project: amodj/Utopia
void send_IPI_mask_phys(cpumask_t mask, int vector)
{
    unsigned long cfg, flags;
    unsigned int query_cpu;

    local_irq_save(flags);

    for_each_cpu_mask ( query_cpu, mask )
    {
        /*
         * Wait for idle.
         */
        apic_wait_icr_idle();

        /*
         * prepare target chip field
         */
        cfg = __prepare_ICR2(cpu_physical_id(query_cpu));
        apic_write_around(APIC_ICR2, cfg);

        /*
         * program the ICR
         */
        cfg = __prepare_ICR(0, vector) | APIC_DEST_PHYSICAL;

        /*
         * Send the IPI. The write to APIC_ICR fires this off.
         */
        apic_write_around(APIC_ICR, cfg);
    }

    local_irq_restore(flags);
}
Example #7
static inline void __send_IPI_shortcut(unsigned int shortcut, int vector)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe.  As we don't care
 * about the value read, we use an atomic rmw access to avoid costly
	 * cli/sti.  Otherwise we use an even cheaper single atomic write
	 * to the APIC.
	 */
	unsigned int cfg;

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	/*
	 * No need to touch the target chip field
	 */
	cfg = __prepare_ICR(shortcut, vector);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write_around(APIC_ICR, cfg);
}
Example #8
static void flat_send_IPI_mask(unsigned long cpumask, int vector)
{
	unsigned long mask = cpumask;
	unsigned long cfg;
	unsigned long flags;

	__save_flags(flags);
	__cli();

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	/*
	 * prepare target chip field
	 */
	cfg = __prepare_ICR2(mask);
	apic_write_around(APIC_ICR2, cfg);

	/*
	 * program the ICR
	 */
	cfg = __prepare_ICR(0, vector, APIC_DEST_LOGICAL);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write_around(APIC_ICR, cfg);
	__restore_flags(flags);
}
Example #9
/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
static inline void __send_IPI_dest_field(unsigned long mask, int vector)
{
    unsigned long cfg;

    /*
     * Wait for idle.
     */
    if (unlikely(vector == NMI_VECTOR))
        safe_apic_wait_icr_idle();
    else
        apic_wait_icr_idle();

    /*
     * prepare target chip field
     */
    cfg = __prepare_ICR2(mask);
    apic_write_around(APIC_ICR2, cfg);

    /*
     * program the ICR
     */
    cfg = __prepare_ICR(0, vector);

    /*
     * Send the IPI. The write to APIC_ICR fires this off.
     */
    apic_write_around(APIC_ICR, cfg);
}
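
The NMI special case exists because an NMI target may never drain the
ICR, so the idle wait must be bounded rather than infinite. A sketch of
safe_apic_wait_icr_idle(), close to the mainline definition but
reproduced from memory, so treat the timeout values as assumptions:

/*
 * Bounded ICR-idle wait: poll the delivery-status (busy) bit for at
 * most ~100 ms and return the final status instead of spinning forever.
 */
u32 safe_apic_wait_icr_idle(void)
{
	u32 send_status;
	int timeout = 0;

	do {
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
		if (!send_status)
			break;
		udelay(100);
	} while (timeout++ < 1000);

	return send_status;
}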
Example #10
/*
 * For some reason the destination shorthand for self is not valid
 * when used with the NMI delivery mode. This is documented in Tables
 * 8-3 and 8-4 in IA32 Reference Manual Volume 3. We send the IPI to
 * our own APIC ID explicitly which is valid.
 */
void self_nmi(void) 
{
    u32 id = get_apic_id();
    local_irq_disable();
    apic_wait_icr_idle();
    apic_icr_write(APIC_DM_NMI | APIC_DEST_PHYSICAL, id);
    local_irq_enable();
}
Example #11
/*
 * For some reason the destination shorthand for self is not valid
 * when used with the NMI delivery mode. This is documented in Tables
 * 8-3 and 8-4 in IA32 Reference Manual Volume 3. We send the IPI to
 * our own APIC ID explicitly which is valid.
 */
void self_nmi(void)
{
    unsigned long flags;
    u32 id = get_apic_id();
    local_irq_save(flags);
    apic_wait_icr_idle();
    apic_icr_write(APIC_DM_NMI | APIC_DEST_PHYSICAL, id);
    local_irq_restore(flags);
}
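
Both self_nmi() variants delegate the two-register dance to
apic_icr_write(). A sketch of what that helper typically does; the
write order matters because the low-word write is what fires the IPI:

/*
 * Sketch of apic_icr_write(): program the destination (ICR2) first,
 * then the low word (ICR), whose write triggers the actual send.
 */
void apic_icr_write(u32 low, u32 dest)
{
	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(dest));
	apic_write(APIC_ICR, low);
}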
Example #12
void arch_irq_work_raise(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	if (!cpu_has_apic)
		return;

	apic->send_IPI_self(IRQ_WORK_VECTOR);
	apic_wait_icr_idle();
#endif
}
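
The send_IPI_self hook is per-APIC-driver. On the default xAPIC driver
it usually reduces to a SELF shorthand write; a purely illustrative
sketch built on the helper shown in example #19 below (the function
name and destination-mode bit are assumptions):

/*
 * Hypothetical sketch of a self-IPI via the SELF shorthand, in the
 * style of __default_send_IPI_shortcut() from example #19.
 */
static void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector,
				    APIC_DEST_PHYSICAL);
}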
Example #13
void __init sync_Arb_IDs(void)
{
	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
	apic_write_around(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG
				| APIC_DM_INIT);
}
Example #14
void __init sync_Arb_IDs(void)
{
    if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
        return;

    apic_wait_icr_idle();

    apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
    apic_write(APIC_ICR, APIC_DEST_ALLINC |
               APIC_INT_LEVELTRIG | APIC_DM_INIT);
}
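
The modern_apic() predicate folds in the version check that example #16
below spells out; in some trees it also covers the AMD exemption, while
in this one the AMD case is checked separately. A sketch close to the
mainline helper, reproduced from memory, so the exact cutoffs are
assumptions:

/*
 * Sketch of modern_apic(): integrated APICs reporting version >= 0x14
 * (P4 and later) don't need the Arb-ID sync; newer AMD parts don't
 * either, but keep reporting old version numbers.
 */
static int modern_apic(void)
{
	/* AMD K8 and later: old version number, modern behaviour. */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
	    boot_cpu_data.x86 >= 0xf)
		return 1;
	return GET_APIC_VERSION(apic_read(APIC_LVR)) >= 0x14;
}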
Example #15
inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
{
	unsigned long cfg, flags;
	unsigned int query_cpu;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead. This
	 * should be modified to do 1 message per cluster ID - mbligh
	 */ 

	local_irq_save(flags);

	for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) {
		if (cpu_isset(query_cpu, mask)) {
		
			/*
			 * Wait for idle.
			 */
			apic_wait_icr_idle();
		
			/*
			 * prepare target chip field
			 */
			cfg = __prepare_ICR2(cpu_to_logical_apicid(query_cpu));
			apic_write_around(APIC_ICR2, cfg);
		
			/*
			 * program the ICR 
			 */
			cfg = __prepare_ICR(0, vector);

#ifdef	CONFIG_KDB
			if (vector == KDB_VECTOR) {
				/*
				 * Setup KDB IPI to be delivered as an NMI
				 */
				cfg = (cfg&~APIC_VECTOR_MASK)|APIC_DM_NMI;
			}
#endif	/* CONFIG_KDB */
			
			/*
			 * Send the IPI. The write to APIC_ICR fires this off.
			 */
			apic_write_around(APIC_ICR, cfg);
		}
	}
	local_irq_restore(flags);
}
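
The "1 message per cluster ID" TODO in the comment would amount to
OR-ing together the per-CPU bits of every targeted CPU in a cluster and
sending once per cluster. A purely hypothetical sketch using the same
helpers; it assumes the clustered logical layout (cluster number in the
high nibble of the logical APIC ID, one CPU bit in the low nibble):

/*
 * Hypothetical batched sender: one IPI per populated cluster instead
 * of one unicast per CPU. Not from any kernel tree.
 */
static void send_IPI_mask_cluster(cpumask_t mask, int vector)
{
	unsigned long flags;
	unsigned int cpu, cluster;

	local_irq_save(flags);
	for (cluster = 0; cluster < 16; cluster++) {
		unsigned int dest = 0;

		/* Gather every targeted CPU that lives in this cluster. */
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			unsigned int lid;

			if (!cpu_isset(cpu, mask))
				continue;
			lid = cpu_to_logical_apicid(cpu);
			if ((lid >> 4) == cluster)
				dest |= lid & 0xf;
		}
		if (!dest)
			continue;

		apic_wait_icr_idle();
		apic_write_around(APIC_ICR2,
				  __prepare_ICR2((cluster << 4) | dest));
		apic_write_around(APIC_ICR, __prepare_ICR(0, vector));
	}
	local_irq_restore(flags);
}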
Example #16
void __init sync_Arb_IDs(void)
{
	/* Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 */
	unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
	if (ver >= 0x14)	/* P4 or higher */
		return;
	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
	apic_write_around(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG
				| APIC_DM_INIT);
}
Example #17
void __init sync_Arb_IDs(void)
{
    /* Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1
       And not needed on AMD */
    if (modern_apic())
        return;
    /*
     * Wait for idle.
     */
    apic_wait_icr_idle();

    apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
    apic_write_around(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG
                      | APIC_DM_INIT);
}
Example #18
static inline void send_IPI_mask_sequence(int mask, int vector)
{
	unsigned long cfg, flags;
	unsigned int query_cpu, query_mask;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead. This
	 * should be modified to do 1 message per cluster ID - mbligh
	 */ 

	__save_flags(flags);
	__cli();

	for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) {
		query_mask = 1 << query_cpu;
		if (query_mask & mask) {
		
			/*
			 * Wait for idle.
			 */
			apic_wait_icr_idle();
		
			/*
			 * prepare target chip field
			 */
			if(clustered_apic_mode == CLUSTERED_APIC_XAPIC)
				cfg = __prepare_ICR2(cpu_to_physical_apicid(query_cpu));
			else
				cfg = __prepare_ICR2(cpu_to_logical_apicid(query_cpu));
			apic_write_around(APIC_ICR2, cfg);
		
			/*
			 * program the ICR 
			 */
			cfg = __prepare_ICR(0, vector);
			
			/*
			 * Send the IPI. The write to APIC_ICR fires this off.
			 */
			apic_write_around(APIC_ICR, cfg);
		}
	}
	__restore_flags(flags);
}
Example #19
static void __default_send_IPI_shortcut(unsigned int shortcut, int vector,
                                    unsigned int dest)
{
    unsigned int cfg;

    /*
     * Wait for idle.
     */
    apic_wait_icr_idle();

    /*
     * prepare target chip field
     */
    cfg = __prepare_ICR(shortcut, vector) | dest;
    /*
     * Send the IPI. The write to APIC_ICR fires this off.
     */
    apic_write_around(APIC_ICR, cfg);
}
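
Unlike examples #3 and #7, this variant takes the destination-mode bits
as a parameter, so one body can serve both logical and physical APIC
drivers. A hedged usage sketch (the wrapper names are illustrative):

/*
 * Sketch of call sites choosing the destination mode per driver.
 */
static void default_send_IPI_allbutself(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector,
				    APIC_DEST_LOGICAL);
}

static void physflat_send_IPI_allbutself(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector,
				    APIC_DEST_PHYSICAL);
}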
Example #20
void send_IPI_mask_flat(const cpumask_t *cpumask, int vector)
{
    unsigned long mask = cpumask_bits(cpumask)[0];
    unsigned long cfg;
    unsigned long flags;

    mask &= cpumask_bits(&cpu_online_map)[0];
    mask &= ~(1UL << smp_processor_id());
    if ( mask == 0 )
        return;

    local_irq_save(flags);

    /*
     * Wait for idle.
     */
    apic_wait_icr_idle();

    /*
     * prepare target chip field
     */
    cfg = __prepare_ICR2(mask);
    apic_write_around(APIC_ICR2, cfg);

    /*
     * program the ICR
     */
    cfg = __prepare_ICR(0, vector) | APIC_DEST_LOGICAL;

    /*
     * Send the IPI. The write to APIC_ICR fires this off.
     */
    apic_write_around(APIC_ICR, cfg);
    
    local_irq_restore(flags);
}