Example #1
static void smp_send_nmi_allbutself(void)
{
	cpumask_t mask = cpu_online_map;
	cpu_clear(safe_smp_processor_id(), mask);
	if (!cpus_empty(mask))
		send_IPI_mask(mask, NMI_VECTOR);
}
Example #2
static void __smp_call_function_many(cpumask_t *mask, void (*func) (void *info),
				     void *info, int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus;
	int cpu = smp_processor_id();

	if (cpu_isset(cpu, *mask))
		cpu_clear(cpu, *mask);
	cpus = cpus_weight(*mask);
	if (!cpus)
		return;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	wmb();
	/* Send a message to the CPUs in the mask and wait for them to respond */
	send_IPI_mask(*mask, CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (!wait)
		return;

	while (atomic_read(&data.finished) != cpus)
		cpu_relax();
}
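For context, the handshake above only terminates because every CPU that receives CALL_FUNCTION_VECTOR runs a handler that reads call_data and bumps the started/finished counters. A minimal sketch of that receiving side, modelled on the i386/x86-64 smp_call_function_interrupt of the same era (acknowledgement and irq_enter/irq_exit details vary by architecture), looks roughly like this:

/*
 * Sketch of the receiving side of the call_data handshake; assumes the
 * same call_data_struct layout used by the sender above.
 */
void smp_call_function_interrupt(void)
{
	void (*func)(void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	ack_APIC_irq();
	/* Tell the initiating CPU that func/info have been read ... */
	mb();
	atomic_inc(&call_data->started);
	/* ... beyond this point call_data may go out of scope unless wait */
	irq_enter();
	(*func)(info);
	irq_exit();

	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}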
Example #3
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
						unsigned long va)
{
	int sender;
	union smp_flush_state *f;

	/* Caller has disabled preemption */
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	f = &per_cpu(flush_state, sender);

	/* Could avoid this lock when
	   num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
	   probably not worth checking this for a cache-hot lock. */
	spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);

	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);

	while (!cpus_empty(f->flush_cpumask))
		cpu_relax();

	f->flush_mm = NULL;
	f->flush_va = 0;
	spin_unlock(&f->tlbstate_lock);
}
Example #4
/*
 * this function sends a 'generic call function' IPI to one other CPU
 * in the system.
 *
 * cpu is a standard Linux logical CPU number.
 */
static void
__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
				int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = 1;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	wmb();
	/* Send a message to the target CPU and wait for it to respond */
	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (!wait)
		return;

	while (atomic_read(&data.finished) != cpus)
		cpu_relax();
}
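A hypothetical caller of this helper might look like the sketch below; call_lock and cpu_online() are assumed to be available as in the i386 SMP code of this vintage, and the wrapper itself is illustrative rather than part of the original source.

/*
 * Hypothetical wrapper (not from the original source): serialise access
 * to the shared call_data pointer and refuse obviously invalid targets
 * before using the single-CPU helper above.
 */
static int example_call_on_cpu(int cpu, void (*func)(void *), void *info)
{
	if (cpu == smp_processor_id() || !cpu_online(cpu))
		return -EINVAL;

	spin_lock(&call_lock);
	__smp_call_function_single(cpu, func, info, 0, 1 /* wait */);
	spin_unlock(&call_lock);
	return 0;
}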
Example #5
static inline void send_IPI_allbutself(int vector)
{
	/*
	 * if there are no other CPUs in the system then
	 * we get an APIC send error if we try to broadcast.
	 * thus we have to avoid sending IPIs in this case.
	 */
	if (!(smp_num_cpus > 1))
		return;

	if (clustered_apic_mode) {
		// Pointless. Use send_IPI_mask to do this instead
		int cpu;

		if (smp_num_cpus > 1) {
			for (cpu = 0; cpu < smp_num_cpus; ++cpu) {
				if (cpu != smp_processor_id())
					send_IPI_mask(1 << cpu, vector);
			}
		}
	} else {
		__send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
		return;
	}
}
Example #6
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
						unsigned long va)
{
	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	BUG_ON(cpus_empty(cpumask));
	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
	BUG_ON(!mm);

	/* If a CPU which we ran on has gone down, OK. */
	cpus_and(cpumask, cpumask, cpu_online_map);
	if (cpus_empty(cpumask))
		return;

	/*
	 * i'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * Temporarily this turns IRQs off, so that lockups are
	 * detected by the NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);
	
	flush_mm = mm;
	flush_va = va;
#if NR_CPUS <= BITS_PER_LONG
	atomic_set_mask(cpumask, &flush_cpumask);
#else
	{
		int k;
		unsigned long *flush_mask = (unsigned long *)&flush_cpumask;
		unsigned long *cpu_mask = (unsigned long *)&cpumask;
		for (k = 0; k < BITS_TO_LONGS(NR_CPUS); ++k)
			atomic_set_mask(cpu_mask[k], &flush_mask[k]);
	}
#endif

	/*
	 * Make the above memory operations globally visible before
	 * sending the IPI.
	 */
	smp_mb();
	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(flush_cpumask, INVALIDATE_TLB_VECTOR);

	while (!cpus_empty(flush_cpumask))
		/* nothing. lockup detection does not belong here */
		mb();

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
Example #7
/*==========================================================================*
 * Name:         flush_tlb_others
 *
 * Description:  This routine requests the other CPUs to flush their TLBs.
 *               1. Set up the parameters.
 *               2. Send 'INVALIDATE_TLB_IPI' to the other CPUs, requesting
 *                  them to execute 'smp_invalidate_interrupt()'.
 *               3. Wait until the other CPUs have finished.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpumask - bitmap of target CPUs
 *               *mm - a pointer to the mm struct whose TLB entries are flushed
 *               *vma - a pointer to the vma struct that contains va
 *               va - virtual address to flush from the TLB
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
	struct vm_area_struct *vma, unsigned long va)
{
	unsigned long *mask;
#ifdef DEBUG_SMP
	unsigned long flags;
	__save_flags(flags);
	if (!(flags & 0x0040))	/* Interrupt Disable NONONO */
		BUG();
#endif /* DEBUG_SMP */

	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - we do not send IPIs to not-yet booted CPUs.
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	BUG_ON(cpumask_empty(&cpumask));

	BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
	BUG_ON(!mm);

	/* If a CPU which we ran on has gone down, OK. */
	cpumask_and(&cpumask, &cpumask, cpu_online_mask);
	if (cpumask_empty(&cpumask))
		return;

	/*
	 * i'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * Temporarily this turns IRQs off, so that lockups are
	 * detected by the NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_vma = vma;
	flush_va = va;
	mask = cpumask_bits(&cpumask);
	atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);

	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);

	while (!cpumask_empty((cpumask_t*)&flush_cpumask)) {
		/* nothing. lockup detection does not belong here */
		mb();
	}

	flush_mm = NULL;
	flush_vma = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
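The docblock above names smp_invalidate_interrupt() as the handler on the receiving CPUs; the handshake completes only when each target clears its bit from flush_cpumask, which is what the sender's busy-wait checks. A simplified, hedged sketch of such a handler follows; the local flush primitives and the "flush_va == 0 means flush everything" convention are placeholders, not the actual m32r code.

/*
 * Hedged reconstruction of the receiving side: flush locally, then
 * clear our bit so the busy-wait in flush_tlb_others() can terminate.
 */
void smp_invalidate_interrupt(void)
{
	int cpu = smp_processor_id();

	if (!cpumask_test_cpu(cpu, (cpumask_t *)&flush_cpumask))
		return;

	if (flush_va == 0)
		local_flush_tlb();			/* whole local TLB */
	else
		local_flush_tlb_page(flush_vma, flush_va);	/* one page */

	/* Order the flush before signalling completion to the sender. */
	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu, (cpumask_t *)&flush_cpumask);
	smp_mb__after_clear_bit();
}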
Example #8
void smp_send_timer_broadcast_ipi(void)
{
	cpumask_t mask;

	cpus_and(mask, cpu_online_map, timer_interrupt_broadcast_ipi_mask);
	if (!cpus_empty(mask)) {
		send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
	}
}
Example #9
static void
send_ipi_interrupt(cpumask_t *mask, int vector)
{
# if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
	/***********************************************/
	/*   Theres  'flat' and theres 'cluster'. The  */
	/*   cluster  functions  handle  more  than 8  */
	/*   cpus. The flat does not - since the APIC  */
	/*   only has room for an 8-bit cpu mask.      */
	/***********************************************/
	static void (*send_IPI_mask)(cpumask_t, int);
	if (send_IPI_mask == NULL)
	        send_IPI_mask = get_proc_addr("cluster_send_IPI_mask");
	if (send_IPI_mask == NULL)
		dtrace_printf("HELP ON send_ipi_interrupt!\n");
	else
		send_IPI_mask(*mask, vector);
# elif LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 28)
	/***********************************************/
	/*   Issue with GPL/inlined function.	       */
	/***********************************************/
	{
	void send_IPI_mask_sequence(cpumask_t mask, int vector);
	static void (*send_IPI_mask_sequence_ptr)(cpumask_t, int);
	if (send_IPI_mask_sequence_ptr == NULL)
		send_IPI_mask_sequence_ptr = get_proc_addr("send_IPI_mask_sequence");
	send_IPI_mask_sequence_ptr(*mask, vector);
	}
# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
	send_IPI_mask(*mask, vector);
# else
	if (x_apic == NULL) {
		static void (*flat_send_IPI_mask)(cpumask_t *, int);
		if (flat_send_IPI_mask == NULL) 
			flat_send_IPI_mask = get_proc_addr("flat_send_IPI_mask");

		if (flat_send_IPI_mask)  {
			flat_send_IPI_mask(mask, vector);
			return;
		}
		dtrace_linux_panic("x_apic is null - giving up\n");
		return;
	}
	x_apic->send_IPI_mask(mask, vector);
# endif
}
Example #10
void smp_send_call_function_mask(const cpumask_t *mask)
{
    send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

    if ( cpumask_test_cpu(smp_processor_id(), mask) )
    {
        local_irq_disable();
        smp_call_function_interrupt();
        local_irq_enable();
    }
}
Example #11
/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on.  Must not include the current cpu.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
static int
native_smp_call_function_mask(cpumask_t mask,
                              void (*func)(void *), void *info,
                              int wait)
{
    struct call_data_struct data;
    cpumask_t allbutself;
    int cpus;

    /* Can deadlock when called with interrupts disabled */
    WARN_ON(irqs_disabled());

    /* Holding any lock stops cpus from going down. */
    spin_lock(&call_lock);

    allbutself = cpu_online_map;
    cpu_clear(smp_processor_id(), allbutself);

    cpus_and(mask, mask, allbutself);
    cpus = cpus_weight(mask);

    if (!cpus) {
        spin_unlock(&call_lock);
        return 0;
    }

    data.func = func;
    data.info = info;
    atomic_set(&data.started, 0);
    data.wait = wait;
    if (wait)
        atomic_set(&data.finished, 0);

    call_data = &data;
    mb();

    /* Send a message to other CPUs */
    if (cpus_equal(mask, allbutself))
        send_IPI_allbutself(CALL_FUNCTION_VECTOR);
    else
        send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

    /* Wait for response */
    while (atomic_read(&data.started) != cpus)
        cpu_relax();

    if (wait)
        while (atomic_read(&data.finished) != cpus)
            cpu_relax();
    spin_unlock(&call_lock);

    return 0;
}
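As a usage illustration of the interface documented above (in kernels of this era the exported wrapper is smp_call_function_mask(); the callback and counter below are made up for the example):

/*
 * Hypothetical example: run a counter bump on every other online CPU
 * and wait until all of them have finished.
 */
static atomic_t example_hits = ATOMIC_INIT(0);

static void example_bump(void *info)
{
	atomic_inc((atomic_t *)info);
}

static void example_run_on_others(void)
{
	cpumask_t mask = cpu_online_map;

	cpu_clear(smp_processor_id(), mask);	/* mask must not include us */
	smp_call_function_mask(mask, example_bump, &example_hits, 1 /* wait */);
}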
Example #12
void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
                             unsigned long va)
{
    cpumask_t cpumask = *cpumaskp;

    /*
     * A couple of (to be removed) sanity checks:
     *
     * - current CPU must not be in mask
     * - mask must exist :)
     */
    BUG_ON(cpus_empty(cpumask));
    BUG_ON(cpu_isset(smp_processor_id(), cpumask));
    BUG_ON(!mm);

#ifdef CONFIG_HOTPLUG_CPU
    /* If a CPU which we ran on has gone down, OK. */
    cpus_and(cpumask, cpumask, cpu_online_map);
    if (unlikely(cpus_empty(cpumask)))
        return;
#endif

    /*
     * i'm not happy about this global shared spinlock in the
     * MM hot path, but we'll see how contended it is.
     * AK: x86-64 has a faster method that could be ported.
     */
    spin_lock(&tlbstate_lock);

    flush_mm = mm;
    flush_va = va;
    cpus_or(flush_cpumask, cpumask, flush_cpumask);

    /*
     * Make the above memory operations globally visible before
     * sending the IPI.
     */
    smp_mb();
    /*
     * We have to send the IPI only to
     * CPUs affected.
     */
    send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR);

    while (!cpus_empty(flush_cpumask))
        /* nothing. lockup detection does not belong here */
        cpu_relax();

    flush_mm = NULL;
    flush_va = 0;
    spin_unlock(&tlbstate_lock);
}
Example #13
static inline void send_IPI_all(int vector)
{
	if (clustered_apic_mode) {
		// Pointless. Use send_IPI_mask to do this instead
		int cpu;

		for (cpu = 0; cpu < smp_num_cpus; ++cpu) {
			send_IPI_mask(1 << cpu, vector);
		}
	} else {
		__send_IPI_shortcut(APIC_DEST_ALLINC, vector);
	}
}
Example #14
static void __smp_ipi_test_interrupt(void)
{
    cpumask_t mask;
    if (smp_processor_id() == test_cpu_x) {
	if (nr_trips == INITIAL_DISCARD) {
	    start_time = NOW();
	    send_ipi_time = 0;
	}
	if (nr_trips == NR_TRIPS + INITIAL_DISCARD) {
	    finish_time = NOW();
	    tasklet_schedule(&ipi_test_tasklet);
	    return;
	}
	nr_trips++;
	mask = cpumask_of_cpu(test_cpu_y);
	send_ipi_time -= NOW();
	send_IPI_mask(&mask, IPI_TEST_VECTOR);
	send_ipi_time += NOW();
    } else {
	mask = cpumask_of_cpu(test_cpu_x);
	send_IPI_mask(&mask, IPI_TEST_VECTOR);
    }
}
Example #15
static void
send_ipi_interrupt(cpumask_t *mask, int vector)
{
# if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
	/***********************************************/
	/*   Theres  'flat' and theres 'cluster'. The  */
	/*   cluster  functions  handle  more  than 8  */
	/*   cpus. The flat does not - since the APIC  */
	/*   only has room for an 8-bit cpu mask.      */
	/***********************************************/
	static void (*send_IPI_mask)(cpumask_t, int);
	if (send_IPI_mask == NULL)
	        send_IPI_mask = get_proc_addr("cluster_send_IPI_mask");
	if (send_IPI_mask == NULL)
		dtrace_printf("HELP ON send_ipi_interrupt!\n");
	else
		send_IPI_mask(*mask, vector);
# elif LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 28)
	send_IPI_mask_sequence(*mask, vector);
# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
	send_IPI_mask(*mask, vector);
# else
	x_apic->send_IPI_mask(mask, vector);
# endif
}
Example #16
static void run_ipi_test_tasklet(unsigned long ignore)
{
    cpumask_t mask;

    BUG_ON(!local_irq_is_enabled());

    if (!done_initialisation) {
	printk("Running initialisation; x2 apic enabled %d\n", x2apic_enabled);
	set_intr_gate(IPI_TEST_VECTOR, ipi_test_interrupt);
	test_cpu_x = 0;
	test_cpu_y = 1;
	done_initialisation = 1;
    } else {
	unsigned long time_taken = finish_time - start_time;
	printk("CPUs %d -> %d took %ld nanoseconds to perform %ld round trips; RTT %ldns\n",
	       test_cpu_x, test_cpu_y,
	       time_taken, nr_trips - INITIAL_DISCARD,
	       time_taken / (nr_trips - INITIAL_DISCARD));
	printk("%d -> %d send IPI time %ld nanoseconds (%ld each)\n",
	       test_cpu_x, test_cpu_y,
	       send_ipi_time,
	       send_ipi_time / (nr_trips - INITIAL_DISCARD));
	nr_trips = 0;
	test_cpu_y = next_cpu(test_cpu_y, cpu_online_map);
	if (test_cpu_y == test_cpu_x)
	    test_cpu_y = next_cpu(test_cpu_y, cpu_online_map);
	if (test_cpu_y == NR_CPUS) {
	    test_cpu_x = next_cpu(test_cpu_x, cpu_online_map);
	    if (test_cpu_x == NR_CPUS) {
		printk("Finished test\n");
		machine_restart(0);
	    }
	    test_cpu_y = 0;
	}
    }

    BUG_ON(test_cpu_x == test_cpu_y);

    if (test_cpu_x == smp_processor_id()) {
	local_irq_disable();
	__smp_ipi_test_interrupt();
	local_irq_enable();
    } else {
	mask = cpumask_of_cpu(test_cpu_x);
	send_IPI_mask(&mask, IPI_TEST_VECTOR);
    }
}
Example #17
void smp_send_timer_broadcast_ipi(struct pt_regs *regs)
{
	cpumask_t mask;

	cpus_and(mask, cpu_online_map, timer_bcast_ipi);
	if (!cpus_empty(mask)) {
#ifdef CONFIG_SMP
		send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
#else
		/*
		 * We can directly call the apic timer interrupt handler
		 * in UP case. Minus all irq related functions
		 */
		up_apic_timer_interrupt_call(regs);
#endif
	}
}
Example #18
void smp_send_timer_broadcast_ipi(void)
{
	int cpu = smp_processor_id();
	cpumask_t mask;

	cpus_and(mask, cpu_online_map, timer_interrupt_broadcast_ipi_mask);

	if (cpu_isset(cpu, mask)) {
		cpu_clear(cpu, mask);
		add_pda(apic_timer_irqs, 1);
		smp_local_timer_interrupt();
	}

	if (!cpus_empty(mask)) {
		send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
	}
}
Example #19
/*==========================================================================*
 * Name:         smp_flush_cache_all
 *
 * Description:  This routine sends an 'INVALIDATE_CACHE_IPI' to all other
 *               CPUs in the system.
 *
 * Born on Date: 2003-05-28
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_cache_all(void)
{
	cpumask_t cpumask;
	unsigned long *mask;

	preempt_disable();
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);
	spin_lock(&flushcache_lock);
	mask = cpumask_bits(&cpumask);
	atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
	_flush_cache_copyback_all();
	while (flushcache_cpumask)
		mb();
	spin_unlock(&flushcache_lock);
	preempt_enable();
}
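Here, too, the busy-wait on flushcache_cpumask is ended by the IPI handler on each target CPU. A rough, hedged reconstruction of that side (the real m32r handler name and flush primitive may differ):

/*
 * Hedged reconstruction of the receiving side: write back the local
 * cache, then drop our bit from flushcache_cpumask so the sender's
 * while (flushcache_cpumask) loop above can exit.
 */
void smp_flush_cache_all_interrupt(void)
{
	_flush_cache_copyback_all();
	clear_bit(smp_processor_id(),
		  (volatile unsigned long *)&flushcache_cpumask);
}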
Example #20
File: tlb_64.c (Project: E-LLP/n900)
void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
			     unsigned long va)
{
	int sender;
	union smp_flush_state *f;
	cpumask_t cpumask = *cpumaskp;

	if (is_uv_system() && uv_flush_tlb_others(&cpumask, mm, va))
		return;

	/* Caller has disabled preemption */
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	f = &per_cpu(flush_state, sender);

	/*
	 * Could avoid this lock when
	 * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
	 * probably not worth checking this for a cache-hot lock.
	 */
	spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);

	/*
	 * Make the above memory operations globally visible before
	 * sending the IPI.
	 */
	smp_mb();
	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);

	while (!cpus_empty(f->flush_cpumask))
		cpu_relax();

	f->flush_mm = NULL;
	f->flush_va = 0;
	spin_unlock(&f->tlbstate_lock);
}
Example #21
static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
						unsigned long va)
{
	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - we do not send IPIs to not-yet booted CPUs.
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	if (!cpumask)
		BUG();
	if ((cpumask & cpu_online_map) != cpumask)
		BUG();
	if (cpumask & (1 << smp_processor_id()))
		BUG();
	if (!mm)
		BUG();

	/*
	 * i'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * Temporarily this turns IRQs off, so that lockups are
	 * detected by the NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);
	
	flush_mm = mm;
	flush_va = va;
	atomic_set_mask(cpumask, &flush_cpumask);
	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);

	while (flush_cpumask)
		/* nothing. lockup detection does not belong here */;

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
Example #22
void flush_area_mask(const cpumask_t *mask, const void *va, unsigned int flags)
{
    ASSERT(local_irq_is_enabled());

    if ( cpumask_test_cpu(smp_processor_id(), mask) )
        flush_area_local(va, flags);

    if ( !cpumask_subset(mask, cpumask_of(smp_processor_id())) )
    {
        spin_lock(&flush_lock);
        cpumask_and(&flush_cpumask, mask, &cpu_online_map);
        cpumask_clear_cpu(smp_processor_id(), &flush_cpumask);
        flush_va      = va;
        flush_flags   = flags;
        send_IPI_mask(&flush_cpumask, INVALIDATE_TLB_VECTOR);
        while ( !cpumask_empty(&flush_cpumask) )
            cpu_relax();
        spin_unlock(&flush_lock);
    }
}
Example #23
int fastcall __ipipe_send_ipi(unsigned ipi, cpumask_t cpumask)
{
	unsigned long flags;
	ipipe_declare_cpuid;
	int self;

	ipipe_lock_cpu(flags);

	self = cpu_isset(cpuid,cpumask);
	cpu_clear(cpuid,cpumask);

	if (!cpus_empty(cpumask))
		send_IPI_mask(cpumask,ipi + FIRST_EXTERNAL_VECTOR);

	if (self)
		ipipe_trigger_irq(ipi);

	ipipe_unlock_cpu(flags);

	return 0;
}
Example #24
void smp_send_state_dump(unsigned int cpu)
{
    /* We overload the spurious interrupt handler to handle the dump. */
    per_cpu(state_dump_pending, cpu) = 1;
    send_IPI_mask(cpumask_of(cpu), SPURIOUS_APIC_VECTOR);
}
Example #25
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_IPI_mask(mask, IPI_CALL_FUNC);
}
Example #26
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
    WARN_ON(cpu_is_offline(cpu));
    send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}
Example #27
void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI, 0);
}
Example #28
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}
Example #29
void smp_send_reschedule(int cpu)
{
	send_IPI_mask(1 << cpu, RESCHEDULE_VECTOR);
}
Example #30
/*
 * Local APIC timer broadcast function
 */
static void lapic_timer_broadcast(cpumask_t mask)
{
#ifdef CONFIG_SMP
	send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
#endif
}