Example 1
void _write_lock_irq(rwlock_t *lock)
{
    ASSERT(local_irq_is_enabled());
    local_irq_disable();
    check_lock(&lock->debug);
    _raw_write_lock(&lock->raw);
}
Example 2
static void check_lock(struct lock_debug *debug)
{
    int irq_safe = !local_irq_is_enabled();

    if ( unlikely(atomic_read(&spin_debug) <= 0) )
        return;

    /* A few places take liberties with this. */
    /* BUG_ON(in_irq() && !irq_safe); */

    /*
     * We partition locks into IRQ-safe (always held with IRQs disabled) and
     * IRQ-unsafe (always held with IRQs enabled) types. The convention for
     * every lock must be consistently observed else we can deadlock in
     * IRQ-context rendezvous functions (a rendezvous which gets every CPU
     * into IRQ context before any CPU is released from the rendezvous).
     * 
     * If we can mix IRQ-disabled and IRQ-enabled callers, the following can
     * happen:
     *  * Lock is held by CPU A, with IRQs enabled
     *  * CPU B is spinning on same lock, with IRQs disabled
     *  * Rendezvous starts -- CPU A takes interrupt and enters rendezvous spin
     *  * DEADLOCK -- CPU B will never enter rendezvous, CPU A will never exit
     *                the rendezvous, and will hence never release the lock.
     * 
     * To guard against this subtle bug we latch the IRQ safety of every
     * spinlock in the system, on first use.
     */
    if ( unlikely(debug->irq_safe != irq_safe) )
    {
        int seen = cmpxchg(&debug->irq_safe, -1, irq_safe);
        BUG_ON(seen == !irq_safe);
    }
}
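
To illustrate the latching the comment above describes, here is a minimal sketch (the two locks and both helpers are hypothetical, not taken from any example on this page): each lock is used consistently with respect to IRQ state, so check_lock() latches it once on first use and the BUG_ON() never fires. Mixing both patterns on the same lock would trip it.

static DEFINE_SPINLOCK(hypothetical_irq_safe_lock);   /* always taken with IRQs disabled */
static DEFINE_SPINLOCK(hypothetical_irq_unsafe_lock); /* always taken with IRQs enabled */

static void consumer_in_irq_context(void)
{
    unsigned long flags;

    /* Disables IRQs before acquiring: check_lock() latches the lock as IRQ-safe. */
    spin_lock_irqsave(&hypothetical_irq_safe_lock, flags);
    /* ... */
    spin_unlock_irqrestore(&hypothetical_irq_safe_lock, flags);
}

static void consumer_in_process_context(void)
{
    /*
     * Acquired with IRQs enabled: latched as IRQ-unsafe.  Taking this lock
     * with IRQs disabled anywhere else would hit the BUG_ON() above.
     */
    spin_lock(&hypothetical_irq_unsafe_lock);
    /* ... */
    spin_unlock(&hypothetical_irq_unsafe_lock);
}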
Example 3
void update_last_cx_stat(struct acpi_processor_power *power,
                         struct acpi_processor_cx *cx, uint64_t ticks)
{
    ASSERT(!local_irq_is_enabled());

    spin_lock(&power->stat_lock);
    power->last_state = cx;
    power->last_state_update_tick = ticks;
    spin_unlock(&power->stat_lock);
}
Example 4
int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
{
    cpumask_t allbutself;
    unsigned int i, nr_cpus;
    int ret;

    BUG_ON(!local_irq_is_enabled());

    allbutself = cpu_online_map;
    cpu_clear(smp_processor_id(), allbutself);
    nr_cpus = cpus_weight(allbutself);

    if ( nr_cpus == 0 )
    {
        BUG_ON(cpu != smp_processor_id());
        return (*fn)(data);
    }

    /*
     * Note: we must not spin on this lock while another CPU holds it, since
     * that CPU is waiting for us to enter softirq context for its own
     * stop_machine operation; spinning here would deadlock.
     */
    if ( !spin_trylock(&stopmachine_lock) )
        return -EBUSY;

    stopmachine_data.fn = fn;
    stopmachine_data.fn_data = data;
    stopmachine_data.nr_cpus = nr_cpus;
    stopmachine_data.fn_cpu = cpu;
    atomic_set(&stopmachine_data.done, 0);
    stopmachine_data.state = STOPMACHINE_START;

    smp_wmb();

    for_each_cpu_mask ( i, allbutself )
        cpu_raise_softirq(i, STOPMACHINE_SOFTIRQ);

    stopmachine_set_state(STOPMACHINE_PREPARE);

    local_irq_disable();
    stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);

    if ( cpu == smp_processor_id() )
        stopmachine_data.fn_result = (*fn)(data);
    stopmachine_set_state(STOPMACHINE_INVOKE);
    ret = stopmachine_data.fn_result;

    stopmachine_set_state(STOPMACHINE_EXIT);
    local_irq_enable();

    spin_unlock(&stopmachine_lock);

    return ret;
}
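
A hedged usage sketch of stop_machine_run() (the callback, its purpose, and the retry policy are illustrative assumptions, not code from this page): the caller must have IRQs enabled, and -EBUSY simply means another stop_machine operation is in flight, so back off and retry rather than spin. The retry path reuses process_pending_softirqs() from Example 18 so this CPU can take part in the concurrent operation.

static int reconfigure_widget(void *data)
{
    /* Runs on the chosen CPU with IRQs disabled while every other online
     * CPU is parked in the STOPMACHINE softirq. */
    return 0;
}

static int do_reconfigure(unsigned int cpu)
{
    int ret;

    do {
        ret = stop_machine_run(reconfigure_widget, NULL, cpu);
        if ( ret == -EBUSY )
            process_pending_softirqs(); /* let a concurrent stop_machine finish */
    } while ( ret == -EBUSY );

    return ret;
}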
Example 5
void _spin_lock_irq(spinlock_t *lock)
{
    ASSERT(local_irq_is_enabled());
    local_irq_disable();
    check_lock(&lock->debug);
    while ( unlikely(!_raw_spin_trylock(&lock->raw)) )
    {
        /*
         * Re-enable IRQs while we wait, so pending interrupts can be
         * serviced; IRQs are disabled again before retrying the lock.
         */
        local_irq_enable();
        while ( likely(_raw_spin_is_locked(&lock->raw)) )
            cpu_relax();
        local_irq_disable();
    }
}
Example 6
void hvm_assert_evtchn_irq(struct vcpu *v)
{
    if ( unlikely(in_irq() || !local_irq_is_enabled()) )
    {
        tasklet_schedule(&v->arch.hvm_vcpu.assert_evtchn_irq_tasklet);
        return;
    }

    if ( is_hvm_pv_evtchn_vcpu(v) )
        vcpu_kick(v);
    else if ( v->vcpu_id == 0 )
        hvm_set_callback_irq_level(v);
}
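
For the deferred path above to complete the work, the tasklet presumably just re-invokes hvm_assert_evtchn_irq() once it runs outside IRQ context with IRQs enabled. A hedged sketch of that tasklet body (assuming it was initialised with the vcpu pointer as its data argument; not code from this page):

static void assert_evtchn_irq_tasklet(unsigned long data)
{
    /* Re-enter hvm_assert_evtchn_irq() from a safe (non-IRQ) context. */
    hvm_assert_evtchn_irq((struct vcpu *)data);
}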
Example 7
static void check_lock(struct lock_debug *debug)
{
    int irq_safe = !local_irq_is_enabled();

    if ( unlikely(atomic_read(&spin_debug) <= 0) )
        return;

    /* A few places take liberties with this. */
    /* BUG_ON(in_irq() && !irq_safe); */

    if ( unlikely(debug->irq_safe != irq_safe) )
    {
        int seen = cmpxchg(&debug->irq_safe, -1, irq_safe);
        BUG_ON(seen == !irq_safe);
    }
}
Example 8
static void run_ipi_test_tasklet(unsigned long ignore)
{
    cpumask_t mask;

    BUG_ON(!local_irq_is_enabled());

    if (!done_initialisation) {
	printk("Running initialisation; x2 apic enabled %d\n", x2apic_enabled);
	set_intr_gate(IPI_TEST_VECTOR, ipi_test_interrupt);
	test_cpu_x = 0;
	test_cpu_y = 1;
	done_initialisation = 1;
    } else {
	unsigned long time_taken = finish_time - start_time;
	printk("CPUs %d -> %d took %ld nanoseconds to perform %ld round trips; RTT %ldns\n",
	       test_cpu_x, test_cpu_y,
	       time_taken, nr_trips - INITIAL_DISCARD,
	       time_taken / (nr_trips - INITIAL_DISCARD));
	printk("%d -> %d send IPI time %ld nanoseconds (%ld each)\n",
	       test_cpu_x, test_cpu_y,
	       send_ipi_time,
	       send_ipi_time / (nr_trips - INITIAL_DISCARD));
	nr_trips = 0;
	test_cpu_y = next_cpu(test_cpu_y, cpu_online_map);
	if (test_cpu_y == test_cpu_x)
	    test_cpu_y = next_cpu(test_cpu_y, cpu_online_map);
	if (test_cpu_y == NR_CPUS) {
	    test_cpu_x = next_cpu(test_cpu_x, cpu_online_map);
	    if (test_cpu_x == NR_CPUS) {
		printk("Finished test\n");
		machine_restart(0);
	    }
	    test_cpu_y = 0;
	}
    }

    BUG_ON(test_cpu_x == test_cpu_y);

    if (test_cpu_x == smp_processor_id()) {
	local_irq_disable();
	__smp_ipi_test_interrupt();
	local_irq_enable();
    } else {
	mask = cpumask_of_cpu(test_cpu_x);
	send_IPI_mask(&mask, IPI_TEST_VECTOR);
    }
}
Example 9
void context_switch(struct vcpu *prev, struct vcpu *next)
{
    ASSERT(local_irq_is_enabled());
    ASSERT(prev != next);
    ASSERT(cpumask_empty(next->vcpu_dirty_cpumask));

    if ( prev != next )
        update_runstate_area(prev);

    local_irq_disable();

    set_current(next);

    prev = __context_switch(prev, next);

    schedule_tail(prev);
}
Example 10
void __stop_this_cpu(void)
{
    ASSERT(!local_irq_is_enabled());

    disable_local_APIC();

    hvm_cpu_down();

    /*
     * Clear FPU, zapping any pending exceptions. Needed for warm reset with
     * some BIOSes.
     */
    clts();
    asm volatile ( "fninit" );

    cpumask_clear_cpu(smp_processor_id(), &cpu_online_map);
}
Example 11
static void check_barrier(struct lock_debug *debug)
{
    if ( unlikely(atomic_read(&spin_debug) <= 0) )
        return;

    /*
     * For a barrier, we have a relaxed IRQ-safety-consistency check.
     * 
     * It is always safe to spin at the barrier with IRQs enabled -- that does
     * not prevent us from entering an IRQ-context rendezvous, and nor are
     * we preventing anyone else from doing so (since we do not actually
     * acquire the lock during a barrier operation).
     * 
     * However, if we spin on an IRQ-unsafe lock with IRQs disabled then that
     * is clearly wrong, for the same reason outlined in check_lock() above.
     */
    BUG_ON(!local_irq_is_enabled() && (debug->irq_safe == 0));
}
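
A simplified sketch of a barrier operation that would pair with check_barrier() (hedged: a minimal form, reusing the raw-lock helpers seen elsewhere on this page). It only waits for the current holder to release the lock and never acquires it, which is why spinning here with IRQs enabled is always safe:

void _spin_barrier(spinlock_t *lock)
{
    check_barrier(&lock->debug);
    /* Wait for any holder to drop the lock without ever taking it ourselves,
     * so we never block an IRQ-context rendezvous. */
    do {
        smp_mb();
    } while ( _raw_spin_is_locked(&lock->raw) );
    smp_mb();
}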
Example 12
void flush_area_mask(const cpumask_t *mask, const void *va, unsigned int flags)
{
    ASSERT(local_irq_is_enabled());

    if ( cpumask_test_cpu(smp_processor_id(), mask) )
        flush_area_local(va, flags);

    if ( !cpumask_subset(mask, cpumask_of(smp_processor_id())) )
    {
        spin_lock(&flush_lock);
        cpumask_and(&flush_cpumask, mask, &cpu_online_map);
        cpumask_clear_cpu(smp_processor_id(), &flush_cpumask);
        flush_va      = va;
        flush_flags   = flags;
        send_IPI_mask(&flush_cpumask, INVALIDATE_TLB_VECTOR);
        while ( !cpumask_empty(&flush_cpumask) )
            cpu_relax();
        spin_unlock(&flush_lock);
    }
}
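
flush_area_mask() spins until flush_cpumask is empty, so the receiving side of INVALIDATE_TLB_VECTOR presumably performs the requested flush and then clears its own bit to release the sender. A hedged sketch of that handler (name and details are assumptions, not code from this page):

static void invalidate_interrupt(void)
{
    ack_APIC_irq();
    /* Perform the flush the sender asked for... */
    flush_area_local(flush_va, flush_flags);
    /* ...then signal completion; the sender still holds flush_lock and is
     * spinning until flush_cpumask becomes empty. */
    cpumask_clear_cpu(smp_processor_id(), &flush_cpumask);
}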
Example 13
static void __vmx_clear_vmcs(void *info)
{
    struct vcpu *v = info;
    struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;

    /* Otherwise we can nest (vmx_cpu_down() vs. vmx_clear_vmcs()). */
    ASSERT(!local_irq_is_enabled());

    if ( arch_vmx->active_cpu == smp_processor_id() )
    {
        __vmpclear(virt_to_maddr(arch_vmx->vmcs));

        arch_vmx->active_cpu = -1;
        arch_vmx->launched   = 0;

        list_del(&arch_vmx->active_list);

        if ( arch_vmx->vmcs == this_cpu(current_vmcs) )
            this_cpu(current_vmcs) = NULL;
    }
}
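
The assertion fits the way __vmx_clear_vmcs() is presumably invoked: as a cross-call callback on the CPU that still owns the VMCS, where the IPI handler runs with IRQs disabled. A hedged sketch of such a caller (assuming the generic on_selected_cpus() cross-call helper; not code from this page):

static void vmx_clear_vmcs(struct vcpu *v)
{
    int cpu = v->arch.hvm_vmx.active_cpu;

    /* Run __vmx_clear_vmcs() on the owning CPU and wait for completion. */
    if ( cpu != -1 )
        on_selected_cpus(cpumask_of(cpu), __vmx_clear_vmcs, v, 1);
}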
Example 14
/*
 * Replace instructions with better alternatives for this CPU type.
 * This runs before SMP is initialized to avoid SMP problems with
 * self modifying code. This implies that asymmetric systems where
 * APs have less capabilities than the boot processor are not handled.
 * Tough. Make sure you disable such features by hand.
 */
static void __init apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
    struct alt_instr *a;
    u8 *instr, *replacement;
    u8 insnbuf[MAX_PATCH_LEN];

    ASSERT(!local_irq_is_enabled());

    printk(KERN_INFO "alt table %p -> %p\n", start, end);

    /*
     * The scan order should be from start to end. A later scanned
     * alternative code can overwrite a previous scanned alternative code.
     * Some kernel functions (e.g. memcpy, memset, etc) use this order to
     * patch code.
     *
     * So be careful if you want to change the scan order to any other
     * order.
     */
    for ( a = start; a < end; a++ )
    {
        instr = (u8 *)&a->instr_offset + a->instr_offset;
        replacement = (u8 *)&a->repl_offset + a->repl_offset;
        BUG_ON(a->replacementlen > a->instrlen);
        BUG_ON(a->instrlen > sizeof(insnbuf));
        BUG_ON(a->cpuid >= NCAPINTS * 32);
        if ( !boot_cpu_has(a->cpuid) )
            continue;

        memcpy(insnbuf, replacement, a->replacementlen);

        /* 0xe8/0xe9 are relative branches; fix the offset. */
        if ( (*insnbuf & 0xfe) == 0xe8 && a->replacementlen == 5 )
            *(s32 *)(insnbuf + 1) += replacement - instr;

        add_nops(insnbuf + a->replacementlen,
                 a->instrlen - a->replacementlen);
        text_poke_early(instr, insnbuf, a->instrlen);
    }
}
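
The relative-branch fixup is worth a worked step. A 5-byte 0xe8/0xe9 instruction encodes its target as an offset from the address of the next instruction, so a rel32 that was correct at the replacement site must be shifted by (replacement - instr) once the bytes are copied to the original site, which is exactly what the in-place += above does. A small illustrative helper (hedged: the name is not from the code above):

/* target = insn_address + 5 + rel32 for a 5-byte 0xe8/0xe9 branch.
 * Moving the instruction from 'replacement' to 'instr' changes insn_address,
 * so rel32 must grow by (replacement - instr) to keep the same target. */
static inline s32 refit_rel32(s32 rel32, const u8 *replacement, const u8 *instr)
{
    return rel32 + (s32)(replacement - instr);
}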
Example 15
void _spin_lock_irq(spinlock_t *lock)
{
    ASSERT(local_irq_is_enabled());
    local_irq_disable();
    _spin_lock(lock);
}
Example 16
void ASSERT_NOT_IN_ATOMIC(void)
{
    ASSERT(!preempt_count());
    ASSERT(!in_irq());
    ASSERT(local_irq_is_enabled());
}
Example 17
bool_t in_atomic(void)
{
    return preempt_count() || in_irq() || !local_irq_is_enabled();
}
Example 18
void process_pending_softirqs(void)
{
    ASSERT(!in_irq() && local_irq_is_enabled());
    /* Do not enter scheduler as it can preempt the calling context. */
    __do_softirq(1ul<<SCHEDULE_SOFTIRQ);
}