Code Example #1
static inline void __lock_kernel(void)
{
	preempt_disable();
	if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
		/*
		 * If preemption was disabled even before this
		 * was called, there's nothing we can be polite
		 * about - just spin.
		 */
		if (preempt_count() > 1) {
			_raw_spin_lock(&kernel_flag);
			return;
		}

		/*
		 * Otherwise, let's wait for the kernel lock
		 * with preemption enabled..
		 */
		do {
			preempt_enable();
			while (spin_is_locked(&kernel_flag))
				cpu_relax();
			preempt_disable();
		} while (!_raw_spin_trylock(&kernel_flag));
	}
}
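Note: __lock_kernel() is the contended slow path of the big kernel lock (BKL). Callers never invoke it directly; they go through lock_kernel()/unlock_kernel(), which tracks a per-task lock depth and only reaches this helper on the outermost acquisition. A minimal, hypothetical caller:

	lock_kernel();
	/* ... legacy code still serialized by the BKL ... */
	unlock_kernel();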
Code Example #2
File: spinlock.c  Project: a2k2/xen-unstable
void _spin_lock(spinlock_t *lock)
{
    check_lock(&lock->debug);
    while ( unlikely(!_raw_spin_trylock(&lock->raw)) )
        while ( likely(_raw_spin_is_locked(&lock->raw)) )
            cpu_relax();
}
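Note: the trylock-then-spin-on-read shape above is the classic test-and-test-and-set pattern: a failed atomic read-modify-write is expensive on the memory bus, so the waiter spins on a plain read and only retries the atomic attempt once the lock looks free. A portable C11 sketch of the same idea (illustrative only, not Xen code; all names are made up):

#include <stdatomic.h>
#include <stdbool.h>

typedef struct { atomic_bool locked; } ttas_lock_t;

static void ttas_lock(ttas_lock_t *l)
{
    for ( ;; )
    {
        /* the "trylock": a single atomic exchange */
        if ( !atomic_exchange_explicit(&l->locked, true,
                                       memory_order_acquire) )
            return;
        /* the "spin": read-only waiting, cheap on the bus */
        while ( atomic_load_explicit(&l->locked, memory_order_relaxed) )
            ; /* a real lock would call cpu_relax() here */
    }
}

static void ttas_unlock(ttas_lock_t *l)
{
    atomic_store_explicit(&l->locked, false, memory_order_release);
}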
Code Example #3
File: spinlock.c  Project: QiuLihua83/linux-2.6.10
int __lockfunc _spin_trylock(spinlock_t *lock)
{
	preempt_disable();
	if (_raw_spin_trylock(lock))
		return 1;
	
	preempt_enable();
	return 0;
}
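Note: a trylock never spins. It either takes the lock and returns 1 with preemption left disabled, or rebalances the preemption count and returns 0. The usual caller shape, via the spin_trylock() wrapper (my_lock is hypothetical):

	if (spin_trylock(&my_lock)) {
		/* lock held, preemption disabled */
		spin_unlock(&my_lock);
	} else {
		/* contended: defer the work or fall back */
	}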
Code Example #4
/*
 * Acquire/release the underlying lock from the scheduler.
 *
 * This is called with preemption disabled, and should
 * return an error value if it cannot get the lock and
 * TIF_NEED_RESCHED gets set.
 *
 * If it successfully gets the lock, it should increment
 * the preemption count like any spinlock does.
 *
 * (This works on UP too - _raw_spin_trylock will never
 * return false in that case)
 */
int __lockfunc __reacquire_kernel_lock(void)
{
	while (!_raw_spin_trylock(&kernel_flag)) {
		if (need_resched())
			return -EAGAIN;
		cpu_relax();
	}
	preempt_disable();
	return 0;
}
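Note: the comment spells out the contract with the scheduler: schedule() drops the BKL across a context switch, re-takes it here afterwards, and gives up early if the lock is contended while a reschedule is pending. The call site in kernel/sched.c is roughly the following paraphrase (not verbatim):

	/* near the end of schedule(), after switching back to this task */
	if (unlikely(reacquire_kernel_lock(current) < 0))
		goto need_resched; /* contended and TIF_NEED_RESCHED set */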
Code Example #5
/*
 * Acquire/release the underlying lock from the scheduler.
 *
 * This is called with preemption disabled, and should
 * return an error value if it cannot get the lock and
 * TIF_NEED_RESCHED gets set.
 *
 * If it successfully gets the lock, it should increment
 * the preemption count like any spinlock does.
 *
 * (This works on UP too - _raw_spin_trylock will never
 * return false in that case)
 */
int __lockfunc __reacquire_kernel_lock(void)
{
	while (!_raw_spin_trylock(&kernel_flag)) {
		if (test_thread_flag(TIF_NEED_RESCHED))
			return -EAGAIN;
		cpu_relax();
	}
	preempt_disable();
	return 0;
}
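Note: this is the same function as in example #4 with the flag test open-coded; in kernels of this era need_resched() is essentially the following inline (reconstructed, not verbatim):

	static inline int need_resched(void)
	{
		return unlikely(test_thread_flag(TIF_NEED_RESCHED));
	}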
Code Example #6
int __lockfunc _spin_trylock(spinlock_t *lock)
{
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	
	preempt_enable();
	return 0;
}
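Note: this variant adds lockdep instrumentation; on success, spin_acquire() records the acquisition (the 1 marks it as a trylock) before returning. The release side must inform lockdep as well. Reconstructed from the same kernel era (not verbatim), the matching unlock looks roughly like:

	void __lockfunc _spin_unlock(spinlock_t *lock)
	{
		spin_release(&lock->dep_map, 1, _RET_IP_);
		_raw_spin_unlock(lock);
		preempt_enable();
	}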
Code Example #7
File: spinlock.c  Project: OpenHMR/Open-HMR600
int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (_raw_spin_trylock(lock))
		return 1;

	preempt_enable_no_resched();
	local_bh_enable();
	return 0;
}
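Note: on success this returns with both bottom halves and preemption disabled, so the caller must release with the _bh unlock to rebalance both counts (my_lock is hypothetical):

	if (spin_trylock_bh(&my_lock)) {
		/* lock held; softirqs and preemption disabled */
		spin_unlock_bh(&my_lock);
	}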
Code Example #8
int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
	return 0;
}
Code Example #9
File: spinlock.c  Project: a2k2/xen-unstable
void _spin_lock_irq(spinlock_t *lock)
{
    ASSERT(local_irq_is_enabled());
    local_irq_disable();
    check_lock(&lock->debug);
    while ( unlikely(!_raw_spin_trylock(&lock->raw)) )
    {
        local_irq_enable();
        while ( likely(_raw_spin_is_locked(&lock->raw)) )
            cpu_relax();
        local_irq_disable();
    }
}
Code Example #10
File: spinlock.c  Project: a2k2/xen-unstable
unsigned long _spin_lock_irqsave(spinlock_t *lock)
{
    unsigned long flags;
    local_irq_save(flags);
    check_lock(&lock->debug);
    while ( unlikely(!_raw_spin_trylock(&lock->raw)) )
    {
        local_irq_restore(flags);
        while ( likely(_raw_spin_is_locked(&lock->raw)) )
            cpu_relax();
        local_irq_save(flags);
    }
    return flags;
}
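Note: the returned flags word captures the interrupt state from before the acquisition and must be handed back on release so that state is restored exactly, whether interrupts were on or off. A minimal Xen-style caller, assuming the usual _spin_unlock_irqrestore() counterpart:

    unsigned long flags;

    flags = _spin_lock_irqsave(&lock);
    /* ... critical section, IRQs off on this CPU ... */
    _spin_unlock_irqrestore(&lock, flags);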
Code Example #11
File: spinlock.c  Project: QiuLihua83/linux-2.6.10
/*
 * This could be a long-held lock.  If another CPU holds it for a long time,
 * and that CPU is not asked to reschedule then *this* CPU will spin on the
 * lock for a long time, even if *this* CPU is asked to reschedule.
 *
 * So what we do here, in the slow (contended) path is to spin on the lock by
 * hand while permitting preemption.
 *
 * Called inside preempt_disable().
 */
static inline void __preempt_spin_lock(spinlock_t *lock)
{
	if (preempt_count() > 1) {
		_raw_spin_lock(lock);
		return;
	}

	do {
		preempt_enable();
		while (spin_is_locked(lock))
			cpu_relax();
		preempt_disable();
	} while (!_raw_spin_trylock(lock));
}
Code Example #12
File: spinlock.c  Project: a2k2/xen-unstable
int _spin_trylock(spinlock_t *lock)
{
    check_lock(&lock->debug);
    return _raw_spin_trylock(&lock->raw);
}
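Note: unlike the Linux variants above, Xen does not manage a preemption count here, so its trylock reduces to the debug check plus a single raw attempt. The caller simply tests the result:

    if ( _spin_trylock(&lock) )
    {
        /* ... critical section ... */
        _spin_unlock(&lock);
    }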
Code Example #13
File: spinlock.c  Project: QiuLihua83/linux-2.6.10
void __lockfunc _spin_lock(spinlock_t *lock)
{
	preempt_disable();
	if (unlikely(!_raw_spin_trylock(lock)))
		__preempt_spin_lock(lock);
}
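Note: for completeness, the matching release in this kernel drops the raw lock and then re-enables preemption, balancing the preempt_disable() taken on the fast path above (reconstructed from the 2.6.10 sources, not verbatim):

	void __lockfunc _spin_unlock(spinlock_t *lock)
	{
		_raw_spin_unlock(lock);
		preempt_enable();
	}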