Example No. 1
int
_pthread_spin_lock(pthread_spinlock_t *lock)
{
	struct pthread_spinlock	*lck;
	struct pthread *self = _pthread_self();
	int count, oldval, ret;

	if (lock == NULL || (lck = *lock) == NULL)
		ret = EINVAL;
	else if (lck->s_owner == self)
		ret = EDEADLK;
	else {
		do {
			count = SPIN_COUNT;
			while (lck->s_lock) {
#ifdef __i386__
				/* tell cpu we are spinning */
				__asm __volatile("pause");
#endif
				if (--count <= 0) {
					count = SPIN_COUNT;
					_pthread_yield();
				}
			}
			atomic_swap_int(&(lck)->s_lock, 1, &oldval);
		} while (oldval);

		lck->s_owner = self;
		ret = 0;
	}

	return (ret);
}
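The pthread and lock examples in this listing (Examples 1, 3, 6, 7 and 8) use a three-argument form of atomic_swap_int(): the new value is stored into the lock word and the previous contents are returned through the third pointer argument. A minimal sketch of that semantic, assuming GCC/Clang __atomic builtins rather than the thread library's actual implementation (the function name here is illustrative):

/*
 * Hypothetical stand-in for the three-argument atomic_swap_int():
 * atomically store 'value' into '*addr' and report the previous
 * contents through '*oldvalp'.
 */
static inline void
example_atomic_swap_int(volatile int *addr, int value, int *oldvalp)
{
	/* __atomic_exchange_n returns the value previously stored. */
	*oldvalp = __atomic_exchange_n(addr, value, __ATOMIC_SEQ_CST);
}

Read that way, the do/while loop above keeps spinning (yielding every SPIN_COUNT iterations) until the swap reports an old value of 0, i.e. until this thread is the one that flipped s_lock from 0 to 1.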
Example No. 2
/*
 * IPIs are 'fast' interrupts, so we deal with them directly from our
 * signal handler.
 *
 * WARNING: Signals are not physically disabled here so we have to enter
 * our critical section before bumping gd_intr_nesting_level or another
 * interrupt can come along and get really confused.
 */
static
void
ipisig(int nada, siginfo_t *info, void *ctxp)
{
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;

	if (td->td_critcount == 0) {
		++td->td_critcount;
		++gd->gd_intr_nesting_level;
		atomic_swap_int(&gd->gd_npoll, 0);
		lwkt_process_ipiq();
		--gd->gd_intr_nesting_level;
		--td->td_critcount;
	} else {
		need_ipiq();
	}
}
Example No. 3
int
_pthread_spin_unlock(pthread_spinlock_t *lock)
{
	struct pthread_spinlock	*lck;
	int ret;

	if (lock == NULL || (lck = *lock) == NULL)
		ret = EINVAL;
	else {
		if (lck->s_owner != _pthread_self())
			ret = EPERM;
		else {
			lck->s_owner = NULL;
			atomic_swap_int(&lck->s_lock, 0, &ret);
			ret = 0;
		}
	}

	return (ret);
}
Example No. 4
/*
 * IPIs are 'fast' interrupts, so we deal with them directly from our
 * signal handler.
 *
 * WARNING: Signals are not physically disabled here so we have to enter
 * our critical section before bumping gd_intr_nesting_level or another
 * interrupt can come along and get really confused.
 */
static
void
ipisig(int nada, siginfo_t *info, void *ctxp)
{
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;
	int save;

	save = errno;
	if (td->td_critcount == 0) {
		crit_enter_raw(td);
		++gd->gd_cnt.v_ipi;
		++gd->gd_intr_nesting_level;
		atomic_swap_int(&gd->gd_npoll, 0);
		lwkt_process_ipiq();
		--gd->gd_intr_nesting_level;
		crit_exit_raw(td);
	} else {
		need_ipiq();
	}
	errno = save;
}
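Compared with Example 2, this version additionally saves and restores errno around the IPI processing: the work runs inside a signal handler and must not leak a modified errno into the interrupted code. The same discipline in a generic, purely illustrative handler:

#include <errno.h>
#include <signal.h>

/* Hypothetical handler showing the errno save/restore discipline. */
static void
example_handler(int sig, siginfo_t *info, void *ctxp)
{
	int save = errno;

	(void)sig; (void)info; (void)ctxp;	/* unused in this sketch */

	/* ... handler work that may clobber errno ... */

	errno = save;	/* the interrupted code sees errno unchanged */
}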
Example No. 5
static int
acpi_cst_set_lowest_oncpu(struct acpi_cst_softc *sc, int val)
{
    int old_lowest, error = 0, old_lowest_req;
    uint32_t old_type, type;

    KKASSERT(mycpuid == sc->cst_cpuid);

    old_lowest_req = sc->cst_cx_lowest_req;
    sc->cst_cx_lowest_req = val;

    if (val > sc->cst_cx_count - 1)
	val = sc->cst_cx_count - 1;
    old_lowest = atomic_swap_int(&sc->cst_cx_lowest, val);

    old_type = sc->cst_cx_states[old_lowest].type;
    type = sc->cst_cx_states[val].type;
    if (old_type >= ACPI_STATE_C3 && type < ACPI_STATE_C3) {
	cputimer_intr_powersave_remreq();
    } else if (type >= ACPI_STATE_C3 && old_type < ACPI_STATE_C3) {
	error = cputimer_intr_powersave_addreq();
	if (error) {
	    /* Restore */
	    sc->cst_cx_lowest_req = old_lowest_req;
	    sc->cst_cx_lowest = old_lowest;
	}
    }

    if (error)
	return error;

    /* Cache the new lowest non-C3 state. */
    acpi_cst_non_c3(sc);

    /* Reset the statistics counters. */
    bzero(sc->cst_cx_stats, sizeof(sc->cst_cx_stats));
    return (0);
}
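Note that the kernel-side examples (this one and Example 9) use a different calling convention: atomic_swap_int() takes two arguments and returns the previous value, so the assignment to old_lowest above publishes the new lowest C-state and captures the old one in a single atomic step. A hypothetical equivalent using compiler builtins:

/*
 * Hypothetical equivalent of the two-argument, value-returning
 * atomic_swap_int() used in the kernel examples.
 */
static inline int
example_swap_int_ret(volatile int *addr, int value)
{
	return (__atomic_exchange_n(addr, value, __ATOMIC_SEQ_CST));
}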
Example No. 6
int
_pthread_spin_trylock(pthread_spinlock_t *lock)
{
	struct pthread_spinlock	*lck;
	struct pthread *self = _pthread_self();
	int oldval, ret;

	if (lock == NULL || (lck = *lock) == NULL)
		ret = EINVAL;
	else if (lck->s_owner == self)
		ret = EDEADLK;
	else if (lck->s_lock != 0)
		ret = EBUSY;
	else {
		atomic_swap_int(&(lck)->s_lock, 1, &oldval);
		if (oldval)
			ret = EBUSY;
		else {
			lck->s_owner = _pthread_self();
			ret = 0;
		}
	}
	return (ret);
}
Example No. 7
/*
 * Release a lock.
 */
void
_lock_release(struct lock *lck, struct lockuser *lu)
{
	struct lockuser *lu_tmp, *lu_h;
	struct lockreq *myreq;
	int prio_h;
	int lval;

	/**
	 * XXX - We probably want to remove these checks to optimize
	 *       performance.  It is also a bug if any one of the
	 *       checks fails, so it's probably better to just let it
	 *       SEGV and fix it.
	 */
#if 0
	if ((lck == NULL) || (lu == NULL))
		return;
#endif
	if ((lck->l_type & LCK_PRIORITY) != 0) {
		prio_h = 0;
		lu_h = NULL;

		/* Update tail if our request is last. */
		if (lu->lu_watchreq->lr_owner == NULL) {
			atomic_store_rel_ptr((volatile uintptr_t *)
			    (void *)&lck->l_tail,
			    (uintptr_t)lu->lu_myreq);
			atomic_store_rel_ptr((volatile uintptr_t *)
			    (void *)&lu->lu_myreq->lr_owner,
			    (uintptr_t)NULL);
		} else {
			/* Remove ourselves from the list. */
			atomic_store_rel_ptr((volatile uintptr_t *)
			    (void *)&lu->lu_myreq->lr_owner,
			    (uintptr_t)lu->lu_watchreq->lr_owner);
			atomic_store_rel_ptr((volatile uintptr_t *)
			    (void *)&lu->lu_watchreq->lr_owner->lu_myreq,
			    (uintptr_t)lu->lu_myreq);
		}
		/*
		 * The watch request now becomes our own because we've
		 * traded away our previous request.  Save our previous
		 * request so that we can grant the lock.
		 */
		myreq = lu->lu_myreq;
		lu->lu_myreq = lu->lu_watchreq;
		lu->lu_watchreq = NULL;
		lu->lu_myreq->lr_locked = 1;
		lu->lu_myreq->lr_owner = lu;
		lu->lu_myreq->lr_watcher = NULL;
		/*
		 * Traverse the list of lock requests in reverse order
		 * looking for the user with the highest priority.
		 */
		for (lu_tmp = lck->l_tail->lr_watcher; lu_tmp != NULL;
		     lu_tmp = lu_tmp->lu_myreq->lr_watcher) {
			if (lu_tmp->lu_priority > prio_h) {
				lu_h = lu_tmp;
				prio_h = lu_tmp->lu_priority;
			}
		}
		if (lu_h != NULL) {
			/* Give the lock to the highest priority user. */
			if (lck->l_wakeup != NULL) {
				atomic_swap_int(
				    &lu_h->lu_watchreq->lr_locked,
				    0, &lval);
				if (lval == 2)
					/* Notify the sleeper */
					lck->l_wakeup(lck,
					    lu_h->lu_myreq->lr_watcher);
			}
			else
				atomic_store_rel_int(
				    &lu_h->lu_watchreq->lr_locked, 0);
		} else {
			if (lck->l_wakeup != NULL) {
				atomic_swap_int(&myreq->lr_locked,
				    0, &lval);
				if (lval == 2)
					/* Notify the sleeper */
					lck->l_wakeup(lck, myreq->lr_watcher);
			}
			else
				/* Give the lock to the previous request. */
				atomic_store_rel_int(&myreq->lr_locked, 0);
		}
	} else {
		/*
		 * The watch request now becomes our own because we've
		 * traded away our previous request.  Save our previous
		 * request so that we can grant the lock.
		 */
		myreq = lu->lu_myreq;
		lu->lu_myreq = lu->lu_watchreq;
		lu->lu_watchreq = NULL;
		lu->lu_myreq->lr_locked = 1;
		if (lck->l_wakeup) {
			atomic_swap_int(&myreq->lr_locked, 0, &lval);
			if (lval == 2)
				/* Notify the sleeper */
				lck->l_wakeup(lck, myreq->lr_watcher);
		}
		else
			/* Give the lock to the previous request. */
			atomic_store_rel_int(&myreq->lr_locked, 0);
	}
	lu->lu_myreq->lr_active = 0;
}
Example No. 8
/*
 * Acquire a lock waiting (spin or sleep) for it to become available.
 */
void
_lock_acquire(struct lock *lck, struct lockuser *lu, int prio)
{
	int i;
	int lval;

	/**
	 * XXX - We probably want to remove these checks to optimize
	 *       performance.  It is also a bug if any one of the
	 *       checks fails, so it's probably better to just let it
	 *       SEGV and fix it.
	 */
#if 0
	if (lck == NULL || lu == NULL || lck->l_head == NULL)
		return;
#endif
	if ((lck->l_type & LCK_PRIORITY) != 0) {
		LCK_ASSERT(lu->lu_myreq->lr_locked == 1);
		LCK_ASSERT(lu->lu_myreq->lr_watcher == NULL);
		LCK_ASSERT(lu->lu_myreq->lr_owner == lu);
		LCK_ASSERT(lu->lu_watchreq == NULL);

		lu->lu_priority = prio;
	}
	/*
	 * Atomically swap the head of the lock request with
	 * this request.
	 */
	atomic_swap_ptr((void *)&lck->l_head, lu->lu_myreq,
	    (void *)&lu->lu_watchreq);

	if (lu->lu_watchreq->lr_locked != 0) {
		atomic_store_rel_ptr
		    ((volatile uintptr_t *)(void *)&lu->lu_watchreq->lr_watcher,
		    (uintptr_t)lu);
		if ((lck->l_wait == NULL) ||
		    ((lck->l_type & LCK_ADAPTIVE) == 0)) {
			while (lu->lu_watchreq->lr_locked != 0)
				;	/* spin, then yield? */
		} else {
			/*
			 * Spin for a bit before invoking the wait function.
			 *
			 * We should be a little smarter here.  If we're
			 * running on a single processor, then the lock
			 * owner got preempted and spinning will accomplish
			 * nothing but waste time.  If we're running on
			 * multiple processors, the owner could be running
			 * on another CPU and we might acquire the lock if
			 * we spin for a bit.
			 *
			 * The other thing to keep in mind is that threads
			 * acquiring these locks are considered to be in
			 * critical regions; they will not be preempted by
			 * the _UTS_ until they release the lock.  It is
			 * therefore safe to assume that if a lock can't
			 * be acquired, it is currently held by a thread
			 * running in another KSE.
			 */
			for (i = 0; i < MAX_SPINS; i++) {
				if (lu->lu_watchreq->lr_locked == 0)
					return;
				if (lu->lu_watchreq->lr_active == 0)
					break;
			}
			atomic_swap_int(&lu->lu_watchreq->lr_locked,
			    2, &lval);
			if (lval == 0)
				lu->lu_watchreq->lr_locked = 0;
			else
				lck->l_wait(lck, lu);

		}
	}
	lu->lu_myreq->lr_active = 1;
}
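Examples 7 and 8 cooperate through a small handshake on lr_locked: the acquiring side swaps in 2 to announce that it is about to sleep, and the releasing side swaps in 0 and, if it finds 2, calls the l_wakeup hook. A condensed, hypothetical sketch of just that handshake (struct and function names are illustrative, not the library's):

/* Hypothetical, condensed model of the lr_locked handshake. */
struct example_req {
	volatile int	locked;	/* 0 = granted, 1 = held, 2 = held + sleeper */
};

/* Waiter side (cf. _lock_acquire): announce the intent to sleep. */
static int
example_waiter_should_sleep(struct example_req *r)
{
	int old;

	old = __atomic_exchange_n(&r->locked, 2, __ATOMIC_SEQ_CST);
	if (old == 0) {
		/* Released while we spun: put the 0 back, lock is ours. */
		r->locked = 0;
		return (0);
	}
	return (1);			/* caller should block in l_wait() */
}

/* Releaser side (cf. _lock_release): hand the lock off, detect a sleeper. */
static int
example_releaser_must_wakeup(struct example_req *r)
{
	int old;

	old = __atomic_exchange_n(&r->locked, 0, __ATOMIC_SEQ_CST);
	return (old == 2);		/* non-zero => caller must call l_wakeup() */
}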
Example No. 9
static int
acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc, int val)
{
    int i, old_lowest, error = 0;
    uint32_t old_type, type;

    get_mplock();

    old_lowest = atomic_swap_int(&sc->cpu_cx_lowest, val);

    old_type = sc->cpu_cx_states[old_lowest].type;
    type = sc->cpu_cx_states[val].type;
    if (old_type == ACPI_STATE_C3 && type != ACPI_STATE_C3) {
	KKASSERT(cpu_c3_ncpus > 0);
	if (atomic_fetchadd_int(&cpu_c3_ncpus, -1) == 1) {
	    /*
	     * All of the CPUs have exited C3 state; use a
	     * better one-shot timer.
	     */
	    error = cputimer_intr_select_caps(CPUTIMER_INTR_CAP_NONE);
	    KKASSERT(!error);
	    cputimer_intr_restart();
    	}
    } else if (type == ACPI_STATE_C3 && old_type != ACPI_STATE_C3) {
	if (atomic_fetchadd_int(&cpu_c3_ncpus, 1) == 0) {
	    /*
	     * When the first CPU enters C3 state, switch
	     * to a one-shot timer which can handle C3
	     * state, i.e. the timer will not hang.
	     */
	    error = cputimer_intr_select_caps(CPUTIMER_INTR_CAP_PS);
	    if (!error) {
		cputimer_intr_restart();
	    } else {
		kprintf("no suitable intr cputimer found\n");

		/* Restore */
		sc->cpu_cx_lowest = old_lowest;
		atomic_fetchadd_int(&cpu_c3_ncpus, -1);
	    }
	}
    }

    rel_mplock();

    if (error)
	return error;

    /* If not disabling, cache the new lowest non-C3 state. */
    sc->cpu_non_c3 = 0;
    for (i = sc->cpu_cx_lowest; i >= 0; i--) {
	if (sc->cpu_cx_states[i].type < ACPI_STATE_C3) {
	    sc->cpu_non_c3 = i;
	    break;
	}
    }

    /* Reset the statistics counters. */
    bzero(sc->cpu_cx_stats, sizeof(sc->cpu_cx_stats));
    return (0);
}
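Example 9 also relies on atomic_fetchadd_int() returning the counter's value from before the add, which turns cpu_c3_ncpus into a "first in / last out" gate: a return of 0 on the +1 means this CPU is the first to allow C3, and a return of 1 on the -1 means it was the last one out. A stripped-down sketch of that pattern, again with compiler builtins and illustrative names:

/* Hypothetical "first in / last out" gate built on fetch-and-add. */
static volatile int example_users;

static int
example_first_in(void)
{
	/* Old value 0 means we are the first user. */
	return (__atomic_fetch_add(&example_users, 1, __ATOMIC_SEQ_CST) == 0);
}

static int
example_last_out(void)
{
	/* Old value 1 means we were the last user. */
	return (__atomic_fetch_add(&example_users, -1, __ATOMIC_SEQ_CST) == 1);
}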