Example #1
#if LOCK_DEBUG > 0
void
_thread_lock(struct thread *td, int opts, const char *file, int line)
#else
void
_thread_lock(struct thread *td)
#endif
{
	struct mtx *m;
	uintptr_t tid, v;

	tid = (uintptr_t)curthread;

	spinlock_enter();
	m = td->td_lock;
	thread_lock_validate(m, 0, file, line);
	v = MTX_READ_VALUE(m);
	if (__predict_true(v == MTX_UNOWNED)) {
		if (__predict_false(!_mtx_obtain_lock(m, tid)))
			goto slowpath_unlocked;
	} else if (v == tid) {
		m->mtx_recurse++;
	} else
		goto slowpath_unlocked;
	if (__predict_true(m == td->td_lock)) {
		WITNESS_LOCK(&m->lock_object, LOP_EXCLUSIVE, file, line);
		return;
	}
	if (m->mtx_recurse != 0)
		m->mtx_recurse--;
	else
		_mtx_release_lock_quick(m);
slowpath_unlocked:
	spinlock_exit();
	thread_lock_flags_(td, 0, 0, 0);
}
/*
 * uvm_emap_switch: if the CPU is 'behind' the LWP in emap visibility,
 * perform a TLB flush and thus update the local view.  Its main purpose
 * is to handle kernel preemption while emap is in use.
 *
 * => called from mi_switch(), when LWP returns after block or preempt.
 */
void
uvm_emap_switch(lwp_t *l)
{
	struct uvm_cpu *ucpu;
	u_int curgen, gen;

	KASSERT(kpreempt_disabled());

	/* If LWP did not use emap, then nothing to do. */
	if (__predict_true(l->l_emap_gen == UVM_EMAP_INACTIVE)) {
		return;
	}

	/*
	 * No need to synchronise if generation number of current CPU is
	 * newer than the number of this LWP.
	 *
	 * This test assumes two's complement arithmetic and allows
	 * ~2B missed updates before it will produce bad results.
	 */
	ucpu = curcpu()->ci_data.cpu_uvm;
	curgen = ucpu->emap_gen;
	gen = l->l_emap_gen;
	if (__predict_true((signed int)(curgen - gen) >= 0)) {
		return;
	}

	/*
	 * See comments in uvm_emap_consume() about memory
	 * barriers and race conditions.
	 */
	curgen = uvm_emap_gen_return();
	pmap_emap_sync(false);
	ucpu->emap_gen = curgen;
}
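The wraparound comment in uvm_emap_switch() above is worth spelling out: subtracting two unsigned generation numbers and reinterpreting the difference as a signed value gives the correct ordering as long as the counters are fewer than about 2^31 apart. A minimal standalone sketch of that comparison idiom (the helper name is illustrative, not part of the NetBSD API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True if 'cur' is at least as new as 'seen', tolerating 32-bit wraparound. */
static bool
gen_is_current(uint32_t cur, uint32_t seen)
{
	return (int32_t)(cur - seen) >= 0;
}

int
main(void)
{
	printf("%d\n", gen_is_current(10, 7));			/* 1: already caught up */
	printf("%d\n", gen_is_current(3, UINT32_MAX - 1));	/* 1: counter wrapped */
	printf("%d\n", gen_is_current(7, 10));			/* 0: stale, must sync */
	return 0;
}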
Example #3
void
swi_handler(trapframe_t *frame)
{
	struct thread *td = curthread;

	td->td_frame = frame;
	
	td->td_pticks = 0;
	/*
	 * Make sure the program counter is correctly aligned so we
	 * don't take an alignment fault trying to read the opcode.
	 */
	if (__predict_false(((frame->tf_pc - INSN_SIZE) & 3) != 0)) {
		call_trapsignal(td, SIGILL, 0);
		userret(td, frame);
		return;
	}
	/*
	 * Enable interrupts if they were enabled before the exception.
	 * Since all syscalls *should* come from user mode it will always
	 * be safe to enable them, but check anyway.
	 */
	if (td->td_md.md_spinlock_count == 0) {
		if (__predict_true((frame->tf_spsr & I32_bit) == 0))
			enable_interrupts(I32_bit);
		if (__predict_true((frame->tf_spsr & F32_bit) == 0))
			enable_interrupts(F32_bit);
	}

	syscall(td, frame);
}
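A note on the interrupt-enable tests above: on the BSDs, __predict_true() and __predict_false() are thin wrappers around GCC's __builtin_expect(), so the hint covers only the expression inside the macro's parentheses. The following standalone sketch shows the usual definitions and contrasts the two possible spellings of such a test; I32_BIT is an illustrative stand-in for the saved status-register bit:

#include <stdio.h>

/* As defined (modulo compiler-version guards) in the BSD <sys/cdefs.h>. */
#define __predict_true(exp)	__builtin_expect((exp), 1)
#define __predict_false(exp)	__builtin_expect((exp), 0)

#define I32_BIT	0x80	/* illustrative stand-in for the PSR I bit */

int
main(void)
{
	unsigned int spsr = 0;	/* interrupts were enabled before the trap */

	/* Hint covers the whole condition: "the bit is expected to be clear". */
	if (__predict_true((spsr & I32_BIT) == 0))
		printf("enable IRQs (hint matches the common case)\n");

	/*
	 * Hint covers only the bit test: __builtin_expect() is told the AND
	 * result is expected to be non-zero, and the == 0 comparison runs
	 * afterwards.  The value is the same, but the hint now describes the
	 * opposite of the common case.
	 */
	if (__predict_true(spsr & I32_BIT) == 0)
		printf("enable IRQs (hint points the wrong way)\n");

	return 0;
}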
void
pcu_discard_all(lwp_t *l)
{
	const uint32_t pcu_inuse = l->l_pcu_used[PCU_USER];

	KASSERT(l == curlwp || ((l->l_flag & LW_SYSTEM) && pcu_inuse == 0));
	KASSERT(l->l_pcu_used[PCU_KERNEL] == 0);

	if (__predict_true(pcu_inuse == 0)) {
		/* PCUs are not in use. */
		return;
	}
	const int s = splsoftclock();
	for (u_int id = 0; id < PCU_UNIT_COUNT; id++) {
		if ((pcu_inuse & (1 << id)) == 0) {
			continue;
		}
		if (__predict_true(l->l_pcu_cpu[id] == NULL)) {
			continue;
		}
		const pcu_ops_t * const pcu = pcu_ops_md_defs[id];
		/*
		 * Discard the state: this LWP is giving up the PCU,
		 * so release it without saving.
		 */
		pcu_lwp_op(pcu, l, PCU_RELEASE);
	}
	l->l_pcu_used[PCU_USER] = 0;
	splx(s);
}
__LIBC_HIDDEN__
int pthread_mutex_unlock_impl(pthread_mutex_t *mutex)
{
    int mvalue, mtype, tid, shared;

    if (__predict_false(mutex == NULL))
        return EINVAL;

    mvalue = mutex->value;
    mtype  = (mvalue & MUTEX_TYPE_MASK);
    shared = (mvalue & MUTEX_SHARED_MASK);

    /* Handle common case first */
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        _normal_unlock(mutex, shared);
        return 0;
    }

    /* Do we already own this recursive or error-check mutex ? */
    tid = __get_thread()->tid;
    if ( tid != MUTEX_OWNER_FROM_BITS(mvalue) )
        return EPERM;

    /* If the counter is > 0, we can simply decrement it atomically.
     * Since other threads can mutate the lower state bits (and only the
     * lower state bits), use a cmpxchg to do it.
     */
    if (!MUTEX_COUNTER_BITS_IS_ZERO(mvalue)) {
        for (;;) {
            int newval = mvalue - MUTEX_COUNTER_BITS_ONE;
            if (__predict_true(__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0)) {
                /* success: we still own the mutex, so no memory barrier */
                return 0;
            }
            /* the value changed, so reload and loop */
            mvalue = mutex->value;
        }
    }

    /* the counter is 0, so we're going to unlock the mutex by resetting
     * its value to 'unlocked'. We need to perform a swap in order
     * to read the current state, which will be 2 if there are waiters
     * to awake.
     *
     * TODO: Change this to __bionic_swap_release when we implement it
     *        to get rid of the explicit memory barrier below.
     */
    ANDROID_MEMBAR_FULL();  /* RELEASE BARRIER */
    mvalue = __bionic_swap(mtype | shared | MUTEX_STATE_BITS_UNLOCKED, &mutex->value);

    /* Wake one waiting thread, if any */
    if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(mvalue)) {
        __futex_wake_ex(&mutex->value, shared, 1);
    }
    return 0;
}
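The counter path above is reached only for recursive (or errorcheck) mutexes. The same recursive counting can be exercised from portable code through the standard pthread attribute API; a minimal usage example (plain POSIX, nothing bionic-specific):

#include <pthread.h>
#include <stdio.h>

int
main(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutex_init(&m, &attr);
	pthread_mutexattr_destroy(&attr);

	/* Second lock by the owner only increments the counter. */
	pthread_mutex_lock(&m);
	pthread_mutex_lock(&m);

	/* First unlock decrements the counter; the second really unlocks. */
	pthread_mutex_unlock(&m);
	pthread_mutex_unlock(&m);

	pthread_mutex_destroy(&m);
	printf("recursive lock/unlock completed\n");
	return 0;
}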
Example #6
static inline __always_inline int __pthread_rwlock_trywrlock(pthread_rwlock_internal_t* rwlock) {
  int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);

  while (__predict_true(__can_acquire_write_lock(old_state))) {
    if (__predict_true(atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state,
          __state_add_writer_flag(old_state), memory_order_acquire, memory_order_relaxed))) {

      atomic_store_explicit(&rwlock->writer_tid, __get_thread()->tid, memory_order_relaxed);
      return 0;
    }
  }
  return EBUSY;
}
int pthread_mutex_lock(pthread_mutex_t* mutex_interface) {
    pthread_mutex_internal_t* mutex = __get_internal_mutex(mutex_interface);

    uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
    uint16_t mtype = (old_state & MUTEX_TYPE_MASK);
    uint16_t shared = (old_state & MUTEX_SHARED_MASK);
    // Avoid slowing down fast path of normal mutex lock operation.
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
      if (__predict_true(__pthread_normal_mutex_trylock(mutex, shared) == 0)) {
        return 0;
      }
    }
    return __pthread_mutex_lock_with_timeout(mutex, NULL, 0);
}
Example #8
pid_t getpid() {
  pthread_internal_t* self = __get_thread();

  if (__predict_true(self)) {
    // Do we have a valid cached pid?
    pid_t cached_pid;
    if (__predict_true(self->get_cached_pid(&cached_pid))) {
      return cached_pid;
    }
  }

  // We're still in the dynamic linker or we're in the middle of forking, so ask the kernel.
  // We don't know whether it's safe to update the cached value, so don't try.
  return __getpid();
}
/* This common inlined function is used to increment the counter of an
 * errorcheck or recursive mutex.
 *
 * For errorcheck mutexes, it will return EDEADLK.
 * If the counter overflows, it will return EAGAIN.
 * Otherwise, it atomically increments the counter and returns 0;
 * no additional memory barrier is needed because the calling thread
 * already owns the mutex.
 *
 * mtype  is the current mutex type
 * mvalue is the current mutex value (already loaded)
 * mutex  points to the mutex.
 */
static __inline__ __attribute__((always_inline)) int
_recursive_increment(pthread_mutex_t* mutex, int mvalue, int mtype)
{
    if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
        /* trying to re-lock a mutex we already acquired */
        return EDEADLK;
    }

    /* Detect recursive lock overflow and return EAGAIN.
     * This is safe because only the owner thread can modify the
     * counter bits in the mutex value.
     */
    if (MUTEX_COUNTER_BITS_WILL_OVERFLOW(mvalue)) {
        return EAGAIN;
    }

    /* We own the mutex, but other threads are able to change
     * the lower bits (e.g. promoting it to "contended"), so we
     * need to use an atomic cmpxchg loop to update the counter.
     */
    for (;;) {
        /* increment counter, overflow was already checked */
        int newval = mvalue + MUTEX_COUNTER_BITS_ONE;
        if (__predict_true(__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0)) {
            /* mutex is still locked, no need for a memory barrier */
            return 0;
        }
        /* the value was changed, this happens when another thread changes
         * the lower state bits from 1 to 2 to indicate contention. This
         * cannot change the counter, so simply reload and try again.
         */
        mvalue = mutex->value;
    }
}
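The loop above is the generic "reload and retry" compare-and-swap pattern: only the owner touches the counter bits, but other threads may flip the low state bits concurrently, so the update is retried whenever the compare-exchange observes a different value. A minimal sketch of the same pattern with C11 atomics (the field layout is illustrative, not bionic's):

#include <stdatomic.h>
#include <stdint.h>

#define COUNTER_ONE	(1u << 2)	/* illustrative: counter sits above two state bits */

/* Add COUNTER_ONE while tolerating concurrent changes to the low bits. */
static void
counter_increment(_Atomic uint32_t *value)
{
	uint32_t old = atomic_load_explicit(value, memory_order_relaxed);

	while (!atomic_compare_exchange_weak_explicit(value, &old,
	    old + COUNTER_ONE, memory_order_relaxed, memory_order_relaxed)) {
		/* 'old' was reloaded with the current value; just retry. */
	}
}

int
main(void)
{
	_Atomic uint32_t v = 0;

	counter_increment(&v);
	counter_increment(&v);
	return atomic_load(&v) == 2 * COUNTER_ONE ? 0 : 1;
}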
Example #10
int _Mutex_recursive_Try_acquire( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  ISR_lock_Context         lock_context;
  Thread_Control          *executing;
  Thread_Control          *owner;
  int                      success;

  mutex = _Mutex_recursive_Get( _mutex );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );

  owner = mutex->Mutex.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Mutex.owner = executing;
    ++executing->resource_count;
    success = 1;
  } else if ( owner == executing ) {
    ++mutex->nest_level;
    success = 1;
  } else {
    success = 0;
  }

  _Mutex_Queue_release( &mutex->Mutex, &lock_context );

  return success;
}
int __init_thread(pthread_internal_t* thread) {
  int error = 0;

  if (__predict_true((thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) == 0)) {
    atomic_init(&thread->join_state, THREAD_NOT_JOINED);
  } else {
    atomic_init(&thread->join_state, THREAD_DETACHED);
  }

  // Set the scheduling policy/priority of the thread.
  if (thread->attr.sched_policy != SCHED_NORMAL) {
    sched_param param;
    param.sched_priority = thread->attr.sched_priority;
    if (sched_setscheduler(thread->tid, thread->attr.sched_policy, &param) == -1) {
#if __LP64__
      // For backwards compatibility reasons, we only report failures on 64-bit devices.
      error = errno;
#endif
      __libc_format_log(ANDROID_LOG_WARN, "libc",
                        "pthread_create sched_setscheduler call failed: %s", strerror(errno));
    }
  }

  thread->cleanup_stack = NULL;

  return error;
}
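__init_thread() applies the scheduling policy recorded in the thread's attributes; from application code the equivalent request is made through the pthread attribute API before pthread_create(). A minimal sketch using standard POSIX calls (SCHED_FIFO normally requires privileges, so the create may legitimately fail):

#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

static void *
worker(void *arg)
{
	(void)arg;
	return NULL;
}

int
main(void)
{
	pthread_attr_t attr;
	struct sched_param param;
	pthread_t t;
	int rc;

	pthread_attr_init(&attr);
	/* Without EXPLICIT_SCHED the new thread would inherit the creator's policy. */
	pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
	pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
	memset(&param, 0, sizeof(param));
	param.sched_priority = 10;
	pthread_attr_setschedparam(&attr, &param);

	rc = pthread_create(&t, &attr, worker, NULL);
	if (rc != 0)
		printf("pthread_create: %s\n", strerror(rc));
	else
		pthread_join(t, NULL);

	pthread_attr_destroy(&attr);
	return 0;
}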
Example #12
void
mutex_spin_enter(kmutex_t *mtx)
{

	if (__predict_true(mtx != RUMP_LMUTEX_MAGIC))
		mutex_enter(mtx);
}
Example #13
void
kprintf_lock(void)
{

	if (__predict_true(kprintf_inited))
		mutex_enter(&kprintf_mtx);
}
Example #14
/*
 * Read the IA32_APERF and IA32_MPERF counters. IA32_MPERF
 * increments at the rate of the fixed maximum frequency
 * configured during boot, whereas IA32_APERF counts at the
 * rate of the actual frequency. Note that the MSRs must be
 * read without delay, and that only the ratio between
 * IA32_APERF and IA32_MPERF is architecturally defined.
 *
 * The function thus returns the percentage of the actual
 * frequency in terms of the maximum frequency of the calling
 * CPU since the last call. A value zero implies an error.
 *
 * For further details, refer to:
 *
 *	Intel Corporation: Intel 64 and IA-32 Architectures
 *	Software Developer's Manual. Section 13.2, Volume 3A:
 *	System Programming Guide, Part 1. July, 2008.
 *
 *	Advanced Micro Devices: BIOS and Kernel Developer's
 *	Guide (BKDG) for AMD Family 10h Processors. Section
 *	2.4.5, Revision 3.48, April 2010.
 */
uint8_t
acpicpu_md_pstate_hwf(struct cpu_info *ci)
{
	struct acpicpu_softc *sc;
	uint64_t aperf, mperf;
	uint8_t rv = 0;

	sc = acpicpu_sc[ci->ci_acpiid];

	if (__predict_false(sc == NULL))
		return 0;

	if (__predict_false((sc->sc_flags & ACPICPU_FLAG_P_HWF) == 0))
		return 0;

	aperf = sc->sc_pstate_aperf;
	mperf = sc->sc_pstate_mperf;

	x86_disable_intr();

	sc->sc_pstate_aperf = rdmsr(MSR_APERF);
	sc->sc_pstate_mperf = rdmsr(MSR_MPERF);

	x86_enable_intr();

	aperf = sc->sc_pstate_aperf - aperf;
	mperf = sc->sc_pstate_mperf - mperf;

	if (__predict_true(mperf != 0))
		rv = (aperf * 100) / mperf;

	return rv;
}
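As a worked example of the ratio above: if APERF advanced by 1,600,000 ticks and MPERF by 2,000,000 ticks between two calls, the function returns (1600000 * 100) / 2000000 = 80, i.e. the CPU ran at roughly 80% of its maximum frequency over that interval. The same arithmetic in isolation (plain integer math, no MSR access):

#include <stdint.h>
#include <stdio.h>

/* Percentage of the maximum frequency given APERF/MPERF deltas; 0 on error. */
static uint8_t
pstate_hwf_percent(uint64_t aperf_delta, uint64_t mperf_delta)
{
	if (mperf_delta == 0)
		return 0;
	return (uint8_t)((aperf_delta * 100) / mperf_delta);
}

int
main(void)
{
	printf("%u%%\n", pstate_hwf_percent(1600000, 2000000));	/* 80% */
	return 0;
}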
Example #15
int
acpicpu_md_pstate_set(struct acpicpu_pstate *ps)
{
	uint64_t val = 0;

	if (__predict_false(ps->ps_control_addr == 0))
		return EINVAL;

	if ((ps->ps_flags & ACPICPU_FLAG_P_FIDVID) != 0)
		return acpicpu_md_pstate_fidvid_set(ps);

	/*
	 * If the mask is set, do a read-modify-write.
	 */
	if (__predict_true(ps->ps_control_mask != 0)) {
		val = rdmsr(ps->ps_control_addr);
		val &= ~ps->ps_control_mask;
	}

	val |= ps->ps_control;

	wrmsr(ps->ps_control_addr, val);
	DELAY(ps->ps_latency);

	return 0;
}
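The masked update above is the usual read-modify-write idiom for a register field: clear the bits covered by the mask, then OR in the new value. A generic sketch on a plain integer (no MSR access; the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Replace the bits selected by 'mask' in 'reg' with the matching bits of 'val'. */
static uint64_t
update_field(uint64_t reg, uint64_t mask, uint64_t val)
{
	reg &= ~mask;		/* clear the field */
	reg |= (val & mask);	/* insert the new value */
	return reg;
}

int
main(void)
{
	/* Replace bits 15:0 of the register image with 0xBEEF. */
	printf("0x%llx\n", (unsigned long long)
	    update_field(0xAABBCCDD11223344ULL, 0xFFFFULL, 0xBEEFULL));
	return 0;	/* prints 0xaabbccdd1122beef */
}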
Example #16
static void copy_wr_to_sq(struct t4_wq *wq, union t4_wr *wqe, u8 len16)
{
	void *src, *dst;
	uintptr_t end;
	int total, len;

	src = &wqe->flits[0];
	dst = &wq->sq.queue->flits[wq->sq.wq_pidx *
	    (T4_EQ_ENTRY_SIZE / sizeof(__be64))];
	if (t4_sq_onchip(wq)) {
		len16 = align(len16, 4);

		/* In onchip mode the copy below will be made to WC memory and
		 * could trigger DMA. In offchip mode the copy below only
		 * queues the WQE; DMA cannot start until t4_ring_sq_db
		 * happens */
		mmio_wc_start();
	}

	/* NOTE: len16 cannot be large enough to write to the
	 * same sq.queue memory twice in the copy below. */
	total = len16 * 16;
	end = (uintptr_t)&wq->sq.queue[wq->sq.size];
	if (__predict_true((uintptr_t)dst + total <= end)) {
		/* Won't wrap around. */
		memcpy(dst, src, total);
	} else {
		len = end - (uintptr_t)dst;
		memcpy(dst, src, len);
		memcpy(wq->sq.queue, src + len, total - len);
	}

	if (t4_sq_onchip(wq))
		mmio_flush_writes();
}
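The copy above is the standard split memcpy into a circular buffer: if the write would run past the end of the queue, copy up to the end and place the remainder at the start. A generic sketch of the same technique (names and sizes are illustrative):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Copy 'len' bytes into the circular buffer ring[ring_size] starting at
 * offset 'pos', wrapping to the beginning of the ring when the write
 * would run past the end.
 */
static void
ring_copy(uint8_t *ring, size_t ring_size, size_t pos,
    const void *src, size_t len)
{
	size_t first = ring_size - pos;

	if (len <= first) {
		/* Fits without wrapping. */
		memcpy(ring + pos, src, len);
	} else {
		/* Split copy: fill to the end, then continue at the start. */
		memcpy(ring + pos, src, first);
		memcpy(ring, (const uint8_t *)src + first, len - first);
	}
}

int
main(void)
{
	uint8_t ring[8] = { 0 };
	const uint8_t msg[5] = { 1, 2, 3, 4, 5 };

	/* Writing 5 bytes at offset 6 wraps: bytes land at 6, 7, 0, 1, 2. */
	ring_copy(ring, sizeof(ring), 6, msg, sizeof(msg));
	return ring[0] == 3 ? 0 : 1;
}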
Example #17
/*
 * Re-align the payload in the mbuf.  This is mainly used (right now)
 * to handle IP header alignment requirements on certain architectures.
 */
struct mbuf *
ieee80211_realign(struct ieee80211vap *vap, struct mbuf *m, size_t align)
{
	int pktlen, space;
	struct mbuf *n;

	pktlen = m->m_pkthdr.len;
	space = pktlen + align;
	if (space < MINCLSIZE)
		n = m_gethdr(M_NOWAIT, MT_DATA);
	else {
		n = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
		    space <= MCLBYTES ?     MCLBYTES :
#if MJUMPAGESIZE != MCLBYTES
		    space <= MJUMPAGESIZE ? MJUMPAGESIZE :
#endif
		    space <= MJUM9BYTES ?   MJUM9BYTES : MJUM16BYTES);
	}
	if (__predict_true(n != NULL)) {
		m_move_pkthdr(n, m);
		n->m_data = (caddr_t)(ALIGN(n->m_data + align) - align);
		m_copydata(m, 0, pktlen, mtod(n, caddr_t));
		n->m_len = pktlen;
	} else {
		IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
		    mtod(m, const struct ieee80211_frame *), NULL,
		    "%s", "no mbuf to realign");
		vap->iv_stats.is_rx_badalign++;
	}
	m_freem(m);
	return n;
}
/*
 * Lock a mutex of type NORMAL.
 *
 * As noted above, there are three states:
 *   0 (unlocked, no contention)
 *   1 (locked, no contention)
 *   2 (locked, contention)
 *
 * Non-recursive mutexes don't use the thread-id or counter fields, and the
 * "type" value is zero, so the only bits that will be set are the ones in
 * the lock state field.
 */
static inline __always_inline int __pthread_normal_mutex_lock(pthread_mutex_internal_t* mutex,
                                                              uint16_t shared,
                                                              bool use_realtime_clock,
                                                              const timespec* abs_timeout_or_null) {
    if (__predict_true(__pthread_normal_mutex_trylock(mutex, shared) == 0)) {
        return 0;
    }
    int result = check_timespec(abs_timeout_or_null, true);
    if (result != 0) {
        return result;
    }

    ScopedTrace trace("Contending for pthread mutex");

    const uint16_t unlocked           = shared | MUTEX_STATE_BITS_UNLOCKED;
    const uint16_t locked_contended   = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

    // We want to go to sleep until the mutex is available, which requires
    // promoting it to locked_contended. We need to swap in the new state
    // and then wait until somebody wakes us up.
    // An atomic_exchange is used to compete with other threads for the lock.
    // If it returns unlocked, we have acquired the lock, otherwise another
    // thread still holds the lock and we should wait again.
    // If lock is acquired, an acquire fence is needed to make all memory accesses
    // made by other threads visible to the current CPU.
    while (atomic_exchange_explicit(&mutex->state, locked_contended,
                                    memory_order_acquire) != unlocked) {
        if (__futex_wait_ex(&mutex->state, shared, locked_contended, use_realtime_clock,
                            abs_timeout_or_null) == -ETIMEDOUT) {
            return ETIMEDOUT;
        }
    }
    return 0;
}
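A compressed sketch of the 0/1/2 protocol the comment above describes, written against a plain 32-bit word and the Linux futex syscall directly. This is an illustration of the classic algorithm, not bionic's implementation, which packs the state into 16 bits, supports shared and timed waits, and goes through __futex_wait_ex():

#include <linux/futex.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

/* 0 = unlocked, 1 = locked/no waiters, 2 = locked/maybe waiters */
static _Atomic int lock_word;

static void
futex_wait(_Atomic int *addr, int expected)
{
	syscall(SYS_futex, addr, FUTEX_WAIT_PRIVATE, expected, NULL, NULL, 0);
}

static void
futex_wake_one(_Atomic int *addr)
{
	syscall(SYS_futex, addr, FUTEX_WAKE_PRIVATE, 1, NULL, NULL, 0);
}

static void
normal_lock(void)
{
	int expected = 0;

	/* Fast path: 0 -> 1 with acquire semantics. */
	if (atomic_compare_exchange_strong_explicit(&lock_word, &expected, 1,
	    memory_order_acquire, memory_order_relaxed))
		return;

	/* Slow path: mark the lock contended and sleep until we take it as 0. */
	while (atomic_exchange_explicit(&lock_word, 2,
	    memory_order_acquire) != 0)
		futex_wait(&lock_word, 2);
}

static void
normal_unlock(void)
{
	/* If the old value was 2, somebody may be asleep: wake one waiter. */
	if (atomic_exchange_explicit(&lock_word, 0,
	    memory_order_release) == 2)
		futex_wake_one(&lock_word);
}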
int pthread_mutex_init(pthread_mutex_t* mutex_interface, const pthread_mutexattr_t* attr) {
    pthread_mutex_internal_t* mutex = __get_internal_mutex(mutex_interface);

    memset(mutex, 0, sizeof(pthread_mutex_internal_t));

    if (__predict_true(attr == NULL)) {
        atomic_init(&mutex->state, MUTEX_TYPE_BITS_NORMAL);
        return 0;
    }

    uint16_t state = 0;
    if ((*attr & MUTEXATTR_SHARED_MASK) != 0) {
        state |= MUTEX_SHARED_MASK;
    }

    switch (*attr & MUTEXATTR_TYPE_MASK) {
    case PTHREAD_MUTEX_NORMAL:
      state |= MUTEX_TYPE_BITS_NORMAL;
      break;
    case PTHREAD_MUTEX_RECURSIVE:
      state |= MUTEX_TYPE_BITS_RECURSIVE;
      break;
    case PTHREAD_MUTEX_ERRORCHECK:
      state |= MUTEX_TYPE_BITS_ERRORCHECK;
      break;
    default:
      return EINVAL;
    }

    atomic_init(&mutex->state, state);
    atomic_init(&mutex->owner_tid, 0);
    return 0;
}
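A short usage example of the attribute path above: initialising an errorcheck mutex through the portable attribute API and observing the EDEADLK and EPERM returns that this type selects (standard POSIX behaviour, nothing bionic-specific):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

int
main(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&m, &attr);
	pthread_mutexattr_destroy(&attr);

	pthread_mutex_lock(&m);
	/* Relocking an errorcheck mutex we already own fails. */
	printf("relock: %s\n",
	    pthread_mutex_lock(&m) == EDEADLK ? "EDEADLK" : "unexpected");
	pthread_mutex_unlock(&m);
	/* Unlocking a mutex we do not own fails as well. */
	printf("stray unlock: %s\n",
	    pthread_mutex_unlock(&m) == EPERM ? "EPERM" : "unexpected");

	pthread_mutex_destroy(&m);
	return 0;
}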
Example #20
/*
 * softintr_establish:		[interface]
 *
 *	Register a software interrupt handler.
 */
void *
softintr_establish(int ipl, void (*func)(void *), void *arg)
{
	struct alpha_soft_intr *asi;
	struct alpha_soft_intrhand *sih;
	int s;

	if (__predict_false(ipl >= IPL_NSOFT || ipl < 0))
		panic("softintr_establish");

	asi = &alpha_soft_intrs[ipl];

	sih = malloc(sizeof(*sih), M_DEVBUF, M_NOWAIT);
	if (__predict_true(sih != NULL)) {
		sih->sih_intrhead = asi;
		sih->sih_fn = func;
		sih->sih_arg = arg;
		sih->sih_pending = 0;
		s = splsoft();
		simple_lock(&asi->softintr_slock);
		LIST_INSERT_HEAD(&asi->softintr_q, sih, sih_q);
		simple_unlock(&asi->softintr_slock);
		splx(s);
	}
	return (sih);
}
Example #21
static inline __always_inline int __pthread_rwlock_tryrdlock(pthread_rwlock_internal_t* rwlock) {
  int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);

  while (__predict_true(__can_acquire_read_lock(old_state, rwlock->writer_nonrecursive_preferred))) {

    int new_state = old_state + STATE_READER_COUNT_CHANGE_STEP;
    if (__predict_false(!__state_owned_by_readers(new_state))) { // Happens when reader count overflows.
      return EAGAIN;
    }
    if (__predict_true(atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state, new_state,
                                              memory_order_acquire, memory_order_relaxed))) {
      return 0;
    }
  }
  return EBUSY;
}
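From portable code the try paths above are reached through pthread_rwlock_tryrdlock() and pthread_rwlock_trywrlock(), which return EBUSY instead of blocking. A short standard-POSIX usage example:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

int
main(void)
{
	pthread_rwlock_t rw;

	pthread_rwlock_init(&rw, NULL);

	pthread_rwlock_rdlock(&rw);
	/* A second reader can join without blocking. */
	printf("tryrdlock: %d\n", pthread_rwlock_tryrdlock(&rw));	/* 0 */
	pthread_rwlock_unlock(&rw);

	/* A writer cannot sneak in while a read hold remains. */
	printf("trywrlock: %s\n",
	    pthread_rwlock_trywrlock(&rw) == EBUSY ? "EBUSY" : "acquired");
	pthread_rwlock_unlock(&rw);

	pthread_rwlock_destroy(&rw);
	return 0;
}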
Example #22
static void
linux_worker_intr(void *arg)
{
	struct delayed_work *dw = arg;
	struct workqueue_struct *wq;

	linux_work_lock(&dw->work);

	KASSERT((dw->work.w_state == WORK_DELAYED) ||
	    (dw->work.w_state == WORK_DELAYED_CANCELLED));

	wq = dw->work.w_wq;
	mutex_enter(&wq->wq_lock);

	/* Queue the work, or return it to idle and alert any cancellers.  */
	if (__predict_true(dw->work.w_state == WORK_DELAYED)) {
		dw->work.w_state = WORK_PENDING;
		workqueue_enqueue(dw->work.w_wq->wq_workqueue, &dw->work.w_wk,
		    NULL);
	} else {
		KASSERT(dw->work.w_state == WORK_DELAYED_CANCELLED);
		dw->work.w_state = WORK_IDLE;
		dw->work.w_wq = NULL;
		cv_broadcast(&wq->wq_cv);
	}

	/* Either way, the callout is done.  */
	TAILQ_REMOVE(&dw->work.w_wq->wq_delayed, dw, dw_entry);
	callout_destroy(&dw->dw_callout);

	mutex_exit(&wq->wq_lock);
	linux_work_unlock(&dw->work);
}
Example #23
int pthread_mutex_init(pthread_mutex_t* mutex, const pthread_mutexattr_t* attr) {
    if (__predict_true(attr == NULL)) {
        mutex->value = MUTEX_TYPE_BITS_NORMAL;
        return 0;
    }

    int value = 0;
    if ((*attr & MUTEXATTR_SHARED_MASK) != 0) {
        value |= MUTEX_SHARED_MASK;
    }

    switch (*attr & MUTEXATTR_TYPE_MASK) {
    case PTHREAD_MUTEX_NORMAL:
        value |= MUTEX_TYPE_BITS_NORMAL;
        break;
    case PTHREAD_MUTEX_RECURSIVE:
        value |= MUTEX_TYPE_BITS_RECURSIVE;
        break;
    case PTHREAD_MUTEX_ERRORCHECK:
        value |= MUTEX_TYPE_BITS_ERRORCHECK;
        break;
    default:
        return EINVAL;
    }

    mutex->value = value;
    return 0;
}
Example #24
static void _Mutex_Release_critical(
  Mutex_Control        *mutex,
  Thread_Control       *executing,
  ISR_Level             level,
  Thread_queue_Context *queue_context
)
{
  Thread_queue_Heads *heads;

  heads = mutex->Queue.Queue.heads;
  mutex->Queue.Queue.owner = NULL;
  _Thread_Resource_count_decrement( executing );

  if ( __predict_true( heads == NULL ) ) {
    _Mutex_Queue_release( mutex, level, queue_context );
  } else {
    _Thread_queue_Context_set_ISR_level( queue_context, level );
    _Thread_queue_Surrender(
      &mutex->Queue.Queue,
      heads,
      executing,
      queue_context,
      MUTEX_TQ_OPERATIONS
    );
  }
}
Example #25
static __inline int
prefetch_abort_fixup(trapframe_t *tf, struct ksig *ksig)
{
#ifdef CPU_ABORT_FIXUP_REQUIRED
	int error;

	/* Call the cpu specific prefetch abort fixup routine */
	error = cpu_prefetchabt_fixup(tf);
	if (__predict_true(error != ABORT_FIXUP_FAILED))
		return (error);

	/*
	 * Oops, couldn't fix up the instruction
	 */
	printf(
	    "prefetch_abort_fixup: fixup for %s mode prefetch abort failed.\n",
	    TRAP_USERMODE(tf) ? "user" : "kernel");
	printf("pc = 0x%08x, opcode 0x%08x, insn = ", tf->tf_pc,
	    *((u_int *)tf->tf_pc));
	disassemble(tf->tf_pc);

	/* Die now if this happened in kernel mode */
	if (!TRAP_USERMODE(tf))
		dab_fatal(tf, 0, tf->tf_pc, NULL, ksig);

	return (error);
#else
	return (ABORT_FIXUP_OK);
#endif /* CPU_ABORT_FIXUP_REQUIRED */
}
Example #26
void _Mutex_recursive_Acquire( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  Thread_queue_Context     queue_context;
  ISR_Level                level;
  Thread_Control          *executing;
  Thread_Control          *owner;

  mutex = _Mutex_recursive_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_ISR_disable( &queue_context, level );
  executing = _Mutex_Queue_acquire_critical( &mutex->Mutex, &queue_context );

  owner = mutex->Mutex.Queue.Queue.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Mutex.Queue.Queue.owner = executing;
    _Thread_Resource_count_increment( executing );
    _Mutex_Queue_release( &mutex->Mutex, level, &queue_context );
  } else if ( owner == executing ) {
    ++mutex->nest_level;
    _Mutex_Queue_release( &mutex->Mutex, level, &queue_context );
  } else {
    _Thread_queue_Context_set_no_timeout( &queue_context );
    _Mutex_Acquire_slow( &mutex->Mutex, owner, executing, level, &queue_context );
  }
}
/*
 * Append an mbuf to the ageq and mark it with the specified max age.
 * If the frame is not removed before the age (in seconds) expires,
 * then it is reclaimed (along with any node reference).
 */
int
ieee80211_ageq_append(struct ieee80211_ageq *aq, struct mbuf *m, int age)
{
	IEEE80211_AGEQ_LOCK(aq);
	if (__predict_true(aq->aq_len < aq->aq_maxlen)) {
		if (aq->aq_tail == NULL) {
			aq->aq_head = m;
		} else {
			aq->aq_tail->m_nextpkt = m;
			age -= M_AGE_GET(aq->aq_head);
		}
		KASSERT(age >= 0, ("age %d", age));
		M_AGE_SET(m, age);
		m->m_nextpkt = NULL;
		aq->aq_tail = m;
		aq->aq_len++;
		IEEE80211_AGEQ_UNLOCK(aq);
		return 0;
	} else {
		/*
		 * No space, drop and cleanup references.
		 */
		aq->aq_drops++;
		IEEE80211_AGEQ_UNLOCK(aq);
		/* XXX tail drop? */
		ageq_mfree(m);
		return ENOSPC;
	}
}
Example #28
int _Mutex_recursive_Try_acquire( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  Thread_queue_Context     queue_context;
  ISR_Level                level;
  Thread_Control          *executing;
  Thread_Control          *owner;
  int                      eno;

  mutex = _Mutex_recursive_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_ISR_disable( &queue_context, level );
  executing = _Mutex_Queue_acquire_critical( &mutex->Mutex, &queue_context );

  owner = mutex->Mutex.Queue.Queue.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Mutex.Queue.Queue.owner = executing;
    _Thread_Resource_count_increment( executing );
    eno = 0;
  } else if ( owner == executing ) {
    ++mutex->nest_level;
    eno = 0;
  } else {
    eno = EBUSY;
  }

  _Mutex_Queue_release( &mutex->Mutex, level, &queue_context );

  return eno;
}
Example #29
/*
 * Register a software interrupt handler.
 */
void *
softintr_establish(int ipl, void (*func)(void *), void *arg)
{
	struct soft_intrhand *sih;
	int si;

	switch (ipl) {
	case IPL_SOFT:
		si = SI_SOFT;
		break;
	case IPL_SOFTCLOCK:
		si = SI_SOFTCLOCK;
		break;
	case IPL_SOFTNET:
		si = SI_SOFTNET;
		break;
	case IPL_TTY:			/* XXX until MI code is fixed */
	case IPL_SOFTTTY:
		si = SI_SOFTTTY;
		break;
	default:
		printf("softintr_establish: unknown soft IPL %d\n", ipl);
		return NULL;
	}

	sih = malloc(sizeof(*sih), M_DEVBUF, M_NOWAIT);
	if (__predict_true(sih != NULL)) {
		sih->sih_func = func;
		sih->sih_arg = arg;
		sih->sih_siq = &soft_intrq[si];
		sih->sih_pending = 0;
	}
	return (sih);
}
Example #30
void _Mutex_recursive_Release( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  Thread_queue_Context     queue_context;
  ISR_Level                level;
  Thread_Control          *executing;
  unsigned int             nest_level;

  mutex = _Mutex_recursive_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_ISR_disable( &queue_context, level );
  executing = _Mutex_Queue_acquire_critical( &mutex->Mutex, &queue_context );

  _Assert( mutex->Mutex.Queue.Queue.owner == executing );

  nest_level = mutex->nest_level;

  if ( __predict_true( nest_level == 0 ) ) {
    _Mutex_Release_critical( &mutex->Mutex, executing, level, &queue_context );
  } else {
    mutex->nest_level = nest_level - 1;

    _Mutex_Queue_release( &mutex->Mutex, level, &queue_context );
  }
}