Example 1
/*
 * This function is used to acquire a contested lock.
 */
int
__sysv_umtx_lock(volatile umtx_t *mtx, int timo)
{
	int v, errval, ret = 0;

	/* contested */
	do {
		v = *mtx;
		if (v == 2 || atomic_cmpset_acq_int(mtx, 1, 2)) {
			if (timo == 0)
				umtx_sleep(mtx, 2, timo);
			else if ((errval = umtx_sleep(mtx, 2, timo)) > 0) {
				if (errval == EAGAIN) {
					if (atomic_cmpset_acq_int(mtx, 0, 2))
						ret = 0;
					else
						ret = ETIMEDOUT;
					break;
				}
			}
		}
	} while (!atomic_cmpset_acq_int(mtx, 0, 2));

	return (ret);
}
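A contested-path routine like this is normally paired with an uncontested fast path that attempts the 0 -> 1 transition inline and only enters the loop above when that fails. A minimal sketch of such a wrapper, assuming the usual pairing (the name sysv_mutex_lock is illustrative, not part of the source):

/* Hypothetical fast path; falls back to the contested routine above. */
static __inline void
sysv_mutex_lock(volatile umtx_t *mtx)
{
	if (!atomic_cmpset_acq_int(mtx, 0, 1))
		__sysv_umtx_lock(mtx, 0);
}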
Example 2
int
_pthread_once(pthread_once_t *once_control, void (*init_routine) (void))
{
	struct pthread *curthread;
	int state;

	_thr_check_init();

	/*
	 * Resolve the once-control state machine: NEVER_DONE is
	 * claimed as IN_PROGRESS with an acquire cmpset; later
	 * arrivals escalate IN_PROGRESS to WAIT and sleep until
	 * the initializer finishes.
	 */
	for (;;) {
		state = once_control->state;
		if (state == ONCE_DONE)
			return (0);
		if (state == ONCE_NEVER_DONE) {
			if (atomic_cmpset_acq_int(&once_control->state, state, ONCE_IN_PROGRESS))
				break;
		} else if (state == ONCE_IN_PROGRESS) {
			if (atomic_cmpset_acq_int(&once_control->state, state, ONCE_WAIT))
				_thr_umtx_wait_uint(&once_control->state, ONCE_WAIT, NULL, 0);
		} else if (state == ONCE_WAIT) {
			_thr_umtx_wait_uint(&once_control->state, state, NULL, 0);
		} else
			return (EINVAL);
	}

	curthread = _get_curthread();
	THR_CLEANUP_PUSH(curthread, once_cancel_handler, once_control);
	init_routine();
	THR_CLEANUP_POP(curthread, 0);
	/*
	 * No waiters: the release cmpset from IN_PROGRESS to DONE
	 * completes the initialization.  Otherwise a waiter moved the
	 * state to WAIT, so publish DONE and wake everyone.
	 */
	if (atomic_cmpset_rel_int(&once_control->state, ONCE_IN_PROGRESS, ONCE_DONE))
		return (0);
	atomic_store_rel_int(&once_control->state, ONCE_DONE);
	_thr_umtx_wake(&once_control->state, INT_MAX, 0);
	return (0);
}
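The handler pushed with THR_CLEANUP_PUSH must undo a cancelled initialization so that other threads are not left stuck in ONCE_WAIT. A sketch consistent with the state machine above (a hedged reconstruction, not necessarily the exact library code):

static void
once_cancel_handler(void *arg)
{
	pthread_once_t *once_control = arg;

	/* No waiters: a release cmpset back to NEVER_DONE suffices. */
	if (atomic_cmpset_rel_int(&once_control->state, ONCE_IN_PROGRESS,
	    ONCE_NEVER_DONE))
		return;
	/* Waiters moved the state to ONCE_WAIT: publish, then wake them. */
	atomic_store_rel_int(&once_control->state, ONCE_NEVER_DONE);
	_thr_umtx_wake(&once_control->state, INT_MAX, 0);
}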
Example 3
int
_pthread_cancel(pthread_t pthread)
{
	struct pthread *curthread = tls_get_curthread();
	int oldval, newval = 0;
	int oldtype;
	int ret;

	/*
	 * POSIX says _pthread_cancel should be async cancellation safe,
	 * so we temporarily disable async cancellation.
	 */
	_pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &oldtype);
	if ((ret = _thr_ref_add(curthread, pthread, 0)) != 0) {
		_pthread_setcanceltype(oldtype, NULL);
		return (ret);
	}

	do {
		oldval = pthread->cancelflags;
		if (oldval & THR_CANCEL_NEEDED)
			break;
		newval = oldval | THR_CANCEL_NEEDED;
	} while (!atomic_cmpset_acq_int(&pthread->cancelflags, oldval, newval));

	if (!(oldval & THR_CANCEL_NEEDED) && SHOULD_ASYNC_CANCEL(newval))
		_thr_send_sig(pthread, SIGCANCEL);

	_thr_ref_delete(curthread, pthread);
	_pthread_setcanceltype(oldtype, NULL);
	return (0);
}
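The do/while above is the canonical read-modify-write loop for setting flag bits with a compare-and-set primitive. Factored out as a standalone helper (illustrative, not part of the library):

/* Atomically OR bits into *p; returns the value observed beforehand. */
static int
atomic_set_flags(volatile int *p, int bits)
{
	int oldval;

	do {
		oldval = *p;
	} while (!atomic_cmpset_acq_int(p, oldval, oldval | bits));
	return (oldval);
}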
Example 4
static void
def_wlock_acquire(void *lock)
{
    Lock *l = (Lock *)lock;
    sigset_t tmp_oldsigmask;

    /*
     * Block all signals before trying for the lock so that a signal
     * handler cannot deadlock against a partially taken lock; if the
     * cmpset fails, restore the mask and retry.
     */
    for ( ; ; ) {
	sigprocmask(SIG_BLOCK, &fullsigmask, &tmp_oldsigmask);
	if (atomic_cmpset_acq_int(&l->lock, 0, WAFLAG))
	    break;
	sigprocmask(SIG_SETMASK, &tmp_oldsigmask, NULL);
    }
    oldsigmask = tmp_oldsigmask;
}
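The matching release must undo both effects of the acquire: clear WAFLAG and restore the signal mask saved in oldsigmask. A sketch under those assumptions, using atomic_add_rel_int for the release-ordered clear (the real rtld release also handles read locks, omitted here):

static void
def_wlock_release(void *lock)
{
    Lock *l = (Lock *)lock;

    /* Drop the write flag with release semantics, then unblock signals. */
    atomic_add_rel_int(&l->lock, -WAFLAG);
    sigprocmask(SIG_SETMASK, &oldsigmask, NULL);
}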
Example 5
/*
 * This function is used to acquire a contested lock.
 *
 * A *mtx value of 1 indicates locked normally.
 * A *mtx value of 2 indicates locked and contested.
 */
int
__thr_umtx_lock(volatile umtx_t *mtx, int id, int timo)
{
	int v;
	int errval;
	int ret = 0;
	int retry = 4;

	v = *mtx;
	cpu_ccfence();
	id &= 0x3FFFFFFF;

	for (;;) {
		cpu_pause();
		if (v == 0) {
			if (atomic_fcmpset_int(mtx, &v, id))
				break;
			continue;
		}
		if (--retry) {
			sched_yield();
			v = *mtx;
			continue;
		}

		/*
		 * Set the waiting bit.  If the fcmpset fails, v is loaded
		 * with the current contents of the mutex; if the waiting
		 * bit is already set, we can also sleep.
		 */
		if (atomic_fcmpset_int(mtx, &v, v|0x40000000) ||
		    (v & 0x40000000)) {
			if (timo == 0) {
				_umtx_sleep_err(mtx, v|0x40000000, timo);
			} else if ((errval = _umtx_sleep_err(mtx, v|0x40000000, timo)) > 0) {
				if (errval == EAGAIN) {
					if (atomic_cmpset_acq_int(mtx, 0, id))
						ret = 0;
					else
						ret = ETIMEDOUT;
					break;
				}
			}
		}
		retry = 4;
	}
	return (ret);
}
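The encoding above (owner id in the low bits, 0x40000000 as the waiting bit) implies an unlock that clears the word and wakes sleepers only when the waiting bit was set. A minimal sketch, assuming _umtx_wakeup_err is the wakeup counterpart of _umtx_sleep_err (the function name is illustrative):

void
__thr_umtx_unlock_sketch(volatile umtx_t *mtx)
{
	int v;

	v = *mtx;
	cpu_ccfence();
	for (;;) {
		/* On fcmpset failure v is reloaded with the current value. */
		if (atomic_fcmpset_int(mtx, &v, 0)) {
			if (v & 0x40000000)	/* someone is sleeping */
				_umtx_wakeup_err(mtx, 0);
			break;
		}
	}
}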
Example 6
/*
 * Release a lock acquired with __sysv_umtx_lock.  If the lock was
 * contested (*mtx was 2), clear it and wake up one sleeper.
 */
void
__sysv_umtx_unlock(volatile umtx_t *mtx)
{
	int v;

	for (;;) {
		v = *mtx;
		if (atomic_cmpset_acq_int(mtx, v, v-1)) {
			if (v != 1) {
				*mtx = 0;
				umtx_wakeup(mtx, 1);
			}
			break;
		}
	}
}
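Together with Example 1 this gives a complete lock/unlock pair. An illustrative caller (names hypothetical):

static umtx_t res_mtx;	/* 0 = free, 1 = locked, 2 = locked, contested */

static void
touch_resource(void)
{
	if (!atomic_cmpset_acq_int(&res_mtx, 0, 1))
		__sysv_umtx_lock(&res_mtx, 0);	/* contested slow path */
	/* ... critical section ... */
	__sysv_umtx_unlock(&res_mtx);
}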
Example 7
/*
 * MP-friendly version of ppsratecheck().
 *
 * Returns non-negative if we are within the rate, negative otherwise.
 *  0 - rate limit not reached.
 * -1 - rate limit reached.
 * >0 - rate limit was reached before, and was just reset. The return value
 *      is number of events since last reset.
 */
int64_t
counter_ratecheck(struct counter_rate *cr, int64_t limit)
{
	int64_t val;
	int now;

	val = cr->cr_over;
	now = ticks;

	if (now - cr->cr_ticks >= hz) {
		/*
		 * Time to clear the structure, we are in the next second.
		 * First try unlocked read, and then proceed with atomic.
		 */
		if ((cr->cr_lock == 0) &&
		    atomic_cmpset_acq_int(&cr->cr_lock, 0, 1)) {
			/*
			 * Check whether another thread has just gone
			 * through the reset sequence before us.
			 */
			if (now - cr->cr_ticks >= hz) {
				val = counter_u64_fetch(cr->cr_rate);
				counter_u64_zero(cr->cr_rate);
				cr->cr_over = 0;
				cr->cr_ticks = now;
				if (val <= limit)
					val = 0;
			}
			atomic_store_rel_int(&cr->cr_lock, 0);
		} else
			/*
			 * We failed to take the lock; another thread may
			 * be running counter_u64_zero(), so it is not
			 * safe to update and we skip it.
			 */
			return (val);
	}

	counter_u64_add(cr->cr_rate, 1);
	if (cr->cr_over != 0)
		return (-1);
	if (counter_u64_fetch(cr->cr_rate) > limit)
		val = cr->cr_over = -1;

	return (val);
}
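The cr_lock handshake is a test-and-test-and-set try-lock: the plain read filters out the contended case cheaply before the more expensive atomic is attempted. The same pattern as a standalone pair (illustrative names):

static int
tts_trylock(volatile u_int *lk)
{
	/* Cheap unlocked read first; attempt the atomic only if free. */
	return (*lk == 0 && atomic_cmpset_acq_int(lk, 0, 1));
}

static void
tts_unlock(volatile u_int *lk)
{
	atomic_store_rel_int(lk, 0);
}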
Example 8
/* _mcount; may be static, inline, etc */
_MCOUNT_DECL(uintfptr_t frompc, uintfptr_t selfpc)
{
#ifdef GUPROF
	u_int delta;
#endif
	fptrdiff_t frompci;
	u_short *frompcindex;
	struct tostruct *top, *prevtop;
	struct gmonparam *p;
	long toindex;
#ifdef _KERNEL
	MCOUNT_DECL(s)
#endif

	p = &_gmonparam;
#ifndef GUPROF			/* XXX */
	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;
#endif
#ifdef _KERNEL
	MCOUNT_ENTER(s);
#else
	if (!atomic_cmpset_acq_int(&p->state, GMON_PROF_ON, GMON_PROF_BUSY))
		return;
#endif
	frompci = frompc - p->lowpc;

#ifdef _KERNEL
	/*
	 * When we are called from an exception handler, frompci may be
	 * for a user address.  Convert such frompci's to the index of
	 * user() to merge all user counts.
	 */
	if (frompci >= p->textsize) {
		if (frompci + p->lowpc
		    >= (uintfptr_t)(VM_MAXUSER_ADDRESS + UPAGES * PAGE_SIZE))
			goto done;
		frompci = (uintfptr_t)user - p->lowpc;
		if (frompci >= p->textsize)
			goto done;
	}
#endif

#ifdef GUPROF
	if (p->state != GMON_PROF_HIRES)
		goto skip_guprof_stuff;
	/*
	 * Look at the clock and add the count of clock cycles since the
	 * clock was last looked at to a counter for frompc.  This
	 * solidifies the count for the function containing frompc and
	 * effectively starts another clock for the current function.
	 * The count for the new clock will be solidified when another
	 * function call is made or the function returns.
	 *
	 * We use the usual sampling counters since they can be located
	 * efficiently.  4-byte counters are usually necessary.
	 *
	 * There are many complications for subtracting the profiling
	 * overheads from the counts for normal functions and adding
	 * them to the counts for mcount(), mexitcount() and cputime().
	 * We attempt to handle fractional cycles, but the overheads
	 * are usually underestimated because they are calibrated for
	 * a simpler than usual setup.
	 */
	delta = cputime() - p->mcount_overhead;
	p->cputime_overhead_resid += p->cputime_overhead_frac;
	p->mcount_overhead_resid += p->mcount_overhead_frac;
	if ((int)delta < 0)
		*p->mcount_count += delta + p->mcount_overhead
				    - p->cputime_overhead;
	else if (delta != 0) {
		if (p->cputime_overhead_resid >= CALIB_SCALE) {
			p->cputime_overhead_resid -= CALIB_SCALE;
			++*p->cputime_count;
			--delta;
		}
		if (delta != 0) {
			if (p->mcount_overhead_resid >= CALIB_SCALE) {
				p->mcount_overhead_resid -= CALIB_SCALE;
				++*p->mcount_count;
				--delta;
			}
			KCOUNT(p, frompci) += delta;
		}
		*p->mcount_count += p->mcount_overhead_sub;
	}
	*p->cputime_count += p->cputime_overhead;
skip_guprof_stuff:
#endif /* GUPROF */

#ifdef _KERNEL
	/*
	 * When we are called from an exception handler, frompc is faked
	 * to be for where the exception occurred.  We've just solidified
	 * the count for there.  Now convert frompci to the index of btrap()
	 * for trap handlers and bintr() for interrupt handlers to make
	 * exceptions appear in the call graph as calls from btrap() and
	 * bintr() instead of calls from all over.
	 */
	if ((uintfptr_t)selfpc >= (uintfptr_t)btrap
	    && (uintfptr_t)selfpc < (uintfptr_t)eintr) {
		if ((uintfptr_t)selfpc >= (uintfptr_t)bintr)
			frompci = (uintfptr_t)bintr - p->lowpc;
		else
			frompci = (uintfptr_t)btrap - p->lowpc;
	}
#endif

	/*
	 * check that frompc is a reasonable pc value.
	 * for example:	signal catchers get called from the stack,
	 *		not from text space.  too bad.
	 */
	if (frompci >= p->textsize)
		goto done;

	frompcindex = &p->froms[frompci / (p->hashfraction * sizeof(*p->froms))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 *	first time traversing this arc
		 */
		toindex = ++p->tos[0].link;
		if (toindex >= p->tolimit)
			/* halt further profiling */
			goto overflow;

		*frompcindex = toindex;
		top = &p->tos[toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit)
				goto overflow;

			top = &p->tos[toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain.
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
	}
done:
#ifdef _KERNEL
	MCOUNT_EXIT(s);
#else
	atomic_store_rel_int(&p->state, GMON_PROF_ON);
#endif
	return;
overflow:
	atomic_store_rel_int(&p->state, GMON_PROF_ERROR);
#ifdef _KERNEL
	MCOUNT_EXIT(s);
#endif
	return;
}
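Stripped of the profiling bookkeeping, the userland entry/exit protocol above reduces to a busy-flag guard: claim GMON_PROF_ON -> GMON_PROF_BUSY with acquire semantics, bail out if the claim fails (a concurrent or recursive invocation is active), and restore GMON_PROF_ON with a release store on the way out. A self-contained sketch of just that guard (record_arc is hypothetical):

static u_int prof_state = GMON_PROF_ON;

static void
record_arc(void)
{
	if (!atomic_cmpset_acq_int(&prof_state, GMON_PROF_ON,
	    GMON_PROF_BUSY))
		return;		/* busy: drop this sample */
	/* ... update the call-graph tables ... */
	atomic_store_rel_int(&prof_state, GMON_PROF_ON);
}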