Example #1
File: main.c Project: olivo/BP
inline void acquire_lock(){
	int delay;
	int cond;

	delay = 1;
	TAS(lock,cond);
	while(cond == locked){
		bassume(cond == locked);
		pause(delay);
	if(delay*2 > delay)	/* double the back-off, but guard against overflow */
		delay *= 2;
		TAS(lock,cond);
	}
	bassume(!(cond == locked));
	assert(cond != locked);
}
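This example comes from a Boolean-program verification benchmark, so TAS, bassume, and pause are verifier primitives rather than library calls. A minimal set of stand-in definitions (hypothetical, and not actually atomic) that lets the snippet compile as plain C:

#include <assert.h>

enum { unlocked = 0, locked = 1 };
static int lock = unlocked;

/* Copy the old lock value into c, then set the lock.  A real TAS does
 * this atomically in one instruction; this stub is single-threaded only. */
#define TAS(l, c)	do { (c) = (l); (l) = locked; } while (0)

/* Verifier assumption: a no-op when run as ordinary C. */
#define bassume(e)	((void) 0)

/* Back-off stub; a real implementation would sleep for d ticks. */
#define pause(d)	((void) (d))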
Example #2
void
ilock(Lock *l)
{
	Proc *up = externup();
	Mpl pl;
	uintptr_t pc;
	uint64_t t0;

	pc = getcallerpc();
	lockstats.locks++;

	pl = splhi();
	if(TAS(&l->key) != 0){
		cycles(&t0);
		lockstats.glare++;
		/*
		 * Cannot also check l->pc, l->m, or l->isilock here
		 * because they might just not be set yet, or
		 * (for pc and m) the lock might have just been unlocked.
		 */
		for(;;){
			lockstats.inglare++;
			splx(pl);
			while(l->key)
				;
			pl = splhi();
			if(TAS(&l->key) == 0){
				if(l != &waitstatslk)
					addwaitstat(pc, t0, WSlock);
				goto acquire;
			}
		}
	}
acquire:
	machp()->ilockdepth++;
	if(up)
		up->lastilock = l;
	l->pl = pl;
	l->_pc = pc;
	l->p = up;
	l->isilock = 1;
	l->m = machp();
	if(LOCKCYCLES)
		cycles(&l->lockcycles);
}
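For context, a release counterpart pairs with this acquire. The following is a hedged sketch based only on the fields ilock sets above (the real Harvey/Plan 9 iunlock additionally sanity-checks isilock and updates lock statistics); iunlock_sketch is a hypothetical name to avoid clashing with the kernel's own routine:

void
iunlock_sketch(Lock *l)
{
	Proc *up = externup();
	Mpl pl;

	pl = l->pl;		/* interrupt level saved by ilock */
	l->m = nil;
	l->isilock = 0;
	coherence();		/* flush critical-section stores before releasing */
	l->key = 0;		/* release: spinners' TAS can now succeed */
	machp()->ilockdepth--;
	if(up)
		up->lastilock = nil;
	splx(pl);		/* restore the saved interrupt level last */
}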
Example #3
int
canlock(Lock *l)
{
	Proc *up = externup();
	if(up)
		ainc(&up->nlocks);
	if(TAS(&l->key)){
		if(up)
			adec(&up->nlocks);
		return 0;
	}

	if(up)
		up->lastlock = l;
	l->_pc = getcallerpc();
	l->p = up;
	l->isilock = 0;
	if(LOCKCYCLES)
		cycles(&l->lockcycles);

	return 1;
}
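Unlike lock(), canlock() never spins: it returns 1 if it acquired the lock and 0 if the lock was busy. A hypothetical caller (somelock and the two work functions are placeholders):

static Lock somelock;

void
try_work(void)
{
	if(canlock(&somelock)){
		/* got the lock without blocking */
		do_protected_work();	/* hypothetical */
		unlock(&somelock);
	}else{
		/* lock busy: do something else rather than spin */
		do_other_work();	/* hypothetical */
	}
}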
Example #4
int
lock(Lock *l)
{
	Proc *up = externup();
	int i;
	uintptr_t pc;
	uint64_t t0;

	pc = getcallerpc();

	lockstats.locks++;
	if(up)
		ainc(&up->nlocks);	/* prevent being scheduled away while holding the lock */
	if(TAS(&l->key) == 0){
		if(up)
			up->lastlock = l;
		l->_pc = pc;
		l->p = up;
		l->isilock = 0;
		if(LOCKCYCLES)
			cycles(&l->lockcycles);

		return 0;
	}
	if(up)
		adec(&up->nlocks);

	cycles(&t0);
	lockstats.glare++;
	for(;;){
		lockstats.inglare++;
		i = 0;
		while(l->key){
			if(sys->nmach < 2 && up && up->edf && (up->edf->flags & Admitted)){
				/*
				 * Priority inversion, yield on a uniprocessor; on a
				 * multiprocessor, the other processor will unlock
				 */
				print("inversion %#p pc %#p proc %d held by pc %#p proc %d\n",
					l, pc, up ? up->pid : 0, l->_pc, l->p ? l->p->pid : 0);
				up->edf->d = todget(nil);	/* yield to process with lock */
			}
			if(i++ > 100000000){
				i = 0;
				lockloop(l, pc);
			}
		}
		if(up)
			ainc(&up->nlocks);
		if(TAS(&l->key) == 0){
			if(up)
				up->lastlock = l;
			l->_pc = pc;
			l->p = up;
			l->isilock = 0;
			if(LOCKCYCLES)
				cycles(&l->lockcycles);
			if(l != &waitstatslk)
				addwaitstat(pc, t0, WSlock);
			return 1;
		}
		if(up)
			adec(&up->nlocks);
	}
}
Example #5
void
lock(Lock *l)
{
	/* Minimal spinlock: spin until test-and-set reports the lock was free. */
	while(TAS(&l->key))
		;
}
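The release side of such a minimal spinlock is a single store to the flag. A sketch, assuming the same Lock layout as the examples above (real kernels also issue a memory barrier before the store):

void
unlock(Lock *l)
{
	l->key = 0;	/* waiting TAS loops can now succeed */
}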
Example #6
/*
 * s_lock(lock) - platform-independent portion of waiting for a spinlock.
 */
void
s_lock(volatile slock_t *lock, const char *file, int line TSRMLS_DC)
{
	/*
	 * We loop tightly for awhile, then delay using pg_usleep() and try again.
	 * Preferably, "awhile" should be a small multiple of the maximum time we
	 * expect a spinlock to be held.  100 iterations seems about right as an
	 * initial guess.  However, on a uniprocessor the loop is a waste of
	 * cycles, while in a multi-CPU scenario it's usually better to spin a bit
	 * longer than to call the kernel, so we try to adapt the spin loop count
	 * depending on whether we seem to be in a uniprocessor or multiprocessor.
	 *
	 * Note: you might think MIN_SPINS_PER_DELAY should be just 1, but you'd
	 * be wrong; there are platforms where that can result in a "stuck
	 * spinlock" failure.  This has been seen particularly on Alphas; it seems
	 * that the first TAS after returning from kernel space will always fail
	 * on that hardware.
	 *
	 * Once we do decide to block, we use randomly increasing pg_usleep()
	 * delays. The first delay is 1 msec, then the delay randomly increases to
	 * about one second, after which we reset to 1 msec and start again.  The
	 * idea here is that in the presence of heavy contention we need to
	 * increase the delay, else the spinlock holder may never get to run and
	 * release the lock.  (Consider situation where spinlock holder has been
	 * nice'd down in priority by the scheduler --- it will not get scheduled
	 * until all would-be acquirers are sleeping, so if we always use a 1-msec
	 * sleep, there is a real possibility of starvation.)  But we can't just
	 * clamp the delay to an upper bound, else it would take a long time to
	 * make a reasonable number of tries.
	 *
	 * We time out and declare error after NUM_DELAYS delays (thus, exactly
	 * that many tries).  With the given settings, this will usually take 2 or
	 * so minutes.	It seems better to fix the total number of tries (and thus
	 * the probability of unintended failure) than to fix the total time
	 * spent.
	 *
	 * The pg_usleep() delays are measured in milliseconds because 1 msec is a
	 * common resolution limit at the OS level for newer platforms. On older
	 * platforms the resolution limit is usually 10 msec, in which case the
	 * total delay before timeout will be a bit more.
	 */
#define MIN_SPINS_PER_DELAY 10
#define MAX_SPINS_PER_DELAY 1000
#define NUM_DELAYS			1000
#define MIN_DELAY_MSEC		1
#define MAX_DELAY_MSEC		1000

	int			spins = 0;
	int			delays = 0;
	int			cur_delay = 0;
  
	while (TAS(lock))
	{
		/* CPU-specific delay each time through the loop */
		SPIN_DELAY();

		/* Block the process every spins_per_delay tries */
		if (++spins >= spins_per_delay)
		{
			if (++delays > NUM_DELAYS)
				s_lock_stuck(lock, file, line TSRMLS_CC);

			if (cur_delay == 0) /* first time to delay? */
				cur_delay = MIN_DELAY_MSEC;

			pg_usleep(cur_delay * 1000L);

#if defined(S_LOCK_TEST)
			fprintf(stdout, "*");
			fflush(stdout);
#endif

			/* increase delay by a random fraction between 1X and 2X */
			cur_delay += (int) (cur_delay *
					  ((double) rand() / (double) MAX_RANDOM_VALUE) + 0.5);
			/* wrap back to minimum delay when max is exceeded */
			if (cur_delay > MAX_DELAY_MSEC)
				cur_delay = MIN_DELAY_MSEC;

			spins = 0;
		}
	}

	/*
	 * If we were able to acquire the lock without delaying, it's a good
	 * indication we are in a multiprocessor.  If we had to delay, it's a sign
	 * (but not a sure thing) that we are in a uniprocessor. Hence, we
	 * decrement spins_per_delay slowly when we had to delay, and increase it
	 * rapidly when we didn't.  It's expected that spins_per_delay will
	 * converge to the minimum value on a uniprocessor and to the maximum
	 * value on a multiprocessor.
	 *
	 * Note: spins_per_delay is local within our current process. We want to
	 * average these observations across multiple backends, since it's
	 * relatively rare for this function to even get entered, and so a single
	 * backend might not live long enough to converge on a good value.	That
	 * is handled by the two routines below.
	 */
	if (cur_delay == 0)
	{
		/* we never had to delay */
		if (spins_per_delay < MAX_SPINS_PER_DELAY)
			spins_per_delay = Min(spins_per_delay + 100, MAX_SPINS_PER_DELAY);
	}
	else
	{
		if (spins_per_delay > MIN_SPINS_PER_DELAY)
			spins_per_delay = Max(spins_per_delay - 1, MIN_SPINS_PER_DELAY);
	}
}
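Callers normally do not invoke s_lock() directly; PostgreSQL wraps it in an S_LOCK macro that tries the inline TAS fast path and only enters the contention loop above on failure. A sketch, with TSRMLS_CC added to match the PHP-flavored signature in this example:

#define S_LOCK(lock) \
	do { \
		if (TAS(lock)) \
			s_lock((lock), __FILE__, __LINE__ TSRMLS_CC); \
	} while (0)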
Example #7
bool
pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
{
	/* TAS() returns 0 when it acquired the lock, so invert the result
	 * to report "flag was successfully set". */
	return TAS((slock_t *) &ptr->sema) == 0;
}
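A common use of such a flag is a one-shot initialization guard: exactly one caller wins the test-and-set and performs the setup. A hypothetical sketch (ensure_initialized and do_one_time_init are placeholders; the flag must first be cleared once with pg_atomic_init_flag()):

static pg_atomic_flag init_done;	/* cleared elsewhere via pg_atomic_init_flag() */

static void
ensure_initialized(void)
{
	/* Only the first caller sees the flag unset and gets true back. */
	if (pg_atomic_test_set_flag(&init_done))
		do_one_time_init();	/* hypothetical */
}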
Example #8
/*
 ****************************************************************
 *	Initialize the network or obtain an EP			*
 ****************************************************************
 */
void
itnopen (dev_t dev, int oflag)
{
	ITSCB		*ip = &itscb;
	KFILE		*fp = u.u_fileptr;
	IT_MINOR	minor = MINOR (dev);

	/*
	 *	Check whether the network is initialized
	 */
	if (TAS (&ip->it_init_lock) >= 0)
	{
		if (minor == DAEMON)
		{
			if (superuser () < 0)
				return;

			init_it_block ();

			ether_init ();

			raw_ep_free_init ();

			udp_ep_free_init ();

			tcp_ep_free_init ();

			ip->it_N_BLOCK = scb.y_n_itblock;

			if (ip->it_WND_SZ == 0)
				ip->it_WND_SZ = WND_SZ;

			if (ip->it_GOOD_WND == 0)
				ip->it_GOOD_WND = GOOD_WND;

			if (ip->it_ALPHA == 0)
				ip->it_ALPHA = DEF_ALPHA;

			if (ip->it_BETA == 0)
				ip->it_BETA = DEF_BETA;

			if (ip->it_SRTT == 0)
				ip->it_SRTT = INIT_SRTT;

			if (ip->it_N_TRANS == 0)
				ip->it_N_TRANS = DEF_N_TRANS;

			if (ip->it_WAIT == 0)
				ip->it_WAIT = DEF_WAIT;

			if (ip->it_SILENCE == 0)
				ip->it_SILENCE = DEF_SILENCE;

			if (ip->it_MAX_SGSZ == 0)
				ip->it_MAX_SGSZ = MAX_SGSZ;

			SEMAINIT (&ip->it_block_sema, ip->it_N_BLOCK, 0 /* no history */);

		   /***	ip->it_gateway = 1; ***/
		   	ip->it_pipe_mode = 1;
		}
		else
		{
			CLEAR (&ip->it_init_lock);

			u.u_error = TBADNET;
		}

		return;
	}

	/*
	 *	Basic sanity check
	 */
	if ((oflag & O_RW) != O_RW)
		{ u.u_error = EINVAL; return; }

	/*
	 *	Obtain and initialize an "endpoint"
	 */
	switch (minor)
	{
		/*
		 *	The "in_daemon": subsequent "open"s have no effect
		 */
	    case DAEMON:
		return;

		/*
		 *	RAW protocol
		 */
	    case RAW:
	    {
		RAW_EP		*rp;

		if (fp->f_union != KF_NULL)
			{ u.u_error = EBADF; return; }

		if ((rp = get_raw_ep ()) == NO_RAW_EP)
			{ u.u_error = EAGAIN; return; }

	   /***	SPINLOCK (&rp->rp_inq_lock); ***/

		rp->rp_state	= S_UNBOUND;

		fp->f_union	= KF_ITNET;
		fp->f_endpoint	= rp;

	   /***	SPINFREE (&rp->rp_inq_lock); ***/

		return;
	    }

		/*
		 *	UDP protocol
		 */
	    case UDP:
	    {
		UDP_EP		*up;

		if (fp->f_union != KF_NULL)
			{ u.u_error = EBADF; return; }

		if ((up = get_udp_ep ()) == NO_UDP_EP)
			{ u.u_error = EAGAIN; return; }

	   /***	SPINLOCK (&up->up_inq_lock); ***/

		up->up_state	= S_UNBOUND;

		fp->f_union	= KF_ITNET;
		fp->f_endpoint	= up;

	   /***	SPINFREE (&up->up_inq_lock); ***/

		return;
	    }

		/*
		 *	TCP protocol
		 */
	    case TCP:
	    {
		TCP_EP		*tp;

		if (fp->f_union != KF_NULL)
			{ u.u_error = EBADF; return; }

		if ((tp = get_tcp_ep ()) == NO_TCP_EP)
			{ u.u_error = EAGAIN; return; }

	   /***	SLEEPLOCK (&tp->tp_lock, PITNETOUT); ***/

		tp->tp_state		 = S_UNBOUND;
		tp->tp_SRTT		 = ip->it_SRTT;
		tp->tp_max_seg_sz 	 = ip->it_MAX_SGSZ;
		tp->tp_good_wnd 	 = ip->it_GOOD_WND;
		tp->tp_max_wait		 = ip->it_WAIT;
		tp->tp_max_silence	 = ip->it_SILENCE;
		tp->tp_rnd_in.tp_rnd_sz  = ip->it_WND_SZ;
		tp->tp_rnd_out.tp_rnd_sz = ip->it_WND_SZ;
		tp->tp_last_rcv_time	 = time;

		fp->f_union	= KF_ITNET;
		fp->f_endpoint	= tp;

	   /***	SLEEPFREE (&tp->tp_lock); ***/

		return;
	    }

	}	/* end switch */

	u.u_error = ENXIO;
	return;

}	/* end itnopen */