Example #1
File: ceq.c Project: anandab/akaros
/* Consumer side, returns TRUE on success and fills *msg with the ev_msg.  If
 * the ceq appears empty, it will return FALSE.  Messages that arrive after we
 * start consuming may not be received by this call. */
bool get_ceq_msg(struct ceq *ceq, struct event_msg *msg)
{
	int32_t idx = get_ring_idx(ceq);
	if (idx == -1) {
		if (!ceq->ring_overflowed)
			return FALSE;
		/* We didn't get anything via the ring, but if we're overflowed, then we
		 * need to look in the array directly.  Note that we only handle
		 * overflow when we failed to get something.  Eventually, we'll deal
		 * with overflow (which should be very rare).  Also note that while we
		 * are dealing with overflow, the kernel could be producing and using
		 * the ring, and we could have consumers consuming from the ring.
		 *
		 * Overall, we need to clear the overflow flag, make sure the list is
		 * empty, and turn the flag back on if it isn't.  That'll make sure
		 * overflow is set if there's a chance there is a message in the array
		 * that doesn't have an idx in the ring.
		 *
		 * However, if we do that, there's a time when overflow isn't set and
		 * the ring is empty.  A concurrent consumer could think that the ring
		 * is empty, when in fact it isn't.  That's bad, since we could miss a
		 * message (i.e. sleep when we have a message we needed).  So we'll need
		 * to deal with concurrent consumers, and whatever we do will also need
		 * to deal with concurrent consumers who handle overflow too.  Easiest
		 * thing is to just lock.  If the lock is set, then that also means the
		 * mailbox isn't empty. */
		spin_pdr_lock((struct spin_pdr_lock*)&ceq->u_lock);
		/* Check again - someone may have handled it while we were waiting on
		 * the lock */
		if (!ceq->ring_overflowed) {
			spin_pdr_unlock((struct spin_pdr_lock*)&ceq->u_lock);
			return FALSE;
		}
		ceq->ring_overflowed = FALSE;
		wrmb(); /* clear overflowed before reading event entries */
		for (int i = 0; i < ceq->nr_events; i++) {
			if (extract_ceq_msg(ceq, i, msg)) {
				/* We found something.  There might be more, but a future
				 * consumer will have to deal with it, or verify there isn't. */
				ceq->ring_overflowed = TRUE;
				spin_pdr_unlock((struct spin_pdr_lock*)&ceq->u_lock);
				return TRUE;
			}
		}
		/* made it to the end, looks like there was no overflow left.  there
		 * could be new ones added behind us (they'd be in the ring or overflow
		 * would be turned on again), but those messages were added after we
		 * started consuming, and therefore not our obligation to extract. */
		spin_pdr_unlock((struct spin_pdr_lock*)&ceq->u_lock);
		return FALSE;
	}
	if (!extract_ceq_msg(ceq, idx, msg))
		return FALSE;
	return TRUE;
}
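
For context, here is a minimal sketch of how a consumer loop might drain a ceq with get_ceq_msg(); the drain_ceq() wrapper and the handle_msg() callback are hypothetical, only the get_ceq_msg() call itself comes from the code above.

/* Hypothetical drain loop: pull messages until the ceq reports empty.
 * handle_msg() stands in for whatever the application does per event. */
static void drain_ceq(struct ceq *ceq)
{
	struct event_msg msg;

	while (get_ceq_msg(ceq, &msg))
		handle_msg(&msg);
	/* A FALSE return only means the queue appeared empty; messages posted
	 * after the last successful call may need another pass later. */
}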
Example #2
File: ktimer.c Project: brho/kweb
/* Timer thread body: sleep for period_ms, run the callback, and repeat until
 * a stop is requested via ktimer_stop() (Example #4). */
static void *__ktimer(void *arg)
{
  struct ktimer *t = (struct ktimer*)arg;

  t->state = S_TIMER_STARTED;
  for(;;) {
    usleep(1000*t->period_ms);

    spin_pdr_lock(&t->lock);
    /* On a stop request, break out with the lock still held; it is released
     * below once the state has been updated. */
    if(t->state == S_TIMER_STOPPING)
      break;
    spin_pdr_unlock(&t->lock);
    /* Run the callback without holding the lock. */
    t->callback(t->callback_arg);
  }
  t->state = S_TIMER_STOPPED;
  spin_pdr_unlock(&t->lock);
  return NULL;
}
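
The snippet above does not show how the timer thread gets created. Below is a rough sketch of a companion ktimer_start(), assuming the thread is an ordinary pthread and that struct ktimer has a pthread_t field (called tid here) next to the lock/state/period_ms/callback fields used by __ktimer(); both the function and the field name are assumptions, not part of the file shown.

#include <pthread.h>

/* Hypothetical companion to __ktimer(): spawn the timer thread.  Assumes
 * struct ktimer carries a pthread_t tid member; adjust to the real layout. */
int ktimer_start(struct ktimer *t)
{
  if (t->state == S_TIMER_STARTED)
    return -1;
  return pthread_create(&t->tid, NULL, __ktimer, t);
}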
Example #3
/* Yield callback used by upthread_mutex_lock() (Example #7): with the mutex's
 * spinlock still held by the yielding thread, mark the uthread as blocked,
 * queue it on the mutex, then release the spinlock. */
static void block(struct uthread *uthread, void *arg)
{
	upthread_t upthread = (upthread_t)uthread;
	upthread_mutex_t *mutex = (upthread_mutex_t *) arg;

	assert(mutex);
	uthread_has_blocked(uthread, UTH_EXT_BLK_MUTEX);
	STAILQ_INSERT_TAIL(&mutex->queue, upthread, next);
	spin_pdr_unlock(&mutex->lock);
}
Example #4
File: ktimer.c Project: brho/kweb
int ktimer_stop(struct ktimer *t)
{
  if(t->state != S_TIMER_STARTED)
    return -1;

  /* Request the stop, then wait for the timer thread to acknowledge. */
  spin_pdr_lock(&t->lock);
  t->state = S_TIMER_STOPPING;
  spin_pdr_unlock(&t->lock);
  while(t->state != S_TIMER_STOPPED) {
    pthread_yield();
    cpu_relax();
  }
  return 0;
}
Example #5
/* Release the mutex; once the lock count drops to zero, wake the first queued
 * waiter, if any. */
int upthread_mutex_unlock(upthread_mutex_t* mutex)
{
	if(mutex == NULL)
		return EINVAL;

	spin_pdr_lock(&mutex->lock);
	mutex->locked--;
	if(mutex->locked == 0) {
		upthread_t upthread = STAILQ_FIRST(&mutex->queue);
		if(upthread)
			STAILQ_REMOVE_HEAD(&mutex->queue, next);
		mutex->owner = NULL;
		spin_pdr_unlock(&mutex->lock);

		if(upthread != NULL) {
			uthread_runnable((struct uthread*)upthread);
		}
	}
	else {
		spin_pdr_unlock(&mutex->lock);
	}
	return 0;
}
Example #6
/* Try to take the mutex without blocking.  Returns 0 on success, EBUSY if it
 * is held by another thread; the owner of a recursive mutex may re-enter. */
int upthread_mutex_trylock(upthread_mutex_t* mutex)
{
	if(mutex == NULL)
		return EINVAL;

	int retval = 0;
	spin_pdr_lock(&mutex->lock);
	if(mutex->attr.type == UPTHREAD_MUTEX_RECURSIVE &&
		mutex->owner == upthread_self()) {
		mutex->locked++;
	}
	else if(mutex->locked) {
		retval = EBUSY;
	}
	else {
		mutex->owner = upthread_self();
		mutex->locked++;
	}
	spin_pdr_unlock(&mutex->lock);
	return retval;
}
Example #7
/* Acquire the mutex, yielding to the scheduler via block() (Example #3) while
 * it is contended; the owner of a recursive mutex just bumps the lock count. */
int upthread_mutex_lock(upthread_mutex_t* mutex)
{
	if(mutex == NULL)
		return EINVAL;

	spin_pdr_lock(&mutex->lock);
	if(mutex->attr.type == UPTHREAD_MUTEX_RECURSIVE &&
		mutex->owner == upthread_self()) {
		mutex->locked++;
	}
	else {
		while(mutex->locked) {
			uthread_yield(true, block, mutex);

			spin_pdr_lock(&mutex->lock);
		}
		mutex->owner = upthread_self();
		mutex->locked++;
	}
	spin_pdr_unlock(&mutex->lock);
	return 0;
}
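
Taken together, Examples #5-#7 give the usual lock/trylock/unlock surface. Here is a small usage sketch, assuming a upthread_mutex_t m that has already been initialized elsewhere (no init call is shown in these examples):

/* Sketch only: protect a shared counter with the upthread mutex API above.
 * Initialization of m is assumed to have happened beforehand. */
static upthread_mutex_t m;
static int counter;

static void bump_counter(void)
{
	upthread_mutex_lock(&m);	/* blocks (yields) while contended */
	counter++;
	upthread_mutex_unlock(&m);	/* wakes one queued waiter, if any */
}

static int try_bump_counter(void)
{
	if (upthread_mutex_trylock(&m) != 0)
		return 0;		/* EBUSY: someone else holds it */
	counter++;
	upthread_mutex_unlock(&m);
	return 1;
}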
Example #8
/* Match a reply carrying sequence number seq against the outstanding request
 * list, record its RTT and TTL, and expire requests older than a minute. */
void
clean(uint16_t seq, int64_t now, void *v)
{
	int ttl;
	Req **l, *r;

	ttl = 0;
	if (v) {
		if (proto->version == 4)
			ttl = ((struct ip4hdr *)v)->ttl;
		else
			ttl = ((struct ip6hdr *)v)->ttl;
	}
	spin_pdr_lock(&listlock);
	last = NULL;
	for(l = &first; *l; ){
		r = *l;
		if(v && r->seq == seq){
			r->rtt = ndiff(r->tsctime, now);
			r->ttl = ttl;
			reply(r, v);
		}
		if (now - r->tsctime > MINUTETSC) {
			*l = r->next;
			r->rtt = ndiff(r->tsctime, now);
			if(v)
				r->ttl = ttl;
			if(r->replied == 0)
				lost(r, v);
			free(r);
		}else{
			last = r;
			l = &r->next;
		}
	}
	spin_pdr_unlock(&listlock);
}
Example #9
/* Receive and validate ICMP echo replies until nmsg messages are accounted
 * for, then count any request that was never answered as lost. */
void
rcvr(int fd, int msglen, int interval, int nmsg)
{
	int i, n, munged;
	uint16_t x;
	int64_t now;
	uint8_t *buf = malloc(BUFSIZE);
	struct icmphdr *icmp;
	Req *r;
	struct alarm_waiter waiter;

	init_awaiter(&waiter, alarm_abort_sysc);
	waiter.data = current_uthread;

	sum = 0;
	while(lostmsgs+rcvdmsgs < nmsg){
		/* arm to wake ourselves if the read doesn't connect in time */
		set_awaiter_rel(&waiter, 1000 *
		                ((nmsg - lostmsgs - rcvdmsgs) * interval + waittime));
		set_alarm(&waiter);
		n = read(fd, buf, BUFSIZE);
		/* cancel immediately, so future syscalls don't get aborted */
		unset_alarm(&waiter);

		now = read_tsc();
		if(n <= 0){	/* read interrupted - time to go */
			/* Faking time being a minute in the future, so clean marks our
			 * message as lost.  Note this will also end up cancelling any other
			 * pending replies that would have expired by then.  Whatever. */
			clean(0, now + MINUTETSC, NULL);
			continue;
		}
		if(n < msglen){
			printf("bad len %d/%d\n", n, msglen);
			continue;
		}
		icmp = geticmp(buf);
		munged = 0;
		for(i = proto->iphdrsz + ICMP_HDRSIZE; i < msglen; i++)
			if(buf[i] != (uint8_t)i)
				munged++;
		if(munged)
			printf("corrupted reply\n");
		x = nhgets(icmp->seq);
		if(icmp->type != proto->echoreply || icmp->code != 0) {
			printf("bad type/code/sequence %d/%d/%d (want %d/%d/%d)\n",
				icmp->type, icmp->code, x,
				proto->echoreply, 0, x);
			continue;
		}
		clean(x, now, buf);
	}

	spin_pdr_lock(&listlock);
	for(r = first; r; r = r->next)
		if(r->replied == 0)
			lostmsgs++;
	spin_pdr_unlock(&listlock);

	if(!quiet && lostmsgs)
		printf("%d out of %d messages lost\n", lostmsgs,
			lostmsgs+rcvdmsgs);
}
Example #10
/* Build an ICMP echo request and send n of them, interval ms apart, queueing
 * a Req record for each so rcvr()/clean() can match the replies. */
void
sender(int fd, int msglen, int interval, int n)
{
	int i, extra;
	uint16_t seq;
	char *buf = malloc(BUFSIZE);
	uint8_t me[IPaddrlen], mev4[IPv4addrlen];
	struct icmphdr *icmp;
	Req *r;

	firstseq = seq = rand();

	icmp = geticmp(buf);
	memset(buf, 0, proto->iphdrsz + ICMP_HDRSIZE);
	for(i = proto->iphdrsz + ICMP_HDRSIZE; i < msglen; i++)
		buf[i] = i;
	icmp->type = proto->echocmd;
	icmp->code = 0;

	/* arguably the kernel should fill in the right src addr. */
	myipvnaddr(me, proto, network);
	if (proto->version == 4) {
		v6tov4(mev4, me);
		memmove(((struct ip4hdr *)buf)->src, mev4, IPv4addrlen);
	} else
		ipmove(((struct ip6hdr *)buf)->src, me);
	if (addresses)
		printf("\t%i -> %s\n", me, target);

	if(pingrint != 0 && interval <= 0)
		pingrint = 0;
	extra = 0;
	for(i = 0; i < n; i++){
		if(i != 0){
			if(pingrint != 0)
				extra = rand();
			/* interval is in ms; uthread_usleep() takes microseconds */
			uthread_usleep((interval + extra) * 1000);
		}
		r = calloc(1, sizeof *r);
		if (r == NULL){
			printf("out of memory? \n");
			break;
		}
		hnputs(icmp->seq, seq);
		r->seq = seq;
		r->next = NULL;
		r->replied = 0;
		r->tsctime = read_tsc();	/* avoid early free in reply! */
		spin_pdr_lock(&listlock);
		if(first == NULL)
			first = r;
		else
			last->next = r;
		last = r;
		spin_pdr_unlock(&listlock);
		r->tsctime = read_tsc();
		if(write(fd, buf, msglen) < msglen){
			fprintf(stderr, "%s: write failed: %r\n", argv0);
			return;
		}
		seq++;
	}
	done = 1;
}
Example #11
File: ucq.c Project: brho/akaros
/* Consumer side, returns TRUE on success and fills *msg with the ev_msg.  If
 * the ucq appears empty, it will return FALSE.  Messages that arrive after we
 * start consuming may not be received by this call. */
bool get_ucq_msg(struct ucq *ucq, struct event_msg *msg)
{
	uintptr_t my_idx;
	struct ucq_page *old_page, *other_page;
	struct msg_container *my_msg;
	struct spin_pdr_lock *ucq_lock = (struct spin_pdr_lock*)(&ucq->u_lock);

	do {
loop_top:
		cmb();
		my_idx = atomic_read(&ucq->cons_idx);
		/* The ucq is empty if the consumer and producer are on the same
		 * 'next' slot. */
		if (my_idx == atomic_read(&ucq->prod_idx))
			return FALSE;
		/* Is the slot we want good?  If not, we're going to need to try
		 * and move on to the next page.  If it is, we bypass all of
		 * this and try to CAS on us getting my_idx. */
		if (slot_is_good(my_idx))
			goto claim_slot;
		/* Slot is bad, let's try and fix it */
		spin_pdr_lock(ucq_lock);
		/* Reread the idx, in case someone else fixed things up while we
		 * were waiting/fighting for the lock */
		my_idx = atomic_read(&ucq->cons_idx);
		if (slot_is_good(my_idx)) {
			/* Someone else fixed it already, let's just try to get
			 * out */
			spin_pdr_unlock(ucq_lock);
			/* Make sure this new slot has a producer (ucq isn't
			 * empty) */
			if (my_idx == atomic_read(&ucq->prod_idx))
				return FALSE;
			goto claim_slot;
		}
		/* At this point, the slot is bad, and all other possible
		 * consumers are spinning on the lock.  Time to fix things up:
		 * Set the counter to the next page, and free the old one. */
		/* First, we need to wait and make sure the kernel has posted
		 * the next page.  Worst case, we know that the kernel is
		 * working on it, since prod_idx != cons_idx */
		old_page = (struct ucq_page*)PTE_ADDR(my_idx);
		while (!old_page->header.cons_next_pg)
			cpu_relax();
		/* Now set the counter to the next page */
		assert(!PGOFF(old_page->header.cons_next_pg));
		atomic_set(&ucq->cons_idx, old_page->header.cons_next_pg);
		/* Side note: at this point, any *new* consumers coming in will
		 * grab slots based off the new counter index (cons_idx) */
		/* Now free up the old page.  Need to make sure all other
		 * consumers are done.  We spin til enough are done, like an
		 * inverted refcnt. */
		while (atomic_read(&old_page->header.nr_cons) < NR_MSG_PER_PAGE)
		{
			/* spinning on userspace here, specifically, another
			 * vcore and we don't know who it is.  This will spin a
			 * bit, then make sure they aren't preempted */
			cpu_relax_any();
		}
		/* Now the page is done.  Zero its metadata and give it up. */
		old_page->header.cons_next_pg = 0;
		atomic_set(&old_page->header.nr_cons, 0);
		/* We want to "free" the page.  We'll try and set it as the
		 * spare.  If there is already a spare, we'll free that one. */
		other_page = (struct ucq_page*)atomic_swap(&ucq->spare_pg,
		                                           (long)old_page);
		assert(!PGOFF(other_page));
		if (other_page) {
			munmap(other_page, PGSIZE);
			atomic_dec(&ucq->nr_extra_pgs);
		}
		/* All fixed up, unlock.  Other consumers may lock and check to
		 * make sure things are done. */
		spin_pdr_unlock(ucq_lock);
		/* Now that everything is fixed, try again from the top */
		goto loop_top;
claim_slot:
		cmb();	/* so we can goto claim_slot */
		/* If we're still here, my_idx is good, and we'll try to claim
		 * it.  If we fail, we need to repeat the whole process. */
	} while (!atomic_cas(&ucq->cons_idx, my_idx, my_idx + 1));
	assert(slot_is_good(my_idx));
	/* Now we have a good slot that we can consume */
	my_msg = slot2msg(my_idx);
	/* linux would put an rmb_depends() here */
	/* Wait til the msg is ready (kernel sets this flag) */
	while (!my_msg->ready)
		cpu_relax();
	rmb();	/* order the ready read before the contents */
	/* Copy out */
	*msg = my_msg->ev_msg;
	/* Unset this for the next usage of the container */
	my_msg->ready = FALSE;
	wmb();	/* post the ready write before incrementing */
	/* Increment nr_cons, showing we're done */
	atomic_inc(&((struct ucq_page*)PTE_ADDR(my_idx))->header.nr_cons);
	return TRUE;
}
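
As with the ceq consumer in Example #1, get_ucq_msg() is typically called in a loop until it reports empty. A minimal sketch follows, where process_event() is a hypothetical per-message handler and the return value counts how many messages were drained:

/* Hypothetical helper: drain every currently-visible message from the ucq.
 * process_event() is a placeholder for the application's handler. */
static int drain_ucq(struct ucq *ucq)
{
	struct event_msg msg;
	int nr_handled = 0;

	while (get_ucq_msg(ucq, &msg)) {
		process_event(&msg);
		nr_handled++;
	}
	return nr_handled;
}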