Example #1
void call_cs(void)
{
	static int first = 0;
	static int high, low;
	u64_t start = 0, end = 0;

	if(first == 1){
		low = cos_get_thd_id();
		sched_wakeup(cos_spd_id(), high);
	}

	if(first == 0){
		first = 1;
		high = cos_get_thd_id();
		sched_block(cos_spd_id(), 0);
		rdtscll(start);
		sched_block(cos_spd_id(), low);
	}

	if (cos_get_thd_id() == low) {
		sched_wakeup(cos_spd_id(), high);
	}

	if (cos_get_thd_id() == high) {
		rdtscll(end);
		printc("context switch cost: %llu cycs\n", (end-start) >> 1);
		first = 0;
	}
}
Example #2
/* Wait for a specific event */
int evt_wait(spdid_t spdid, long extern_evt)
{
	struct evt *e;

	while (1) {
		int ret;

		lock_take(&evt_lock);
		e = mapping_find(extern_evt);
		if (NULL == e) goto err;
		if (0 > (ret = __evt_read(e))) goto err;
		ACT_RECORD(ACT_WAIT, spdid, e->extern_id, cos_get_thd_id(), 0);
		lock_release(&evt_lock);
		if (1 == ret) {
			assert(extern_evt == e->extern_id);
			return 0;
		} else {
			ACT_RECORD(ACT_SLEEP, spdid, e->extern_id, cos_get_thd_id(), 0);
			if (0 > sched_block(cos_spd_id(), 0)) BUG();
		}
	}
err:
	lock_release(&evt_lock);
	return -1; 
}
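For context, a minimal sketch of the trigger side that would pair with evt_wait() above (not from the source): it looks up the event under evt_lock and wakes the thread that parked itself in sched_block(). The evt_trigger_sketch name and the e->blocked_thd field are assumptions for illustration only.

int evt_trigger_sketch(spdid_t spdid, long extern_evt)
{
	struct evt *e;
	unsigned short int waiter;

	lock_take(&evt_lock);
	e = mapping_find(extern_evt);
	if (NULL == e) {
		lock_release(&evt_lock);
		return -1;
	}
	/* assumed field: __evt_read() bookkeeping would have recorded the waiter here */
	waiter = e->blocked_thd;
	lock_release(&evt_lock);

	/* wake the thread blocked in evt_wait() via sched_block(cos_spd_id(), 0) */
	if (waiter) sched_wakeup(cos_spd_id(), waiter);
	return 0;
}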
Example #3
int __evt_wait(spdid_t spdid, long extern_evt, int n)
{
    struct evt *e;

    while (1) {
        int ret;

        lock_take(&evt_lock);
        e = mapping_find(extern_evt);
        if (NULL == e) goto err;
        if (0 > (ret = __evt_read(e))) goto err;
        ACT_RECORD(ACT_WAIT, spdid, e->extern_id, cos_get_thd_id(), 0);
        e->n_wait = n;
        e->core_id = cos_cpuid();
        if (ret == 1) e->n_received = 0;
        lock_release(&evt_lock);
        if (1 == ret) {
            assert(extern_evt == e->extern_id);
            return 0;
        } else {
            ACT_RECORD(ACT_SLEEP, spdid, e->extern_id, cos_get_thd_id(), 0);

            /* We could use acaps to block / wake up, which
             * would avoid calling into the scheduler, but
             * that is a bit of a hack. */

            if (0 > sched_block(cos_spd_id(), 0)) BUG();
        }
    }

err:
    lock_release(&evt_lock);
    return -1;
}
Example #4
/* Wait on a group of events (like epoll) */
long evt_grp_wait(spdid_t spdid)
{
	struct evt_grp *g;
	struct evt *e = NULL;
	long extern_evt;

	while (1) {
		lock_take(&evt_lock);

		g = evt_grp_find(cos_get_thd_id());
		ACT_RECORD(ACT_WAIT_GRP, spdid, e ? e->extern_id : 0, cos_get_thd_id(), 0);
		if (NULL == g) goto err;
		if (__evt_grp_read(g, &e)) goto err;

		if (NULL != e) {
			extern_evt = e->extern_id;
			lock_release(&evt_lock);
			return extern_evt;
		} else {
			lock_release(&evt_lock);
			ACT_RECORD(ACT_SLEEP, spdid, 0, cos_get_thd_id(), 0);
			if (0 > sched_block(cos_spd_id(), 0)) BUG();
		}
	}
err:
	lock_release(&evt_lock);
	return -1; 
}
Example #5
/**
 * Asks for a stack back from all of the components.  Will release and
 * take the lock.
 */
static void
stkmgr_wait_for_stack(struct spd_stk_info *ssi)
{
	struct blocked_thd *bthd;

	DOUT("stkmgr_request_stack\n");
	stkmgr_spd_mark_relinquish(ssi->spdid);

	DOUT("All stacks for %d set to relinquish, %d waiting\n", ssi->spdid, cos_get_thd_id());
        
	bthd = malloc(sizeof(struct blocked_thd));
	if (bthd == NULL) BUG();

	bthd->thd_id = cos_get_thd_id();
	DOUT("Adding thd to the blocked list: %d\n", bthd->thd_id);
	ADD_LIST(&ssi->bthd_list, bthd, next, prev);
	ssi->num_blocked_thds++;

	RELEASE();

	DOUT("Blocking thread: %d\n", bthd->thd_id);
	/* FIXME: dependencies */
	sched_block(cos_spd_id(), 0);
	TAKE(); 
	DOUT("Thd %d wokeup and is obtaining a stack\n", cos_get_thd_id());

	return;
}
Example #6
void call(void)
{
    static int first = 0;
    static int low = 0, high = 0;

    if (first == 0 ) {
        high = cos_get_thd_id();
        low = high + 1;
        first = 1;
    }

    u64_t start = 0, end = 0;

    int j = 0;
    while(j++ < 10) {
        if (cos_get_thd_id() == high) {
            /* printc("p3\n"); */
            sched_block(cos_spd_id(), 0);
            /* printc("p4\n"); */
            rdtscll(start);
        }
        /* printc(" thd %d is calling lower \n", cos_get_thd_id()); */

        call_lower(low, high);

        if (cos_get_thd_id() == high) {
            rdtscll(end);
            printc("cost of cached stkPIP %llu cycs\n", end-start);
        }
    }
    return;
}
Example #7
// track blocked threads here for all clients (the records live on each thread's stack)
int __sg_sched_block(spdid_t spdid, int dependency_thd)
{
	struct blocked_thd blk_thd;

	// add to list
	cos_sched_lock_take();

	if (unlikely(!bthds[spdid].next)) {
		INIT_LIST(&bthds[spdid], next, prev);
	}
	INIT_LIST(&blk_thd, next, prev);
	blk_thd.id = cos_get_thd_id();
	blk_thd.dep_thd = dependency_thd;
	/* printc("add to the list..... thd %d\n", cos_get_thd_id()); */
	ADD_LIST(&bthds[spdid], &blk_thd, next, prev);

	cos_sched_lock_release();

	sched_block(spdid, dependency_thd);

	// remove from list in both normal path and reflect path
	cos_sched_lock_take();
	/* printc("remove from the list..... thd %d\n", cos_get_thd_id()); */
	REM_LIST(&blk_thd, next, prev);
	cos_sched_lock_release();

	return 0;
}
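A hedged sketch (not in the source) of the matching wakeup/reflection path hinted at by the comments above: find the first thread blocked on behalf of this spd and wake it; the woken thread removes itself from the list when it returns from sched_block(). The traversal relies on the circular next/prev list implied by the INIT_LIST/ADD_LIST usage, and the function name is illustrative.

int __sg_sched_wakeup_one_sketch(spdid_t spdid)
{
	int tid = 0;

	cos_sched_lock_take();
	/* the list head links to itself when empty (INIT_LIST) */
	if (bthds[spdid].next && bthds[spdid].next != &bthds[spdid]) {
		tid = bthds[spdid].next->id;
	}
	cos_sched_lock_release();

	if (!tid) return 0;
	/* the woken thread removes itself in __sg_sched_block() above */
	sched_wakeup(cos_spd_id(), tid);
	return tid;
}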
Example #8
int __sg_sched_block(spdid_t spdid, unsigned short int dependency_thd, int crash_flag)
{
	/* printc("in scheduler server block interface\n"); */
	if (unlikely(crash_flag)) {
		return sched_block_helper(spdid, dependency_thd);
	}
	return sched_block(spdid, dependency_thd);
}
Example #9
/*
 * FIXME: allow amnt to be specified in time units rather than ticks.
 */
int timed_event_block(spdid_t spdinv, unsigned int amnt)
{
	spdid_t spdid = cos_spd_id();
	struct thread_event *te;
	int block_time;
	event_time_t t;

	if (amnt == 0) return 0;
	/* 
	 * Convert from usec to ticks
	 *
	 * +2 here as we don't know how far through the current clock
	 * tick we are _and_ we don't know how far into the clock tick
	 * the wakeup time is.  The sleep is supposed to be for _at
	 * least_ amnt clock ticks, thus here we are conservative.
	 */
	//amnt = (amnt/(unsigned int)usec_per_tick) + 2;
	/* update: seems like +1 should be enough */
	amnt++;
	
	TAKE(spdid);
	te = te_get(cos_get_thd_id());
	if (NULL == te) BUG();
	assert(EMPTY_LIST(te, next, prev));

	te->thread_id = cos_get_thd_id();
	te->flags &= ~TE_TIMED_OUT;
	te->flags |= TE_BLOCKED;

	ticks = sched_timestamp();
	te->event_expiration = ticks + amnt;
	block_time = ticks;
   	assert(te->event_expiration > ticks);
	t = next_event_time();
	insert_event(te);
	assert(te->next && te->prev && !EMPTY_LIST(te, next, prev));
	RELEASE(spdid);

	if (t != next_event_time()) sched_timeout(spdid, amnt);
	if (-1 == sched_block(spdid, 0)) {
		prints("fprr: sched block failed in timed_event_block.");
	}

	/* we had better have been taken off the list! */
	assert(EMPTY_LIST(te, next, prev));
	if (te->flags & TE_TIMED_OUT) return TIMER_EXPIRED;

	/* 
	 * The event has already been removed from the event list in
	 * event_expiration() by the timeout thread.
	 * 
	 * Minus 1 here as we must report the amount of time we are
	 * sure we waited for.  As we don't know how far into the tick
	 * we were when we slept, and how far the wakeup is into a
	 * tick, we must account for this.
	 */
	return ((int)ticks - block_time - 1); //*usec_per_tick; /* expressed in ticks currently */
}
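A minimal caller-side sketch (not from the source) showing how the return convention above is typically consumed: TIMER_EXPIRED means the full timeout elapsed, while any other return value is the number of ticks the thread was actually blocked before an early wakeup. The function name and the 10-tick timeout are illustrative.

void timed_block_usage_sketch(void)
{
	int ret = timed_event_block(cos_spd_id(), 10 /* ticks */);

	if (TIMER_EXPIRED == ret) printc("timed out after the full wait\n");
	else                      printc("woken early, blocked for ~%d ticks\n", ret);
}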
Example #10
static int __net_connect(spdid_t spdid, net_connection_t nc, struct ip_addr *ip, u16_t port)
{
	struct intern_connection *ic;
	u16_t tid = cos_get_thd_id();
	
	NET_LOCK_TAKE();
	if (!net_conn_valid(nc)) goto perm_err;
	ic = net_conn_get_internal(nc);
	if (NULL == ic) goto perm_err;
	if (tid != ic->tid) goto perm_err;
	assert(ACTIVE == ic->thd_status);

	switch (ic->conn_type) {
	case UDP:
	{
		struct udp_pcb *up;

		up = ic->conn.up;
		if (ERR_OK != udp_connect(up, ip, port)) {
			NET_LOCK_RELEASE();
			return -EISCONN;
		}
		break;
	}
	case TCP:
	{
		struct tcp_pcb *tp;

		tp = ic->conn.tp;
		ic->thd_status = CONNECTING;
		if (ERR_OK != tcp_connect(tp, ip, port, cos_net_lwip_tcp_connected)) {
			ic->thd_status = ACTIVE;
			NET_LOCK_RELEASE();
			return -ENOMEM;
		}
		NET_LOCK_RELEASE();
		if (sched_block(cos_spd_id(), 0) < 0) BUG();
		assert(ACTIVE == ic->thd_status);
		/* When we wake up, we should be connected. */
		return 0;
	}
	case TCP_CLOSED:
//		__net_close(ic);
		NET_LOCK_RELEASE();
		return -EPIPE;
	default:
		BUG();
	}
	NET_LOCK_RELEASE();
	return 0;
perm_err:
	NET_LOCK_RELEASE();
	return -EPERM;
}
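For the TCP case above, a hedged sketch (not the actual implementation) of what the cos_net_lwip_tcp_connected callback passed to tcp_connect() presumably does: flip the connection back to ACTIVE and wake the thread that blocked in __net_connect(). It assumes the callback's arg points at the struct intern_connection; the signature is lwIP's standard connected-callback signature.

static err_t
cos_net_lwip_tcp_connected_sketch(void *arg, struct tcp_pcb *tp, err_t err)
{
	struct intern_connection *ic = arg;

	/* __net_connect() set thd_status to CONNECTING before blocking */
	ic->thd_status = ACTIVE;
	/* wake the thread parked in sched_block() inside __net_connect() */
	sched_wakeup(cos_spd_id(), ic->tid);

	return ERR_OK;
}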
Example #11
int __sg_sched_block(spdid_t spdid, unsigned short int dependency_thd)
{
	/* printc("ser: sched_block (thd %d)\n", cos_get_thd_id()); */
	int ret;
#ifdef LOG_MONITOR
	evt_enqueue(cos_get_thd_id(), spdid, cos_spd_id(), FN_SCHED_BLOCK, dependency_thd, EVT_SINV);
#endif
	ret = sched_block(spdid, dependency_thd);
#ifdef LOG_MONITOR
	evt_enqueue(cos_get_thd_id(), cos_spd_id(), spdid, FN_SCHED_BLOCK, dependency_thd, EVT_SRET);
#endif
	return ret;
}
Example #12
void call(void)
{
    static int flag = 0;

    static int first,  second;

    u64_t start = 0, end = 0;

    if(flag == 1) {
        /* printc("2\n"); */
        second = cos_get_thd_id();
        sched_wakeup(cos_spd_id(), first);
        /* printc("4\n");  */
    }

    if(flag == 0) {
        /* printc("1\n"); */
        flag = 1;
        first = cos_get_thd_id();
        sched_block(cos_spd_id(), 0);
        /* printc("3\n"); */
        rdtscll(start);
        sched_block(cos_spd_id(), second);
        /* printc("6\n"); */
    }

    if (cos_get_thd_id() == second) {
        /* printc("5\n"); */
        sched_wakeup(cos_spd_id(), first);
    }

    if (cos_get_thd_id() == first) {
        /* printc("7\n"); */
        rdtscll(end);
        printc("cost of basics %llu cycs\n", end-start);
    }

    return;
}
Example #13
static inline void
cbuf_thread_block(struct cbuf_comp_info *cci, unsigned long request_size)
{
	struct blocked_thd bthd;

	bthd.thd_id       = cos_get_thd_id();
	bthd.request_size = request_size;
	rdtscll(bthd.blk_start);
	ADD_LIST(&cci->bthd_list, &bthd, next, prev);
	cci->num_blocked_thds++;
	cbuf_mark_relinquish_all(cci);
	CBUF_RELEASE();
	sched_block(cos_spd_id(), 0);
}
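A hedged counterpart sketch (not in the source): once memory is available again, walk cci->bthd_list, use the blk_start timestamp recorded above to account for how long each thread sat blocked, and wake it. The blk_tot accumulator field is an assumption for illustration, and the caller is assumed to hold the cbuf lock (the counterpart of the CBUF_RELEASE() used above).

static inline void
cbuf_thread_wake_all_sketch(struct cbuf_comp_info *cci)
{
	struct blocked_thd *bt, *nxt;
	u64_t now;

	for (bt = cci->bthd_list.next; bt != &cci->bthd_list; bt = nxt) {
		nxt = bt->next;
		rdtscll(now);
		cci->blk_tot += now - bt->blk_start; /* assumed accumulator field */
		REM_LIST(bt, next, prev);
		cci->num_blocked_thds--;
		sched_wakeup(cos_spd_id(), bt->thd_id);
	}
}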
Example #14
int __sg_sched_block(spdid_t spdid, unsigned short int dependency_thd)
{
	/* printc("ser: sched_block (thd %d)\n", cos_get_thd_id()); */
	int ret;
#ifdef LOG_MONITOR
	monevt_enqueue(cos_spd_id(), 11, dependency_thd);
#endif
	ret = sched_block(spdid, dependency_thd);
#ifdef LOG_MONITOR
	monevt_enqueue(0, 11, dependency_thd);
#endif

	return ret;
}
Example #15
/* As above, but return more than one event notification */
int evt_grp_mult_wait(spdid_t spdid, struct cos_array *data)
{
	struct evt_grp *g;
	struct evt *e = NULL;
	int evt_gathered = 0, evt_max;

	if (!cos_argreg_arr_intern(data)) return -EINVAL;
	evt_max = data->sz / sizeof(long);

	while (1) {
		lock_take(&evt_lock);

		g = evt_grp_find(cos_get_thd_id());
		ACT_RECORD(ACT_WAIT_GRP, spdid, e ? e->extern_id : 0, cos_get_thd_id(), 0);
		if (NULL == g) goto err;

		/* gather multiple events */
		do {
			if (__evt_grp_read_noblock(g, &e)) goto err;
			if (NULL != e) {
				((long*)data->mem)[evt_gathered] = e->extern_id;
				evt_gathered++;
			}
		} while (e && evt_gathered < evt_max);

		/* return them if they were gathered */
		if (evt_gathered > 0) {
			lock_release(&evt_lock);
			return evt_gathered;
		}

		/* 
		 * otherwise sleep till there is an event (first we
		 * need to call evt_grp_read to set the blocked
		 * status)
		 */
		if (__evt_grp_read(g, &e)) goto err;
		assert(NULL == e);
		lock_release(&evt_lock);
		ACT_RECORD(ACT_SLEEP, spdid, 0, cos_get_thd_id(), 0);
		if (0 > sched_block(cos_spd_id(), 0)) BUG();
	}
err:
	lock_release(&evt_lock);
	return -1; 
	
}
Example #16
void loader_enqueue(int* cola) {
  if (*cola == -1) {
    // The queue is empty; add the first element
    task_table[cur_pid].next = cur_pid;
    task_table[cur_pid].prev = cur_pid;
    *cola = cur_pid;
  } else {
    int prev_pid = task_table[*cola].prev;
    // Add it at the back to avoid starvation
    task_table[prev_pid].next = cur_pid;
    task_table[*cola].prev = cur_pid;
    task_table[cur_pid].next = *cola;
    task_table[cur_pid].prev = prev_pid;
  }

  int i = sched_block();
  tasks_blocked++;
  tasks_running--;
  loader_switchto(i);
}
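A hedged counterpart sketch (not from the source): dequeue the task at the front of the same circular queue and adjust the blocked/running counters before it is woken. The loader_dequeue name is illustrative.

int loader_dequeue(int* cola) {
  if (*cola == -1) return -1;          // empty queue, nothing to wake

  int pid  = *cola;
  int next = task_table[pid].next;

  if (next == pid) {
    *cola = -1;                        // it was the only element
  } else {
    int prev = task_table[pid].prev;
    task_table[prev].next = next;
    task_table[next].prev = prev;
    *cola = next;
  }

  tasks_blocked--;
  tasks_running++;
  return pid;
}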
Example #17
int periodic_wake_wait(spdid_t spdinv)
{
	spdid_t spdid = cos_spd_id();
	struct thread_event *te;
	u16_t tid = cos_get_thd_id();
	long long t;

	TAKE(spdid);
	te = te_pget(tid);
	if (NULL == te) BUG();
	if (!(te->flags & TE_PERIODIC)) goto err;
		
	assert(!EMPTY_LIST(te, next, prev));
	te->flags |= TE_BLOCKED;

	rdtscll(t);
	if (te->missed) {	/* we're late */
		long long diff;
		assert(te->completion);

		diff = (t - te->completion);
		te->lateness_tot += diff;
		//te->samples++;
		te->miss_lateness_tot += diff;
		//te->miss_samples++;
		
		te->completion = 0;
	} else {		/* on time! */
		te->completion = t;
	}
	RELEASE(spdid);

	if (-1 == sched_block(spdid, 0)) {
		prints("fprr: sched block failed in timed_event_periodic_wait.");
	}

	return 0;
err:
	RELEASE(spdid);
	return -1;
}
Example #18
void call(void)
{
    static int first = 0;
    static int low = 0, high = 0;

    if (first == 0 ) {
        high = cos_get_thd_id();
        low = high + 1;
        first = 1;
    }
    int j = 0;
    while(j++ < 100) {
        if (cos_get_thd_id() == high) {
            /* printc("put thd %d to sleep\n", high); */
            /* printc("p3\n"); */
            sched_block(cos_spd_id(), 0);
            /* printc("thd %d is up\n", high); */
            /* printc("p4\n"); */
        }

        call_lower(low, high);
    }
    return;
}
Example #19
/////////////////// move to lib later
int cos_ainv_handling(void) {
    struct __cos_ainv_srv_thd curr_data = { .stop = 0 };
    struct __cos_ainv_srv_thd *curr = &curr_data;
    int acap, i;
    int curr_thd_id = cos_get_thd_id();

    assert(curr);

    printc("upcall thread %d (core %ld) waiting in pong...\n", cos_get_thd_id(), cos_cpuid());
    sched_block(cos_spd_id(), 0);
    printc("upcall thread %d (core %ld) up!\n", cos_get_thd_id(), cos_cpuid());

    curr->acap = acap_srv_lookup(cos_spd_id());
    curr->cli_ncaps = acap_srv_ncaps(cos_spd_id());
    curr->shared_page = acap_srv_lookup_ring(cos_spd_id());
    assert(curr->acap && curr->cli_ncaps && curr->shared_page);

    init_shared_page(&curr->shared_struct, curr->shared_page);

    curr->fn_mapping = malloc(sizeof(vaddr_t) * curr->cli_ncaps);
    if (unlikely(curr->fn_mapping == NULL)) goto err_nomem;
    for (i = 0; i < curr->cli_ncaps; i++) {
        curr->fn_mapping[i] = (vaddr_t)acap_srv_fn_mapping(cos_spd_id(), i);
    }

    assert(curr);
    acap = curr->acap;

    printc("server %ld, upcall thd %d has acap %d.\n",
           cos_spd_id(), curr_thd_id, acap);

    struct shared_struct *shared_struct = &curr->shared_struct;
    CK_RING_INSTANCE(inv_ring) *ring = shared_struct->ring;
    assert(ring);

    struct inv_data inv;
    while (curr->stop == 0) {
        CLEAR_SERVER_ACTIVE(shared_struct); // clear active early to avoid a race (and an atomic instruction)
        if (CK_RING_DEQUEUE_SPSC(inv_ring, ring, &inv) == false) {
            printc("thread %d waiting on acap %d\n", cos_get_thd_id(), acap);
            cos_areceive(acap);
            printc("thread %d up from areceive\n", cos_get_thd_id());
        } else {
            SET_SERVER_ACTIVE(shared_struct); /* setting us active */
            printc("core %ld: got inv for cap %d, param %d, %d, %d, %d\n",
                   cos_cpuid(), inv.cap, inv.params[0], inv.params[1], inv.params[2], inv.params[3]);
            if (unlikely(inv.cap > curr->cli_ncaps || !curr->fn_mapping[inv.cap])) {
                printc("Server thread %d in comp %ld: receiving invalid cap %d\n",
                       cos_get_thd_id(), cos_spd_id(), inv.cap);
            } else {
                assert(curr->fn_mapping[inv.cap]);
                //execute!
                exec_fn((void *)curr->fn_mapping[inv.cap], 4, inv.params);
                // and write to the return value.
            }
        }
    }

    return 0;
err_nomem:
    printc("couldn't allocate memory in spd %ld\n", cos_spd_id());
    return -1;
}

void cos_upcall_fn(upcall_type_t t, void *arg1, void *arg2, void *arg3)
{
    switch (t) {
    case COS_UPCALL_THD_CREATE:
    {
        cos_ainv_handling();
        break;
    }
    default:
        /* fault! */
        //*(int*)NULL = 0;
        printc("\n upcall type t %d\n", t);
        return;
    }
    return;
}
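A heavily hedged sketch of the client/sending side implied by the server loop above (none of this is in the source): enqueue an invocation into the shared SPSC ring and, if the server has cleared its active flag, kick the acap so it returns from cos_areceive(). CK_RING_ENQUEUE_SPSC is assumed to be the enqueue counterpart of the dequeue macro used above, and SERVER_ACTIVE() and cos_asend() are assumed counterparts of CLEAR_SERVER_ACTIVE()/SET_SERVER_ACTIVE() and cos_areceive().

static int ainv_client_send_sketch(struct shared_struct *shared, int cli_acap,
                                   int cap, int p0, int p1, int p2, int p3)
{
    CK_RING_INSTANCE(inv_ring) *ring = shared->ring;
    struct inv_data inv = { .cap = cap, .params = { p0, p1, p2, p3 } };

    /* enqueue into the single-producer/single-consumer ring */
    if (CK_RING_ENQUEUE_SPSC(inv_ring, ring, &inv) == false) return -1; /* ring full */

    /* only notify if the server is parked in cos_areceive(); assumed helpers */
    if (!SERVER_ACTIVE(shared)) cos_asend(cli_acap);

    return 0;
}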
Example #20
/* 
 * Dependencies here (thus priority inheritance) will NOT be used if
 * you specify a timeout value.
 */
int lock_component_take(spdid_t spd, unsigned long lock_id, unsigned short int thd_id, unsigned int microsec)
{
	struct meta_lock *ml;
	spdid_t spdid = cos_spd_id();
	unsigned short int curr = (unsigned short int)cos_get_thd_id();
	struct blocked_thds blocked_desc = {.thd_id = curr};
	int ret = 0;
	
//	print("thread %d from spd %d locking for %d micrseconds.", curr, spdid, microsec);

	ACT_RECORD(ACT_LOCK, spd, lock_id, cos_get_thd_id(), thd_id);
	TAKE(spdid);

	if (0 == microsec) {
		ret = TIMER_EXPIRED;
		goto error;
	}
	ml = lock_find(lock_id, spd);
	/* tried to access a lock not yet created */
	if (!ml) {
		ret = -1;
		//print("take wtf%d%d%d", 0,0,0);
		goto error;
	}
	if (lock_is_thd_blocked(ml, curr)) {
		prints("lock: lock_is_thd_blocked failed in lock_component_take\n");
		goto error;
	}

	/* The calling component needs to retry its user-level lock,
	 * some preemption has caused the generation count to get off,
	 * i.e. we don't have the most up-to-date view of the
	 * lock's state */
	if (ml->gen_num != generation) {
		ml->gen_num = generation;
		ret = 0;
		goto error;
	}
	generation++;

	/* Note that we are creating the list of blocked threads from
	 * memory allocated on the individual thread's stacks. */
	INIT_LIST(&blocked_desc, next, prev);
	ADD_LIST(&ml->b_thds, &blocked_desc, next, prev);
	blocked_desc.timed = (TIMER_EVENT_INF != microsec);
	//ml->owner = thd_id;

	RELEASE(spdid);

	/* Bypass calling the timed event component if there is an infinite wait */
//	assert(TIMER_EVENT_INF == microsec);
//	assert(!blocked_desc.timed);
	if (TIMER_EVENT_INF == microsec) {
		if (-1 == sched_block(spdid, thd_id)) BUG();
		if (!EMPTY_LIST(&blocked_desc, next, prev)) BUG();
		/*
		 * OK, this seems ridiculous, but here is the rationale: assume
		 * we are a middle-prio thread and were just woken by a low
		 * priority thread.  We will preempt that thread when woken,
		 * and will continue here.  If a high priority thread is also
		 * waiting on the lock, we would be preempting the low priority
		 * thread while it should be waking the high prio thread.  The
		 * following critical section will switch back to the low prio
		 * thread that still holds the component lock.  See the
		 * comments in lock_component_release.
		 */
		//TAKE(spdid);
		//RELEASE(spdid);

		ACT_RECORD(ACT_WAKEUP, spd, lock_id, cos_get_thd_id(), 0);
		ret = 0;
	} else {
		assert(0);
#ifdef NIL
		/* ret here will fall through.  We do NOT use the
		 * dependency here as I can't think through the
		 * repercussions */
		if (-1 == (ret = timed_event_block(spdid, microsec))) return ret;

		/* 
		 * We might have woken from a timeout, which means
		 * that we need to remove this thread from the waiting
		 * list for the lock.
		 */
		TAKE(spdid);
		ml = lock_find(lock_id, spd);
		if (!ml) {
			ret = -1;
			goto error;
		}
		REM_LIST(&blocked_desc, next, prev);
		RELEASE(spdid);

		ACT_RECORD(ACT_WAKEUP, spd, lock_id, cos_get_thd_id(), 0); 
		/* ret is set to the amnt of time we blocked */
#endif 
	}
	return ret;
error:
	RELEASE(spdid);
	return ret;
}
Example #21
void
bin(void)
{
	sched_block(cos_spd_id(), 0);
}
Example #22
static void start_timer_thread(void)
{
	spdid_t spdid = cos_spd_id();
	unsigned int tick_freq;

	INIT_LIST(&events, next, prev);
	events.thread_id = 0;
	INIT_LIST(&periodic, next, prev);
	periodic.thread_id = 0;

	cos_vect_init_static(&thd_evts);
	cos_vect_init_static(&thd_periodic);

	sched_timeout_thd(spdid);
	tick_freq = sched_tick_freq();
	assert(tick_freq == 100);
	ticks = sched_timestamp();
	/* currently timeouts are expressed in ticks, so we don't need this */
//	usec_per_tick = USEC_PER_SEC/tick_freq;
	cyc_per_tick = sched_cyc_per_tick();
	//	printc("cyc_per_tick = %lld\n", cyc_per_tick);

	/* When the system boots, we have no pending waits */
	assert(EMPTY_LIST(&events, next, prev));
	sched_block(spdid, 0);
	/* Wait for events, then act on expired events.  Loop. */
	while (1) {
		event_time_t next_wakeup;

		cos_mpd_update(); /* update mpd config given this
				   * thread is now in this component
				   * (no dependency if we are in the
				   * same protection domain as the
				   * scheduler) */
		ticks = sched_timestamp();
		if (sched_component_take(spdid)) {
			prints("fprr: scheduler lock failed!!!");
			BUG();
		}
		event_expiration(ticks);
		next_wakeup = next_event_time();

		/* Are there no pending events??? */
		if (TIMER_NO_EVENTS == next_wakeup) {
			if (sched_component_release(spdid)) {
				prints("fprr: scheduler lock release failed!!!");
				BUG();
			}

			sched_block(spdid, 0);
		} else {
			unsigned int wakeup;

			assert(next_wakeup > ticks);
			wakeup = (unsigned int)(next_wakeup - ticks);
			if (sched_component_release(spdid)) {
				prints("fprr: scheduler lock release failed!!!");
				BUG();
			}
			sched_timeout(spdid, wakeup);
		}
	}
}
Example #23
/* 
 * Dependencies here (thus priority inheritance) will NOT be used if
 * you specify a timeout value.
 *
 * Return 0: lock taken, -1: could not find lock, 1: inconsistency -- retry!
 */
int lock_component_take(spdid_t spd, unsigned long lock_id, unsigned short int thd_id)
{
	struct meta_lock *ml;
	spdid_t spdid = cos_spd_id();
	unsigned short int curr = (unsigned short int)cos_get_thd_id();
	struct blocked_thds blocked_desc = {.thd_id = curr};
	int ret = -1;
	
	ACT_RECORD(ACT_LOCK, spd, lock_id, cos_get_thd_id(), thd_id);
	TAKE(spdid);

	ml = lock_find(lock_id, spd);
	/* tried to access a lock not yet created */
	if (!ml) goto error;
	assert(!lock_is_thd_blocked(ml, curr));

	/* The calling component needs to retry its user-level lock,
	 * some preemption has caused the generation count to get off,
	 * i.e. we don't have the most up-to-date view of the
	 * lock's state */
	if (ml->gen_num != generation) {
		ml->gen_num = generation;
		ret = 1;
		goto error;
	}
	generation++;

	/* Note that we are creating the list of blocked threads from
	 * memory allocated on the individual thread's stacks. */
	INIT_LIST(&blocked_desc, next, prev);
	ADD_LIST(&ml->b_thds, &blocked_desc, next, prev);
	//ml->owner = thd_id;

	RELEASE(spdid);

	/* printc("cpu %ld: thd %d going to blk waiting for lock %d\n", cos_cpuid(), cos_get_thd_id(), (int)lock_id); */
	if (-1 == sched_block(spdid, thd_id)) {
		printc("Deadlock including thdids %d -> %d in spd %d, lock id %d.\n", 
		       cos_get_thd_id(), thd_id, spd, (int)lock_id);
		debug_print("BUG: Possible deadlock @ "); 
		assert(0);
		if (-1 == sched_block(spdid, 0)) assert(0);
	}

	if (!EMPTY_LIST(&blocked_desc, next, prev)) BUG();
	/*
	 * OK, this seems ridiculous, but here is the rationale: assume
	 * we are a middle-prio thread and were just woken by a low
	 * priority thread.  We will preempt that thread when woken,
	 * and will continue here.  If a high priority thread is also
	 * waiting on the lock, we would be preempting the low priority
	 * thread while it should be waking the high prio thread.  The
	 * following critical section will switch back to the low prio
	 * thread that still holds the component lock.  See the
	 * comments in lock_component_release.
	 */
	//TAKE(spdid);
	//RELEASE(spdid);

	ACT_RECORD(ACT_WAKEUP, spd, lock_id, cos_get_thd_id(), 0);
	ret = 0;
done:
	return ret;
error:
	RELEASE(spdid);
	goto done;
}
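A minimal client-side sketch (not from the source), driven purely by the return convention documented above this example: 0 means the lock was taken, -1 means the lock does not exist, and 1 means the generation count was stale, so the user-level fast path should be re-examined and the call retried. The lock_take_retry_sketch name and the owner argument handling are illustrative only.

static int lock_take_retry_sketch(unsigned long lock_id, unsigned short int owner)
{
	int ret;

	do {
		/* block with a dependency on the current owner (priority inheritance) */
		ret = lock_component_take(cos_spd_id(), lock_id, owner);
	} while (1 == ret); /* 1: stale generation -- re-check the user-level lock and retry */

	return ret;         /* 0: lock taken, -1: lock does not exist */
}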