int evt_trigger(spdid_t spdid, long extern_evt)
{
	struct evt *e;
	int ret = 0;

	lock_take(&evt_lock);
	e = mapping_find(extern_evt);
	if (NULL == e) goto err;
	ACT_RECORD(ACT_TRIGGER, spdid, e->extern_id, cos_get_thd_id(), 0);
	/* Trigger an event being waited for? */
	if (0 != (ret = __evt_trigger(e))) {
		lock_release(&evt_lock);
		ACT_RECORD(ACT_WAKEUP, spdid, e->extern_id, cos_get_thd_id(), ret);
		if (sched_wakeup(cos_spd_id(), ret)) BUG();
	} else {
		lock_release(&evt_lock);
	}

	return 0;
err:
	lock_release(&evt_lock);
	return -1;
}
/* Wait for a specific event */
int evt_wait(spdid_t spdid, long extern_evt)
{
	struct evt *e;

	while (1) {
		int ret;

		lock_take(&evt_lock);
		e = mapping_find(extern_evt);
		if (NULL == e) goto err;
		if (0 > (ret = __evt_read(e))) goto err;
		ACT_RECORD(ACT_WAIT, spdid, e->extern_id, cos_get_thd_id(), 0);
		lock_release(&evt_lock);
		if (1 == ret) {
			assert(extern_evt == e->extern_id);
			return 0;
		} else {
			ACT_RECORD(ACT_SLEEP, spdid, e->extern_id, cos_get_thd_id(), 0);
			if (0 > sched_block(cos_spd_id(), 0)) BUG();
		}
	}
err:
	lock_release(&evt_lock);
	return -1;
}
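/*
 * A minimal sketch of how the trigger/wait pair above is meant to be
 * used.  EVT_NET_RX and handle_packet() are hypothetical, and in a
 * real client these calls go through the interface stubs, which
 * supply the caller's spdid.  One thread blocks in evt_wait; the
 * producer path calls evt_trigger on the same external event id to
 * wake it, or to mark the event so the next evt_wait returns
 * immediately.
 */
#define EVT_NET_RX 42                   /* hypothetical external event id */
extern void handle_packet(void);        /* hypothetical work function */

static void consumer_thread(void)
{
	while (1) {
		/* blocks via sched_block until the event is triggered */
		if (evt_wait(cos_spd_id(), EVT_NET_RX)) BUG();
		handle_packet();
	}
}

static void producer_path(void)
{
	evt_trigger(cos_spd_id(), EVT_NET_RX);
}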
/* Wait on a group of events (like epoll) */
long evt_grp_wait(spdid_t spdid)
{
	struct evt_grp *g;
	struct evt *e = NULL;
	long extern_evt;

	while (1) {
		lock_take(&evt_lock);
		g = evt_grp_find(cos_get_thd_id());
		ACT_RECORD(ACT_WAIT_GRP, spdid, e ? e->extern_id : 0, cos_get_thd_id(), 0);
		if (NULL == g) goto err;
		if (__evt_grp_read(g, &e)) goto err;

		if (NULL != e) {
			extern_evt = e->extern_id;
			lock_release(&evt_lock);
			return extern_evt;
		} else {
			lock_release(&evt_lock);
			ACT_RECORD(ACT_SLEEP, spdid, 0, cos_get_thd_id(), 0);
			if (0 > sched_block(cos_spd_id(), 0)) BUG();
		}
	}
err:
	lock_release(&evt_lock);
	return -1;
}
int __evt_wait(spdid_t spdid, long extern_evt, int n)
{
	struct evt *e;

	while (1) {
		int ret;

		lock_take(&evt_lock);
		e = mapping_find(extern_evt);
		if (NULL == e) goto err;
		if (0 > (ret = __evt_read(e))) goto err;
		ACT_RECORD(ACT_WAIT, spdid, e->extern_id, cos_get_thd_id(), 0);
		e->n_wait = n;
		e->core_id = cos_cpuid();
		if (ret == 1) e->n_received = 0;
		lock_release(&evt_lock);
		if (1 == ret) {
			assert(extern_evt == e->extern_id);
			return 0;
		} else {
			ACT_RECORD(ACT_SLEEP, spdid, e->extern_id, cos_get_thd_id(), 0);
			/* We could use acaps to block and wake up,
			 * avoiding the call into the scheduler, but
			 * that would be something of a hack. */
			if (0 > sched_block(cos_spd_id(), 0)) BUG();
		}
	}
err:
	lock_release(&evt_lock);
	return -1;
}
/* As above, but return more than one event notification per call */
int evt_grp_mult_wait(spdid_t spdid, struct cos_array *data)
{
	struct evt_grp *g;
	struct evt *e = NULL;
	int evt_gathered = 0, evt_max;

	if (!cos_argreg_arr_intern(data)) return -EINVAL;
	evt_max = data->sz / sizeof(long);

	while (1) {
		lock_take(&evt_lock);
		g = evt_grp_find(cos_get_thd_id());
		ACT_RECORD(ACT_WAIT_GRP, spdid, e ? e->extern_id : 0, cos_get_thd_id(), 0);
		if (NULL == g) goto err;

		/* gather multiple events */
		do {
			if (__evt_grp_read_noblock(g, &e)) goto err;
			if (NULL != e) {
				((long*)data->mem)[evt_gathered] = e->extern_id;
				evt_gathered++;
			}
		} while (e && evt_gathered < evt_max);

		/* return them if any were gathered */
		if (evt_gathered > 0) {
			lock_release(&evt_lock);
			return evt_gathered;
		}

		/*
		 * Otherwise sleep until there is an event (first we
		 * need to call __evt_grp_read to set the blocked
		 * status).
		 */
		if (__evt_grp_read(g, &e)) goto err;
		assert(NULL == e);
		lock_release(&evt_lock);
		ACT_RECORD(ACT_SLEEP, spdid, 0, cos_get_thd_id(), 0);
		if (0 > sched_block(cos_spd_id(), 0)) BUG();
	}
err:
	lock_release(&evt_lock);
	return -1;
}
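/*
 * Sketch of a client draining events in batches through
 * evt_grp_mult_wait, amortizing invocation cost much like a batched
 * epoll_wait.  EVT_BATCH and dispatch_event() are hypothetical, and
 * we assume the argument-region allocator (cos_argreg_alloc) is
 * available, since cos_argreg_arr_intern above requires the array to
 * live in the shared argument region.
 */
#define EVT_BATCH 16                      /* hypothetical batch size */
extern void dispatch_event(long evt_id);  /* hypothetical */

static void grp_consumer(void)
{
	struct cos_array *data;
	int i, n;

	data = cos_argreg_alloc(sizeof(struct cos_array) + EVT_BATCH * sizeof(long));
	assert(data);
	data->sz = EVT_BATCH * sizeof(long);
	while (1) {
		/* returns the number of event ids written into data->mem */
		n = evt_grp_mult_wait(cos_spd_id(), data);
		if (n < 0) BUG();
		for (i = 0 ; i < n ; i++) {
			dispatch_event(((long *)data->mem)[i]);
		}
	}
}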
/*
 * The problem being solved here is this: T_1 wishes to take the
 * mutex and finds that it is held by another thread.  It calls into
 * this function, but is preempted by T_2, the lock holder, and the
 * lock is released.  T_1 is then switched back to, and it invokes
 * this component asking to block until the lock is released.  This
 * component has no way of knowing that the lock has already been
 * released, so the thread would block for no reason, waiting for a
 * "release" that already happened.  Thus we have the client call
 * this pretake function, and check both before and after invoking it
 * that the lock is still held.  We record the generation number in
 * pretake and verify in take that it is unchanged.  That signifies
 * that no release happened in the interim, and that we really should
 * sleep.
 */
int lock_component_pretake(spdid_t spd, unsigned long lock_id, unsigned short int thd)
{
	struct meta_lock *ml;
	spdid_t spdid = cos_spd_id();
	int ret = 0;

	ACT_RECORD(ACT_PRELOCK, spd, lock_id, cos_get_thd_id(), thd);
	TAKE(spdid);
	// lock_print_all();
	ml = lock_find(lock_id, spd);
	if (NULL == ml) {
		ret = -1;
		goto done;
	}
	ml->gen_num = generation;
done:
	RELEASE(spdid);
	return ret;
}
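/*
 * A sketch of the client-side sequence the comment above describes.
 * The lock_t type, its owner field, and cas() are hypothetical
 * stand-ins for the client lock library; lock_component_take is
 * defined later in this file (its prototype is assumed to come from
 * the lock interface header).  The point is the check / pretake /
 * re-check / take ordering around the generation count.
 */
typedef struct {
	volatile unsigned short int owner; /* holder thread id, 0 if free */
	unsigned long lock_id;
} lock_t;
/* hypothetical atomic compare-and-swap: *a == old ? (*a = new, 1) : 0 */
extern int cas(volatile unsigned short int *a, unsigned short int old, unsigned short int new);

static int client_lock_take(lock_t *l)
{
	while (!cas(&l->owner, 0, (unsigned short int)cos_get_thd_id())) {
		unsigned short int owner = l->owner;
		int ret;

		if (!owner) continue; /* released in the interim: retry the cas */
		/* record the current generation while the lock appears held */
		if (lock_component_pretake(cos_spd_id(), l->lock_id, owner)) return -1;
		/* re-check after pretake, as described above */
		if (l->owner != owner) continue;
		/* block until release; 1 means the generation moved, so retry */
		ret = lock_component_take(cos_spd_id(), l->lock_id, owner);
		if (ret < 0) return -1;
	}
	return 0;
}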
int lock_component_release(spdid_t spd, unsigned long lock_id)
{
	struct meta_lock *ml;
	struct blocked_thds *sent, *bt;
	spdid_t spdid = cos_spd_id();

	ACT_RECORD(ACT_UNLOCK, spd, lock_id, cos_get_thd_id(), 0);
	TAKE(spdid);
	generation++;
	ml = lock_find(lock_id, spd);
	if (!ml) goto error;

	/* Apparently, no contended lock_take calls have been made. */
	if (EMPTY_LIST(&ml->b_thds, next, prev)) {
		RELEASE(spdid);
		return 0;
	}

	sent = bt = FIRST_LIST(&ml->b_thds, next, prev);
	/* Remove all threads from the lock's list */
	REM_LIST(&ml->b_thds, next, prev);
	/* Unblock all waiting threads */
	while (1) {
		struct blocked_thds *next;
		u16_t tid;

		/* This is suboptimal: if we wake a thread with a
		 * higher priority, it will be switched to.  Given we
		 * are holding the component lock here, we should get
		 * switched _back_ to so that we can wake the rest of
		 * the waiting threads. */
		next = FIRST_LIST(bt, next, prev);
		REM_LIST(bt, next, prev);

		ACT_RECORD(ACT_WAKE, spd, lock_id, cos_get_thd_id(), bt->thd_id);

		/* cache locally */
		tid = bt->thd_id;
		/* Last node in the list? */
		if (bt == next) {
			/* This is sneaky, so to reiterate: keep this
			 * lock until now so that if we wake another
			 * thread, and it begins execution, the system
			 * will switch back to this thread so that we
			 * can wake up the rest of the waiting threads
			 * (one of which might have the highest
			 * priority).  We release before we wake the
			 * last, as we don't really need the lock
			 * anymore, and it will avoid quite a few
			 * invocations. */
			RELEASE(spdid);
		}

		/* Wake up the same way we were put to sleep */
		assert(tid != cos_get_thd_id());
		/* printc("CPU %ld: %d waking up %d for lock %d\n", cos_cpuid(), cos_get_thd_id(), tid, lock_id); */
		sched_wakeup(spdid, tid);

		if (bt == next) break;
		bt = next;
	}

	return 0;
error:
	RELEASE(spdid);
	return -1;
}
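/*
 * The matching client-side release for the take sketch above (same
 * hypothetical lock_t): clear the user-level owner word first, then
 * invoke this component to bump the generation count and wake any
 * blocked waiters.
 */
static int client_lock_release(lock_t *l)
{
	l->owner = 0; /* release at user level */
	/* the generation bump in here is what forces a concurrent
	 * pretake/take pair to retry */
	return lock_component_release(cos_spd_id(), l->lock_id);
}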
/*
 * Dependencies here (thus priority inheritance) will NOT be used if
 * you specify a timeout value.
 *
 * Return 0: lock taken, -1: could not find lock, 1: inconsistency -- retry!
 */
int lock_component_take(spdid_t spd, unsigned long lock_id, unsigned short int thd_id)
{
	struct meta_lock *ml;
	spdid_t spdid = cos_spd_id();
	unsigned short int curr = (unsigned short int)cos_get_thd_id();
	struct blocked_thds blocked_desc = {.thd_id = curr};
	int ret = -1;

	ACT_RECORD(ACT_LOCK, spd, lock_id, cos_get_thd_id(), thd_id);
	TAKE(spdid);

	ml = lock_find(lock_id, spd);
	/* tried to access a lock not yet created */
	if (!ml) goto error;
	assert(!lock_is_thd_blocked(ml, curr));

	/* The calling component needs to retry its user-level lock:
	 * a preemption has caused the generation count to advance,
	 * i.e. we don't have the most up-to-date view of the lock's
	 * state. */
	if (ml->gen_num != generation) {
		ml->gen_num = generation;
		ret = 1;
		goto error;
	}
	generation++;

	/* Note that we are creating the list of blocked threads from
	 * memory allocated on the individual threads' stacks. */
	INIT_LIST(&blocked_desc, next, prev);
	ADD_LIST(&ml->b_thds, &blocked_desc, next, prev);
	//ml->owner = thd_id;

	RELEASE(spdid);

	/* printc("cpu %ld: thd %d going to blk waiting for lock %d\n", cos_cpuid(), cos_get_thd_id(), (int)lock_id); */
	if (-1 == sched_block(spdid, thd_id)) {
		printc("Deadlock including thdids %d -> %d in spd %d, lock id %d.\n",
		       cos_get_thd_id(), thd_id, spd, (int)lock_id);
		debug_print("BUG: Possible deadlock @ ");
		assert(0);
		if (-1 == sched_block(spdid, 0)) assert(0);
	}
	if (!EMPTY_LIST(&blocked_desc, next, prev)) BUG();

	/*
	 * OK, this seems ridiculous, but here is the rationale:
	 * assume we are a middle-prio thread that was just woken by a
	 * low-prio thread.  We preempt that thread when woken, and
	 * continue here.  If a high-prio thread is also waiting on
	 * the lock, we would be preempting the low-prio thread while
	 * it should be waking the high-prio thread.  The following
	 * critical section would switch back to the low-prio thread
	 * that still holds the component lock.  See the comments in
	 * lock_component_release.
	 */
	//TAKE(spdid);
	//RELEASE(spdid);

	ACT_RECORD(ACT_WAKEUP, spd, lock_id, cos_get_thd_id(), 0);
	ret = 0;
done:
	return ret;
error:
	RELEASE(spdid);
	goto done;
}
/*
 * Timed variant of lock_component_take above (given a distinct name
 * so the two definitions can coexist).  Dependencies here (thus
 * priority inheritance) will NOT be used if you specify a timeout
 * value.
 */
int lock_component_take_timed(spdid_t spd, unsigned long lock_id, unsigned short int thd_id, unsigned int microsec)
{
	struct meta_lock *ml;
	spdid_t spdid = cos_spd_id();
	unsigned short int curr = (unsigned short int)cos_get_thd_id();
	struct blocked_thds blocked_desc = {.thd_id = curr};
	int ret = 0;

	// print("thread %d from spd %d locking for %d microseconds.", curr, spdid, microsec);
	ACT_RECORD(ACT_LOCK, spd, lock_id, cos_get_thd_id(), thd_id);
	TAKE(spdid);

	if (0 == microsec) {
		ret = TIMER_EXPIRED;
		goto error;
	}
	ml = lock_find(lock_id, spd);
	/* tried to access a lock not yet created */
	if (!ml) {
		ret = -1;
		goto error;
	}
	if (lock_is_thd_blocked(ml, curr)) {
		prints("lock: lock_is_thd_blocked failed in lock_component_take\n");
		goto error;
	}

	/* The calling component needs to retry its user-level lock:
	 * a preemption has caused the generation count to advance,
	 * i.e. we don't have the most up-to-date view of the lock's
	 * state. */
	if (ml->gen_num != generation) {
		ml->gen_num = generation;
		ret = 0;
		goto error;
	}
	generation++;

	/* Note that we are creating the list of blocked threads from
	 * memory allocated on the individual threads' stacks. */
	INIT_LIST(&blocked_desc, next, prev);
	ADD_LIST(&ml->b_thds, &blocked_desc, next, prev);
	blocked_desc.timed = (TIMER_EVENT_INF != microsec);
	//ml->owner = thd_id;

	RELEASE(spdid);

	/* Bypass calling the timed-event component if there is an infinite wait */
	// assert(TIMER_EVENT_INF == microsec);
	// assert(!blocked_desc.timed);
	if (TIMER_EVENT_INF == microsec) {
		if (-1 == sched_block(spdid, thd_id)) BUG();
		if (!EMPTY_LIST(&blocked_desc, next, prev)) BUG();
		/*
		 * OK, this seems ridiculous, but here is the
		 * rationale: assume we are a middle-prio thread that
		 * was just woken by a low-prio thread.  We preempt
		 * that thread when woken, and continue here.  If a
		 * high-prio thread is also waiting on the lock, we
		 * would be preempting the low-prio thread while it
		 * should be waking the high-prio thread.  The
		 * following critical section would switch back to the
		 * low-prio thread that still holds the component
		 * lock.  See the comments in lock_component_release.
		 */
		//TAKE(spdid);
		//RELEASE(spdid);
		ACT_RECORD(ACT_WAKEUP, spd, lock_id, cos_get_thd_id(), 0);
		ret = 0;
	} else {
		assert(0);
#ifdef NIL
		/* ret here will fall through.  We do NOT use the
		 * dependency here, as I can't think through the
		 * repercussions. */
		if (-1 == (ret = timed_event_block(spdid, microsec))) return ret;

		/*
		 * We might have woken from a timeout, which means
		 * that we need to remove this thread from the waiting
		 * list for the lock.
		 */
		TAKE(spdid);
		ml = lock_find(lock_id, spd);
		if (!ml) {
			ret = -1;
			goto error;
		}
		REM_LIST(&blocked_desc, next, prev);
		RELEASE(spdid);
		ACT_RECORD(ACT_WAKEUP, spd, lock_id, cos_get_thd_id(), 0);
		/* ret is set to the amount of time we blocked */
#endif
	}
	return ret;
error:
	RELEASE(spdid);
	return ret;
}