/* Track blocked threads for all clients; the list nodes live on the
 * blocked threads' stacks. */
int __sg_sched_block(spdid_t spdid, int dependency_thd)
{
	struct blocked_thd blk_thd;

	/* add to the per-spd blocked list */
	cos_sched_lock_take();
	if (unlikely(!bthds[spdid].next)) INIT_LIST(&bthds[spdid], next, prev);
	INIT_LIST(&blk_thd, next, prev);
	blk_thd.id      = cos_get_thd_id();
	blk_thd.dep_thd = dependency_thd;
	/* printc("add to the list..... thd %d\n", cos_get_thd_id()); */
	ADD_LIST(&bthds[spdid], &blk_thd, next, prev);
	cos_sched_lock_release();

	sched_block(spdid, dependency_thd);

	/* remove from the list on both the normal and the reflection path */
	cos_sched_lock_take();
	/* printc("remove from the list..... thd %d\n", cos_get_thd_id()); */
	REM_LIST(&blk_thd, next, prev);
	cos_sched_lock_release();

	return 0;
}
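/*
 * Every function in this section manipulates intrusive, circular,
 * doubly-linked lists through the same macro family.  For reference,
 * here is a minimal sketch of the semantics these call sites assume;
 * the real definitions live in the list header (cos_list.h in
 * Composite), and this illustrative version is not that header.  The
 * link-field names are macro parameters so one structure can sit on
 * several lists at once.
 */
#define INIT_LIST(o, n, p)  ((o)->n = (o)->p = (o))
#define EMPTY_LIST(o, n, p) ((o)->n == (o))   /* a node/head points at itself */
#define FIRST_LIST(o, n, p) ((o)->n)
#define LAST_LIST(o, n, p)  ((o)->p)
#define ADD_LIST(h, o, n, p) do {             \
	(o)->n = (h)->n; (o)->p = (h);        \
	(h)->n->p = (o); (h)->n = (o);        \
} while (0)
/* Note: REM_LIST re-initializes the node, so EMPTY_LIST() is true
 * afterwards; several functions in this section rely on exactly that
 * (e.g. the post-wakeup check in lock_component_take). */
#define REM_LIST(o, n, p) do {                \
	(o)->n->p = (o)->p; (o)->p->n = (o)->n; \
	INIT_LIST(o, n, p);                   \
} while (0)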
static void mgr_remove_client_mem(struct spd_tmem_info *sti, struct cos_cbuf_item *cci)
{
	__cbuf_c_delete(sti, cci->desc.cbid, &cci->desc);
	/* DOUT("after buf del before map del\n"); */
	cos_map_del(&cb_ids, cci->desc.cbid);
	DOUT("fly..........cbid is %d\n", cci->desc.cbid);
	cci->desc.cbid    = 0;
	cci->parent_spdid = 0;

	/* clear the memory to prevent leakage between clients */
	memset(cci->desc.addr, 0, PAGE_SIZE);
	/* printc("Removing from local list\n"); */
	REM_LIST(cci, next, prev);

	/* TODO: move all of this into the generic tmem code, just like the ++s */
	sti->num_allocated--;
	if (sti->num_allocated == 0) empty_comps++;
	if (sti->num_allocated >= sti->num_desired) over_quota_total--;
	assert(sti->num_allocated == tmem_num_alloc_tmems(sti->spdid));
}
int __cbuf_c_delete(struct spd_tmem_info *sti, int cbid, struct cb_desc *d)
{
	struct cb_mapping *m;
	struct spd_tmem_info *map_sti;

	DOUT("_c_delete....cbid %d\n", cbid);
	__spd_cbvect_clean_val(sti, cbid);
	/* assert(sti->ci.meta[(cbid-1)].c_0.v == NULL); */
	/* printc("_c_delete....cbid %d, meta %p\n", cbid, sti->ci.meta[cbid - 1].c_0.v); */
	mman_revoke_page(cos_spd_id(), (vaddr_t)d->addr, 0);

	/* remove all mapped children */
	m = FIRST_LIST(&d->owner, next, prev);
	while (m != &d->owner) {
		struct cb_mapping *n;

		/* remove from the vector in all mapped spds as well! */
		map_sti = get_spd_info(m->spd);
		DOUT("Clean val in spd %d\n", map_sti->spdid);
		DOUT("Clean: cbid %d\n", cbid);
		__spd_cbvect_clean_val(map_sti, cbid);

		valloc_free(cos_spd_id(), m->spd, (void *)(m->addr), 1);
		n = FIRST_LIST(m, next, prev);
		REM_LIST(m, next, prev);
		free(m);
		m = n;
	}
	valloc_free(cos_spd_id(), sti->spdid, (void *)(d->owner.addr), 1);
	DOUT("unmapping is done\n");

	return 0;
}
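/*
 * A sketch of the shape __cbuf_c_delete() walks, reconstructed from
 * the call sites above (the field names appear in the code; everything
 * else here is an assumption, not the manager's actual definitions):
 * the owner mapping is embedded in the descriptor and doubles as the
 * circular-list head, and each further cb_mapping records one client
 * spd the cbuf was shared into.
 */
struct cb_mapping {
	spdid_t            spd;        /* client holding a mapping */
	vaddr_t            addr;       /* where it is mapped in that client */
	struct cb_mapping *next, *prev;
};
struct cb_desc {
	int               cbid;
	void             *addr;        /* the manager's own mapping */
	struct cb_mapping owner;       /* embedded head of the mapping list */
};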
/* The stack should NOT be on the freelist within the spd. */
static int stkmgr_stk_remove_from_spd(struct cos_stk_item *stk_item, struct spd_stk_info *ssi)
{
	spdid_t s_spdid;

	s_spdid = ssi->spdid;
	DOUT("Releasing stack\n");
	mman_revoke_page(cos_spd_id(), (vaddr_t)(stk_item->hptr), 0);
	valloc_free(cos_spd_id(), s_spdid, (void *)stk_item->d_addr, 1);
	DOUT("Putting stack back on free list\n");
	stk_item->parent_spdid = 0; /* mark the stack as unowned */

	/* clear the memory to prevent leakage between clients */
	memset(stk_item->hptr, 0, PAGE_SIZE);
	DOUT("Removing from local list\n");
	/* remove from s_spdid's stk_list */
	REM_LIST(stk_item, next, prev);
	ssi->num_allocated--;
	assert(ssi->num_allocated == stkmgr_num_alloc_stks(s_spdid));

	return 0;
}
int periodic_wake_create(spdid_t spdinv, unsigned int period)
{
	struct thread_event *te;
	unsigned short int tid = cos_get_thd_id();
	spdid_t spdid = cos_spd_id();
	event_time_t n, t;

	if (period < 1) return -1;

	TAKE(spdid);
	te = te_pget(tid);
	if (NULL == te) BUG();
	if (te->flags & TE_PERIODIC) {
		assert(!EMPTY_LIST(te, next, prev));
		REM_LIST(te, next, prev);
	}
	assert(EMPTY_LIST(te, next, prev));
	te->flags |= TE_PERIODIC;
	te->period = period;
	ticks = sched_timestamp();
	te->event_expiration = n = ticks + period;
	assert(n > ticks);

	t = next_event_time();
	assert(t > ticks);
	insert_pevent(te);
	if (t > n) sched_timeout(spdid, n - ticks);
	RELEASE(spdid);

	return 0;
}
/* Wake up all blocked threads whose request size is smaller than or
 * equal to the available size. */
static void cbuf_thd_wake_up(struct cbuf_comp_info *cci, unsigned long sz)
{
	struct blocked_thd *bthd, *next;
	unsigned long long cur, tot;

	assert(cci->num_blocked_thds >= 0);
	/* we cannot be waking threads while the pool is shrinking */
	assert(cci->target_size >= cci->allocated_size);
	if (cci->num_blocked_thds == 0) return;

	bthd = FIRST_LIST(&cci->bthd_list, next, prev);
	while (bthd != &cci->bthd_list) {
		next = FIRST_LIST(bthd, next, prev);
		if (bthd->request_size <= sz) {
			REM_LIST(bthd, next, prev);
			cci->num_blocked_thds--;
			/* account for how long this thread was blocked */
			rdtscll(cur);
			tot = cur - bthd->blk_start;
			cci->track.blk_tot += tot;
			if (tot > cci->track.blk_max) cci->track.blk_max = tot;
			sched_wakeup(cos_spd_id(), bthd->thd_id);
		}
		bthd = next;
	}
	if (cci->num_blocked_thds == 0) cbuf_unmark_relinquish_all(cci);
}
static void *alloc_rb_buff(rb_meta_t *r)
{
	struct buff_page *p;
	int i;
	void *ret = NULL;

	lock_take(&r->l);
	if (EMPTY_LIST(&r->avail_pages, next, prev)) {
		if (NULL == (p = alloc_buff_page())) {
			lock_release(&r->l);
			return NULL;
		}
		ADD_LIST(&r->avail_pages, p, next, prev);
	}
	p = FIRST_LIST(&r->avail_pages, next, prev);
	assert(p->amnt_buffs < NP_NUM_BUFFS);
	for (i = 0 ; i < NP_NUM_BUFFS ; i++) {
		if (p->buff_used[i] == 0) {
			p->buff_used[i] = 1;
			ret = p->buffs[i];
			p->amnt_buffs++;
			break;
		}
	}
	assert(NULL != ret);
	/* a now-full page moves from the available to the used list */
	if (p->amnt_buffs == NP_NUM_BUFFS) {
		REM_LIST(p, next, prev);
		ADD_LIST(&r->used_pages, p, next, prev);
	}
	lock_release(&r->l);

	return ret;
}
/*
 * As clients may be malicious, or may simply not use the protocol
 * correctly, we cannot just unmap the memory here.  We guarantee that
 * a fault can only happen within the malicious component: other
 * components either receive a NULL pointer from cbuf2buf or see wrong
 * data, but never fault.  See the details in cbuf_unmap_prepare.
 */
static int cbuf_free_unmap(struct cbuf_comp_info *cci, struct cbuf_info *cbi)
{
	struct cbuf_maps *m = &cbi->owner, *next;
	struct cbuf_bin *bin;
	void *ptr = cbi->mem;
	unsigned long off, size = cbi->size;

	if (cbuf_unmap_prepare(cbi)) return 1;

	/* Unmap all of the pages from the clients */
	for (off = 0 ; off < size ; off += PAGE_SIZE) {
		mman_revoke_page(cos_spd_id(), (vaddr_t)ptr + off, 0);
	}

	/*
	 * Deallocate the virtual addresses in the clients, and clean
	 * up the mapping records in this component.  The owner node is
	 * embedded in cbi and serves as the list head, so its address
	 * is freed after the loop.
	 */
	m = FIRST_LIST(&cbi->owner, next, prev);
	while (m != &cbi->owner) {
		next = FIRST_LIST(m, next, prev);
		REM_LIST(m, next, prev);
		valloc_free(cos_spd_id(), m->spdid, (void *)m->addr, size/PAGE_SIZE);
		free(m);
		m = next;
	}
	valloc_free(cos_spd_id(), m->spdid, (void *)m->addr, size/PAGE_SIZE);

	/* deallocate/unlink our data-structures */
	page_free(ptr, size/PAGE_SIZE);
	cmap_del(&cbufs, cbi->cbid);
	cci->allocated_size -= size;
	bin = cbuf_comp_info_bin_get(cci, size);
	if (EMPTY_LIST(cbi, next, prev)) {
		bin->c = NULL;
	} else {
		if (bin->c == cbi) bin->c = cbi->next;
		REM_LIST(cbi, next, prev);
	}
	free(cbi);

	return 0;
}
/*
 * We got to this function because d and m aren't consistent.  This
 * can only happen because the manager removed a cbuf, either (1)
 * leaving the meta pointer NULL and the freelist referring to no
 * actual cbuf, or (2) when another thread was given a new cbuf (via
 * cbuf_c_create) with the same cbid as the one referred to in the
 * freelist.  Either way, we simply want to remove the descriptor.
 */
void __cbuf_desc_free(struct cbuf_alloc_desc *d)
{
	assert(d);
	assert(cvect_lookup(&alloc_descs, (unsigned long)d->addr >> PAGE_ORDER) == d);
	REM_LIST(d, next, prev);
	cvect_del(&alloc_descs, (unsigned long)d->addr >> PAGE_ORDER);
	cslab_free_desc(d);
}
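/*
 * A hedged sketch of the caller-side check that leads here; every name
 * except __cbuf_desc_free() is hypothetical.  The allocator pops a
 * descriptor off its freelist, re-validates it against the shared meta
 * entry, and discards it through __cbuf_desc_free() when the two
 * disagree (the manager-removed and cbid-reused cases described above).
 */
static struct cbuf_alloc_desc *cbuf_desc_get_validated(void)
{
	struct cbuf_alloc_desc *d;

	d = freelist_pop();                /* hypothetical helper */
	if (d && !meta_matches(d)) {       /* hypothetical helper */
		__cbuf_desc_free(d);       /* stale: discard, caller retries */
		return NULL;
	}
	return d;
}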
static inline void fp_rem_thd(struct sched_thd *t)
{
	u16_t p = sched_get_metric(t)->priority;

	/* on a list _and_ no other thread at this priority? */
	if (!EMPTY_LIST(t, prio_next, prio_prev) && t->prio_next == t->prio_prev) {
		mask_unset(p);
	}
	REM_LIST(t, prio_next, prio_prev);
}
static inline void fp_move_end_runnable(struct sched_thd *t)
{
	struct sched_thd *head;
	unsigned short int p = sched_get_metric(t)->priority;

	assert(sched_thd_ready(t));
	assert(!sched_thd_suspended(t));
	head = &priorities[p].runnable;
	/* move t to the tail of its priority's run queue */
	REM_LIST(t, prio_next, prio_prev);
	ADD_LIST(LAST_LIST(head, prio_next, prio_prev), t, prio_next, prio_prev);
	mask_set(p);
}
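/*
 * fp_rem_thd() and fp_move_end_runnable() maintain a bitmap of
 * priorities that currently have runnable threads, so the dispatcher
 * can find the highest runnable priority with a single bit scan
 * instead of probing every run queue.  A minimal sketch of what
 * mask_set()/mask_unset() and the lookup could look like; the word
 * width (and thus NUM_PRIOS) is an assumption, not the scheduler's
 * actual configuration.
 */
#define NUM_PRIOS 32                  /* assumed: one machine word of priorities */
static u32_t prio_mask;               /* bit p set => runnable thread at prio p */

static inline void mask_set(u16_t p)   { prio_mask |=  (1U << p); }
static inline void mask_unset(u16_t p) { prio_mask &= ~(1U << p); }

/* Highest priority (lowest numeric value) with a runnable thread;
 * __builtin_ctz() returns the index of the least-significant set bit. */
static inline int mask_high(void)
{
	if (!prio_mask) return -1;
	return __builtin_ctz(prio_mask);
}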
static void mapping_del(struct mapping *m)
{
	assert(m);
	mapping_del_children(m);
	assert(!m->c);
	/* if we are our parent's child pointer, hand it off to a sibling */
	if (m->p && m->p->c == m) {
		if (EMPTY_LIST(m, _s, s_)) m->p->c = NULL;
		else                       m->p->c = FIRST_LIST(m, _s, s_);
	}
	m->p = NULL;
	REM_LIST(m, _s, s_);
	__mapping_destroy(m);
}
static inline void evt_grp_free(struct evt_grp *g)
{
	int i;

	if (!EMPTY_LIST(g, next, prev)) REM_LIST(g, next, prev);
	/* detach (but do not free) any events still linked to the group */
	while (!EMPTY_LIST(&g->events, next, prev)) {
		struct evt *e;

		e = FIRST_LIST(&g->events, next, prev);
		REM_LIST(e, next, prev);
	}
	for (i = 0 ; i < EVT_NUM_PRIOS ; i++) {
		while (!EMPTY_LIST(&g->triggered[i], next, prev)) {
			struct evt *e;

			e = FIRST_LIST(&g->triggered[i], next, prev);
			REM_LIST(e, next, prev);
		}
	}
	free(g);
}
static struct thread_event *find_remove_event(unsigned short int thdid)
{
	struct thread_event *tmp;

	for (tmp = FIRST_LIST(&events, next, prev) ;
	     tmp != &events ;
	     tmp = FIRST_LIST(tmp, next, prev)) {
		if (tmp->thread_id == thdid) {
			REM_LIST(tmp, next, prev);
			assert(events.next && events.prev);
			return tmp;
		}
	}

	return NULL;
}
static void mapping_del_children(struct mapping *m)
{
	struct mapping *d, *n; /* descendants, next */

	assert(m);
	d = __mapping_linearize_decendents(m);
	while (d) {
		n = FIRST_LIST(d, _s, s_);
		REM_LIST(d, _s, s_);
		__mapping_destroy(d);
		d = (n == d) ? NULL : n;
	}
	assert(!m->c);
}
static int rdmm_list_free(struct rec_data_mm_list *rdmm_list)
{
	assert(rdmm_list);
#if (!LAZY_RECOVERY)
	REM_LIST(rdmm_list, next, prev);
#endif
	if (cvect_del(&rec_mm_vect, rdmm_list->id)) {
		printc("Cli: cannot delete from cvect\n");
		return -1;
	}
	cslab_free_rdmm_ls(rdmm_list);

	return 0;
}
static void rd_remove(vaddr_t addr)
{
	struct rec_data_mm *head = NULL, *alias_rd = NULL;
	struct parent_rec_data_mm *parent_rd = NULL;

	assert(addr);
	parent_rd = parent_rdmm_lookup(addr);
	/* are there aliases from this address? */
	if (parent_rd && (head = parent_rd->head)) {
		while (!EMPTY_LIST(head, next, prev)) {
			alias_rd = FIRST_LIST(head, next, prev);
			assert(alias_rd);
			/* printc("cli: remove alias %p\n", alias_rd->d_addr); */
			REM_LIST(alias_rd, next, prev);
		}
	}

	return;
}
int periodic_wake_remove(spdid_t spdinv, unsigned short int tid)
{
	spdid_t spdid = cos_spd_id();
	struct thread_event *te;

	TAKE(spdid);
	te = te_pget(tid);
	if (NULL == te) BUG();
	if (!(te->flags & TE_PERIODIC)) goto err;
	assert(!EMPTY_LIST(te, next, prev));
	REM_LIST(te, next, prev);
	te->flags = 0;
	RELEASE(spdid);

	return 0;
err:
	RELEASE(spdid);
	return -1;
}
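/*
 * Taken together, periodic_wake_create() and periodic_wake_remove()
 * bracket a periodic task's lifetime.  A hedged usage sketch of the
 * client side: periodic_wake_wait() is assumed to be the companion
 * call that blocks until the next period boundary, and task_done()/
 * do_one_job() are hypothetical stand-ins for real work.
 */
static void periodic_task(void)
{
	spdid_t spdid = cos_spd_id();

	if (periodic_wake_create(spdid, 10)) BUG();  /* run every 10 ticks */
	while (!task_done()) {                       /* hypothetical */
		do_one_job();                        /* hypothetical */
		periodic_wake_wait(spdid);           /* block until next period */
	}
	periodic_wake_remove(spdid, cos_get_thd_id());
}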
static inline int block_ser_if_block_track_lock_component_take(spdid_t spdid, ul_t lock_id, u32_t thd_id)
{
	int ret = 0;
	struct track_block tb; /* tracked on this thread's stack */

	if (sched_component_take(cos_spd_id())) BUG();
	if (unlikely(!tracking_block_list[spdid].next)) {
		INIT_LIST(&tracking_block_list[spdid], next, prev);
	}
	INIT_LIST(&tb, next, prev);
	tb.lock_id = lock_id;
	ADD_LIST(&tracking_block_list[spdid], &tb, next, prev);
	if (sched_component_release(cos_spd_id())) BUG();

	ret = lock_component_take(spdid, lock_id, thd_id);

	if (sched_component_take(cos_spd_id())) BUG();
	REM_LIST(&tb, next, prev);
	if (sched_component_release(cos_spd_id())) BUG();

	return ret;
}
static void cbufp_free_unmap(spdid_t spdid, struct cbufp_info *cbi)
{
	struct cbufp_maps *m, *next;
	void *ptr = cbi->mem;

	if (cbufp_referenced(cbi)) return;

	/* Free each client mapping.  The owner node is embedded in cbi
	 * and serves as the list head, so its address is freed after
	 * the loop (starting the walk from the owner itself would
	 * never terminate once the owner had been spliced out). */
	m = FIRST_LIST(&cbi->owner, next, prev);
	while (m != &cbi->owner) {
		next = FIRST_LIST(m, next, prev);
		REM_LIST(m, next, prev);
		valloc_free(cos_spd_id(), m->spdid, (void *)m->addr, cbi->size/PAGE_SIZE);
		m = next;
	}
	valloc_free(cos_spd_id(), m->spdid, (void *)m->addr, cbi->size/PAGE_SIZE);

	/* TODO: iterate through the size, and free all pages */
	mman_revoke_page(cos_spd_id(), (vaddr_t)ptr, 0);
	/* free_page(ptr); */
}
void blklist_wake_threads(struct blocked_thd *bl)
{
	struct blocked_thd *bthd, *bthd_next;
	spdid_t spdid;

	spdid = cos_spd_id();
	DOUT("waking up threads for spd %d\n", spdid);
	for (bthd = FIRST_LIST(bl, next, prev) ; bthd != bl ; bthd = bthd_next) {
		unsigned short int tid;

		bthd_next = FIRST_LIST(bthd, next, prev);
		DOUT("\tWaking up thd: %d", bthd->thd_id);
		REM_LIST(bthd, next, prev);
		/* cache the id: bthd is invalid after free() */
		tid = bthd->thd_id;
		free(bthd);
		sched_wakeup(cos_spd_id(), tid);
		DOUT("......UP\n");
	}
	DOUT("All thds now awake\n");
}
static void release_rb_buff(rb_meta_t *r, void *b)
{
	struct buff_page *p;
	int i;

	assert(r && b);
	/* the page descriptor lives at the start of b's page */
	p = (struct buff_page *)((unsigned long)b & ~(PAGE_SIZE-1));
	lock_take(&r->l);
	for (i = 0 ; i < NP_NUM_BUFFS ; i++) {
		if (p->buffs[i] == b) {
			p->buff_used[i] = 0;
			p->amnt_buffs--;
			/* the page has a free slot again: back to the avail list */
			REM_LIST(p, next, prev);
			ADD_LIST(&r->avail_pages, p, next, prev);
			lock_release(&r->l);
			return;
		}
	}
	/* b must be malformed such that p (the page descriptor) is
	 * not at the start of its page */
	BUG();
}
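/*
 * release_rb_buff() recovers the page descriptor by rounding the
 * buffer address down to a page boundary, which only works if the
 * struct buff_page sits at the base of its page and the buffers it
 * hands out are carved from the remainder of that same page.  A sketch
 * of the layout this implies; the exact fields and their order are
 * assumptions reconstructed from the call sites, not the real
 * definition.
 */
struct buff_page {
	struct buff_page *next, *prev;     /* avail_pages/used_pages linkage */
	int   amnt_buffs;                  /* how many buffers are handed out */
	char  buff_used[NP_NUM_BUFFS];     /* per-slot in-use flags */
	void *buffs[NP_NUM_BUFFS];         /* pointers into the rest of the page */
};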
static void __event_expiration(event_time_t time, struct thread_event *events)
{
	spdid_t spdid = cos_spd_id();
	struct thread_event *tmp, *next_te;

	assert(TIMER_NO_EVENTS != time);

	for (tmp = FIRST_LIST(events, next, prev) ;
	     tmp != events && tmp->event_expiration <= time ;
	     tmp = next_te) {
		u8_t b;
		unsigned short int tid;

		assert(tmp);
		next_te = FIRST_LIST(tmp, next, prev);
		assert(next_te && next_te->prev == tmp && tmp->next == next_te);
		tmp->flags |= TE_TIMED_OUT;
		REM_LIST(tmp, next, prev);
		b   = tmp->flags & TE_BLOCKED;
		tmp->flags &= ~TE_BLOCKED;
		tid = tmp->thread_id;

		if (tmp->flags & TE_PERIODIC) {
			/* thread hasn't blocked?  Deadline miss! */
			if (!b) {
				long long period_cyc;

				tmp->dl_missed++;
				if (!tmp->missed) { /* first miss? */
					tmp->missed = 1;
					/* Save the time of the deadline,
					 * unless we have already saved the
					 * time of an earlier miss. */
					assert(!tmp->completion);
					rdtscll(tmp->completion);
					tmp->miss_samples++;
					tmp->samples++;
				} else {
					period_cyc = tmp->period * cyc_per_tick;
					assert(period_cyc > cyc_per_tick);
					tmp->lateness_tot      += period_cyc;
					tmp->miss_lateness_tot += period_cyc;
					rdtscll(tmp->completion);
				}
			} else {
				if (!tmp->missed) {
					/* on time: compute lateness (negative,
					 * as the completion beat the deadline) */
					long long t;

					assert(tmp->completion);
					rdtscll(t);
					tmp->lateness_tot += -(t - tmp->completion);
					tmp->samples++;
					tmp->completion = 0;
				}
				tmp->missed = 0;
			}
			tmp->dl++;
			/* next periodic deadline! */
			tmp->event_expiration += tmp->period;
			insert_pevent(tmp);
		}
		if (b) sched_wakeup(spdid, tid);
		/* We don't have to deallocate the thread_events as
		 * they are stack-allocated on the sleeping threads. */
	}
}
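/*
 * The bookkeeping above feeds a simple statistics read-out:
 * lateness_tot accumulates signed cycles (negative when a completion
 * beats its deadline, a full period per subsequent miss), so the
 * average is just the total divided by the sample count.  A hedged
 * sketch; this reader function is hypothetical, not part of the
 * component.
 */
static long long pw_avg_lateness(struct thread_event *te)
{
	if (!te->samples) return 0;
	return te->lateness_tot / (long long)te->samples;
}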
static void lock_free(struct meta_lock *l)
{
	assert(l && l != &locks);
	REM_LIST(l, next, prev);
	free(l);
}
void thread_remove(struct sched_thd *t)
{
	assert(t);
	fp_rem_thd(t);
	REM_LIST(t, sched_next, sched_prev);
}
int lock_component_release(spdid_t spd, unsigned long lock_id)
{
	struct meta_lock *ml;
	struct blocked_thds *bt;
	spdid_t spdid = cos_spd_id();

	ACT_RECORD(ACT_UNLOCK, spd, lock_id, cos_get_thd_id(), 0);
	TAKE(spdid);
	generation++;
	ml = lock_find(lock_id, spd);
	if (!ml) goto error;
	/* Apparently, lock_take calls haven't been made. */
	if (EMPTY_LIST(&ml->b_thds, next, prev)) {
		RELEASE(spdid);
		return 0;
	}
	bt = FIRST_LIST(&ml->b_thds, next, prev);
	/* Remove all threads from the lock's list */
	REM_LIST(&ml->b_thds, next, prev);
	/* Unblock all waiting threads */
	while (1) {
		struct blocked_thds *next;
		u16_t tid;

		/* This is suboptimal: if we wake a thread with a
		 * higher priority, it will be switched to.  Given we
		 * are holding the component lock here, we should get
		 * switched _back_ to so as to wake the rest of the
		 * threads. */
		next = FIRST_LIST(bt, next, prev);
		REM_LIST(bt, next, prev);

		ACT_RECORD(ACT_WAKE, spd, lock_id, cos_get_thd_id(), bt->thd_id);

		/* cache locally */
		tid = bt->thd_id;
		/* Last node in the list? */
		if (bt == next) {
			/* This is sneaky, so to reiterate: keep the
			 * component lock until now so that if we wake
			 * another thread and it begins execution, the
			 * system will switch back to this thread, and
			 * we can wake up the rest of the waiting
			 * threads (one of which might have the highest
			 * priority).  We release before waking the
			 * last thread as we don't really need the lock
			 * anymore, and it will avoid quite a few
			 * invocations. */
			RELEASE(spdid);
		}

		/* Wake them up the way we were put to sleep */
		assert(tid != cos_get_thd_id());
		/* printc("CPU %ld: %d waking up %d for lock %d\n", cos_cpuid(), cos_get_thd_id(), tid, lock_id); */
		sched_wakeup(spdid, tid);

		if (bt == next) break;
		bt = next;
	}

	return 0;
error:
	RELEASE(spdid);
	return -1;
}
/*
 * Dependencies here (thus priority inheritance) will NOT be used if
 * you specify a timeout value.
 */
int lock_component_take(spdid_t spd, unsigned long lock_id, unsigned short int thd_id, unsigned int microsec)
{
	struct meta_lock *ml;
	spdid_t spdid = cos_spd_id();
	unsigned short int curr = (unsigned short int)cos_get_thd_id();
	struct blocked_thds blocked_desc = {.thd_id = curr};
	int ret = 0;

	/* print("thread %d from spd %d locking for %d microseconds.", curr, spdid, microsec); */

	ACT_RECORD(ACT_LOCK, spd, lock_id, cos_get_thd_id(), thd_id);
	TAKE(spdid);

	if (0 == microsec) {
		ret = TIMER_EXPIRED;
		goto error;
	}
	ml = lock_find(lock_id, spd);
	/* tried to access a lock not yet created */
	if (!ml) {
		ret = -1;
		goto error;
	}
	if (lock_is_thd_blocked(ml, curr)) {
		prints("lock: lock_is_thd_blocked failed in lock_component_take\n");
		goto error;
	}

	/* The calling component needs to retry its user-level lock:
	 * some preemption has caused the generation count to get out
	 * of sync, i.e. we don't have the most up-to-date view of the
	 * lock's state. */
	if (ml->gen_num != generation) {
		ml->gen_num = generation;
		ret = 0;
		goto error;
	}
	generation++;

	/* Note that we are creating the list of blocked threads from
	 * memory allocated on the individual threads' stacks. */
	INIT_LIST(&blocked_desc, next, prev);
	ADD_LIST(&ml->b_thds, &blocked_desc, next, prev);
	blocked_desc.timed = (TIMER_EVENT_INF != microsec);
	/* ml->owner = thd_id; */

	RELEASE(spdid);

	/* Bypass calling the timed-event component if the wait is infinite */
	if (TIMER_EVENT_INF == microsec) {
		if (-1 == sched_block(spdid, thd_id)) BUG();
		if (!EMPTY_LIST(&blocked_desc, next, prev)) BUG();
		/*
		 * OK, this seems ridiculous, but here is the
		 * rationale: assume we are a middle-prio thread that
		 * was just woken by a low-priority thread.  We preempt
		 * that thread when woken and continue here.  If a
		 * high-priority thread is also waiting on the lock, we
		 * would be preempting the low-priority thread while it
		 * should be waking the high-prio thread.  The
		 * following critical section would switch back to the
		 * low-prio thread that still holds the component lock.
		 * See the comments in lock_component_release.
		 */
		/* TAKE(spdid); */
		/* RELEASE(spdid); */
		ACT_RECORD(ACT_WAKEUP, spd, lock_id, cos_get_thd_id(), 0);
		ret = 0;
	} else {
		assert(0);
#ifdef NIL
		/* ret here will fall through.  We do NOT use the
		 * dependency here as I can't think through the
		 * repercussions. */
		if (-1 == (ret = timed_event_block(spdid, microsec))) return ret;

		/*
		 * We might have woken from a timeout, which means
		 * that we need to remove this thread from the waiting
		 * list for the lock.
		 */
		TAKE(spdid);
		ml = lock_find(lock_id, spd);
		if (!ml) {
			ret = -1;
			goto error;
		}
		REM_LIST(&blocked_desc, next, prev);
		RELEASE(spdid);
		ACT_RECORD(ACT_WAKEUP, spd, lock_id, cos_get_thd_id(), 0);
		/* ret is set to the amount of time we blocked */
#endif
	}

	return ret;
error:
	RELEASE(spdid);
	return ret;
}
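/*
 * The generation check in lock_component_take() pairs with a
 * user-level fast path in the client: the client only invokes the lock
 * component on contention, and a stale generation means the lock word
 * changed underneath it, so it should retry rather than block.  A
 * minimal sketch of that client-side loop; the lock-word layout and
 * the cas_take()/lock_owner() helpers are assumptions, not the actual
 * client library.
 */
static void my_lock_take(volatile unsigned long *lock_word, unsigned long lock_id)
{
	/* fast path: try to take the user-level lock word directly */
	while (!cas_take(lock_word, cos_get_thd_id())) {     /* hypothetical */
		/* Contended: block in the lock component until release.
		 * A 0 return without blocking (the stale-generation
		 * case above) just sends us around the loop again. */
		lock_component_take(cos_spd_id(), lock_id,
				    lock_owner(*lock_word),  /* hypothetical */
				    TIMER_EVENT_INF);
	}
}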