/*
 * Allocate and initialize a meta_lock on behalf of component `spd`,
 * assign it the next lock id, and splice it onto the head of the global
 * `locks` list.  Returns the new lock, or NULL on allocation failure.
 */
static struct meta_lock *lock_alloc(spdid_t spd)
{
	struct meta_lock *l;
	struct meta_lock *snd, *lst;

	l = (struct meta_lock*)malloc(sizeof(struct meta_lock));
	if (!l) return NULL;
	/* empty sentinel for this lock's blocked-thread list */
	l->b_thds.thd_id = 0;
	INIT_LIST(&(l->b_thds), next, prev);
	/* FIXME: check for lock_id overflow */
	l->lock_id = lock_id++;
	l->owner   = 0;
	l->gen_num = 0;
	l->spd     = spd;
	INIT_LIST(l, next, prev);
	assert(&locks != l);
	/* snapshot current first/last nodes so the asserts below can
	 * validate the list invariants after the insertion */
	snd = FIRST_LIST(&locks, next, prev);
	lst = LAST_LIST(&locks, next, prev);
	/* open-coded head insertion (equivalent to adding l right after
	 * the `locks` sentinel) */
	(l)->next = (&locks)->next;
	(l)->prev = (&locks);
	(&locks)->next = (l);
	(l)->next->prev = (l);
	/* l must now be first, and the sentinel must follow it backwards */
	assert(FIRST_LIST(&locks, next, prev) == l);
	assert(LAST_LIST(l, next, prev) == &locks);
	if (lst != &locks) {
		/* non-empty list before insert: tail must be untouched */
		assert(LAST_LIST(&locks, next, prev) == lst);
		assert(FIRST_LIST(lst, next, prev) == &locks);
	}
	/* l links forward to the old head, which links back to l */
	assert(FIRST_LIST(l, next, prev) == snd && LAST_LIST(snd, next, prev) == l);
	// lock_print_all();
	return l;
}
/*
 * For each tmem manager (stack and cbuf), enumerate all possible spds
 * and create a `struct component` record for every spd the manager
 * reports a concurrency estimate for (estimate of -1 means the manager
 * does not track that spd).  Each record starts with the default tmem
 * allocation and is appended to that manager's component list.
 */
static void init_spds(void)
{
	int i, mgr;

	for (mgr = 0 ; mgr < NUM_TMEM_MGR ; mgr++) {
		INIT_LIST(&components[mgr], next, prev);
		for (i = 0 ; i < MAX_NUM_SPDS ; i++) {
			struct component *c;
			/* skip spds unknown to this manager */
			switch (mgr) {
			case STK_MGR:
				if (-1 == stkmgr_spd_concurrency_estimate(i)) continue;
				break;
			case CBUF_MGR:
				if (-1 == cbufmgr_spd_concurrency_estimate(i)) continue;
				break;
			default:
				BUG();
			}
			c = malloc(sizeof(struct component));
			if (!c) BUG();
			memset(c, 0, sizeof(struct component));
			c->spdid     = i;
			c->allocated = DEFAULT_TMEM_AMNT;
			c->mgr       = mgr;
			INIT_LIST(c, next, prev);
			ADD_LIST(&components[mgr], c, next, prev);
			ncomps++;
		}
	}
}
/*--------------------------------------*/ static struct rec_data_mm_list * rdmm_list_init(long id) { struct rec_data_mm_list *rdmm_list; /* FIXME: A BUG here that bitmap will be all 0 */ rdmm_list = cslab_alloc_rdmm_ls(); assert(rdmm_list); rdmm_list->id = id; rdmm_list->fcnt = fcounter; rdmm_list->recordable = 1; rdmm_list->head = rdmm_list->tail = &rdmm_list->first; if (cvect_add(&rec_mm_vect, rdmm_list, rdmm_list->id)) { printc("Cli: can not add list into cvect\n"); return NULL; } /* printc("Init a list using id %d (vect @ %p ", id,&rec_mm_vect); */ /* printc("list @ %p)\n", rdmm_list); */ #if (!LAZY_RECOVERY) INIT_LIST(rdmm_list, next, prev); if (!all_rdmm_list) { all_rdmm_list = cslab_alloc_rdmm_ls(); assert(all_rdmm_list); INIT_LIST(all_rdmm_list, next, prev); } else { ADD_LIST(all_rdmm_list, rdmm_list, next, prev); } #endif return rdmm_list; }
/*
 * Construct the pipeline manager: remember the clock component, set up
 * empty video and audio pipeline list heads, then initialize the clock.
 */
MagPipelineManager::MagPipelineManager(MagClock *mpClockComp)
    : mpClockComp(mpClockComp)
{
    INIT_LIST(&mVideoPipelineHead);
    INIT_LIST(&mAudioPipelineHead);
    this->mpClockComp->init();
}
// track blocked threads here for all clients (on each thread stack) int __sg_sched_block(spdid_t spdid, int dependency_thd) { struct blocked_thd blk_thd; // add to list cos_sched_lock_take(); if (unlikely(!bthds[spdid].next)) { INIT_LIST(&bthds[spdid], next, prev); } INIT_LIST(&blk_thd, next, prev); blk_thd.id = cos_get_thd_id(); blk_thd.dep_thd = dependency_thd; /* printc("add to the list..... thd %d\n", cos_get_thd_id()); */ ADD_LIST(&bthds[spdid], &blk_thd, next, prev); cos_sched_lock_release(); sched_block(spdid, dependency_thd); // remove from list in both normal path and reflect path cos_sched_lock_take(); /* printc("remove from the list..... thd %d\n", cos_get_thd_id()); */ REM_LIST(&blk_thd, next, prev); cos_sched_lock_release(); return 0; }
int main(int argc, char *argv[]) { #define COUNT (10) struct mydata data[COUNT]; int i; struct mydata *pcur, *plast = data; INIT_LIST(plast); plast->data1 = -222; plast->data2 = -999000.0; for(i=1; i<COUNT; i++) { pcur = data + i; INIT_LIST(pcur); pcur->data1 = i; pcur->data2 = i + 999300.0; ADD_AFTER(plast, pcur); plast = NEXT(plast); } struct mydata *ptr = data; while(ptr) { printf("data1=%d, data2=%f\n", ptr->data1, ptr->data2); ptr = NEXT(ptr); } ptr = data + COUNT - 1; while(ptr) { printf("data1=%d, data2=%f\n", ptr->data1, ptr->data2); ptr = PREV(ptr); } return 0; }
/*
 * Create a task under `manager` with the given scheduling quantum
 * (0 means "use the manager's default") and hand it back through
 * *taskp with one reference held.  Fails with ISC_R_NOMEMORY,
 * ISC_R_UNEXPECTED (mutex init failure), or ISC_R_SHUTTINGDOWN if the
 * manager is already exiting.
 */
isc_result_t
isc_task_create(isc_taskmgr_t *manager, unsigned int quantum, isc_task_t **taskp)
{
	isc_task_t *task;
	isc_boolean_t exiting;

	REQUIRE(VALID_MANAGER(manager));
	REQUIRE(taskp != NULL && *taskp == NULL);

	task = isc_mem_get(manager->mctx, sizeof(*task));
	if (task == NULL)
		return (ISC_R_NOMEMORY);

	XTRACE("isc_task_create");
	task->manager = manager;
	if (isc_mutex_init(&task->lock) != ISC_R_SUCCESS) {
		isc_mem_put(manager->mctx, task, sizeof(*task));
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "isc_mutex_init() %s",
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"));
		return (ISC_R_UNEXPECTED);
	}
	task->state = task_state_idle;
	task->references = 1;
	INIT_LIST(task->events);
	INIT_LIST(task->on_shutdown);
	task->quantum = quantum;
	task->flags = 0;
	task->now = 0;
#ifdef ISC_TASK_NAMES
	memset(task->name, 0, sizeof(task->name));
	task->tag = NULL;
#endif
	INIT_LINK(task, link);
	INIT_LINK(task, ready_link);

	exiting = ISC_FALSE;
	/* register with the manager unless it is shutting down; the
	 * decision must be made under the manager lock */
	LOCK(&manager->lock);
	if (!manager->exiting) {
		if (task->quantum == 0)
			task->quantum = manager->default_quantum;
		APPEND(manager->tasks, task, link);
	} else
		exiting = ISC_TRUE;
	UNLOCK(&manager->lock);

	if (exiting) {
		/* manager went away: undo everything acquired so far */
		DESTROYLOCK(&task->lock);
		isc_mem_put(manager->mctx, task, sizeof(*task));
		return (ISC_R_SHUTTINGDOWN);
	}

	/* magic set last: the task is only now fully valid */
	task->magic = TASK_MAGIC;
	*taskp = task;
	return (ISC_R_SUCCESS);
}
/*
 * Create a task under `manager0` with the given scheduling quantum
 * (0 means "use the manager's default") and hand it back through
 * *taskp with one reference held.  Newer variant of isc_task_create
 * with the API-shim (methods/magic) fields and the ready_priority
 * link.  Fails with ISC_R_NOMEMORY, a mutex-init error code, or
 * ISC_R_SHUTTINGDOWN if the manager is already exiting.
 */
ISC_TASKFUNC_SCOPE isc_result_t
isc__task_create(isc_taskmgr_t *manager0, unsigned int quantum, isc_task_t **taskp)
{
	isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
	isc__task_t *task;
	isc_boolean_t exiting;
	isc_result_t result;

	REQUIRE(VALID_MANAGER(manager));
	REQUIRE(taskp != NULL && *taskp == NULL);

	task = isc_mem_get(manager->mctx, sizeof(*task));
	if (task == NULL)
		return (ISC_R_NOMEMORY);

	XTRACE("isc_task_create");
	task->manager = manager;
	result = isc_mutex_init(&task->lock);
	if (result != ISC_R_SUCCESS) {
		isc_mem_put(manager->mctx, task, sizeof(*task));
		return (result);
	}
	task->state = task_state_idle;
	task->references = 1;
	INIT_LIST(task->events);
	INIT_LIST(task->on_shutdown);
	task->quantum = quantum;
	task->flags = 0;
	task->now = 0;
	memset(task->name, 0, sizeof(task->name));
	task->tag = NULL;
	INIT_LINK(task, link);
	INIT_LINK(task, ready_link);
	INIT_LINK(task, ready_priority_link);

	exiting = ISC_FALSE;
	/* register with the manager unless it is shutting down; the
	 * decision must be made under the manager lock */
	LOCK(&manager->lock);
	if (!manager->exiting) {
		if (task->quantum == 0)
			task->quantum = manager->default_quantum;
		APPEND(manager->tasks, task, link);
	} else
		exiting = ISC_TRUE;
	UNLOCK(&manager->lock);

	if (exiting) {
		/* manager went away: undo everything acquired so far */
		DESTROYLOCK(&task->lock);
		isc_mem_put(manager->mctx, task, sizeof(*task));
		return (ISC_R_SHUTTINGDOWN);
	}

	/* magic fields set last: the task is only now fully valid */
	task->common.methods = (isc_taskmethods_t *)&taskmethods;
	task->common.magic = ISCAPI_TASK_MAGIC;
	task->common.impmagic = TASK_MAGIC;
	*taskp = (isc_task_t *)task;
	return (ISC_R_SUCCESS);
}
/*
 * Register one component library: query its registration info via
 * regFunc, create a loaded-component entry for it, append the entry to
 * the global loaded list, and index it both by each supported role and
 * by component name.  Returns 0 on success, -1 if regFunc() yields no
 * registration info.
 */
static OMX_S32 addComponentList(OMX_PTR hLib, comp_reg_func_t regFunc, comp_dereg_func_t deregFunc){
    MagOMX_Component_Registration_t *regInfo;
    Component_Entry_t *entry;
    OMX_U32 role;

    regInfo = regFunc();
    if (NULL == regInfo){
        AGILE_LOGE("failed to do regFunc[%p]()", regFunc);
        return -1;
    }

    entry = (Component_Entry_t *)mag_mallocz(sizeof(Component_Entry_t));
    MAG_ASSERT(entry != NULL);

    INIT_LIST(&entry->node);
    entry->regInfo     = regInfo;
    entry->deregFunc   = deregFunc;
    entry->libHandle   = hLib;
    entry->initialized = OMX_FALSE;

    AGILE_LOGD("add the component name = %s", regInfo->name);
    list_add_tail(&entry->node, &gOmxCore->LoadedCompListHead);

    for (role = 0; role < regInfo->roles_num; role++){
        gOmxCore->roleToComponentTable->addItem(gOmxCore->roleToComponentTable,
                                                entry, regInfo->roles[role]);
        AGILE_LOGD("add the component role %d: %s", role, regInfo->roles[role]);
    }
    gOmxCore->componentToRoleTable->addItem(gOmxCore->componentToRoleTable,
                                            entry, regInfo->name);
    return 0;
}
/*
 * Deep-copy every rdatalist in `source` into the (re-initialized)
 * `target` list.  On any clone failure the partially built target is
 * destroyed and the failing result is returned; otherwise
 * ISC_R_SUCCESS.  NOTE(review): CHECK() is assumed to jump to the
 * cleanup label on failure, per the usual ISC convention.
 */
isc_result_t
ldap_rdatalist_copy(isc_mem_t *mctx, ldapdb_rdatalist_t source, ldapdb_rdatalist_t *target)
{
	dns_rdatalist_t *rdlist;
	dns_rdatalist_t *new_rdlist;
	isc_result_t result;

	REQUIRE(mctx != NULL);
	REQUIRE(target != NULL);

	INIT_LIST(*target);
	rdlist = HEAD(source);
	while (rdlist != NULL) {
		new_rdlist = NULL;
		/* jumps to cleanup on failure */
		CHECK(rdatalist_clone(mctx, rdlist, &new_rdlist));
		APPEND(*target, new_rdlist, link);
		rdlist = NEXT(rdlist, link);
	}
	return ISC_R_SUCCESS;

cleanup:
	/* release whatever was cloned before the failure */
	ldapdb_rdatalist_destroy(mctx, target);
	return result;
}
/*
 * Component entry point: initialize the event lock, the static event
 * map, the root of the event mapping (a failure here is fatal), and the
 * empty list of event groups.  Order matters: the lock and map must
 * exist before mappings or groups use them.
 */
void cos_init(void *arg)
{
	lock_static_init(&evt_lock);
	cos_map_init_static(&evt_map);
	if (mapping_create(NULL) != 0) BUG();
	INIT_LIST(&grps, next, prev);
}
/*
 * Create an allocation descriptor tracking the cbuf `cbid` mapped at
 * `addr` with shared metadata `cm`.  The asserts pin the caller's
 * contract: the meta's page index must match addr, no descriptor may
 * already exist for that page, and the tmem flag in the meta must agree
 * with the `tmem` argument.  The descriptor is indexed by page number
 * in alloc_descs; its freelist head depends on whether this is a tmem
 * cbuf (shared freelist) or a persistent one (per-size freelist).
 * Returns NULL on slab-allocation failure.
 */
static struct cbuf_alloc_desc *
__cbuf_desc_alloc(int cbid, int size, void *addr, struct cbuf_meta *cm, int tmem)
{
	struct cbuf_alloc_desc *d;
	int idx = ((int)addr >> PAGE_ORDER);

	assert(addr && cm);
	assert(cm->nfo.c.ptr == idx);
	assert(__cbuf_alloc_lookup(idx) == NULL);
	assert((!tmem && !(cm->nfo.c.flags & CBUFM_TMEM)) ||
	       (tmem && cm->nfo.c.flags & CBUFM_TMEM));

	d = cslab_alloc_desc();
	if (!d) return NULL;
	d->cbid   = cbid;
	d->addr   = addr;
	d->length = size;
	d->meta   = cm;
	d->tmem   = tmem;
	INIT_LIST(d, next, prev);
	//ADD_LIST(&cbuf_alloc_freelists, d, next, prev);
	if (tmem) d->flhead = &cbuf_alloc_freelists;
	else      d->flhead = __cbufp_freelist_get(size);
	cvect_add(&alloc_descs, d, idx);
	return d;
}
/*
 * Demo driver for the parent/child list ops interface: create three
 * child nodes, add and print after each, look one up by name, then
 * delete the middle node and print again.
 * NOTE(review): INIT_LIST(parent, p) appears to be a declaring macro
 * that introduces the parent list `p` -- confirm against its
 * definition.
 */
int main(int argc, char *argv[])
{
	struct child_st *c1;
	struct child_st *c2;
	struct child_st *c3;
	struct child_st *tmp;
	uint8_t s1[] = "The first node";
	uint8_t s2[] = "The second node";
	uint8_t s3[] = "The third node";

	INIT_LIST(parent, p);

	p->ops->create(&c1, s1, strlen(s1));
	p->ops->add(p, c1);
	p->ops->print(p);

	p->ops->create(&c2, s2, strlen(s2));
	p->ops->add(p, c2);
	p->ops->print(p);

	p->ops->create(&c3, s3, strlen(s3));
	p->ops->add(p, c3);
	p->ops->print(p);

	/* 14 == strlen("The third node") */
	tmp = p->ops->get(p, "The third node", 14);
	if (tmp)
		printf("Get Right? name (%d bytes): %s\n", tmp->nlen, tmp->name);
	else
		printf("Not found\n");

	p->ops->del(p, c2);
	p->ops->print(p);

	return 0;
}
/*
 * Prepare a ring buffer and its metadata for use: mark every packet
 * slot empty, zero the meta structure, set the head/tail indices,
 * initialize the lock, and start with empty used/available page lists.
 */
static void rb_init(rb_meta_t *rbm, ring_buff_t *rb)
{
	int slot;

	for (slot = 0; slot < RB_SIZE; slot++)
		rb->packets[slot].status = RB_EMPTY;

	memset(rbm, 0, sizeof(rb_meta_t));
	rbm->rb_head = 0;
	rbm->rb_tail = RB_SIZE - 1;
	rbm->rb      = rb;
	lock_static_init(&rbm->l);
	INIT_LIST(&rbm->used_pages, next, prev);
	INIT_LIST(&rbm->avail_pages, next, prev);
}
void init_lru(){ lru = zmalloc(sizeof(LRU_list)); lru->head = zmalloc(sizeof(LRU_node)); lru->tail = zmalloc(sizeof(LRU_node)); DUMMYNODE(lru->head); DUMMYNODE(lru->tail); INIT_LIST(lru); }
/*
 * Initialize the interface entry list and attempt to attach the
 * reserved address/mask pair to the interface identified by *index.
 * On success the resulting NTE context is recorded via AddEntry and
 * NO_ERROR is returned; otherwise ERROR_INVALID_ACCESS.
 */
DWORD InterfaceUtils::INITEX(PDWORD index, const char *reservedAddrString, const char *reservedMaskString)
{
	ULONG nteContext;

	INIT_LIST();
	if (InterfaceAddIPAddress(reservedAddrString, reservedMaskString,
				  *index, &nteContext) != NO_ERROR) {
		return ERROR_INVALID_ACCESS;
	}
	AddEntry(nteContext);
	return NO_ERROR;
}
/*Member functions*/
/*
 * Append a copy of *pFormat (framerate, compression format, color
 * format) to this video port's supported-format list.
 */
static void MagOmxPortVideo_addFormat(MagOmxPortVideo hPort, MagOMX_Video_PortFormat_t *pFormat){
    MagOMX_Video_PortFormat_t *format;

    format = (MagOMX_Video_PortFormat_t *)mag_mallocz(sizeof(MagOMX_Video_PortFormat_t));
    /* BUGFIX: the allocation result was dereferenced unchecked; fail
     * loudly on OOM as other mag_mallocz() call sites do. */
    MAG_ASSERT(format != NULL);

    INIT_LIST(&format->node);
    format->xFramerate         = pFormat->xFramerate;
    format->eCompressionFormat = pFormat->eCompressionFormat;
    format->eColorFormat       = pFormat->eColorFormat;

    list_add_tail(&format->node, &hPort->mPortFormatList);
}
/*
 * Create a message channel: allocate the handle, initialize its message
 * queue and free-message lists, its lock, and its wakeup event group +
 * event.  On success returns MAG_ErrNone with *handle set.  On failure
 * all partially acquired resources are released, *handle is reset to
 * NULL, and an error code is returned.
 */
MagErr_t Mag_MsgChannelCreate(MagMsgChannelHandle *handle){
    int rc;

    *handle = (MagMsgChannelHandle)malloc(sizeof(**handle));
    if(NULL == *handle){
        return MAG_NoMemory;
    }

    INIT_LIST(&(*handle)->msgQueueHead);
    INIT_LIST(&(*handle)->freeMsgListHead);

    rc = pthread_mutex_init(&(*handle)->lock, NULL /* default attributes */);
    if(rc != 0){
        goto err_mutex;
    }

    (*handle)->privData       = NULL;
    (*handle)->receiverFunc   = NULL;
    (*handle)->ReceiverThread = (pthread_t)0;
    (*handle)->stopped        = MAG_FALSE;

    if (MAG_ErrNone == Mag_CreateEventGroup(&(*handle)->evtGrp)){
        if (MAG_ErrNone == Mag_CreateEvent(&(*handle)->event, 0))
            Mag_AddEventGroup((*handle)->evtGrp, (*handle)->event);
        else
            goto err_event;
    }else{
        goto err_event;
    }
    return MAG_ErrNone;

err_event:
    /* NOTE(review): an event group created before a failing
     * Mag_CreateEvent() is not destroyed here -- verify whether a
     * Mag_DestroyEventGroup() call is required. */
    (*handle)->event  = NULL;
    (*handle)->evtGrp = NULL;
    /* BUGFIX: the mutex initialized above was leaked on this path. */
    pthread_mutex_destroy(&(*handle)->lock);
err_mutex:
    /* *handle is always non-NULL here; the old `if ((*handle))` guard
     * was redundant. */
    mag_free((*handle));
    *handle = NULL;
    /* NOTE(review): event-creation failures also report
     * MAG_ErrMutexCreate; preserved for caller compatibility. */
    return MAG_ErrMutexCreate;
}
/*
 * Entry point of the tmem policy component: discover managed
 * components, wait for the system to settle, then run the chosen
 * redistribution policy once per POLICY_PERIODICITY tick, periodically
 * reporting stack/cbuf usage.  Never returns in normal operation.
 */
void cos_init(void *arg)
{
	printc("thd %d Tmem policy running.....\n", cos_get_thd_id());
	INIT_LIST(&threads, next, prev);

	init_spds();
#ifdef THD_POOL
	printc("<<<Thd Pool with total %d tmems, component size %d>>>\n", MAX_NUM_MEM, THD_POOL);
	if (THD_POOL != 1)
		thdpool_max_policy();
	else
		thdpool_1_policy();
#else
	printc("<<<Now using Algorithm %d, total number of tmems:%d >>>\n", ALGORITHM, MAX_NUM_MEM);
	DOUT("Tmem policy: %d in spd %ld\n", cos_get_thd_id(), cos_spd_id());
	init_policy();
#endif
	periodic_wake_create(cos_spd_id(), POLICY_PERIODICITY);

	/* Wait for all other threads to initialize */
	/* `waiting` and `report_period` are both expressed in policy
	 * ticks (~100 time units' worth) */
	int i = 0, waiting = 100 / POLICY_PERIODICITY, counter = 0, report_period = 100 / POLICY_PERIODICITY;
	do {
		periodic_wake_wait(cos_spd_id());
	} while (i++ < waiting);

	init_thds();

	//unsigned long long s,e;
	while (1) {
		if (counter++ % report_period == 0) {
			/* report tmems usage */
			cbufmgr_buf_report();
			stkmgr_stack_report();
		}
		gather_data(counter % report_period);
#ifdef THD_POOL
		if (THD_POOL == 1)
			thdpool_1_policy();
		else
			thdpool_max_policy();
#else
		//rdtscll(s);
		DOUT("POLICY starts!\n");
		policy();
		DOUT("POLICY ends!\n");
		//rdtscll(e);
		//printc("SP:%llu cycles\n",e-s);
#endif
		periodic_wake_wait(cos_spd_id());
	}
	return;
}
static int malloc_init() { list_head *pages; malloc_page_t *mpage; /* We start with just one page */ mdata.current_page = get_page(PAGE_SIZE); if (!mdata.current_page) { printf("Could not init malloc...\n"); set_bit(&mdata.flags, MALLOC_INITIALIZED); clear_bit(&mdata.flags, MALLOC_HAS_SPACE); return -1; } pages = &(mdata.pages); INIT_LIST(pages); set_bit(&mdata.flags, MALLOC_INITIALIZED); set_bit(&mdata.flags, MALLOC_VALID); set_bit(&mdata.flags, MALLOC_HAS_SPACE); mdata.num_pages++; mdata.offset = sizeof(malloc_data_t); /*We store the page list data inside the new allocated page *right after malloc metadata */ mpage = (malloc_page_t*)mdata.current_page + mdata.offset; mdata.offset += sizeof(malloc_page_t); mpage->list = (list_head*)mdata.current_page + mdata.offset; mdata.offset += sizeof(list_head); INIT_LIST(mpage->list); mpage->addr = mdata.current_page; list_insert_tail(&mdata.pages, mpage->list); return 0; }
/*
 * Initialize a per-component cbuf bookkeeping record for `spdid` (zeroed,
 * default pool target size, empty blocked-thread list) and register it
 * in the global component vector.
 */
static void
cbuf_comp_info_init(spdid_t spdid, struct cbuf_comp_info *cci)
{
	/* cleanup: removed unused local `void *p` */
	memset(cci, 0, sizeof(*cci));
	cci->spdid       = spdid;
	cci->target_size = CB_DEF_POOL_SZ;
	INIT_LIST(&cci->bthd_list, next, prev);
	cvect_add(&components, cci, spdid);
}
/*
 * Allocate a zero-initialized instruction node with the given kind and
 * source/destination operands; its list links are initialized so it can
 * be spliced into an instruction list.  Returns NULL on allocation
 * failure.
 */
Ins* Ins_new(Ins_kind kind, Value *src, Value *dst)
{
	/* idiom: no cast on calloc in C, and sizeof *ins tracks the
	 * pointed-to type automatically */
	Ins *ins = calloc(1, sizeof *ins);

	if (!ins) return NULL;
	INIT_LIST(ins);
	ins->kind = kind;
	ins->src  = src;
	ins->dst  = dst;
	return ins;
}
/*
 * Tracked wrapper around lock_component_take(): while the calling
 * thread is blocked on `lock_id`, a record (allocated on this thread's
 * stack) sits on the per-spd tracking list so fault-recovery paths can
 * find it.  The scheduler component lock protects the list on both the
 * add and remove side.  Returns the result of lock_component_take().
 */
static inline int
block_ser_if_block_track_lock_component_take(spdid_t spdid, ul_t lock_id, u32_t thd_id)
{
	int ret = 0;
	struct track_block tb;	// track on stack

	do { if (sched_component_take(cos_spd_id())) BUG(); } while (0);
	/* lazily initialize this spd's tracking-list head on first use
	 * (a NULL next pointer means the head was never set up) */
	if (unlikely(!tracking_block_list[spdid].next)) {
		INIT_LIST(&tracking_block_list[spdid], next, prev);
	}
	INIT_LIST(&tb, next, prev);
	tb.lock_id = lock_id;
	ADD_LIST(&tracking_block_list[spdid], &tb, next, prev);
	do { if (sched_component_release(cos_spd_id())) BUG(); } while (0);

	/* may block until the lock is acquired */
	ret = lock_component_take(spdid, lock_id, thd_id);

	/* unlink the stack record before it goes out of scope */
	do { if (sched_component_take(cos_spd_id())) BUG(); } while (0);
	REM_LIST(&tb, next, prev);
	do { if (sched_component_release(cos_spd_id())) BUG(); } while (0);

	return ret;
}
/*
 * Allocate a hash table with 2^bits buckets, each initialized to an
 * empty list.  Returns NULL if either allocation fails.
 * BUGFIX: the original dereferenced unchecked calloc() results; both
 * allocations are now checked, with the table object released if the
 * bucket array cannot be obtained.  calloc arguments also reordered to
 * the conventional (count, size).
 */
hash_table hash_table_alloc(int bits)
{
	int i;
	hash_table ret = calloc(1, sizeof(*ret));

	if (!ret) return NULL;
	ret->bits  = bits;
	ret->table = calloc((size_t)1 << bits, sizeof(*(ret->table)));
	if (!ret->table) {
		free(ret);
		return NULL;
	}
	for (i = 0; i < 1 << bits; i++)
		INIT_LIST(&(ret->table[i]));
	return ret;
}
/*
 * Map the persistent cbuf `cbid` into component `spdid` (the cbuf2buf
 * path): look up the component and cbuf records, refuse to map a cbuf
 * back into its owner, record the new mapping on the cbuf's owner list,
 * alias the backing page into the destination component, and update the
 * shared meta with the new address.  Returns 0 on success, -1 on any
 * failure.  The global CBUFP lock is held for the whole operation.
 */
int
cbufp_retrieve(spdid_t spdid, int cbid, int len)
{
	struct cbufp_comp_info *cci;
	struct cbufp_info *cbi;
	struct cbuf_meta *meta;
	struct cbufp_maps *map;
	vaddr_t dest;
	void *page;
	int ret = -1;

	CBUFP_TAKE();
	cci = cbufp_comp_info_get(spdid);
	if (!cci) goto done;
	cbi = cmap_lookup(&cbufs, cbid);
	if (!cbi) goto done;
	/* shouldn't cbuf2buf your own buffer! */
	if (cbi->owner.spdid == spdid) goto done;
	meta = cbufp_meta_lookup(cci, cbid);
	if (!meta) goto done;

	map = malloc(sizeof(struct cbufp_maps));
	if (!map) goto done;
	/* virtual address range in the destination component */
	dest = (vaddr_t)valloc_alloc(cos_spd_id(), spdid, 1);
	if (!dest) goto free;

	/* record this mapping on the cbuf's list of mappings */
	map->spdid = spdid;
	map->m     = meta;
	map->addr  = dest;
	INIT_LIST(map, next, prev);
	ADD_LIST(&cbi->owner, map, next, prev);

	page = cbi->mem;
	assert(page);
	/* alias the backing page into the destination component */
	if (dest != (mman_alias_page(cos_spd_id(), (vaddr_t)page, spdid, dest))) {
		assert(0);
		valloc_free(cos_spd_id(), spdid, (void *)dest, 1);
	}

	/* publish the destination address in the shared meta */
	meta->nfo.c.flags |= CBUFM_TOUCHED;
	meta->nfo.c.ptr    = map->addr >> PAGE_ORDER;
	ret = 0;
done:
	CBUFP_RELEASE();
	return ret;
free:
	free(map);
	goto done;
}
/*
 * FIXME: to make this predictable (avoid memory allocation in the
 * must-be-predictable case, we should really cos_vect_add_id when we
 * first find out about the possibility of the thread making any
 * invocations.
 */
/*
 * Return the blocked_thds record for thread `tid`, creating and
 * registering one on first use.  Returns NULL on allocation or
 * registration failure.
 */
static struct blocked_thds *bt_get(unsigned short int tid)
{
	struct blocked_thds *bt;

	bt = cos_vect_lookup(&bthds, tid);
	if (NULL == bt) {
		bt = malloc(sizeof(struct blocked_thds));
		if (NULL == bt) return NULL;
		INIT_LIST(bt, next, prev);
		bt->thd_id = tid;
		if (tid != cos_vect_add_id(&bthds, bt, tid)) {
			/* BUGFIX: bt was leaked on registration failure */
			free(bt);
			return NULL;
		}
	}
	return bt;
}
/*
 * FIXME: to make this predictable (avoid memory allocation in the
 * must-be-predictable case, we should really cos_vect_add_id when we
 * first find out about the possibility of the thread making any
 * invocations.
 */
/*
 * Return the thread_event record for thread `tid` in vector `v`,
 * creating and registering a zeroed one on first use.  Returns NULL on
 * allocation or registration failure.
 */
static struct thread_event *__te_get(unsigned short int tid, cos_vect_t *v)
{
	struct thread_event *te;

	te = cos_vect_lookup(v, tid);
	if (NULL == te) {
		te = malloc(sizeof(struct thread_event));
		if (NULL == te) return NULL;
		memset(te, 0, sizeof(struct thread_event));
		te->thread_id = tid;
		INIT_LIST(te, next, prev);
		if (tid != cos_vect_add_id(v, te, tid)) {
			/* BUGFIX: te was leaked on registration failure */
			free(te);
			return NULL;
		}
	}
	return te;
}
/*
 * Allocate a fresh cbuf item bound to local address `l_addr`, with an
 * otherwise empty descriptor and unlinked list node.  Allocation
 * failure is fatal (BUG()).
 */
struct cos_cbuf_item *alloc_item_data_struct(void *l_addr)
{
	struct cos_cbuf_item *item = malloc(sizeof(struct cos_cbuf_item));

	if (!item) BUG();
	INIT_LIST(item, next, prev);

	item->desc.addr      = l_addr;
	item->desc.cbid      = 0;
	item->desc.obj_sz    = 0;
	item->desc.principal = 0;

	return item;
}
/*
 * Initialize mapping `m` of virtual address `a` in component `spdid`,
 * backed by frame `f`, and link it into the mapping tree: `p` is the
 * parent mapping this one was aliased from (NULL for a root mapping).
 * A child inherits its parent's flags and is linked into the parent's
 * child list (siblings chained through the _s/s_ link fields).
 */
static void
mapping_init(struct mapping *m, spdid_t spdid, vaddr_t a, struct mapping *p, struct frame *f)
{
	assert(m && f);
	INIT_LIST(m, _s, s_);
	m->f     = f;
	m->flags = 0;
	m->spdid = spdid;
	m->addr  = a;
	m->p     = p;
	if (p) {
		m->flags = p->flags;
		/* first child hangs directly off p->c; later children are
		 * added to the sibling list rooted there */
		if (!p->c) p->c = m;
		else       ADD_LIST(p->c, m, _s, s_);
	}
}
/*
 * Record the cbuf_meta page that covers cbuf id `cbid` for component
 * `comp`: the record remembers the local meta pointer `m`, the address
 * `dest` it is mapped at in the component, and the first cbuf id whose
 * meta lives on that page.  Returns the new range record, or NULL when
 * cbid is already covered or allocation fails.
 */
static struct cbuf_meta_range *
cbuf_meta_add(struct cbuf_comp_info *comp, unsigned int cbid, struct cbuf_meta *m, vaddr_t dest)
{
	struct cbuf_meta_range *range;

	if (cbuf_meta_lookup(comp, cbid)) return NULL;
	range = malloc(sizeof(struct cbuf_meta_range));
	if (unlikely(!range)) return NULL;

	INIT_LIST(range, next, prev);
	range->m    = m;
	range->dest = dest;
	/* id of the first cbuf whose meta entry falls on this page */
	range->low_id = round_to_pow2(cbid, PAGE_SIZE/sizeof(struct cbuf_meta));

	if (comp->cbuf_metas) {
		ADD_LIST(comp->cbuf_metas, range, next, prev);
	} else {
		comp->cbuf_metas = range;
	}
	return range;
}