/* TODO DK: check automatically if all merges were done properly! */
static void tc_alloc_dealloc(void *ctx)
{
  test_framework_t *tf = ctx;
  list_node_t *iter;
  list_head_t head;
  page_idx_t allocated, saved_ap, resr = 0;
  page_frame_t *pages;
  page_idx_t c;

  tf->printf("Target MM pool: %s\n", tlsf_ctx.pool->name);
  tf->printf("Number of allocatable pages: %d\n", tlsf_ctx.pool->free_pages);
  saved_ap = atomic_get(&tlsf_ctx.pool->free_pages);

#ifdef CONFIG_SMP
  /* Pages sitting in per-CPU caches (all CPUs except the boot one)
   * are reserved and can not be allocated by this test. */
  for_each_cpu(c) {
    if (!tlsf_ctx.tlsf->percpu[c] || !c)
      continue;

    resr += tlsf_ctx.tlsf->percpu[c]->noc_pages;
  }
#endif /* CONFIG_SMP */

  /* Phase 1: allocate every available page one-by-one, chaining them into a list. */
  tf->printf("Allocate all possible pages one-by-one...\n");
  list_init_head(&head);
  allocated = 0;
  for (;;) {
    pages = alloc_page(AF_ZERO);
    tlsf_validate_dbg(tlsf_ctx.tlsf);
    if (!pages)
      break;

    list_add2tail(&head, &pages->chain_node);
    allocated++;
  }
  if (atomic_get(&tlsf_ctx.pool->free_pages) != resr) {
    tf->printf("Failed to allocate %d pages. %d pages remain\n",
               saved_ap, atomic_get(&tlsf_ctx.pool->free_pages));
    tf->failed();
  }
  if (allocated != saved_ap) {
    tf->printf("Not all pages were allocated from TLSF.\n");
    tf->printf("Total: %d. Allocated: %d\n", saved_ap, allocated);
  }

  mmpool_allocator_dump(tlsf_ctx.pool);

  /* Phase 2: free the whole chain and make sure everything returned to the pool. */
  tf->printf("Free allocated %d pages.\n", allocated);
  pages = list_entry(list_node_first(&head), page_frame_t, chain_node);
  list_cut_head(&head);
  free_pages_chain(pages);
  if (atomic_get(&tlsf_ctx.pool->free_pages) != saved_ap) {
    tf->printf("Not all pages were freed: %d remain (%d total)\n",
               saved_ap - atomic_get(&tlsf_ctx.pool->free_pages), saved_ap);
    tf->failed();
  }

  mmpool_allocator_dump(tlsf_ctx.pool);

  /* Phase 3: allocate all available pages again, this time in one
   * non-contiguous allocation, then free them. */
  tf->printf("Allocate all possible pages using non-contiguous allocation\n");
  pages = alloc_pages(saved_ap - resr, AF_ZERO | AF_USER);
  if (!pages) {
    tf->printf("Failed to allocate non-contiguous %d pages!\n", saved_ap - resr);
    tf->failed();
  }

  tlsf_validate_dbg(tlsf_ctx.tlsf);
  mmpool_allocator_dump(tlsf_ctx.pool);
  allocated = 0;
  list_set_head(&head, &pages->chain_node);
  list_for_each(&head, iter)
    allocated++;
  if (allocated != (saved_ap - resr)) {
    tf->printf("Invalid number of pages allocated: %d (%d was expected)\n",
               allocated, saved_ap - resr);
    tf->failed();
  }

  list_cut_head(&head);
  free_pages_chain(pages);
  if (atomic_get(&tlsf_ctx.pool->free_pages) != saved_ap) {
    tf->printf("Not all pages were freed: %d remain (%d total)\n",
               saved_ap - atomic_get(&tlsf_ctx.pool->free_pages), saved_ap);
    tf->failed();
  }

  mmpool_allocator_dump(tlsf_ctx.pool);
  tlsf_ctx.completed = true;
  sys_exit(0);
}
static scret_t __sxmpd_filter_add(yd_context_t *ctx, sexp_t *sx, void *priv)
{
  register int state = 0;
  register int idx;
  int filter = -1; /* 0 - pem, 1 - account, 2 - rpc, 3 - on destroy, 4 - on pulse */
  int r = EINVAL;
  usrtc_node_t *node;
  char *name = NULL;
  void *refobj = NULL;
  sexp_t *isx;
  struct sxmpd_node *nn;
  list_head_t *lhead = NULL;
  filter_item_t *nitem;
  scret_t rets, refret;

  SEXP_ITERATE_LIST(sx, isx, idx) {
    if(isx->ty == SEXP_LIST && state < 2) {
      RETURN_SRET_IRES(rets, r);
    }

    if(!state) {
      /* The first atom selects which filter list the object will be added to. */
      if(!strcmp(isx->val, SXMPDSYN_PEM_FILTERADD)) filter = 0;
      else if(!strcmp(isx->val, SXMPDSYN_ACCOUNT_FILTERADD)) filter = 1;
      else if(!strcmp(isx->val, SXMPDSYN_RPC_FILTERADD)) filter = 2;
      else if(!strcmp(isx->val, SXMPDSYN_ONDESTROY_FILTERADD)) filter = 3;
      else if(!strcmp(isx->val, SXMPDSYN_ONPULSE_FILTERADD)) filter = 4;
      else {
        RETURN_SRET_IRES(rets, r);
      }

      state++;
    } else {
      switch(state) {
      case 1: /* quoted name of the target node */
        if(isx->aty != SEXP_SQUOTE) {
          RETURN_SRET_IRES(rets, r);
        }

        name = isx->val;
        state++;
        break;
      case 2: /* expression that must evaluate to the filter object */
        if(isx->ty != SEXP_LIST) {
          RETURN_SRET_IRES(rets, r);
        }

        refret = yd_eval_sexp(ctx, isx);
        if(refret.type != SCOBJECTPTR) {
          RETURN_SRET_IRES(rets, r);
        } else refobj = refret.ret;
        break;
      default:
        RETURN_SRET_IRES(rets, r);
        break;
      }
    }
  }

  if(!refobj || !name) {
    RETURN_SRET_IRES(rets, r);
  }

  /* ok, here we go */
  node = usrtc_lookup(&(glins->tree), (const void *)name);
  if(!node) r = ENOENT;
  else {
    nn = (struct sxmpd_node *)usrtc_node_getdata(node);
    /* decide on the filter list */
    switch(filter) {
    case 0: lhead = &(nn->pem_filter); break;
    case 1: lhead = &(nn->account_filter); break;
    case 2: lhead = &(nn->rpc_filter); break;
    case 3: lhead = &(nn->ondestroy_filter); break;
    case 4: lhead = &(nn->onpulse_filter); break;
    }

    if(!(nitem = malloc(sizeof(filter_item_t)))) r = ENOMEM;
    else {
      /* link the new filter item to the tail of the selected list */
      r = 0;
      nitem->obj = refobj;
      list_init_node(&nitem->node);
      list_add2tail(lhead, &nitem->node);
    }
  }

  RETURN_SRET_IRES(rets, r);
}
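/*
 * Illustration only (not taken from the source): judging by the parse loop
 * above, the handler expects an s-expression of the shape
 *
 *   (<filter-add-keyword> 'node-name (expression yielding an object))
 *
 * where the first atom is one of the SXMPDSYN_*_FILTERADD keywords, the
 * second element is a single-quoted node name looked up in glins->tree, and
 * the third is a list that yd_eval_sexp() must evaluate to an SCOBJECTPTR
 * result. The concrete keyword spellings and any object-constructing
 * function used in the third position are assumptions, e.g. something like:
 *
 *   (pem-filter-add 'worker0 (make-pem-filter "default"))
 */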