/*
 * Fetch a QCB from the controller's free list, growing the pool once
 * if it is found empty.  May still return NULL when ida_alloc_qcb()
 * fails to replenish the list.
 */
static __inline struct ida_qcb *
ida_get_qcb(struct ida_softc *ida)
{
	struct ida_qcb *qcb;

	qcb = SLIST_FIRST(&ida->free_qcbs);
	if (qcb == NULL) {
		/* Pool exhausted: try to add a QCB, then look again. */
		ida_alloc_qcb(ida);
		qcb = SLIST_FIRST(&ida->free_qcbs);
	}
	if (qcb != NULL)
		SLIST_REMOVE_HEAD(&ida->free_qcbs, link.sle);
	return (qcb);
}
/*
 * If both a queued waiter and a free command slot exist, dequeue both
 * and hand the command to the oldest waiter's callback.  Does nothing
 * when either list is empty.
 */
static void
ips_run_waiting_command(ips_softc_t *sc)
{
	ips_wait_list_t *waiter;
	ips_command_t *cmd;
	int (*cb)(ips_command_t *);
	intrmask_t mask;

	mask = splbio();
	waiter = STAILQ_FIRST(&sc->cmd_wait_list);
	cmd = SLIST_FIRST(&sc->free_cmd_list);
	if (waiter == NULL || cmd == NULL) {
		splx(mask);
		return;
	}
	DEVICE_PRINTF(1, sc->dev, "removing command from wait queue\n");
	SLIST_REMOVE_HEAD(&sc->free_cmd_list, next);
	STAILQ_REMOVE_HEAD(&sc->cmd_wait_list, next);
	sc->used_commands++;
	splx(mask);

	/* Reset the command before handing it to the waiter. */
	clear_ips_command(cmd);
	bzero(cmd->command_buffer, IPS_COMMAND_LEN);
	cmd->arg = waiter->data;
	cb = waiter->callback;
	free(waiter, M_DEVBUF);
	cb(cmd);
}
/*
 * Try to run `callback` on a free command immediately; otherwise queue
 * a wait-list entry so it runs when a command is released.
 *
 * Returns 0 when queued, the callback's result when run immediately,
 * ENOMEM when the waiter cannot be allocated, EIO when offline.
 */
static int
ips_add_waiting_command(ips_softc_t *sc, int (*callback)(ips_command_t *),
			void *data, unsigned long flags)
{
	intrmask_t mask;
	ips_command_t *command;
	ips_wait_list_t *waiter;
	unsigned long memflags = 0;

	if (IPS_NOWAIT_FLAG & flags)
		memflags = M_NOWAIT;
	waiter = malloc(sizeof(ips_wait_list_t), M_DEVBUF, memflags);
	if (!waiter)
		return ENOMEM;
	mask = splbio();
	if (sc->state & IPS_OFFLINE) {
		splx(mask);
		/* BUGFIX: the waiter was leaked on this early return. */
		free(waiter, M_DEVBUF);
		return EIO;
	}
	command = SLIST_FIRST(&sc->free_cmd_list);
	if (command && !(sc->state & IPS_TIMEOUT)) {
		/* A command is available now: run the callback directly. */
		SLIST_REMOVE_HEAD(&sc->free_cmd_list, next);
		(sc->used_commands)++;
		splx(mask);
		clear_ips_command(command);
		bzero(command->command_buffer, IPS_COMMAND_LEN);
		free(waiter, M_DEVBUF);
		command->arg = data;
		return callback(command);
	}
	DEVICE_PRINTF(1, sc->dev, "adding command to the wait queue\n");
	waiter->callback = callback;
	waiter->data = data;
	STAILQ_INSERT_TAIL(&sc->cmd_wait_list, waiter, next);
	splx(mask);
	return 0;
}
/*
 * Pre-allocate `alloc_num` memory entries of payload size `size` and
 * push them onto tree_entry's free list, bumping total_item for each.
 *
 * Returns 0 on success.  On allocation failure every entry currently on
 * the list is released, total_item is reset to match the now-empty
 * list, and -1 is returned.
 */
static int
mp_init_block(struct MP_TREE_ENTRY *tree_entry, long size, unsigned int alloc_num)
{
	struct MP_MEM_ENTRY *mem_entry;
	struct MP_MEM_ENTRY *new_mem_entry;
	unsigned int i;

	for (i = 0; i < alloc_num; i++) {
		/* Header plus payload in one allocation. */
		new_mem_entry = malloc(sizeof(struct MP_MEM_ENTRY) + size);
		if (new_mem_entry == NULL) {
			/*
			 * Unwind: free everything on the list.  The list is
			 * empty at both call sites, so after draining it we
			 * must also zero total_item — the original left the
			 * counter out of sync with the empty list, which
			 * later trips the "FATAL ERROR" branch in mp_alloc.
			 * (Also dropped a redundant free(NULL).)
			 */
			while (!SLIST_EMPTY(&(tree_entry->mem_head))) {
				mem_entry = SLIST_FIRST(&(tree_entry->mem_head));
				SLIST_REMOVE_HEAD(&(tree_entry->mem_head), mem_entries);
				free(mem_entry);
			}
			tree_entry->total_item = 0;
			return (-1);
		}
		new_mem_entry->size = size;
		SLIST_INSERT_HEAD(&(tree_entry->mem_head), new_mem_entry, mem_entries);
		tree_entry->total_item++;
	}
	return (0);
}
/*
 * Destroy all table data. This function can run when there are no
 * readers on table lists.
 */
int
dm_table_destroy(dm_table_head_t *head, uint8_t table_id)
{
	dm_table_t *tbl;
	dm_table_entry_t *entry;
	uint8_t id;

	lockmgr(&head->table_mtx, LK_EXCLUSIVE);

	aprint_debug("dm_Table_destroy called with %d--%d\n", table_id, head->io_cnt);

	/* Map the symbolic table id onto the concrete slot index. */
	id = (table_id == DM_TABLE_ACTIVE) ?
	    head->cur_active_table : 1 - head->cur_active_table;

	tbl = &head->tables[id];

	/* Pop and destroy each entry; the target releases its private
	 * config first (target_config should be NULL afterwards). */
	while ((entry = SLIST_FIRST(tbl)) != NULL) {
		entry->target->destroy(entry);
		SLIST_REMOVE_HEAD(tbl, next);
		kfree(entry, M_DM);
	}

	lockmgr(&head->table_mtx, LK_RELEASE);
	return 0;
}
void hashtable_delete(const char *key, uint32_t klen, struct hash_table *ht) { struct item_slh *bucket; struct item *it, *prev; ASSERT(hashtable_get(key, klen, ht) != NULL); bucket = _get_bucket(key, klen, ht); for (prev = NULL, it = SLIST_FIRST(bucket); it != NULL; prev = it, it = SLIST_NEXT(it, i_sle)) { /* iterate through bucket to find item to be removed */ if ((klen == it->klen) && cc_memcmp(key, item_key(it), klen) == 0) { /* found item */ break; } } if (prev == NULL) { SLIST_REMOVE_HEAD(bucket, i_sle); } else { SLIST_REMOVE_AFTER(prev, i_sle); } --(ht->nhash_item); }
static void destroy_buffers(struct chip_swap *swap) { struct block_space *blk_space; if (swap == NULL) return; blk_space = SLIST_FIRST(&swap->free_bs); while (blk_space) { SLIST_REMOVE_HEAD(&swap->free_bs, free_link); nand_debug(NDBG_SIM,"destroyed blk_space %p[%p]\n", blk_space, blk_space->blk_ptr); free(blk_space->blk_ptr, M_NANDSIM); free(blk_space, M_NANDSIM); blk_space = SLIST_FIRST(&swap->free_bs); } blk_space = STAILQ_FIRST(&swap->used_bs); while (blk_space) { STAILQ_REMOVE_HEAD(&swap->used_bs, used_link); nand_debug(NDBG_SIM,"destroyed blk_space %p[%p]\n", blk_space, blk_space->blk_ptr); free(blk_space->blk_ptr, M_NANDSIM); free(blk_space, M_NANDSIM); blk_space = STAILQ_FIRST(&swap->used_bs); } }
static yasm_expr * expr_expand_equ(yasm_expr *e, yasm__exprhead *eh) { int i; yasm__exprentry ee; /* traverse terms */ for (i=0; i<e->numterms; i++) { const yasm_expr *equ_expr; /* Expand equ's. */ if (e->terms[i].type == YASM_EXPR_SYM && (equ_expr = yasm_symrec_get_equ(e->terms[i].data.sym))) { yasm__exprentry *np; /* Check for circular reference */ SLIST_FOREACH(np, eh, next) { if (np->e == equ_expr) { yasm_error_set(YASM_ERROR_TOO_COMPLEX, N_("circular reference detected")); return e; } } e->terms[i].type = YASM_EXPR_EXPR; e->terms[i].data.expn = yasm_expr_copy(equ_expr); /* Remember we saw this equ and recurse */ ee.e = equ_expr; SLIST_INSERT_HEAD(eh, &ee, next); e->terms[i].data.expn = expr_expand_equ(e->terms[i].data.expn, eh); SLIST_REMOVE_HEAD(eh, next); } else if (e->terms[i].type == YASM_EXPR_EXPR)
/*
 * Release a GSS name: free its type OID elements, every mechanism
 * name hanging off it, its value buffer, and finally the name itself.
 * NULL input is accepted and treated as already released.
 */
OM_uint32 GSSAPI_LIB_FUNCTION
gss_release_name(OM_uint32 *minor_status, gss_name_t *input_name)
{
	struct _gss_name *name;
	struct _gss_mechanism_name *mn;

	*minor_status = 0;
	if (input_name == NULL || *input_name == NULL)
		return GSS_S_COMPLETE;

	name = (struct _gss_name *)*input_name;

	if (name->gn_type.elements)
		free(name->gn_type.elements);

	/* Drain the per-mechanism name list. */
	while ((mn = SLIST_FIRST(&name->gn_mn)) != NULL) {
		SLIST_REMOVE_HEAD(&name->gn_mn, gmn_link);
		mn->gmn_mech->gm_release_name(minor_status, &mn->gmn_name);
		free(mn);
	}

	gss_release_buffer(minor_status, &name->gn_value);
	free(name);
	*input_name = GSS_C_NO_NAME;
	return (GSS_S_COMPLETE);
}
/* returns a free command struct if one is available.
 * It also blanks out anything that may be a wild pointer/value.
 * Also, command buffers are not freed. They are
 * small so they are saved and kept dmamapped and loaded.
 */
int
ips_get_free_cmd(ips_softc_t *sc, int (*callback)(ips_command_t *),
		 void *data, unsigned long flags)
{
	intrmask_t mask;
	ips_command_t *cmd;

	mask = splbio();
	if (sc->state & IPS_OFFLINE) {
		splx(mask);
		return EIO;
	}
	cmd = SLIST_FIRST(&sc->free_cmd_list);
	if (cmd == NULL || (sc->state & IPS_TIMEOUT)) {
		/* Nothing usable now: fail fast or join the wait queue. */
		splx(mask);
		if (flags & IPS_NOWAIT_FLAG)
			return EAGAIN;
		return ips_add_waiting_command(sc, callback, data, flags);
	}
	SLIST_REMOVE_HEAD(&sc->free_cmd_list, next);
	sc->used_commands++;
	splx(mask);

	/* Scrub the command before use; the DMA buffer stays mapped. */
	clear_ips_command(cmd);
	bzero(cmd->command_buffer, IPS_COMMAND_LEN);
	cmd->arg = data;
	return callback(cmd);
}
/*
 * Allocate a CCB, sleeping until one is returned to the free list if
 * necessary.  Must not be used to allocate the reserved AEN CCB
 * (asserted).  Always returns a valid, initialized CCB.
 */
struct twe_ccb *
twe_ccb_alloc_wait(struct twe_softc *sc, int flags)
{
	struct twe_ccb *ccb;
	int s;

	KASSERT((flags & TWE_CCB_AEN) == 0);

	s = splbio();
	/* Block at splbio until a CCB is freed; twe_ccb_free is expected
	 * to wake us via the TWEF_WAIT_CCB flag. */
	while (__predict_false((ccb =
	    SLIST_FIRST(&sc->sc_ccb_freelist)) == NULL)) {
		sc->sc_flags |= TWEF_WAIT_CCB;
		(void) tsleep(&sc->sc_ccb_freelist, PRIBIO, "tweccb", 0);
	}
	SLIST_REMOVE_HEAD(&sc->sc_ccb_freelist, ccb_chain.slist);
#ifdef DIAGNOSTIC
	if ((ccb->ccb_flags & TWE_CCB_ALLOCED) != 0)
		panic("twe_ccb_alloc_wait: CCB %ld already allocated",
		    (long)(ccb - sc->sc_ccbs));
	/* NOTE(review): TWE_CCB_ALLOCED is set only under DIAGNOSTIC;
	 * twe_ccb_alloc follows the same pattern, so the flag appears to
	 * be a debug-only marker — confirm against twe_ccb_free. */
	flags |= TWE_CCB_ALLOCED;
#endif
	splx(s);

	twe_ccb_init(sc, ccb, flags);
	return (ccb);
}
static int dos_attack_init(void *dummy) { char dos_addr[MAX_ASCII_ADDR_LEN]; char unused_addr[MAX_ASCII_ADDR_LEN]; struct port_list *p; /* It doesn't work if unoffensive */ if (GBL_OPTIONS->unoffensive) { INSTANT_USER_MSG("dos_attack: plugin doesn't work in UNOFFENSIVE mode\n"); return PLUGIN_FINISHED; } /* don't show packets while operating */ GBL_OPTIONS->quiet = 1; memset(dos_addr, 0, sizeof(dos_addr)); memset(unused_addr, 0, sizeof(dos_addr)); ui_input("Insert victim IP: ", dos_addr, sizeof(dos_addr), NULL); if (ip_addr_pton(dos_addr, &victim_host) == -EINVALID) { INSTANT_USER_MSG("dos_attack: Invalid IP address.\n"); return PLUGIN_FINISHED; } ui_input("Insert unused IP: ", unused_addr, sizeof(unused_addr), NULL); if (ip_addr_pton(unused_addr, &fake_host) == -EINVALID) { INSTANT_USER_MSG("dos_attack: Invalid IP address.\n"); return PLUGIN_FINISHED; } if(victim_host.addr_type != fake_host.addr_type) { INSTANT_USER_MSG("dos_attack: Address' families don't match.\n"); return PLUGIN_FINISHED; } INSTANT_USER_MSG("dos_attack: Starting scan against %s [Fake Host: %s]\n", dos_addr, unused_addr); /* Delete the "open" port list just in case of previous executions */ while (!SLIST_EMPTY(&port_table)) { p = SLIST_FIRST(&port_table); SLIST_REMOVE_HEAD(&port_table, next); SAFE_FREE(p); } /* Add the hook to "create" the fake host */ if(ntohs(fake_host.addr_type) == AF_INET) hook_add(HOOK_PACKET_ARP_RQ, &parse_arp); #ifdef WITH_IPV6 else if(ntohs(fake_host.addr_type) == AF_INET6) hook_add(HOOK_PACKET_ICMP6_NSOL, &parse_icmp6); #endif /* Add the hook for SYN-ACK reply */ hook_add(HOOK_PACKET_TCP, &parse_tcp); /* create the flooding thread */ ec_thread_new("golem", "SYN flooder thread", &syn_flooder, NULL); return PLUGIN_RUNNING; }
/*
 * Release all resources held by an adv_softc, in reverse order of
 * initialization.  init_level records how far attach progressed; each
 * case deliberately falls through to undo the earlier stages too.
 */
void
adv_free(struct adv_softc *adv)
{
	switch (adv->init_level) {
	case 6:
	{
		struct adv_ccb_info *cinfo;

		/* Tear down every CCB info block on the free list. */
		while ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
			SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
			adv_destroy_ccb_info(adv, cinfo);
		}

		bus_dmamap_unload(adv->sense_dmat, adv->sense_dmamap);
	}
	/* FALLTHROUGH */
	case 5:
		bus_dmamem_free(adv->sense_dmat, adv->sense_buffers,
		    adv->sense_dmamap);
	/* FALLTHROUGH */
	case 4:
		bus_dma_tag_destroy(adv->sense_dmat);
	/* FALLTHROUGH */
	case 3:
		bus_dma_tag_destroy(adv->buffer_dmat);
	/* FALLTHROUGH */
	case 2:
		bus_dma_tag_destroy(adv->parent_dmat);
	/* FALLTHROUGH */
	case 1:
		if (adv->ccb_infos != NULL)
			free(adv->ccb_infos, M_DEVBUF);
	/* FALLTHROUGH */
	case 0:
		break;
	}
}
/* returns a free command struct if one is available.
 * It also blanks out anything that may be a wild pointer/value.
 * Also, command buffers are not freed. They are
 * small so they are saved and kept dmamapped and loaded.
 */
int
ips_get_free_cmd(ips_softc_t *sc, ips_command_t **cmd, unsigned long flags)
{
	ips_command_t *command;

	if (sc->state & IPS_OFFLINE)
		return EIO;

	if (flags & IPS_STATIC_FLAG) {
		/* The single reserved static command slot. */
		if (sc->state & IPS_STATIC_BUSY)
			return EAGAIN;
		command = sc->staticcmd;
		sc->state |= IPS_STATIC_BUSY;
	} else {
		command = SLIST_FIRST(&sc->free_cmd_list);
		if (command == NULL || (sc->state & IPS_TIMEOUT))
			return EBUSY;
		SLIST_REMOVE_HEAD(&sc->free_cmd_list, next);
		sc->used_commands++;
	}

	/* Scrub the command before handing it back. */
	clear_ips_command(command);
	bzero(command->command_buffer, IPS_COMMAND_LEN);
	*cmd = command;
	return 0;
}
/*
 * Free all the memory collected while the cdev mutex was
 * locked. Since devmtx is after the system map mutex, free() cannot
 * be called immediately and is postponed until cdev mutex can be
 * dropped.
 */
static void
dev_unlock_and_free(void)
{
	struct cdev_priv_list cdp_free;
	struct free_cdevsw csw_free;
	struct cdev_priv *cdp;
	struct cdevsw *csw;

	mtx_assert(&devmtx, MA_OWNED);

	/*
	 * Make the local copy of the list heads while the dev_mtx is
	 * held. Free it later.
	 */
	TAILQ_INIT(&cdp_free);
	TAILQ_CONCAT(&cdp_free, &cdevp_free_list, cdp_list);
	/* Struct copy of the SLIST head transfers the whole chain;
	 * re-init leaves the global list empty for new postings. */
	csw_free = cdevsw_gt_post_list;
	SLIST_INIT(&cdevsw_gt_post_list);
	mtx_unlock(&devmtx);

	/* Now safe to free: devmtx is no longer held. */
	while ((cdp = TAILQ_FIRST(&cdp_free)) != NULL) {
		TAILQ_REMOVE(&cdp_free, cdp, cdp_list);
		devfs_free(&cdp->cdp_c);
	}
	while ((csw = SLIST_FIRST(&csw_free)) != NULL) {
		SLIST_REMOVE_HEAD(&csw_free, d_postfree_list);
		free(csw, M_DEVT);
	}
}
/*
 * Search the cache list for a page table matching (vabase, ctx);
 * unlink and return it, or NULL when no entry matches.
 */
static struct pgt *pop_from_cache_list(vaddr_t vabase, void *ctx)
{
	struct pgt *prev;
	struct pgt *cur;

	prev = SLIST_FIRST(&pgt_cache_list);
	if (!prev)
		return NULL;

	/* Head match is a special case for SLIST removal. */
	if (match_pgt(prev, vabase, ctx)) {
		SLIST_REMOVE_HEAD(&pgt_cache_list, link);
		return prev;
	}

	for (cur = SLIST_NEXT(prev, link); cur;
	     prev = cur, cur = SLIST_NEXT(prev, link)) {
		if (match_pgt(cur, vabase, ctx)) {
			SLIST_REMOVE_AFTER(prev, link);
			return cur;
		}
	}
	return NULL;
}
/* ARGSUSED */ int poptag(int f, int n) { struct line *dotp; struct tagpos *s; if (SLIST_EMPTY(&shead)) { dobeep(); ewprintf("No previous location for find-tag invocation"); return (FALSE); } s = SLIST_FIRST(&shead); SLIST_REMOVE_HEAD(&shead, entry); if (loadbuffer(s->bname) == FALSE) return (FALSE); curwp->w_dotline = s->dotline; curwp->w_doto = s->doto; /* storing of dotp in tagpos wouldn't work out in cases when * that buffer is killed by user(dangling pointer). Explicitly * traverse till dotline for correct handling. */ dotp = curwp->w_bufp->b_headp; while (s->dotline--) dotp = dotp->l_fp; curwp->w_dotp = dotp; free(s->bname); free(s); return (TRUE); }
/*
 * Allocate a CCB without sleeping.  AEN allocations receive the
 * reserved CCB (index 0); all others come from the free list, and
 * NULL is returned when it is empty.
 */
struct twe_ccb *
twe_ccb_alloc(struct twe_softc *sc, int flags)
{
	struct twe_ccb *ccb;
	int s;

	s = splbio();
	if (__predict_false((flags & TWE_CCB_AEN) != 0)) {
		/* Use the reserved CCB. */
		ccb = sc->sc_ccbs;
	} else {
		/* Allocate a CCB and command block. */
		if (__predict_false((ccb =
		    SLIST_FIRST(&sc->sc_ccb_freelist)) == NULL)) {
			splx(s);
			return (NULL);
		}
		SLIST_REMOVE_HEAD(&sc->sc_ccb_freelist, ccb_chain.slist);
	}
#ifdef DIAGNOSTIC
	if ((long)(ccb - sc->sc_ccbs) == 0 && (flags & TWE_CCB_AEN) == 0)
		panic("twe_ccb_alloc: got reserved CCB for non-AEN");
	if ((ccb->ccb_flags & TWE_CCB_ALLOCED) != 0)
		panic("twe_ccb_alloc: CCB %ld already allocated",
		    (long)(ccb - sc->sc_ccbs));
	/* NOTE(review): like twe_ccb_alloc_wait, the ALLOCED marker is
	 * only set under DIAGNOSTIC — confirm this is intentional. */
	flags |= TWE_CCB_ALLOCED;
#endif
	splx(s);

	twe_ccb_init(sc, ccb, flags);
	return (ccb);
}
/*
 * Allocate a new TPD, zero the TPD part. Cannot return NULL if
 * flag is 0. The TPD is removed from the free list and its used
 * bit is set.
 *
 * NOTE(review): the blocking (flags without M_NOWAIT) path below does
 * return NULL when tpd_nfree == 0, which appears to contradict the
 * "cannot return NULL" claim above — presumably the TPD reserve
 * (HE_CONFIG_TPD_RESERVE) guarantees blocking callers always find a
 * free TPD; confirm against the callers.
 */
static struct tpd *
hatm_alloc_tpd(struct hatm_softc *sc, u_int flags)
{
	struct tpd *t;

	/* if we allocate a transmit TPD check for the reserve */
	if (flags & M_NOWAIT) {
		/* Non-blocking callers may not dip into the reserve. */
		if (sc->tpd_nfree <= HE_CONFIG_TPD_RESERVE)
			return (NULL);
	} else {
		if (sc->tpd_nfree == 0)
			return (NULL);
	}

	/* make it beeing used */
	t = SLIST_FIRST(&sc->tpd_free);
	KASSERT(t != NULL, ("tpd botch"));
	SLIST_REMOVE_HEAD(&sc->tpd_free, link);
	TPD_SET_USED(sc, t->no);
	sc->tpd_nfree--;

	/* initialize */
	t->mbuf = NULL;
	t->cid = 0;
	bzero(&t->tpd, sizeof(t->tpd));
	/* Encode the TPD's own slot number into its DMA address field. */
	t->tpd.addr = t->no << HE_REGS_TPD_ADDR;

	return (t);
}
/*
 * Unlink and return the cache-list entry with the fewest used entries.
 * Returns NULL when the list is empty.  When the head itself has zero
 * used entries it is taken immediately (the scan cannot do better).
 */
static struct pgt *pop_least_used_from_cache_list(void)
{
	struct pgt *pgt;
	struct pgt *p_prev = NULL;
	size_t least_used;

	pgt = SLIST_FIRST(&pgt_cache_list);
	if (!pgt)
		return NULL;
	if (!pgt->num_used_entries)
		goto out;

	least_used = pgt->num_used_entries;
	/* Scan, remembering the PREDECESSOR of the current minimum so it
	 * can be unlinked with SLIST_REMOVE_AFTER.  `<=` makes the scan
	 * prefer the last of several equally-small entries. */
	while (true) {
		if (!SLIST_NEXT(pgt, link))
			break;
		if (SLIST_NEXT(pgt, link)->num_used_entries <= least_used) {
			p_prev = pgt;
			least_used = SLIST_NEXT(pgt, link)->num_used_entries;
		}
		pgt = SLIST_NEXT(pgt, link);
	}

out:
	/* p_prev == NULL means the head itself is the minimum. */
	if (p_prev) {
		pgt = SLIST_NEXT(p_prev, link);
		SLIST_REMOVE_AFTER(p_prev, link);
	} else {
		pgt = SLIST_FIRST(&pgt_cache_list);
		SLIST_REMOVE_HEAD(&pgt_cache_list, link);
	}
	return pgt;
}
void* new_blocks(struct s_arena* arena,size_t count){ /* TODO better allocation sequence. it will require TAILQ. */ struct mega_bdescr* mega_block = SLIST_FIRST(&(arena->mega_blocks)); struct single_bdescr* new_block = NULL; void* new_vblocks = NULL; if(count > ( MEGA_BLOCK_SIZE / BLOCK_SIZE)){ debug("ERROR: %s cannot allocate size %08x\n",__FUNCTION__,count); return NULL; } if(mega_block == NULL){ mega_block = new_megablock(arena); SLIST_INSERT_HEAD(&(arena->mega_blocks),mega_block,link); } /* now mega_block is non-NULL */ if(mega_block->nblocks <= count) { mega_block = new_megablock(arena); SLIST_INSERT_HEAD(&(arena->mega_blocks),mega_block,link); } SLIST_REMOVE_HEAD(&(arena->mega_blocks),link); new_vblocks = new_blocks_from_megablock(&mega_block,count); if(mega_block != NULL){ SLIST_INSERT_HEAD(&(arena->mega_blocks),mega_block,link); } return new_vblocks; }
/*
 * Pop the innermost jump-protector from the context and invoke its
 * exit hook with a NULL target.
 *
 * NOTE(review): assumes the jprot stack is non-empty — SLIST_FIRST()
 * on an empty list yields NULL and the call through jprot->exit would
 * dereference it.  Push/pop must be balanced at all call sites;
 * confirm.
 */
void ava_codegen_pop_jprot(ava_codegen_context* context) {
  ava_codegen_jprot* jprot;

  jprot = SLIST_FIRST(&context->jprots);
  SLIST_REMOVE_HEAD(&context->jprots, next);
  (*jprot->exit)(context, NULL, jprot->userdata);
}
static void * mp_alloc(long size) { struct MP_TREE_ENTRY find; struct MP_TREE_ENTRY *cur_tree_entry; struct MP_TREE_ENTRY *new_tree_entry; struct MP_MEM_ENTRY *new_mem_entry; struct MP_MEM_ENTRY *mem_entry; int retval; if (!mp_initialized) return NULL; if (size <= 0) return NULL; find.mem_size = size; cur_tree_entry = RB_FIND(MP_TREE, &mp_tree, &find); if (cur_tree_entry == NULL) { new_tree_entry = (struct MP_TREE_ENTRY *)calloc(1, sizeof(struct MP_TREE_ENTRY)); if (new_tree_entry == NULL) return (NULL); new_tree_entry->mem_size = size; new_tree_entry->total_item = 0; SLIST_INIT(&(new_tree_entry->mem_head)); if (mp_init_block(new_tree_entry, size, g_pre_alloc_num - 1) < 0) return (NULL); RB_INSERT(MP_TREE, &mp_tree, new_tree_entry); } else { if (cur_tree_entry->total_item == 0) { SLIST_INIT(&(cur_tree_entry->mem_head)); retval = mp_init_block(cur_tree_entry, cur_tree_entry->mem_size, g_pre_alloc_num - 1); if (retval < 0) return (NULL); } else { if (!SLIST_EMPTY(&(cur_tree_entry->mem_head))) { mem_entry = SLIST_FIRST(&(cur_tree_entry->mem_head)); SLIST_REMOVE_HEAD(&(cur_tree_entry->mem_head), mem_entries); cur_tree_entry->total_item--; return ((void *)(++mem_entry)); } else { printf("alloc: FATAL ERROR!\n"); return (NULL); } } } new_mem_entry = (struct MP_MEM_ENTRY *)calloc(1, sizeof(struct MP_MEM_ENTRY) + size); if (new_mem_entry == NULL) return (NULL); new_mem_entry->size = size; return ((void *)(++new_mem_entry)); }
/*
 * Empty the table list, dropping one reference per table.
 */
void
free_tables(struct Table_head *tables)
{
	struct Table *t;

	while (!SLIST_EMPTY(tables)) {
		t = SLIST_FIRST(tables);
		SLIST_REMOVE_HEAD(tables, entries);
		table_ref_put(t);
	}
}
/*
 * Grow the controller's CCB pool: allocate one page of DMA-able S/G
 * space and carve as many CCBs out of it as fit (bounded by max_ccbs),
 * pushing each onto the free list.  Silently returns on allocation
 * failure — callers must cope with the pool not growing.
 */
static void
ahaallocccbs(struct aha_softc *aha)
{
	struct aha_ccb *next_ccb;
	struct sg_map_node *sg_map;
	bus_addr_t physaddr;
	aha_sg_t *segs;
	int newcount;
	int i;

	next_ccb = &aha->aha_ccb_array[aha->num_ccbs];

	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);
	if (sg_map == NULL)
		return;

	/* Allocate S/G space for the next batch of CCBS */
	if (bus_dmamem_alloc(aha->sg_dmat, (void **)&sg_map->sg_vaddr,
	    BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
		free(sg_map, M_DEVBUF);
		return;
	}

	SLIST_INSERT_HEAD(&aha->sg_maps, sg_map, links);

	/* ahamapsgs records the page's bus address in sg_physaddr. */
	bus_dmamap_load(aha->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
	    PAGE_SIZE, ahamapsgs, aha, /*flags*/0);

	segs = sg_map->sg_vaddr;
	physaddr = sg_map->sg_physaddr;
	/* Number of AHA_NSEG-sized S/G lists that fit in one page. */
	newcount = (PAGE_SIZE / (AHA_NSEG * sizeof(aha_sg_t)));
	for (i = 0; aha->num_ccbs < aha->max_ccbs && i < newcount; i++) {
		int error;

		next_ccb->sg_list = segs;
		next_ccb->sg_list_phys = physaddr;
		next_ccb->flags = ACCB_FREE;
		callout_init_mtx(&next_ccb->timer, &aha->lock, 0);
		error = bus_dmamap_create(aha->buffer_dmat, /*flags*/0,
		    &next_ccb->dmamap);
		if (error != 0)
			break;
		SLIST_INSERT_HEAD(&aha->free_aha_ccbs, next_ccb, links);
		segs += AHA_NSEG;
		physaddr += (AHA_NSEG * sizeof(aha_sg_t));
		next_ccb++;
		aha->num_ccbs++;
	}

	/* Reserve a CCB for error recovery */
	if (aha->recovery_accb == NULL) {
		/* NOTE(review): if the loop above added no CCBs (e.g. the
		 * first dmamap_create failed) the free list may be empty
		 * here, making recovery_accb NULL while REMOVE_HEAD runs on
		 * an empty list — confirm callers guarantee otherwise. */
		aha->recovery_accb = SLIST_FIRST(&aha->free_aha_ccbs);
		SLIST_REMOVE_HEAD(&aha->free_aha_ccbs, links);
	}
}
/*
 * Drain a pgt cache, returning every table to the global free list.
 * Caller holds the pgt lock; save_ctx is unused here.
 */
static void pgt_free_unlocked(struct pgt_cache *pgt_cache, bool save_ctx __unused)
{
	struct pgt *p;

	while ((p = SLIST_FIRST(pgt_cache)) != NULL) {
		SLIST_REMOVE_HEAD(pgt_cache, link);
		push_to_free_list(p);
	}
}
/*
 * Take a page table off the free list and zero its translation table.
 * Returns NULL when the free list is empty.
 */
static struct pgt *pop_from_free_list(void)
{
	struct pgt *pgt = SLIST_FIRST(&pgt_free_list);

	if (pgt) {
		SLIST_REMOVE_HEAD(&pgt_free_list, link);
		/* Hand out a clean table. */
		memset(pgt->tbl, 0, PGT_SIZE);
	}
	return pgt;
}
/*
 * Pop a QCB off the free list, scrubbing its hardware header and
 * request area for reuse.  Returns NULL when the list is empty.
 */
static __inline struct ida_qcb *
ida_get_qcb(struct ida_softc *ida)
{
	struct ida_qcb *qcb = SLIST_FIRST(&ida->free_qcbs);

	if (qcb != NULL) {
		SLIST_REMOVE_HEAD(&ida->free_qcbs, link.sle);
		bzero(qcb->hwqcb,
		    sizeof(struct ida_hdr) + sizeof(struct ida_req));
	}
	return (qcb);
}
void doomer_run(void) { enter_mono_region(); SLOG(LOG_DEBUG, "Deleting doomed objects..."); unsigned nb_dels = 0, nb_rescued = 0; // Bench time spent scanning death_row uint64_t start = bench_event_start(); // Rescue from death_row the objects which ref count is > 0, // and queue into kill_list the one no longer accessible (they can not even reach each others) struct refs to_kill; SLIST_INIT(&to_kill); struct ref *r; while (NULL != (r = SLIST_FIRST(&death_row))) { SLIST_REMOVE_HEAD(&death_row, entry); if (r->count == 0) { SLIST_INSERT_HEAD(&to_kill, r, entry); nb_dels ++; } else { nb_rescued ++; } } SLOG(nb_dels + nb_rescued > 0 ? LOG_INFO:LOG_DEBUG, "Deleted %u objects, rescued %u", nb_dels, nb_rescued); bench_event_stop(&dooming, start); // No need to block parsing any more since the selected objects are not accessible leave_protected_region(); // Delete all selected objects while (NULL != (r = SLIST_FIRST(&to_kill))) { // Beware that r->del() may doom further objects, which will be added in the death_row for next run SLOG(LOG_DEBUG, "Delete next object on kill list: %p", r); SLIST_REMOVE_HEAD(&to_kill, entry); r->entry.sle_next = NULL; // the deletor must not care about the ref (since the decision to del the object was already taken) r->del(r); } }
/*
 * Release every entry on the ignore list, including each entry's
 * pattern mask.
 */
void
ignoreclean(void)
{
	struct ignentry *ign;

	while ((ign = SLIST_FIRST(&ignores)) != NULL) {
		SLIST_REMOVE_HEAD(&ignores, next);
		free(ign->mask);
		free(ign);
	}
}