int vbds_alloc(xsis_vbds_t *vbds, xsis_flts_t *domids, xsis_flts_t *vbdids)
{
	// Local variables
	DIR		*dp = NULL;	// dir pointer
	struct dirent	*dirp;		// dirent pointer
	xsis_vbd_t	*vbd;		// Temporary VBD pointer
	uint32_t	domid;		// Temporary DOM ID
	uint32_t	vbdid;		// Temporary VBD ID
	int		err = 0;	// Return code

	// Open VBD3 base dir
	if (!(dp = opendir(XSIS_VBD3_DIR))) {
		perror("opendir");
		goto err;
	}

	// Scan for valid VBD entries
	while ((dirp = readdir(dp))) {
		// Skip irrelevant entries and fetch DOM/VBD ids
		if (sscanf(dirp->d_name, XSIS_VBD3_BASEFMT, &domid, &vbdid) != 2)
			continue;

		// Filter domids and vbdids
		if (!LIST_EMPTY(domids))
			if (!flt_isset(domids, domid))
				continue;
		if (!LIST_EMPTY(vbdids))
			if (!flt_isset(vbdids, vbdid))
				continue;

		// Do not add repeated entries
		LIST_FOREACH(vbd, vbds, vbds)
			if ((vbd->domid == domid) && (vbd->vbdid == vbdid))
				break;
		if (vbd)
			continue;

		// Allocate VBD entry
		if (vbd_open(&vbd, domid, vbdid))
			continue;

		// Insert new VBD in list
		LIST_INSERT_HEAD(vbds, vbd, vbds);
	}

out:
	// Close VBD3 base dir
	if (dp)
		closedir(dp);

	// Return
	return (err);

err:
	err = 1;
	goto out;
}
/*****************************************************************************
 * FUNCTION
 *  ies_task_main
 * DESCRIPTION
 *  image viewer daemon task main function & MSG loop
 * PARAMETERS
 *  *task_entry_ptr     [IN] task entry structure
 * RETURNS
 *  void
 *****************************************************************************/
static void ies_task_main(task_entry_struct *task_entry_ptr)
{
    /*----------------------------------------------------------------*/
    /* Local Variables                                                */
    /*----------------------------------------------------------------*/
    ilm_struct current_ilm;
    kal_uint32 msg_count;

    /*----------------------------------------------------------------*/
    /* Code Body                                                      */
    /*----------------------------------------------------------------*/
    kal_set_active_module_id(MOD_IES);

    while (1)
    {
        /* peek and check command */
        msg_count = msg_get_extq_messages();
        while (msg_count > 0)
        {
            msg_receive_extq_for_stack(&current_ilm);
            _ies_task_dispatch_message(&current_ilm);
            destroy_ilm(&current_ilm);
            /* consume ext queue eagerly */
            msg_count = msg_get_extq_messages();
        }

        if (!g_ies_task_context.pJob)
        {
            /* At this point, there may be pending jobs. */
            _ies_task_job_handle_queue();
            if (!g_ies_task_context.pJob)
            {
                /* We still have nothing to do: block on the queue. */
                if ((LIST_EMPTY(&(g_ies_task_context.normal))) &&
                    (LIST_EMPTY(&(g_ies_task_context.lowest))))
                {
                    msg_receive_extq_for_stack(&current_ilm);
                    _ies_task_dispatch_message(&current_ilm);
                    destroy_ilm(&current_ilm);
                }
            }
        }
        else
        {
            /* After processing all commands, perform a job iteration
             * if there are active jobs. */
            if (_ies_task_job_handle_start(g_ies_task_context.pJob))
            {
                /* job finished */
                g_ies_task_context.pJob = NULL;
                _ies_task_job_handle_queue();
            }
        }
    }
}
/** ========================================================================= */
static void destroy_sa_qp(struct oib_port *port)
{
	int i;

	/* If the user just unregistered trap messages, those messages may
	 * still be on this list; wait up to 5 seconds for the thread to
	 * handle the response. */
	for (i = 0; i < 5000; i++) {
		if (!LIST_EMPTY(&port->pending_reg_msg_head)) {
			usleep(1000);
		} else {
			DBGPRINT("destroy_sa_qp: waited %d ms for LIST_EMPTY\n", i);
			break;
		}
	}

	stop_ud_cq_monitor(port);
	join_port_thread(port);

	/* Free any remaining unregistration messages */
	if (!LIST_EMPTY(&port->pending_reg_msg_head)) {
		OUTPUT_ERROR("Ignoring pending Notice un-registration requests\n");
		oib_sa_remove_all_pending_reg_msgs(port);
	}

	if (port->sa_ah)
		ibv_destroy_ah(port->sa_ah);
	if (port->sa_qp)
		ibv_destroy_qp(port->sa_qp);
	for (i = 0; i < port->num_userspace_recv_buf; i++)
		if (port->recv_bufs)
			ibv_dereg_mr(port->recv_bufs[i].mr);
	if (port->sa_qp_pd)
		ibv_dealloc_pd(port->sa_qp_pd);
	if (port->sa_qp_cq)
		ibv_destroy_cq(port->sa_qp_cq);
	if (port->recv_bufs) {
		free(port->recv_bufs);
		port->recv_bufs = NULL;
	}
	if (port->sa_qp_comp_channel)
		ibv_destroy_comp_channel(port->sa_qp_comp_channel);
}
void *
kore_pool_get(struct kore_pool *pool)
{
	u_int8_t		*ptr;
	struct kore_pool_entry	*entry;

	/* Grow the pool by another region when the freelist is exhausted. */
	if (LIST_EMPTY(&(pool->freelist))) {
		kore_log(LOG_NOTICE, "pool %s is exhausted (%d/%d)",
		    pool->name, pool->inuse, pool->elms);
		pool_region_create(pool, pool->elms);
	}

	entry = LIST_FIRST(&(pool->freelist));
	if (entry->state != POOL_ELEMENT_FREE)
		fatal("%s: element %p was not free", pool->name, entry);
	LIST_REMOVE(entry, list);

	entry->state = POOL_ELEMENT_BUSY;

	/* The user pointer sits right after the entry header. */
	ptr = (u_int8_t *)entry + sizeof(struct kore_pool_entry);

	pool->inuse++;

	return (ptr);
}
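A minimal usage sketch for the allocator above, assuming the usual Kore pool API (kore_pool_init and kore_pool_put are Kore functions; the conn structure and element count here are hypothetical):

struct conn {
	int	fd;
};

static struct kore_pool	conn_pool;

void
conn_pool_example(void)
{
	struct conn	*c;

	/* 100 preallocated elements of sizeof(struct conn) bytes each. */
	kore_pool_init(&conn_pool, "conn_pool", sizeof(struct conn), 100);

	c = kore_pool_get(&conn_pool);	/* grows the pool if exhausted */
	c->fd = -1;
	kore_pool_put(&conn_pool, c);	/* returns the element to the freelist */
}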
int Sched::addtoactive(Task &task)
{
	Task *pos;
	uint8_t task_priority;

	task_priority = task.Task_GetSchedPriority();
	DEBUG_PRINT("addtoactive:task_priority:%d\n", task_priority);
	task.Task_SetState(TSTATE_TASK_READYTORUN);

	if (LIST_EMPTY(task_active)) {
		LIST_ADD(task_active, task);
		DEBUG_PRINT("LIST_EMPTY:LIST_ADD to task_active OK\n");
		Sched_SetCurrentTask(task);
		DEBUG_PRINT("It's the first task.\n");
		return OK;
	} else {
		if (LIST_LAST_ENTRY(task_active).Task_GetSchedPriority() > task_priority) {
			/* Lower priority than every queued task: append at the tail. */
			LIST_ADD_TAIL(task_active, task);
		} else {
			/* Insert before the first task of equal or lower priority. */
			LIST_FOR_EACH_ENTRY(task_active, pos) {
				if (pos->Task_GetSchedPriority() <= task_priority) {
					LIST_ADD_BEFORE(task_active, task, (*pos));
					break;	/* insert once only */
				}
			}
			if (!Sched_locked() && IS_LIST_FIRST_ENTRY(task_active, task))
				return OK;
		}
	}
	return NO;
}
static int
pflog_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		LIST_INIT(&pflog_list);
		if_clone_attach(&pflog_cloner);
		break;

	case MOD_UNLOAD:
		if_clone_detach(&pflog_cloner);
		while (!LIST_EMPTY(&pflog_list))
			pflog_clone_destroy(
			    &LIST_FIRST(&pflog_list)->sc_if);
		break;

	default:
		error = EINVAL;
		break;
	}

	return error;
}
/*
 * Prepare context switch from oldlwp to newlwp.
 * This code is shared by cpu_switch and cpu_switchto.
 */
struct lwp *
cpu_switch_prepare(struct lwp *oldlwp, struct lwp *newlwp)
{
	newlwp->l_stat = LSONPROC;

	if (newlwp != oldlwp) {
		struct proc *p = newlwp->l_proc;

		curpcb = newlwp->l_md.md_pcb;
		pmap_activate(newlwp);

		/* Check for Restartable Atomic Sequences. */
		if (!LIST_EMPTY(&p->p_raslist)) {
			caddr_t pc;

			pc = ras_lookup(p,
			    (caddr_t)newlwp->l_md.md_regs->tf_spc);
			if (pc != (caddr_t)-1)
				newlwp->l_md.md_regs->tf_spc = (int)pc;
		}
	}

	curlwp = newlwp;
	return (newlwp);
}
/*
 * Flush out the buffer cache
 */
int
smbfs_sync(struct mount *mp, int waitfor, kauth_cred_t cred)
{
	struct vnode *vp;
	struct vnode_iterator *marker;
	struct smbnode *np;
	int error, allerror = 0;

	vfs_vnode_iterator_init(mp, &marker);
	while (vfs_vnode_iterator_next(marker, &vp)) {
		error = vn_lock(vp, LK_EXCLUSIVE);
		if (error) {
			vrele(vp);
			continue;
		}
		np = VTOSMB(vp);
		if (np == NULL) {
			vput(vp);
			continue;
		}
		if ((vp->v_type == VNON || (np->n_flag & NMODIFIED) == 0) &&
		    LIST_EMPTY(&vp->v_dirtyblkhd) &&
		    vp->v_uobj.uo_npages == 0) {
			vput(vp);
			continue;
		}
		error = VOP_FSYNC(vp, cred,
		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0);
		if (error)
			allerror = error;
		vput(vp);
	}
	vfs_vnode_iterator_destroy(marker);
	return (allerror);
}
/*
 * Used in the fork case, to avoid deadlocks.
 * The fork caller acquires all locks before fork and releases them
 * after, because the child will have only one thread. If a lock were
 * held by another thread at fork time then, in the child process,
 * nobody would ever release it.
 */
static void
acquire_locks(void)
{
	struct entries_list *list;
	struct hashentry *tmp;
	struct shm_data *data;
	struct semid_pool *semaptr;
	int i;

	SYSV_MUTEX_LOCK(&lock_undo);
	SYSV_MUTEX_LOCK(&lock_resources);
	//pthread_rwlock_wrlock(&rwlock_addrs);

	for (i = 0; i < get_hash_size(MAXSIZE); i++) {
		list = &shmaddrs->entries[i];
		if (LIST_EMPTY(list))
			continue;
		LIST_FOREACH(tmp, list, entry_link) {
			data = (struct shm_data *)tmp->value;
			if (data->type == SEMGET) {
				semaptr = (struct semid_pool *)data->internal;
#ifdef SYSV_RWLOCK
#ifdef SYSV_SEMS
				/*
				 * There is no need to acquire the mutexes of
				 * each semaphore in the group. It is enough
				 * to acquire the group lock in write mode.
				 */
#endif
				sysv_rwlock_wrlock(&semaptr->rwlock);
#else
				sysv_mutex_lock(&semaptr->mutex);
#endif
			}
		}
	}
}
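A hedged sketch of how such pre-fork hooks are typically wired up, assuming a matching release_locks() counterpart exists (the handler registration below is illustrative; only pthread_atfork itself is standard POSIX):

#include <pthread.h>

/* Hypothetical counterpart to acquire_locks(); not shown in the source. */
static void release_locks(void);

static void
install_fork_handlers(void)
{
	/*
	 * prepare runs in the parent before fork(); parent and child run
	 * after it returns. Both sides release the locks taken in prepare,
	 * so the single-threaded child can never inherit a lock held by a
	 * thread that does not exist on its side.
	 */
	pthread_atfork(acquire_locks, release_locks, release_locks);
}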
/**
 * Destroy the timer list.
 *
 * @return 0 on success, -1 on failure.
 */
int destroy_timer(void)
{
	struct timer *node = NULL;

	/* Restore the previous SIGALRM handler and interval timer. */
	if ((signal(SIGALRM, timer_list.old_sigfunc)) == SIG_ERR) {
		return -1;
	}
	if ((setitimer(ITIMER_REAL, &timer_list.ovalue, &timer_list.value)) < 0) {
		return -1;
	}

	while (!LIST_EMPTY(&timer_list.header)) {	/* Delete. */
		node = LIST_FIRST(&timer_list.header);
		LIST_REMOVE(node, entries);
		/* Free node */
		printf("Remove id %d\n", node->id);
		free(node->user_data);
		free(node);
	}

	memset(&timer_list, 0, sizeof(struct timer_list));
	return 0;
}
void
buf_put(struct buf *bp)
{
	splassert(IPL_BIO);

#ifdef DIAGNOSTIC
	if (bp->b_pobj != NULL)
		KASSERT(bp->b_bufsize > 0);
	if (ISSET(bp->b_flags, B_DELWRI))
		panic("buf_put: releasing dirty buffer");
	if (bp->b_freelist.tqe_next != NOLIST &&
	    bp->b_freelist.tqe_next != (void *)-1)
		panic("buf_put: still on the free list");
	if (bp->b_vnbufs.le_next != NOLIST &&
	    bp->b_vnbufs.le_next != (void *)-1)
		panic("buf_put: still on the vnode list");
	if (!LIST_EMPTY(&bp->b_dep))
		panic("buf_put: b_dep is not empty");
#endif

	LIST_REMOVE(bp, b_list);
	bcstats.numbufs--;

	if (buf_dealloc_mem(bp) != 0)
		return;
	pool_put(&bufpool, bp);
}
/*
 * This is the dispatcher called by the low-level
 * assembly language autovectored interrupt routine.
 */
void
isrdispatch_autovec(int ipl)
{
	struct isr_autovec *isr;
	isr_autovec_list_t *list;
	int rc, handled = 0;
	static int straycount, unexpected;

#ifdef DIAGNOSTIC
	if (ipl < 0 || ipl >= NISRAUTOVEC)
		panic("isrdispatch_autovec: bad ipl %d", ipl);
#endif

	intrcnt[ipl]++;
#if 0	/* XXX: already counted in machdep.c */
	uvmexp.intrs++;
#endif

	list = &isr_autovec[ipl];
	if (LIST_EMPTY(list)) {
		printf("isrdispatch_autovec: ipl %d unexpected\n", ipl);
		if (++unexpected > 10)
			panic("too many unexpected interrupts");
		return;
	}

	/* Give all the handlers a chance. */
	LIST_FOREACH(isr, list, isr_link) {
		rc = (*isr->isr_func)(isr->isr_arg);
		if (rc != 0)
			isr->isr_count.ec_count++;
		handled |= rc;
	}

	if (handled)
		straycount = 0;
	else if (++straycount > 50)
		panic("isrdispatch_autovec: too many stray interrupts");
	else
		printf("isrdispatch_autovec: stray level %d interrupt\n", ipl);
}
static void
session_free(obfsproxyssh_client_session_t *session)
{
	obfsproxyssh_client_t *client = session->client;

	assert(NULL == session->ssh_session);
	assert(NULL == session->ssh_channel);

	bdestroy(session->hostkey_rsa);
	bdestroy(session->hostkey_dss);
	bdestroy(session->user);
	bdestroy(session->privkey_pem);
	free_rsa_private_key(session->privkey);

	if (NULL != session->ssh_ev)
		bufferevent_free(session->ssh_ev);
	bufferevent_free(session->socks_ev);
	bdestroy(session->ssh_addr);
	bdestroy(session->socks_addr);

	LIST_REMOVE(session, entries);
	free(session);

	/*
	 * Assuming that we are shutting down, ensure that we break out of the
	 * event loop if this is the last session.  (Not needed?)
	 */
	if (NULL == client->listener && LIST_EMPTY(&client->sessions))
		event_base_loopbreak(client->state->base);
}
/*
=================
G_ListIP_f
=================
*/
void G_ListIP_f(edict_t *ent)
{
	byte b[4];
	ipfilter_t *ip, *next;
	char address[32], expires[32];
	time_t now, diff;

	if (LIST_EMPTY(&ipfilters)) {
		gi.cprintf(ent, PRINT_HIGH, "Filter list is empty.\n");
		return;
	}

	now = time(NULL);

	gi.cprintf(ent, PRINT_HIGH,
	           "address         expires in action added by\n"
	           "--------------- ---------- ------ ---------------\n");
	FOR_EACH_IPFILTER_SAFE(ip, next) {
		*(unsigned *)b = ip->compare;
		Q_snprintf(address, sizeof(address), "%d.%d.%d.%d",
		           b[0], b[1], b[2], b[3]);
		if (ip->duration) {
			diff = now - ip->added;
			if (diff > ip->duration) {
				remove_filter(ip);
				continue;
			}
			Com_FormatTime(expires, sizeof(expires),
			               ip->duration - diff);
		} else {
			strcpy(expires, "permanent");
		}
		gi.cprintf(ent, PRINT_HIGH, "%-15s %10s %6s %s\n",
		           address, expires,
		           ip->action == IPA_MUTE ? "mute" : "ban",
		           ip->adder);
	}
}
void t_dump (void)
{
	Topic	*tp;

	if (LIST_EMPTY (topics)) {
		printf ("No topics discovered.\r\n");
		return;
	}
	printf ("Active    #rd    #wr   #msgs   #disp   #no_w Topic\r\n");
	printf ("------    ---    ---   -----   -----   ----- -----\r\n");
	lock_take (topic_lock);
	LIST_FOREACH (topics, tp) {
		if (tp->active)
			printf ("  *   ");
		else
			printf ("      ");
		printf ("%6u %6u %7lu %7lu %7lu %s/%s\r\n",
			nendpoints (tp->writers),
			nendpoints (tp->readers),
			tp->ndata, tp->ndispose, tp->nnowriter,
			tp->topic_name, tp->type_name);
	}
	lock_release (topic_lock);
}
static void
assign_job(struct scan_peer *peer)
{
	size_t job_len;
	uint16_t net_job_len;

	peer->job = clients_started ? get_job() : NULL;

	if (peer->job == NULL) {
		LIST_INSERT_HEAD(&inactive_peers, peer, peer_link);
		if (LIST_EMPTY(&active_peers) && clients_started)
			shutdown_master();
		return;
	}

	LIST_INSERT_HEAD(&active_peers, peer, peer_link);
	peer->job->scan_output = NULL;

	job_len = strlen(peer->job->pkg_location);
	if (job_len > 0xffff)
		errx(1, "Location inside pkgsrc tree too long");
	net_job_len = htons(job_len);
	(void)memcpy(peer->tmp_buf, &net_job_len, 2);

	deferred_write(peer->fd, peer->tmp_buf, 2, peer,
	    send_job_path, kill_peer);
}
int
msdosfs_sync_vnode(struct vnode *vp, void *arg)
{
	struct msdosfs_sync_arg *msa = arg;
	int error;
	struct denode *dep;

	dep = VTODE(vp);
	if (vp->v_type == VNON ||
	    ((dep->de_flag & (DE_ACCESS | DE_CREATE | DE_UPDATE | DE_MODIFIED)) == 0 &&
	     LIST_EMPTY(&vp->v_dirtyblkhd)) ||
	    msa->waitfor == MNT_LAZY) {
		return (0);
	}

	if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT, msa->p))
		return (0);

	if ((error = VOP_FSYNC(vp, msa->cred, msa->waitfor, msa->p)) != 0)
		msa->allerror = error;
	VOP_UNLOCK(vp, 0, msa->p);
	vrele(vp);

	return (0);
}
/*
 * Destroy node
 */
static int
ng_ksocket_shutdown(node_p node)
{
	const priv_p priv = NG_NODE_PRIVATE(node);
	priv_p embryo;

	/* Close our socket (if any) */
	if (priv->so != NULL) {
		atomic_clear_int(&priv->so->so_rcv.ssb_flags, SSB_UPCALL);
		atomic_clear_int(&priv->so->so_snd.ssb_flags, SSB_UPCALL);
		priv->so->so_upcall = NULL;
		soclose(priv->so, FNONBLOCK);
		priv->so = NULL;
	}

	/* If we are an embryo, take ourselves out of the parent's list */
	if (priv->flags & KSF_EMBRYONIC) {
		LIST_REMOVE(priv, siblings);
		priv->flags &= ~KSF_EMBRYONIC;
	}

	/* Remove any embryonic children we have */
	while (!LIST_EMPTY(&priv->embryos)) {
		embryo = LIST_FIRST(&priv->embryos);
		ng_rmnode_self(embryo->node);
	}

	/* Take down netgraph node */
	bzero(priv, sizeof(*priv));
	kfree(priv, M_NETGRAPH);
	NG_NODE_SET_PRIVATE(node, NULL);
	NG_NODE_UNREF(node);		/* let the node escape */
	return (0);
}
void handle_pending_hsrs(void)
{
	extern int32_t sched_lock;
	int nr;
	list_head_t *list, *node;
	hsr_t *hsr;

	if (sched_lock > 0)
		return;

	/* Raised only to prevent HSRs from being scheduled. */
	++sched_lock;

	while (1) {
		nr = HAL_FIND_FIRST_SET(hsr_bitmap);
		if (nr < 0)
			break;

		list = hsr_array + nr;
		node = LIST_FIRST(list);
		BUG_ON(NULL == node);
		hsr = LIST_ENTRY(node, hsr_t, node);
		hsr->function(hsr->data);

		HAL_DISABLE_INTERRUPTS();
		--hsr->count;
		if (hsr->count <= 0)
			LIST_DEL(node);
		if (LIST_EMPTY(list))
			hsr_bitmap &= ~(1 << nr);
		HAL_ENABLE_INTERRUPTS();
	}

	--sched_lock;
}
static int
g_bde_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_bde_softc *sc;

	g_trace(G_T_TOPOLOGY, "g_bde_destroy_geom(%s, %s)", mp->name, gp->name);
	g_topology_assert();

	/*
	 * Orderly detachment.
	 */
	KASSERT(gp != NULL, ("NULL geom"));
	pp = LIST_FIRST(&gp->provider);
	KASSERT(pp != NULL, ("NULL provider"));
	if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)
		return (EBUSY);
	sc = gp->softc;
	cp = LIST_FIRST(&gp->consumer);
	KASSERT(cp != NULL, ("NULL consumer"));
	sc->dead = 1;
	wakeup(sc);
	g_access(cp, -1, -1, -1);
	g_detach(cp);
	g_destroy_consumer(cp);
	while (sc->dead != 2 && !LIST_EMPTY(&pp->consumers))
		tsleep(sc, PRIBIO, "g_bdedie", hz);
	mtx_destroy(&sc->worklist_mutex);
	bzero(&sc->key, sizeof sc->key);
	g_free(sc);
	g_wither_geom(gp, ENXIO);
	return (0);
}
static void
vfs_mountroot_wait(void)
{
	struct root_hold_token *h;
	struct timeval lastfail;
	int curfail;

	curfail = 0;
	while (1) {
		DROP_GIANT();
		g_waitidle();
		PICKUP_GIANT();
		mtx_lock(&mountlist_mtx);
		if (LIST_EMPTY(&root_holds)) {
			mtx_unlock(&mountlist_mtx);
			break;
		}
		if (ppsratecheck(&lastfail, &curfail, 1)) {
			printf("Root mount waiting for:");
			LIST_FOREACH(h, &root_holds, list)
				printf(" %s", h->who);
			printf("\n");
		}
		msleep(&root_holds, &mountlist_mtx, PZERO | PDROP,
		    "roothold", hz);
	}
}
static int
snp_modevent(module_t mod, int type, void *data)
{
	int i;

	lwkt_gettoken(&tty_token);
	switch (type) {
	case MOD_LOAD:
		snooplinedisc = ldisc_register(LDISC_LOAD, &snpdisc);
		make_autoclone_dev(&snp_ops, &DEVFS_CLONE_BITMAP(snp),
		    snpclone, UID_ROOT, GID_WHEEL, 0600, "snp");

		for (i = 0; i < SNP_PREALLOCATED_UNITS; i++) {
			make_dev(&snp_ops, i, UID_ROOT, GID_WHEEL, 0600,
			    "snp%d", i);
			devfs_clone_bitmap_set(&DEVFS_CLONE_BITMAP(snp), i);
		}
		break;
	case MOD_UNLOAD:
		if (!LIST_EMPTY(&snp_sclist)) {
			lwkt_reltoken(&tty_token);
			return (EBUSY);
		}
		ldisc_deregister(snooplinedisc);
		devfs_clone_handler_del("snp");
		dev_ops_remove_all(&snp_ops);
		devfs_clone_bitmap_uninit(&DEVFS_CLONE_BITMAP(snp));
		break;
	default:
		break;
	}
	lwkt_reltoken(&tty_token);
	return (0);
}
static void
bt_freetrim(vmem_t *vm, int freelimit)
{
	bt_t *t;
	LIST_HEAD(, vmem_btag) tofree;

	LIST_INIT(&tofree);

	VMEM_LOCK(vm);
	while (vm->vm_nfreetags > freelimit) {
		bt_t *bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		/* Compare against the end of the array, not its size in bytes. */
		if (bt >= static_bts &&
		    bt < static_bts + sizeof(static_bts) / sizeof(static_bts[0])) {
			mutex_enter(&vmem_btag_lock);
			LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
			vmem_btag_freelist_count++;
			mutex_exit(&vmem_btag_lock);
			VMEM_EVCNT_DECR(static_bt_inuse);
		} else {
			LIST_INSERT_HEAD(&tofree, bt, bt_freelist);
		}
	}
	VMEM_UNLOCK(vm);

	/* Free the collected tags outside of the vmem lock. */
	while (!LIST_EMPTY(&tofree)) {
		t = LIST_FIRST(&tofree);
		LIST_REMOVE(t, bt_freelist);
		pool_put(&vmem_btag_pool, t);
	}
}
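The collect-then-free shape above (unlink onto a private list under the lock, call pool_put only after unlocking) is a general pattern. A minimal userland sketch of the same idea, assuming only POSIX threads and <sys/queue.h> (all names here are illustrative):

#include <pthread.h>
#include <stdlib.h>
#include <sys/queue.h>

struct item {
	LIST_ENTRY(item) link;
};

LIST_HEAD(itemhead, item);

static struct itemhead live = LIST_HEAD_INITIALIZER(live);
static pthread_mutex_t live_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Move everything to a private list under the lock; free it afterwards. */
void
drain_items(void)
{
	struct itemhead tofree = LIST_HEAD_INITIALIZER(tofree);
	struct item *it;

	pthread_mutex_lock(&live_mtx);
	while (!LIST_EMPTY(&live)) {
		it = LIST_FIRST(&live);
		LIST_REMOVE(it, link);
		LIST_INSERT_HEAD(&tofree, it, link);
	}
	pthread_mutex_unlock(&live_mtx);

	/* free() may be slow or take its own locks; run it unlocked. */
	while (!LIST_EMPTY(&tofree)) {
		it = LIST_FIRST(&tofree);
		LIST_REMOVE(it, link);
		free(it);
	}
}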
static int
faithmodevent(module_t mod, int type, void *data)
{
	switch (type) {
	case MOD_LOAD:
		LIST_INIT(&faith_softc_list);
		if_clone_attach(&faith_cloner);

#ifdef INET6
		faithprefix_p = faithprefix;
#endif
		break;
	case MOD_UNLOAD:
#ifdef INET6
		faithprefix_p = NULL;
#endif
		if_clone_detach(&faith_cloner);

		while (!LIST_EMPTY(&faith_softc_list))
			faith_clone_destroy(
			    &LIST_FIRST(&faith_softc_list)->sc_if);
		break;
	}
	return 0;
}
struct pefs_dircache_entry *
pefs_dircache_lookup(struct pefs_dircache *pd, char const *name,
    size_t name_len)
{
	struct pefs_dircache_entry *pde;
	struct pefs_dircache_listhead *head;
	uint32_t h;

	MPASS(pd != NULL);
	MPASS((pd->pd_flags & PD_UPDATING) == 0);
	MPASS(LIST_EMPTY(DIRCACHE_STALEHEAD(pd)));

	h = dircache_hashname(pd, name, name_len);
	head = &dircache_tbl[h & pefs_dircache_hashmask];
	mtx_lock(&dircache_mtx);
	LIST_FOREACH(pde, head, pde_hash_entry) {
		if (pde->pde_namehash == h &&
		    pde->pde_dircache == pd &&
		    pde->pde_gen == pd->pd_gen &&
		    pde->pde_namelen == name_len &&
		    memcmp(pde->pde_name, name, name_len) == 0) {
			mtx_unlock(&dircache_mtx);
			PEFSDEBUG("pefs_dircache_lookup: found %s -> %s\n",
			    pde->pde_name, pde->pde_encname);
			return (pde);
		}
	}
	mtx_unlock(&dircache_mtx);
	PEFSDEBUG("pefs_dircache_lookup: not found %s\n", name);
	return (NULL);
}
void
kore_parse_config(void)
{
	FILE		*fp;
	int		i, lineno;
	char		buf[BUFSIZ], *p, *t, *argv[5];

	if (config_file == NULL)
		fatal("specify a configuration file with -c");

	if ((fp = fopen(config_file, "r")) == NULL)
		fatal("configuration given cannot be opened: %s", config_file);

	lineno = 1;
	while (fgets(buf, sizeof(buf), fp) != NULL) {
		p = buf;
		buf[strcspn(buf, "\n")] = '\0';

		while (isspace(*p))
			p++;
		if (p[0] == '#' || p[0] == '\0') {
			lineno++;
			continue;
		}

		/* Normalize tabs to spaces before splitting. */
		for (t = p; *t != '\0'; t++) {
			if (*t == '\t')
				*t = ' ';
		}

		if (!strcmp(p, "}") && current_domain != NULL)
			domain_sslstart();

		kore_split_string(p, " ", argv, 5);
		for (i = 0; config_names[i].name != NULL; i++) {
			if (!strcmp(config_names[i].name, argv[0])) {
				if (!config_names[i].configure(argv)) {
					fatal("configuration error on line %d",
					    lineno);
				}
				break;
			}
		}

		lineno++;
	}

	fclose(fp);

	if (!kore_module_loaded())
		fatal("no site module was loaded");

	if (LIST_EMPTY(&listeners))
		fatal("no listeners defined");

	if (chroot_path == NULL)
		fatal("missing a chroot path");

	if (runas_user == NULL)
		fatal("missing a username to run as");

	if ((pw = getpwnam(runas_user)) == NULL)
		fatal("user '%s' does not exist", runas_user);
}
void
eco_stop(struct ifnet *ifp, int disable)
{
	struct ecocom *ec = (struct ecocom *)ifp;

	while (!LIST_EMPTY(&ec->ec_retries))
		eco_retry_free(LIST_FIRST(&ec->ec_retries));
}
void eth_fini(void)
{
	assert(LIST_EMPTY(&eth_subprotos));
	mux_proto_dtor(&mux_proto_eth);
	mutex_dtor(&eth_subprotos_mutex);
	ext_param_collapse_vlans_fini();
	log_category_proto_eth_fini();
}
void variants_free(void)
{
	struct variant *variant;

	while (!LIST_EMPTY(&variant_list)) {
		variant = LIST_FIRST(&variant_list);
		tupid_tree_rm(&variant_dt_root, &variant->dtnode);
		tupid_tree_rm(&variant_root, &variant->tnode);
		LIST_REMOVE(variant, list);
		vardb_close(&variant->vdb);
		free(variant);
	}
	while (!LIST_EMPTY(&disabled_list)) {
		variant = LIST_FIRST(&disabled_list);
		LIST_REMOVE(variant, list);
		free(variant);
	}
}
static void
dircache_expire(struct pefs_dircache *pd)
{
	struct pefs_dircache_entry *pde;

	pd->pd_gen = 0;
	if (LIST_EMPTY(DIRCACHE_STALEHEAD(pd))) {
		/* Stale list is empty: just swap the roles of the heads. */
		pd->pd_flags ^= PD_SWAPEDHEADS;
	} else {
		/* Otherwise move every active entry onto the stale list. */
		while (!LIST_EMPTY(DIRCACHE_ACTIVEHEAD(pd))) {
			pde = LIST_FIRST(DIRCACHE_ACTIVEHEAD(pd));
			pde->pde_gen = 0;
			LIST_REMOVE(pde, pde_dir_entry);
			LIST_INSERT_HEAD(DIRCACHE_STALEHEAD(pd), pde,
			    pde_dir_entry);
			PEFSDEBUG("dircache_expire: active entry: %p\n", pde);
		}
	}
	MPASS(LIST_EMPTY(DIRCACHE_ACTIVEHEAD(pd)));
}