/*
 * Ioctl handler for an MBR-sliced provider.
 *
 * Handles only DIOCSMBR, which installs a new 512-byte MBR: the in-core
 * slice table is updated via g_mbr_modify() and, on success, the raw
 * sector is written to the underlying provider.  All other commands
 * return ENOIOCTL so that other handlers in the chain may claim them.
 */
static int g_mbr_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag, struct thread *td) {
	struct g_geom *gp;
	struct g_mbr_softc *ms;
	struct g_slicer *gsp;
	struct g_consumer *cp;
	int error, opened;

	gp = pp->geom;
	gsp = gp->softc;
	ms = gsp->softc;
	opened = 0;
	error = 0;
	switch(cmd) {
	case DIOCSMBR: {
		/* Rewriting the MBR requires the caller to hold write access. */
		if (!(fflag & FWRITE))
			return (EPERM);
		DROP_GIANT();
		g_topology_lock();
		/* The slicer geom has a single consumer down to the disk. */
		cp = LIST_FIRST(&gp->consumer);
		if (cp->acw == 0) {
			/* Temporarily open for writing; closed again below. */
			error = g_access(cp, 0, 1, 0);
			if (error == 0)
				opened = 1;
		}
		/* Validate the new table and update in-core state first. */
		if (!error)
			error = g_mbr_modify(gp, ms, data, 512);
		if (!error)
			error = g_write_data(cp, 0, data, 512);
		if (opened)
			g_access(cp, 0, -1 , 0);
		g_topology_unlock();
		PICKUP_GIANT();
		return(error);
	}
	default:
		return (ENOIOCTL);
	}
}
/*
 * Write the in-core BSD disklabel back to the media.
 *
 * If bootcode is NULL, only the sector containing the label is
 * read-modified-written.  Otherwise the label is patched into the
 * caller-supplied boot image and the whole BBSIZE boot area is
 * written starting at offset 0.  Returns 0 or an errno value.
 */
static int g_bsd_writelabel(struct g_geom *gp, u_char *bootcode) {
	off_t secoff;
	u_int secsize;
	struct g_consumer *cp;
	struct g_slicer *gsp;
	struct g_bsd_softc *ms;
	u_char *buf;
	uint64_t sum;
	int error, i;

	gsp = gp->softc;
	ms = gsp->softc;
	cp = LIST_FIRST(&gp->consumer);
	/* Get sector size, we need it to read data. */
	secsize = cp->provider->sectorsize;
	/* Offset of the label within its containing sector. */
	secoff = ms->labeloffset % secsize;
	if (bootcode == NULL) {
		/* Fetch the sector holding the label so we can patch it. */
		buf = g_read_data(cp, ms->labeloffset - secoff, secsize, &error);
		if (buf == NULL)
			return (error);
		bcopy(ms->label, buf + secoff, sizeof(ms->label));
	} else {
		/* Patch the label directly into the boot image. */
		buf = bootcode;
		bcopy(ms->label, buf + ms->labeloffset, sizeof(ms->label));
	}
	if (ms->labeloffset == ALPHA_LABEL_OFFSET) {
		/*
		 * Alpha SRM boot blocks carry a 64-bit checksum of the
		 * first 63 quadwords, stored at byte offset 504.
		 */
		sum = 0;
		for (i = 0; i < 63; i++)
			sum += le64dec(buf + i * 8);
		le64enc(buf + 504, sum);
	}
	if (bootcode == NULL) {
		/* Write back just the patched sector; buf was g_read_data()'d. */
		error = g_write_data(cp, ms->labeloffset - secoff, buf, secsize);
		g_free(buf);
	} else {
		error = g_write_data(cp, 0, bootcode, BBSIZE);
	}
	return(error);
}
/*
 * exec(2) probe hook for SVR4 ELF32 images.
 *
 * If the image names an interpreter (itp non-NULL), resolve it inside
 * the emulation tree; a lookup failure rejects the image.  When
 * SVR4_INTERP_ADDR is defined, the interpreter load address is pinned
 * to that value via *pos.
 */
int svr4_elf32_probe(
    struct lwp *l,
    struct exec_package *epp,
    void *eh,
    char *itp,
    vaddr_t *pos
)
{
	struct proc *p = l->l_proc;
	int error;

	if (itp) {
		/*
		 * NOTE(review): this passes the process's first lwp rather
		 * than `l` itself — presumably equivalent for a
		 * single-threaded exec; confirm against the
		 * emul_find_interp() contract.
		 */
		if ((error = emul_find_interp(LIST_FIRST(&p->p_lwps), epp, itp)))
			return error;
	}
#ifdef SVR4_INTERP_ADDR
	*pos = SVR4_INTERP_ADDR;
#endif
	return 0;
}
/*
 * (if predicate then-expr else-expr) special form.
 *
 * Evaluates the predicate; returns the evaluation of the second
 * argument when it is true, of the third otherwise.  A malformed call
 * (fewer than three arguments) prints an error and yields nil.
 */
struct atom *builtin_if(struct atom *expr, struct env *env)
{
	struct list *list = expr->list;
	struct atom *op = LIST_FIRST(list);
	/*
	 * BUGFIX: walk the argument chain defensively.  The original
	 * applied CDR() to cells before checking them for NULL, so a
	 * short argument list could dereference a null pointer before
	 * the arity check below ever ran.
	 */
	struct atom *predicate = op ? CDR(op) : NULL;
	struct atom *true_case = predicate ? CDR(predicate) : NULL;
	struct atom *false_case = true_case ? CDR(true_case) : NULL;

	if (!predicate || !true_case || !false_case) {
		printf("error: if takes 3 arguments\n");
		return &nil_atom;
	}

	predicate = eval(predicate, env);
	if (IS_TRUE(predicate))
		return eval(true_case, env);
	return eval(false_case, env);
}
/* Set media options */ Static int url_ifmedia_change(struct ifnet *ifp) { struct url_softc *sc = ifp->if_softc; struct mii_data *mii = GET_MII(sc); DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return (0); sc->sc_link = 0; if (mii->mii_instance) { struct mii_softc *miisc; for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; miisc = LIST_NEXT(miisc, mii_list)) mii_phy_reset(miisc); } return (mii_mediachg(mii)); }
/*
 * Arm (or re-arm) monotonic timer `mti` to invoke callback(opaque)
 * once the monotonic clock reaches `when`.
 *
 * Caller must hold the global lock.  An already-armed timer
 * (non-NULL callback) is unlinked first so it is never queued twice.
 * If the timer lands at the head of the sorted queue, the dispatch
 * thread is woken so it can recompute its sleep deadline.
 */
void
GTIMER_FCN(mtimer_arm_abs)
  (GTIMER_TRACEID_ mtimer_t *mti, mti_callback_t *callback, void *opaque, int64_t when)
{
	lock_assert(&global_lock);

	if (mti->mti_callback != NULL)
		LIST_REMOVE(mti, mti_link);

	mti->mti_callback = callback;
	mti->mti_opaque = opaque;
	mti->mti_expire = when;
#if ENABLE_GTIMER_CHECK
	/* `id` is supplied by the GTIMER_TRACEID_ parameter macro —
	 * confirm against its definition. */
	mti->mti_id = id;
#endif

	LIST_INSERT_SORTED(&mtimers, mti, mti_link, mtimercmp);

	if (LIST_FIRST(&mtimers) == mti)
		tvh_cond_signal(&mtimer_cond, 0); // force timer re-check
}
/*
 * Property-page destruction callback: when the backing model is
 * destroyed, detach and close every open page, then free the
 * proppage itself.
 */
static void pp_cb(void *opaque, prop_event_t event, ...)
{
	proppage_t *pp = opaque;
	openpage_t *op;

	if(event != PROP_DESTROYED)
		return;

	/* Detach and close each page still attached to this model. */
	for(;;) {
		op = LIST_FIRST(&pp->pp_pages);
		if(op == NULL)
			break;
		LIST_REMOVE(op, op_link);
		op->op_pp = NULL;
		prop_set_int(prop_create(op->op_root, "close"), 1);
	}

	LIST_REMOVE(pp, pp_link);
	prop_ref_dec(pp->pp_model);
	prop_unsubscribe(pp->pp_model_sub);
	rstr_release(pp->pp_url);
	free(pp);
}
/* * Media changed; notify all PHYs. */ int mii_mediachg(struct mii_data *mii) { struct mii_softc *child; int rv; mii->mii_media_status = 0; mii->mii_media_active = IFM_NONE; for (child = LIST_FIRST(&mii->mii_phys); child != NULL; child = LIST_NEXT(child, mii_list)) { rv = (*child->mii_service)(child, mii, MII_MEDIACHG); if (rv) { return (rv); } else { /* Reset autonegotiation timer. */ child->mii_ticks = 0; } } return (0); }
/*
 * Arm (or re-arm) wall-clock timer `gti` to invoke callback(opaque)
 * at absolute time `when`.
 *
 * Caller must hold the global lock.  An already-armed timer
 * (non-NULL callback) is unlinked first so it is never queued twice.
 * If the timer becomes the new queue head, the dispatch thread is
 * signalled so it can recompute its sleep deadline.
 */
void
GTIMER_FCN(gtimer_arm_absn)
  (GTIMER_TRACEID_ gtimer_t *gti, gti_callback_t *callback, void *opaque, time_t when)
{
	lock_assert(&global_lock);

	if (gti->gti_callback != NULL)
		LIST_REMOVE(gti, gti_link);

	gti->gti_callback = callback;
	gti->gti_opaque = opaque;
	gti->gti_expire = when;
#if ENABLE_GTIMER_CHECK
	/* `id` is supplied by the GTIMER_TRACEID_ parameter macro. */
	gti->gti_id = id;
#endif

	/*
	 * BUGFIX: "&gtimers" and "&gtimer_cond" had been mangled into
	 * ">imers" / ">imer_cond" (an HTML "&gt;" entity artifact),
	 * which cannot compile.  Restored per the mtimer_arm_abs twin.
	 */
	LIST_INSERT_SORTED(&gtimers, gti, gti_link, gtimercmp);

	if (LIST_FIRST(&gtimers) == gti)
		pthread_cond_signal(&gtimer_cond); // force timer re-check
}
struct atom *builtin_eq(struct atom *expr, struct env *env) { struct list *list = expr->list; struct atom *op = LIST_FIRST(list); struct atom *a = CDR(op); struct atom *b = CDR(a); if (!a || !b) { printf("error: eq takes 2 arguments\n"); return &nil_atom; } a = eval(a, env); b = eval(b, env); if (atom_cmp(a, b)) return &true_atom; return &false_atom; }
/*
 * Pop one page off the physical-memory free list and return its
 * physical address through *address.  Fails with PHYSMEM_ERR_OOM when
 * no pages remain.
 */
physmem_error_t common_physmem_page_alloc(struct physmem *_phys, uint8 node VAR_UNUSED, physaddr_t *address)
{
	struct physmem_page *page;

	if (_phys->free_pages <= 0)
		return PHYSMEM_ERR_OOM;

	/* Accounting says pages remain, so the list must be non-empty. */
	assert(!LIST_EMPTY(&_phys->freelist));
	page = LIST_FIRST(&_phys->freelist);
	LIST_REMOVE(page, pages);
	_phys->free_pages--;

	*address = physmem_page_to_phys(_phys, page);
	return PHYSMEM_SUCCESS;
}
/*
 * Clear recovery/blocked state on the adapter after the condition that
 * set it has passed:
 *
 *  - drop the BUSDMA block flags once a blocked mapping has cleared;
 *  - leave resource-shortage state once enough queue openings exist;
 *  - on leaving timeout recovery, re-arm the timeout on every pending
 *    CCB that was suspended while recovery ran;
 *  - when no state flags remain, ask CAM to release the frozen SIM queue.
 */
static void
adv_clear_state_really(struct adv_softc *adv, union ccb* ccb)
{
	if ((adv->state & ADV_BUSDMA_BLOCK_CLEARED) != 0)
		adv->state &= ~(ADV_BUSDMA_BLOCK_CLEARED|ADV_BUSDMA_BLOCK);
	if ((adv->state & ADV_RESOURCE_SHORTAGE) != 0) {
		int openings;

		/* Openings currently spare beyond the reserved minimum. */
		openings = adv->max_openings - adv->cur_active - ADV_MIN_FREE_Q;
		if (openings >= adv->openings_needed) {
			adv->state &= ~ADV_RESOURCE_SHORTAGE;
			adv->openings_needed = 0;
		}
	}
	if ((adv->state & ADV_IN_TIMEOUT) != 0) {
		struct adv_ccb_info *cinfo;

		cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
		/* Only the recovery CCB itself may end timeout state. */
		if ((cinfo->state & ACCB_RECOVERY_CCB) != 0) {
			struct ccb_hdr *ccb_h;

			/*
			 * We now traverse our list of pending CCBs
			 * and reinstate their timeouts.
			 */
			ccb_h = LIST_FIRST(&adv->pending_ccbs);
			while (ccb_h != NULL) {
				ccb_h->timeout_ch =
				    timeout(adv_timeout, (caddr_t)ccb_h,
					    (ccb_h->timeout * hz) / 1000);
				ccb_h = LIST_NEXT(ccb_h, sim_links.le);
			}
			adv->state &= ~ADV_IN_TIMEOUT;
			printf("%s: No longer in timeout\n", adv_name(adv));
		}
	}
	if (adv->state == 0)
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
}
/*
 * Insert a dev_data into the provided list, sorted by select code.
 */
static void
dev_data_insert(struct dev_data *dd, ddlist_t *ddlist)
{
	struct dev_data *de, *dprev;

#ifdef DIAGNOSTIC
	if (dd->dd_scode < 0 || dd->dd_scode > 255) {
		printf("bogus select code for %s\n", dd->dd_dev->dv_xname);
		panic("dev_data_insert");
	}
#endif

	/*
	 * Find the first entry whose select code is greater than ours
	 * and insert ourselves in front of it.
	 *
	 * BUGFIX: the old loop terminated when the NEXT pointer was
	 * NULL, so the final entry was never compared; a new entry
	 * with a select code smaller than the last entry's was
	 * appended out of order.  Every entry is compared now.
	 */
	dprev = NULL;
	LIST_FOREACH(de, ddlist, dd_clist) {
		if (de->dd_scode > dd->dd_scode) {
			LIST_INSERT_BEFORE(de, dd, dd_clist);
			return;
		}
		dprev = de;
	}

	/*
	 * Empty list, or our select code is greater than everyone
	 * else's: insert at the head, or after the final entry.
	 */
	if (dprev == NULL)
		LIST_INSERT_HEAD(ddlist, dd, dd_clist);
	else
		LIST_INSERT_AFTER(dprev, dd, dd_clist);
}
void ax88190_media_init(struct dp8390_softc *sc) { struct ifnet *ifp = &sc->sc_arpcom.ac_if; sc->sc_mii.mii_ifp = ifp; sc->sc_mii.mii_readreg = ax88190_mii_readreg; sc->sc_mii.mii_writereg = ax88190_mii_writereg; sc->sc_mii.mii_statchg = ax88190_mii_statchg; ifmedia_init(&sc->sc_mii.mii_media, 0, dp8390_mediachange, dp8390_mediastatus); mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0); if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); } else ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); }
/*
 * Record a file access made during the parsing stage.
 *
 * Returns 0 on success (including the "nothing to record" case) and
 * -1 on internal error.
 */
int open_notify(enum access_type at, const char *pathname)
{
	/* For the parser: manually keep track of file accesses, since we
	 * don't run the UDP server for the parsing stage in win32. */
	if(!LIST_EMPTY(&finfo_list_head)) {
		struct finfo_list *flist;
		struct stat buf;
		char fullpath[PATH_MAX];
		int cwdlen;
		int pathlen;

		if(getcwd(fullpath, sizeof(fullpath)) != fullpath) {
			perror("getcwd");
			return -1;
		}
		cwdlen = strlen(fullpath);
		pathlen = strlen(pathname);
		/* +2: one byte for PATH_SEP, one for the trailing NUL. */
		if(cwdlen + pathlen + 2 >= (signed)sizeof(fullpath)) {
			fprintf(stderr, "tup internal error: max pathname exceeded.\n");
			return -1;
		}
		/* Build "<cwd><PATH_SEP><pathname>".
		 * NOTE(review): assumes `pathname` is relative — an
		 * absolute path would still get the cwd prepended here;
		 * confirm against the callers. */
		fullpath[cwdlen] = PATH_SEP;
		memcpy(fullpath + cwdlen + 1, pathname, pathlen);
		fullpath[cwdlen + pathlen + 1] = 0;
		/* If the stat fails, or if the stat works and we know it
		 * is a directory, don't actually add the dependency. We
		 * want failed stats for ghost nodes, and all successful
		 * file accesses. */
		if(stat(pathname, &buf) < 0 || !S_ISDIR(buf.st_mode)) {
			/* Record against the most recent (head) finfo. */
			flist = LIST_FIRST(&finfo_list_head);
			if(handle_open_file(at, fullpath, flist->finfo) < 0)
				return -1;
		}
	}
	return 0;
}
/*
 * Module event handler for the ALQ subsystem.  Unloading is only
 * permitted while no queues are open: MOD_QUIESCE performs the actual
 * teardown, and MOD_UNLOAD merely re-checks that quiesce succeeded.
 */
static int alq_load_handler(module_t mod, int what, void *arg)
{
	int error = 0;

	switch (what) {
	case MOD_LOAD:
	case MOD_SHUTDOWN:
		break;

	case MOD_QUIESCE:
		ALD_LOCK();
		/* Only allow unload if there are no open queues. */
		if (!LIST_EMPTY(&ald_queues)) {
			ALD_UNLOCK();
			error = EBUSY;
			break;
		}
		ald_shutingdown = 1;
		ALD_UNLOCK();
		ald_shutdown(NULL, 0);
		mtx_destroy(&ald_mtx);
		break;

	case MOD_UNLOAD:
		/* If MOD_QUIESCE failed we must fail here too. */
		if (ald_shutingdown == 0)
			error = EBUSY;
		break;

	default:
		error = EINVAL;
		break;
	}

	return (error);
}
/*
 * (lambda (params) body) — build a closure capturing the current
 * environment.  Exactly two arguments are required and the first must
 * be a (possibly empty) list; otherwise an error is printed and nil
 * is returned.
 */
struct atom *builtin_lambda(struct atom *expr, struct env *env)
{
	struct list *list = expr->list;
	struct atom *op = LIST_FIRST(list);
	/*
	 * BUGFIX: walk the argument chain defensively.  The original
	 * applied CDR() to cells before null-checking them, so a short
	 * argument list could dereference a null pointer before the
	 * arity check below ever ran.  (CDR(body) is safe here: the
	 * short-circuit guarantees body is non-NULL when evaluated.)
	 */
	struct atom *params = op ? CDR(op) : NULL;
	struct atom *body = params ? CDR(params) : NULL;

	if (!params || !body || CDR(body)) {
		printf("error: lambda takes exactly 2 arguments\n");
		return &nil_atom;
	}
	if (!IS_LIST(params) && !IS_NIL(params)) {
		printf("error: first arg to lambda must be a list\n");
		return &nil_atom;
	}
	return atom_new_closure(params, body, env);
}
/*
 * test_list - Do some basic list manipulations and output to log for
 * script comparison. Only testing the macros we use.
 */
static void
test_list(void)
{
	PTEST_LIST_NODE pNode = NULL;
	struct TestList head = LIST_HEAD_INITIALIZER(head);
	LIST_INIT(&head);
	UT_ASSERT_rt(LIST_EMPTY(&head));

	/* Insert a single node and verify the count. */
	pNode = MALLOC(sizeof(struct TEST_LIST_NODE));
	pNode->dummy = 0;
	LIST_INSERT_HEAD(&head, pNode, ListEntry);
	UT_ASSERTeq_rt(1, get_list_count(&head));
	dump_list(&head);

	/* Remove one node */
	LIST_REMOVE(pNode, ListEntry);
	UT_ASSERTeq_rt(0, get_list_count(&head));
	dump_list(&head);
	/* NOTE(review): nodes come from MALLOC() but are released with
	 * plain free() — confirm MALLOC is a thin malloc wrapper. */
	free(pNode);

	/* Add a bunch of nodes */
	for (int i = 1; i < 10; i++) {
		pNode = MALLOC(sizeof(struct TEST_LIST_NODE));
		pNode->dummy = i;
		LIST_INSERT_HEAD(&head, pNode, ListEntry);
	}
	UT_ASSERTeq_rt(9, get_list_count(&head));
	dump_list(&head);

	/* Remove all of them */
	while (!LIST_EMPTY(&head)) {
		pNode = (PTEST_LIST_NODE)LIST_FIRST(&head);
		LIST_REMOVE(pNode, ListEntry);
		free(pNode);
	}
	UT_ASSERTeq_rt(0, get_list_count(&head));
	dump_list(&head);
}
/*
 * RECURSIVE FUNCTION. We need to clear/free any number of levels of nested
 * layers.
 */
static void
__dbclear_child(ct_entry *parent)
{
	ct_entry *ctp, *nextctp;

	/* Scan the whole global table for entries whose parent is us;
	 * each matching child has its own children cleared first. */
	for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL; ctp = nextctp) {
		nextctp = LIST_NEXT(ctp, entries);
		if (ctp->ct_type == 0)	/* unused slot, skip */
			continue;
		if (ctp->ct_parent == parent) {
			__dbclear_child(ctp);
			/*
			 * Need to do this here because le_next may
			 * have changed with the recursive call and we
			 * don't want to point to a removed entry.
			 */
			nextctp = LIST_NEXT(ctp, entries);
			__dbclear_ctp(ctp);
		}
	}
}
/**
 * Free the list of known pgmounts and re-build the list from
 * /proc/mounts.
 *
 * @param None.
 * @return None.
 */
static void pgmounts_read(void)
{
	FILE *file;
	char line[512];
	int i;
	char *prefix;

	/* First free old list of mounted file systems. */
	while (!LIST_EMPTY(&pgmounts)) {
		struct pgmount *mnt = LIST_FIRST(&pgmounts);

		LIST_REMOVE(mnt, next);
		free(mnt->prefix);	/* free(NULL) is a no-op; guard removed */
		if (mnt->dirfd >= 0)
			close(mnt->dirfd);
		free(mnt);
	}

	file = fopen("/proc/mounts", "r");
	if (!file)
		return;
	while (fgets(line, sizeof(line), file)) {
		/*
		 * BUGFIX: the <ctype.h> classifiers require an unsigned
		 * char (or EOF) argument; passing a plain, possibly
		 * negative char is undefined behavior (CERT STR37-C).
		 * All isspace() calls now cast explicitly.
		 */
		/* Skip the device field (first whitespace-separated token). */
		i = 0;
		while (line[i] && !isspace((unsigned char)line[i]))
			i++;
		if (!isspace((unsigned char)line[i]))
			continue;	/* truncated line: no mount-point field */
		i++;
		prefix = line + i;
		if (*prefix != '/')
			continue;	/* not an absolute path (e.g. "none") */
		/* NUL-terminate the mount-point field. */
		while (line[i] && !isspace((unsigned char)line[i]))
			i++;
		line[i] = 0;
		pgmounts_readone(prefix);
	}
	fclose(file);
}
/*
 * Initialize the media list for an MII bus: let the parent add any
 * media of its own, then select Ethernet autoselect if present,
 * falling back to the last media entry otherwise.
 */
static void miibus_mediainit(device_t dev)
{
	struct mii_data *mii;
	struct ifmedia_entry *entry;
	int media;

	/* Poke the parent in case it has any media of its own to add. */
	MIIBUS_MEDIAINIT(device_get_parent(dev));

	media = 0;
	mii = device_get_softc(dev);
	LIST_FOREACH(entry, &mii->mii_media.ifm_list, ifm_list) {
		media = entry->ifm_media;
		if (media == (IFM_ETHER|IFM_AUTO))
			break;
	}

	ifmedia_set(&mii->mii_media, media);
}
int page_alloc(struct Page **pp) { // Fill this function in struct Page *ppage_temp; ppage_temp = LIST_FIRST(&page_free_list); //printf("%x\n",ppage_temp); //printf("pages__%x\n",ppage_temp); if (ppage_temp != NULL) { *pp = ppage_temp; LIST_REMOVE(ppage_temp, pp_link); page_initpp(*pp); bzero((void *)KADDR(page2pa(ppage_temp)), BY2PG); return 0; } return -E_NO_MEM; }
/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., says on p.247:
 * "This routine wakes up the blocked process, frees the buffer
 * for an asynchronous write, or, for a request by the pagedaemon
 * process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff to, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 *
 * Must be called at splbio().
 */
void
biodone(struct buf *bp)
{
	splassert(IPL_BIO);

	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */

	if (bp->b_bq)
		bufq_done(bp->b_bq, bp);

	/* Give the soft-dependency code a chance to finish bookkeeping. */
	if (LIST_FIRST(&bp->b_dep) != NULL)
		buf_complete(bp);

	if (!ISSET(bp->b_flags, B_READ)) {
		/* Completed write: wake anyone throttled on this vnode. */
		CLR(bp->b_flags, B_WRITEINPROG);
		vwakeup(bp->b_vp);
	}

	/* Maintain pending-I/O stats for buffer-cache I/O only
	 * (raw and physio transfers are excluded). */
	if (bcstats.numbufs &&
	    (!(ISSET(bp->b_flags, B_RAW) || ISSET(bp->b_flags, B_PHYS)))) {
		if (!ISSET(bp->b_flags, B_READ))
			bcstats.pendingwrites--;
		else
			bcstats.pendingreads--;
	}

	if (ISSET(bp->b_flags, B_CALL)) {	/* if necessary, call out */
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		(*bp->b_iodone)(bp);
	} else {
		if (ISSET(bp->b_flags, B_ASYNC)) {/* if async, release it */
			brelse(bp);
		} else {			/* or just wakeup the buffer */
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
		}
	}
}
/**
 * Parse a <programme> tag from xmltv
 *
 * Resolves the channel, extracts start/stop times and an optional icon,
 * and forwards the tag set to _xmltv_parse_programme_tags() for every
 * mapped channel.  Returns non-zero if anything was saved.
 */
static int _xmltv_parse_programme
  (epggrab_module_t *mod, htsmsg_t *body, epggrab_stats_t *stats)
{
	int chsave = 0, save = 0;
	htsmsg_t *attribs, *tags, *subtag;
	const char *s, *chid, *icon = NULL;
	time_t start, stop;
	epggrab_channel_t *ec;
	idnode_list_mapping_t *ilm;

	/* Required structure: attribute map, child tags, a channel id
	 * and a resolvable channel — bail out early otherwise. */
	if(body == NULL) return 0;
	if((attribs = htsmsg_get_map(body, "attrib")) == NULL) return 0;
	if((tags = htsmsg_get_map(body, "tags")) == NULL) return 0;
	if((chid = htsmsg_get_str(attribs, "channel")) == NULL) return 0;
	if((ec = _xmltv_channel_find(chid, 1, &chsave)) == NULL) return 0;
	if (chsave) {
		/* Channel was created by the lookup above; account for it. */
		epggrab_channel_updated(ec);
		stats->channels.created++;
		stats->channels.modified++;
	}
	/* Nothing to do for channels not mapped to any service. */
	if (!LIST_FIRST(&ec->channels)) return 0;
	if((s = htsmsg_get_str(attribs, "start")) == NULL) return 0;
	start = _xmltv_str2time(s);
	if((s = htsmsg_get_str(attribs, "stop")) == NULL) return 0;
	stop = _xmltv_str2time(s);

	/* Optional <icon src="..."> child tag. */
	if((subtag = htsmsg_get_map(tags, "icon")) != NULL &&
	   (attribs = htsmsg_get_map(subtag, "attrib")) != NULL)
		icon = htsmsg_get_str(attribs, "src");

	/* Skip empty/negative-length events and events already over. */
	if(stop <= start || stop <= dispatch_clock) return 0;

	LIST_FOREACH(ilm, &ec->channels, ilm_in2_link)
		save |= _xmltv_parse_programme_tags(mod, (channel_t *)ilm->ilm_in2,
		                                    tags, start, stop, icon, stats);
	return save;
}
/*
 * Post-lookup name-cache maintenance: fill in a vnode's missing
 * identity (name and/or parent) and, when it is safe to do so,
 * re-enter the vnode into the name cache.
 */
void
lookup_consider_update_cache(vnode_t dvp, vnode_t vp, struct componentname *cnp, int nc_generation)
{
	int isdot_or_dotdot;

	isdot_or_dotdot = (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') || (cnp->cn_flags & ISDOTDOT);

	if (vp->v_name == NULL || vp->v_parent == NULLVP) {
		int update_flags = 0;

		/* "." and ".." lookups must not rewrite vnode identity. */
		if (isdot_or_dotdot == 0) {
			if (vp->v_name == NULL)
				update_flags |= VNODE_UPDATE_NAME;
			if (dvp != NULLVP && vp->v_parent == NULLVP)
				update_flags |= VNODE_UPDATE_PARENT;

			if (update_flags)
				vnode_update_identity(vp, dvp, cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, update_flags);
		}
	}
	/* Cacheable vnode with no name-cache links: consider re-entry. */
	if ( (cnp->cn_flags & MAKEENTRY) && (vp->v_flag & VNCACHEABLE) && LIST_FIRST(&vp->v_nclinks) == NULL) {
		/*
		 * missing from name cache, but should
		 * be in it... this can happen if volfs
		 * causes the vnode to be created or the
		 * name cache entry got recycled but the
		 * vnode didn't...
		 * check to make sure that ni_dvp is valid
		 * cache_lookup_path may return a NULL
		 * do a quick check to see if the generation of the
		 * directory matches our snapshot... this will get
		 * rechecked behind the name cache lock, but if it
		 * already fails to match, no need to go any further
		 */
		if (dvp != NULLVP && (nc_generation == dvp->v_nc_generation) && (!isdot_or_dotdot))
			cache_enter_with_gen(dvp, vp, cnp, nc_generation);
	}
}
/*
 * The function is called to read encrypted data.
 *
 * g_eli_start -> G_ELI_CRYPTO_READ -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
 */
void
g_eli_crypto_read(struct g_eli_softc *sc, struct bio *bp, boolean_t fromworker)
{
	struct g_consumer *cp;
	struct bio *cbp;

	if (!fromworker) {
		/*
		 * We are not called from the worker thread, so check if
		 * device is suspended.
		 */
		mtx_lock(&sc->sc_queue_mtx);
		if (sc->sc_flags & G_ELI_FLAG_SUSPEND) {
			/*
			 * If device is suspended, we place the request onto
			 * the queue, so it can be handled after resume.
			 */
			G_ELI_DEBUG(0, "device suspended, move onto queue");
			bioq_insert_tail(&sc->sc_queue, bp);
			mtx_unlock(&sc->sc_queue_mtx);
			wakeup(sc);
			return;
		}
		/* Device is live: count this request as in flight. */
		atomic_add_int(&sc->sc_inflight, 1);
		mtx_unlock(&sc->sc_queue_mtx);
	}
	bp->bio_pflags = 0;
	bp->bio_driver2 = NULL;
	/* bio_driver1 carries the cloned bio prepared by the caller. */
	cbp = bp->bio_driver1;
	cbp->bio_done = g_eli_read_done;
	cp = LIST_FIRST(&sc->sc_geom->consumer);
	cbp->bio_to = cp->provider;
	G_ELI_LOGREQ(2, cbp, "Sending request.");
	/*
	 * Read encrypted data from provider.
	 */
	g_io_request(cbp, cp);
}
/*
 * Destroy node
 */
static int
ng_ksocket_shutdown(node_p node)
{
	const priv_p priv = NG_NODE_PRIVATE(node);
	priv_p embryo;

	/* Close our socket (if any) */
	if (priv->so != NULL) {
		/* Disable receive/send upcalls before closing the socket. */
		SOCKBUF_LOCK(&priv->so->so_rcv);
		priv->so->so_rcv.sb_flags &= ~SB_UPCALL;
		SOCKBUF_UNLOCK(&priv->so->so_rcv);
		SOCKBUF_LOCK(&priv->so->so_snd);
		priv->so->so_snd.sb_flags &= ~SB_UPCALL;
		SOCKBUF_UNLOCK(&priv->so->so_snd);
		priv->so->so_upcall = NULL;
		soclose(priv->so);
		priv->so = NULL;
	}

	/* If we are an embryo, take ourselves out of the parent's list */
	if (priv->flags & KSF_EMBRYONIC) {
		LIST_REMOVE(priv, siblings);
		priv->flags &= ~KSF_EMBRYONIC;
	}

	/* Remove any embryonic children we have */
	/* NOTE(review): this loop relies on ng_rmnode_self() (via the
	 * child's own shutdown) unlinking the embryo from priv->embryos;
	 * confirm, otherwise the loop would never terminate. */
	while (!LIST_EMPTY(&priv->embryos)) {
		embryo = LIST_FIRST(&priv->embryos);
		ng_rmnode_self(embryo->node);
	}

	/* Take down netgraph node */
	bzero(priv, sizeof(*priv));
	kfree(priv, M_NETGRAPH);
	NG_NODE_SET_PRIVATE(node, NULL);
	NG_NODE_UNREF(node);		/* let the node escape */
	return (0);
}
/*
 * Reset an HCI unit to a clean state: drain pending commands, tear
 * down every connection (notifying the upper layer with `reason`),
 * reset ACL/SCO buffer accounting to "all free" and flush the
 * neighbor cache.
 */
void
ng_hci_unit_clean(ng_hci_unit_p unit, int reason)
{
	int	size;

	/* Drain command queue */
	if (unit->state & NG_HCI_UNIT_COMMAND_PENDING)
		ng_hci_command_untimeout(unit);

	NG_BT_MBUFQ_DRAIN(&unit->cmdq);
	/* Allow one outstanding command again. */
	NG_HCI_BUFF_CMD_SET(unit->buffer, 1);

	/* Clean up connection list */
	while (!LIST_EMPTY(&unit->con_list)) {
		ng_hci_unit_con_p con = LIST_FIRST(&unit->con_list);

		/* Remove all timeouts (if any) */
		if (con->flags & NG_HCI_CON_TIMEOUT_PENDING)
			ng_hci_con_untimeout(con);

		/*
		 * Notify upper layer protocol and destroy connection
		 * descriptor. Do not really care about the result.
		 */
		ng_hci_lp_discon_ind(con, reason);
		ng_hci_free_con(con);
	}

	/* Mark every ACL and SCO buffer as free again. */
	NG_HCI_BUFF_ACL_TOTAL(unit->buffer, size);
	NG_HCI_BUFF_ACL_FREE(unit->buffer, size);

	NG_HCI_BUFF_SCO_TOTAL(unit->buffer, size);
	NG_HCI_BUFF_SCO_FREE(unit->buffer, size);

	/* Clean up neighbors list */
	ng_hci_flush_neighbor_cache(unit);
} /* ng_hci_unit_clean */
/* * This function finds the directory cookie that corresponds to the * logical byte offset given. */ nfsuint64 * ncl_getcookie(struct nfsnode *np, off_t off, int add) { struct nfsdmap *dp, *dp2; int pos; nfsuint64 *retval = NULL; pos = (uoff_t)off / NFS_DIRBLKSIZ; if (pos == 0 || off < 0) { KASSERT(!add, ("nfs getcookie add at <= 0")); return (&nfs_nullcookie); } pos--; dp = LIST_FIRST(&np->n_cookies); if (!dp) { if (add) { MALLOC(dp, struct nfsdmap *, sizeof (struct nfsdmap), M_NFSDIROFF, M_WAITOK); dp->ndm_eocookie = 0; LIST_INSERT_HEAD(&np->n_cookies, dp, ndm_list); } else goto out; }
static void js_fini(void) { js_plugin_t *jsp, *n; JSContext *cx; cx = js_newctx(err_reporter); JS_BeginRequest(cx); for(jsp = LIST_FIRST(&js_plugins); jsp != NULL; jsp = n) { n = LIST_NEXT(jsp, jsp_link); js_plugin_unload(cx, jsp); } JS_RemoveRoot(cx, &showtimeobj); JS_EndRequest(cx); JS_GC(cx); JS_DestroyContext(cx); JS_DestroyRuntime(runtime); JS_ShutDown(); }