void
tcp_twclose(struct tcptw *tw, int reuse)
{
	struct socket *so;
	struct inpcb *inp;

	/*
	 * At this point, we are in one of two situations:
	 *
	 * (1) We have no socket, just an inpcb<->twtcp pair.  We can free
	 *     all state.
	 *
	 * (2) We have a socket -- if we own a reference, release it and
	 *     notify the socket layer.
	 */
	inp = tw->tw_inpcb;
//ScenSim-Port//	KASSERT((inp->inp_flags & INP_TIMEWAIT), ("tcp_twclose: !timewait"));
//ScenSim-Port//	KASSERT(intotw(inp) == tw, ("tcp_twclose: inp_ppcb != tw"));
//ScenSim-Port//	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);	/* tcp_tw_2msl_stop(). */
//ScenSim-Port//	INP_WLOCK_ASSERT(inp);

	tw->tw_inpcb = NULL;
	tcp_tw_2msl_stop(tw);
	inp->inp_ppcb = NULL;
	in_pcbdrop(inp);

	so = inp->inp_socket;
	if (so != NULL) {
		/*
		 * If there's a socket, handle two cases: first, we own a
		 * strong reference, which we will now release, or we don't
		 * in which case another reference exists (XXXRW: think
		 * about this more), and we don't need to take action.
		 */
		if (inp->inp_flags & INP_SOCKREF) {
			inp->inp_flags &= ~INP_SOCKREF;
//ScenSim-Port//			INP_WUNLOCK(inp);
//ScenSim-Port//			ACCEPT_LOCK();
//ScenSim-Port//			SOCK_LOCK(so);
//ScenSim-Port//			KASSERT(so->so_state & SS_PROTOREF,
//ScenSim-Port//			    ("tcp_twclose: INP_SOCKREF && !SS_PROTOREF"));
			so->so_state &= ~SS_PROTOREF;
			sofree(so);
		} else {
			/*
			 * If we don't own the only reference, the socket and
			 * inpcb need to be left around to be handled by
			 * tcp_usr_detach() later.
			 */
//ScenSim-Port//			INP_WUNLOCK(inp);
		}
	} else
		in_pcbfree(inp);
	TCPSTAT_INC(tcps_closed);
//ScenSim-Port//	crfree(tw->tw_cred);
//ScenSim-Port//	tw->tw_cred = NULL;
	if (reuse)
		return;
	uma_zfree(V_tcptw_zone, tw);
}
static void
racct_destroy_locked(struct racct **racctp)
{
	int i;
	struct racct *racct;

	SDT_PROBE(racct, kernel, racct, destroy, racctp, 0, 0, 0, 0);

	mtx_assert(&racct_lock, MA_OWNED);
	KASSERT(racctp != NULL, ("NULL racctp"));
	KASSERT(*racctp != NULL, ("NULL racct"));

	racct = *racctp;

	for (i = 0; i <= RACCT_MAX; i++) {
		if (RACCT_IS_SLOPPY(i))
			continue;
		if (!RACCT_IS_RECLAIMABLE(i))
			continue;
		KASSERT(racct->r_resources[i] == 0,
		    ("destroying non-empty racct: "
		    "%ju allocated for resource %d\n",
		    racct->r_resources[i], i));
	}

	uma_zfree(racct_zone, racct);
	*racctp = NULL;
}
static void
dtsec_rm_fi_free(struct dtsec_softc *sc, struct dtsec_rm_frame_info *fi)
{

	XX_UntrackAddress(fi);
	uma_zfree(sc->sc_fi_zone, fi);
}
/*
 * m_get2() allocates minimum mbuf that would fit "size" argument.
 */
struct mbuf *
m_get2(int size, int how, short type, int flags)
{
	struct mb_args args;
	struct mbuf *m, *n;

	args.flags = flags;
	args.type = type;

	if (size <= MHLEN || (size <= MLEN && (flags & M_PKTHDR) == 0))
		return (uma_zalloc_arg(zone_mbuf, &args, how));
	if (size <= MCLBYTES)
		return (uma_zalloc_arg(zone_pack, &args, how));

	if (size > MJUMPAGESIZE)
		return (NULL);

	m = uma_zalloc_arg(zone_mbuf, &args, how);
	if (m == NULL)
		return (NULL);

	n = uma_zalloc_arg(zone_jumbop, m, how);
	if (n == NULL) {
		uma_zfree(zone_mbuf, m);
		return (NULL);
	}

	return (m);
}
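/*
 * Usage sketch (not part of the original sources): a hypothetical caller,
 * example_get_buffer(), asking m_get2() above for an mbuf large enough to
 * hold "len" bytes of payload.  The helper name and its use of the packet
 * header fields are assumptions for illustration; the m_get2()/m_freem()
 * calls themselves follow the signature shown above.
 */
static struct mbuf *
example_get_buffer(int len)
{
	struct mbuf *m;

	/* m_get2() may return NULL on allocation failure with M_NOWAIT. */
	m = m_get2(len, M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (NULL);

	/* Caller fills in the data; the mbuf is later released with m_freem(m). */
	m->m_pkthdr.len = m->m_len = len;
	return (m);
}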
/*
 * Apply an anonymous mask to a single thread.
 */
int
cpuset_setthread(lwpid_t id, cpuset_t *mask)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	nset = uma_zalloc(cpuset_zone, M_WAITOK);
	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
	if (error)
		goto out;
	set = NULL;
	thread_lock(td);
	error = cpuset_shadow(td->td_cpuset, nset, mask);
	if (error == 0) {
		set = td->td_cpuset;
		td->td_cpuset = nset;
		sched_affinity(td);
		nset = NULL;
	}
	thread_unlock(td);
	PROC_UNLOCK(p);
	if (set)
		cpuset_rel(set);
out:
	if (nset)
		uma_zfree(cpuset_zone, nset);
	return (error);
}
/*
 * m_getjcl() returns an mbuf with a cluster of the specified size attached.
 * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
 */
struct mbuf *
m_getjcl(int how, short type, int flags, int size)
{
	struct mb_args args;
	struct mbuf *m, *n;
	uma_zone_t zone;

	if (size == MCLBYTES)
		return m_getcl(how, type, flags);

	args.flags = flags;
	args.type = type;

	m = uma_zalloc_arg(zone_mbuf, &args, how);
	if (m == NULL)
		return (NULL);

	zone = m_getzone(size);
	n = uma_zalloc_arg(zone, m, how);
	if (n == NULL) {
		uma_zfree(zone_mbuf, m);
		return (NULL);
	}
	return (m);
}
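/*
 * Usage sketch (not part of the original sources): a hypothetical receive
 * helper, example_alloc_jumbo_rx(), asking m_getjcl() above for an mbuf
 * backed by a 9 KB jumbo cluster.  MJUM9BYTES is one of the sizes listed as
 * valid in the comment above; the helper name and field setup are
 * illustrative assumptions.
 */
static struct mbuf *
example_alloc_jumbo_rx(void)
{
	struct mbuf *m;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
	if (m == NULL)
		return (NULL);	/* allocation failed; caller retries later */

	/* The whole cluster is exposed; freed with m_freem() when consumed. */
	m->m_len = m->m_pkthdr.len = MJUM9BYTES;
	return (m);
}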
/*
 * Reclaim an nfsnode so that it can be used for other purposes.
 */
int
ncl_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsdmap *dp, *dp2;

	/*
	 * If the NLM is running, give it a chance to abort pending
	 * locks.
	 */
	if (nfs_reclaim_p != NULL)
		nfs_reclaim_p(ap);

	/*
	 * Destroy the vm object and flush associated pages.
	 */
	vnode_destroy_vobject(vp);

	if (NFS_ISV4(vp) && vp->v_type == VREG)
		/*
		 * We can now safely close any remaining NFSv4 Opens for
		 * this file.  Most opens will have already been closed by
		 * ncl_inactive(), but there are cases where it is not
		 * called, so we need to do it again here.
		 */
		(void) nfsrpc_close(vp, 1, ap->a_td);

	vfs_hash_remove(vp);

	/*
	 * Call nfscl_reclaimnode() to save attributes in the delegation,
	 * as required.
	 */
	if (vp->v_type == VREG)
		nfscl_reclaimnode(vp);

	/*
	 * Free up any directory cookie structures and
	 * large file handle structures that might be associated with
	 * this nfs node.
	 */
	if (vp->v_type == VDIR) {
		dp = LIST_FIRST(&np->n_cookies);
		while (dp) {
			dp2 = dp;
			dp = LIST_NEXT(dp, ndm_list);
			FREE((caddr_t)dp2, M_NFSDIROFF);
		}
	}
	if (np->n_writecred != NULL)
		crfree(np->n_writecred);
	FREE((caddr_t)np->n_fhp, M_NFSFH);
	if (np->n_v4 != NULL)
		FREE((caddr_t)np->n_v4, M_NFSV4NODE);
	mtx_destroy(&np->n_mtx);
	uma_zfree(newnfsnode_zone, vp->v_data);
	vp->v_data = NULL;
	return (0);
}
/*
 * Complete a deferred release.  Removes the set from the list provided to
 * cpuset_rel_defer.
 */
static void
cpuset_rel_complete(struct cpuset *set)
{
	LIST_REMOVE(set, cs_link);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
}
/*
 * Reclaim an nfsnode so that it can be used for other purposes.
 */
int
ncl_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsdmap *dp, *dp2;

	if (NFS_ISV4(vp) && vp->v_type == VREG)
		/*
		 * Since mmap()'d files do I/O after VOP_CLOSE(), the NFSv4
		 * Close operations are delayed until ncl_inactive().
		 * However, since VOP_INACTIVE() is not guaranteed to be
		 * called, we need to do it again here.
		 */
		(void) nfsrpc_close(vp, 1, ap->a_td);

	/*
	 * If the NLM is running, give it a chance to abort pending
	 * locks.
	 */
	if (nfs_reclaim_p != NULL)
		nfs_reclaim_p(ap);

	/*
	 * Destroy the vm object and flush associated pages.
	 */
	vnode_destroy_vobject(vp);

	vfs_hash_remove(vp);

	/*
	 * Call nfscl_reclaimnode() to save attributes in the delegation,
	 * as required.
	 */
	if (vp->v_type == VREG)
		nfscl_reclaimnode(vp);

	/*
	 * Free up any directory cookie structures and
	 * large file handle structures that might be associated with
	 * this nfs node.
	 */
	if (vp->v_type == VDIR) {
		dp = LIST_FIRST(&np->n_cookies);
		while (dp) {
			dp2 = dp;
			dp = LIST_NEXT(dp, ndm_list);
			FREE((caddr_t)dp2, M_NFSDIROFF);
		}
	}
	if (np->n_writecred != NULL)
		crfree(np->n_writecred);
	FREE((caddr_t)np->n_fhp, M_NFSFH);
	if (np->n_v4 != NULL)
		FREE((caddr_t)np->n_v4, M_NFSV4NODE);
	mtx_destroy(&np->n_mtx);
	uma_zfree(newnfsnode_zone, vp->v_data);
	vp->v_data = NULL;
	return (0);
}
static void
syncache_free(struct syncache *sc)
{

	if (sc->sc_ipopts)
		(void) m_free(sc->sc_ipopts);

	uma_zfree(tcp_syncache.zone, sc);
}
static void
cache_free(struct namecache *ncp)
{
	int ts;

	if (ncp == NULL)
		return;
	ts = ncp->nc_flag & NCF_TS;
	if (ncp->nc_nlen <= CACHE_PATH_CUTOFF) {
		if (ts)
			uma_zfree(cache_zone_small_ts, ncp);
		else
			uma_zfree(cache_zone_small, ncp);
	} else if (ts)
		uma_zfree(cache_zone_large_ts, ncp);
	else
		uma_zfree(cache_zone_large, ncp);
}
/**
 * @group dTSEC buffer pools routines.
 * @{
 */
static t_Error
dtsec_rm_pool_rx_put_buffer(t_Handle h_BufferPool, uint8_t *buffer,
    t_Handle context)
{
	struct dtsec_softc *sc;

	sc = h_BufferPool;
	uma_zfree(sc->sc_rx_zone, buffer);

	return (E_OK);
}
static __inline void
pefs_node_free(struct pefs_node *pn)
{
	struct vnode *lowervp;

	lowervp = pn->pn_lowervp_dead;
	uma_zfree(pefs_node_zone, pn);
	if (lowervp != NULL) {
		vrele(lowervp);
	}
}
static void
icl_soft_conn_pdu_free(struct icl_conn *ic, struct icl_pdu *ip)
{

	m_freem(ip->ip_bhs_mbuf);
	m_freem(ip->ip_ahs_mbuf);
	m_freem(ip->ip_data_mbuf);
	uma_zfree(icl_pdu_zone, ip);
#ifdef DIAGNOSTIC
	refcount_release(&ic->ic_outstanding_pdus);
#endif
}
/*
 * Free struct sackhole.
 */
static void
tcp_sackhole_free(struct tcpcb *tp, struct sackhole *hole)
{

	uma_zfree(V_sack_hole_zone, hole);

	tp->snd_numholes--;
	atomic_subtract_int(&V_tcp_sack_globalholes, 1);

//ScenSim-Port//	KASSERT(tp->snd_numholes >= 0, ("tp->snd_numholes >= 0"));
//ScenSim-Port//	KASSERT(V_tcp_sack_globalholes >= 0, ("tcp_sack_globalholes >= 0"));
}
static void
netmap_default_mbuf_destructor(struct mbuf *m)
{
	/* restore original mbuf */
	m->m_ext.ext_buf = m->m_data = m->m_ext.ext_arg1;
	m->m_ext.ext_arg1 = NULL;
	m->m_ext.ext_type = EXT_PACKET;
	m->m_ext.ext_free = NULL;
	if (MBUF_REFCNT(m) == 0)
		SET_MBUF_REFCNT(m, 1);
	uma_zfree(zone_pack, m);
}
//ScenSim-Port//static void
//ScenSim-Port//ertt_uma_dtor(void *mem, int size, void *arg)
void ertt_uma_dtor(struct ertt *e_t)			//ScenSim-Port//
{
//ScenSim-Port//	struct ertt *e_t;
	struct txseginfo *n_txsi, *txsi;

//ScenSim-Port//	e_t = mem;
	txsi = TAILQ_FIRST(&e_t->txsegi_q);
	while (txsi != NULL) {
		n_txsi = TAILQ_NEXT(txsi, txsegi_lnk);
		uma_zfree(txseginfo_zone, txsi);
		txsi = n_txsi;
	}
}
void
icl_pdu_free(struct icl_pdu *ip)
{
	struct icl_conn *ic;

	ic = ip->ip_conn;

	m_freem(ip->ip_bhs_mbuf);
	m_freem(ip->ip_ahs_mbuf);
	m_freem(ip->ip_data_mbuf);
	uma_zfree(icl_pdu_zone, ip);

	refcount_release(&ic->ic_outstanding_pdus);
}
static void
ertt_uma_dtor(void *mem, int size, void *arg)
{
	struct ertt *e_t;
	struct txseginfo *n_txsi, *txsi;

	e_t = mem;
	txsi = TAILQ_FIRST(&e_t->txsegi_q);
	while (txsi != NULL) {
		n_txsi = TAILQ_NEXT(txsi, txsegi_lnk);
		uma_zfree(txseginfo_zone, txsi);
		txsi = n_txsi;
	}
}
/*
 * VCC has been finally closed.
 */
void
patm_vcc_closed(struct patm_softc *sc, struct patm_vcc *vcc)
{
	/* inform management about non-NG and NG-PVCs */
	if (!(vcc->vcc.flags & ATMIO_FLAG_NG) ||
	    (vcc->vcc.flags & ATMIO_FLAG_PVC))
		ATMEV_SEND_VCC_CHANGED(IFP2IFATM(sc->ifp),
		    vcc->vcc.vpi, vcc->vcc.vci, 0);

	sc->vccs_open--;
	sc->vccs[vcc->cid] = NULL;
	uma_zfree(sc->vcc_zone, vcc);
}
static int
void_mbuf_dtor(struct mbuf *m, void *arg1, void *arg2)
{
	/* restore original mbuf */
	m->m_ext.ext_buf = m->m_data = m->m_ext.ext_arg1;
	m->m_ext.ext_arg1 = NULL;
	m->m_ext.ext_type = EXT_PACKET;
	m->m_ext.ext_free = NULL;
	if (MBUF_REFCNT(m) == 0)
		SET_MBUF_REFCNT(m, 1);
	uma_zfree(zone_pack, m);
	return 0;
}
/*
 * VCC has been finally closed.
 */
void
hatm_vcc_closed(struct hatm_softc *sc, u_int cid)
{
	struct hevcc *vcc = sc->vccs[cid];

	/* inform management about non-NG and NG-PVCs */
	if (!(vcc->param.flags & ATMIO_FLAG_NG) ||
	    (vcc->param.flags & ATMIO_FLAG_PVC))
		ATMEV_SEND_VCC_CHANGED(IFP2IFATM(sc->ifp), HE_VPI(cid),
		    HE_VCI(cid), 0);

	sc->open_vccs--;
	uma_zfree(sc->vcc_zone, vcc);
	sc->vccs[cid] = NULL;
}
static void
pefs_insmntque_dtr(struct vnode *vp, void *_pn)
{
	struct pefs_node *pn = _pn;

	PEFSDEBUG("pefs_insmntque_dtr: free node %p\n", pn);
	vp->v_data = NULL;
	vp->v_vnlock = &vp->v_lock;
	pefs_key_release(pn->pn_tkey.ptk_key);
	uma_zfree(pefs_node_zone, pn);
	vp->v_op = &dead_vnodeops;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	vgone(vp);
	vput(vp);
}
void
ttyoutq_free(struct ttyoutq *to)
{
	struct ttyoutq_block *tob;

	ttyoutq_flush(to);
	to->to_quota = 0;

	while ((tob = to->to_firstblock) != NULL) {
		TTYOUTQ_REMOVE_HEAD(to);
		uma_zfree(ttyoutq_zone, tob);
	}

	MPASS(to->to_nblocks == 0);
}
void
ttyinq_free(struct ttyinq *ti)
{
	struct ttyinq_block *tib;

	ttyinq_flush(ti);
	ti->ti_quota = 0;

	while ((tib = ti->ti_firstblock) != NULL) {
		TTYINQ_REMOVE_HEAD(ti);
		uma_zfree(ttyinq_zone, tib);
	}

	MPASS(ti->ti_nblocks == 0);
}
static void
dircache_entry_free(struct pefs_dircache_entry *pde)
{
	MPASS(pde != NULL);
	PEFSDEBUG("dircache_entry_free: %s -> %s\n",
	    pde->pde_name, pde->pde_encname);

	pefs_key_release(pde->pde_tkey.ptk_key);
	LIST_REMOVE(pde, pde_dir_entry);
	mtx_lock(&dircache_mtx);
	LIST_REMOVE(pde, pde_hash_entry);
	LIST_REMOVE(pde, pde_enchash_entry);
	dircache_entries--;
	mtx_unlock(&dircache_mtx);
	uma_zfree(dircache_entry_zone, pde);
}
/*
 * Release a reference in a context where it is safe to allocate.
 */
void
cpuset_rel(struct cpuset *set)
{
	cpusetid_t id;

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	id = set->cs_id;
	if (id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
	if (id != CPUSET_INVALID)
		free_unr(cpuset_unr, id);
}
static struct mbuf *sfxge_rx_alloc_mbuf(struct sfxge_softc *sc)
{
	struct mb_args args;
	struct mbuf *m;

	/* Allocate mbuf structure */
	args.flags = M_PKTHDR;
	args.type = MT_DATA;
	m = (struct mbuf *)uma_zalloc_arg(zone_mbuf, &args, M_NOWAIT);

	/* Allocate (and attach) packet buffer */
	if (m != NULL && !uma_zalloc_arg(sc->rx_buffer_zone, m, M_NOWAIT)) {
		uma_zfree(zone_mbuf, m);
		m = NULL;
	}

	return (m);
}
/*
 * Reclaim an nfsnode so that it can be used for other purposes.
 */
int
nfs_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsdmap *dp, *dp2;

	/*
	 * If the NLM is running, give it a chance to abort pending
	 * locks.
	 */
	if (nfs_reclaim_p)
		nfs_reclaim_p(ap);

	/*
	 * Destroy the vm object and flush associated pages.
	 */
	vnode_destroy_vobject(vp);

	vfs_hash_remove(vp);

	/*
	 * Free up any directory cookie structures and
	 * large file handle structures that might be associated with
	 * this nfs node.
	 */
	if (vp->v_type == VDIR) {
		dp = LIST_FIRST(&np->n_cookies);
		while (dp) {
			dp2 = dp;
			dp = LIST_NEXT(dp, ndm_list);
			free((caddr_t)dp2, M_NFSDIROFF);
		}
	}
	if (np->n_writecred != NULL)
		crfree(np->n_writecred);
	if (np->n_fhsize > NFS_SMALLFH) {
		free((caddr_t)np->n_fhp, M_NFSBIGFH);
	}
	mtx_destroy(&np->n_mtx);
	uma_zfree(nfsnode_zone, vp->v_data);
	vp->v_data = NULL;
	return (0);
}
void
pefs_dircache_free(struct pefs_dircache *pd)
{
	struct pefs_dircache_entry *pde;

	if (pd == NULL)
		return;

	while (!LIST_EMPTY(DIRCACHE_STALEHEAD(pd))) {
		pde = LIST_FIRST(DIRCACHE_STALEHEAD(pd));
		dircache_entry_free(pde);
	}
	while (!LIST_EMPTY(DIRCACHE_ACTIVEHEAD(pd))) {
		pde = LIST_FIRST(DIRCACHE_ACTIVEHEAD(pd));
		dircache_entry_free(pde);
	}
	sx_destroy(&pd->pd_lock);
	uma_zfree(dircache_zone, pd);
}