/*
 * Create an async-logging queue backed by the file at 'file'.
 *
 * The vnode is opened (and created if necessary) with the supplied
 * credentials, then unlocked — the queue keeps only a reference to it.
 * On success *alqp receives the new queue and 0 is returned; on failure
 * the vn_open_cred()/ald_add() error is returned and *alqp stays NULL.
 */
int alq_open_flags(struct alq **alqp, const char *file, struct ucred *cred, int cmode, int size, int flags)
{
	struct thread *td;
	struct nameidata nd;
	struct alq *alq;
	int oflags;
	int error;
	int vfslocked;

	KASSERT((size > 0), ("%s: size <= 0", __func__));

	*alqp = NULL;
	td = curthread;

	NDINIT(&nd, LOOKUP, NOFOLLOW | MPSAFE, UIO_SYSSPACE, file, td);
	oflags = FWRITE | O_NOFOLLOW | O_CREAT;

	error = vn_open_cred(&nd, &oflags, cmode, 0, cred, NULL);
	if (error)
		return (error);

	vfslocked = NDHASGIANT(&nd);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	/* We just unlock so we hold a reference */
	VOP_UNLOCK(nd.ni_vp, 0);
	VFS_UNLOCK_GIANT(vfslocked);

	alq = bsd_malloc(sizeof(*alq), M_ALD, M_WAITOK|M_ZERO);
	alq->aq_vp = nd.ni_vp;
	alq->aq_cred = crhold(cred);	/* queue keeps its own cred reference */

	/* Spin mutex: the queue is manipulated from the ALD daemon as well. */
	mtx_init(&alq->aq_mtx, "ALD Queue", NULL, MTX_SPIN|MTX_QUIET);

	/*
	 * entmax/entlen of 0 mark this as a variable-length-record queue;
	 * only the byte count of the ring buffer matters.
	 */
	alq->aq_buflen = size;
	alq->aq_entmax = 0;
	alq->aq_entlen = 0;

	alq->aq_freebytes = alq->aq_buflen;
	alq->aq_entbuf = bsd_malloc(alq->aq_buflen, M_ALD, M_WAITOK|M_ZERO);
	alq->aq_writehead = alq->aq_writetail = 0;
	if (flags & ALQ_ORDERED)
		alq->aq_flags |= AQ_ORDERED;

	/* Register with the ALD daemon; alq_destroy() undoes everything above. */
	if ((error = ald_add(alq)) != 0) {
		alq_destroy(alq);
		return (error);
	}

	*alqp = alq;
	return (0);
}
int do_getopt_accept_filter(struct socket *so, struct sockopt *sopt) { struct accept_filter_arg *afap; int error; error = 0; afap = bsd_malloc(sizeof(*afap), M_TEMP, M_WAITOK | M_ZERO); SOCK_LOCK(so); if ((so->so_options & SO_ACCEPTCONN) == 0) { error = EINVAL; goto out; } if ((so->so_options & SO_ACCEPTFILTER) == 0) { error = EINVAL; goto out; } strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name); if (so->so_accf->so_accept_filter_str != NULL) strcpy(afap->af_arg, so->so_accf->so_accept_filter_str); out: SOCK_UNLOCK(so); if (error == 0) error = sooptcopyout(sopt, afap, sizeof(*afap)); bsd_free(afap, M_TEMP); return (error); }
/*
 * Allocate or resize buffers.
 *
 * Clamps the requested size into [BPF_MINBUFSIZE, bpf_maxbufsize]
 * (writing the clamped value back through *i), allocates a new pair
 * of store/free buffers, and installs them.  Fails with EINVAL if the
 * descriptor is already attached to an interface.
 */
int bpf_buffer_ioctl_sblen(struct bpf_d *d, u_int *i)
{
	u_int size;
	caddr_t fbuf, sbuf;

	size = *i;
	if (size > bpf_maxbufsize)
		*i = size = bpf_maxbufsize;
	else if (size < BPF_MINBUFSIZE)
		*i = size = BPF_MINBUFSIZE;

	/* Allocate buffers immediately (M_WAITOK: may sleep, so before the lock) */
	fbuf = (caddr_t)bsd_malloc(size, M_BPF, M_WAITOK);
	sbuf = (caddr_t)bsd_malloc(size, M_BPF, M_WAITOK);

	BPFD_LOCK(d);
	if (d->bd_bif != NULL) {
		/* Interface already attached, unable to change buffers */
		BPFD_UNLOCK(d);
		bsd_free(fbuf, M_BPF);
		bsd_free(sbuf, M_BPF);
		return (EINVAL);
	}
	/* Wait until no reader holds the hold buffer before freeing it. */
	while (d->bd_hbuf_in_use)
		mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
		    PRINET, "bd_hbuf", 0);

	/* Free old buffers if set */
	if (d->bd_fbuf != NULL)
		bsd_free(d->bd_fbuf, M_BPF);
	if (d->bd_sbuf != NULL)
		bsd_free(d->bd_sbuf, M_BPF);

	/* Fill in new data */
	d->bd_bufsize = size;
	d->bd_fbuf = fbuf;
	d->bd_sbuf = sbuf;
	/* Discard any captured-but-unread data along with the old buffers. */
	d->bd_hbuf = NULL;
	d->bd_slen = 0;
	d->bd_hlen = 0;

	BPFD_UNLOCK(d);
	return (0);
}
static int ext2_mountroot() { #if !defined(__FreeBSD__) extern struct vnode *rootvp; #endif register struct ext2_sb_info *fs; register struct mount *mp; #if defined(__FreeBSD__) struct proc *p = curproc; #else struct proc *p = get_proc(); /* XXX */ #endif struct ufsmount *ump; u_int size; int error; /* * Get vnodes for swapdev and rootdev. */ if (bdevvp(swapdev, &swapdev_vp) || bdevvp(rootdev, &rootvp)) panic("ext2_mountroot: can't setup bdevvp's"); mp = bsd_malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK); bzero((char *)mp, (u_long)sizeof(struct mount)); mp->mnt_op = &ext2fs_vfsops; mp->mnt_flag = MNT_RDONLY; if (error = ext2_mountfs(rootvp, mp, p)) { bsd_free(mp, M_MOUNT); return (error); } if (error = vfs_lock(mp)) { (void)ext2_unmount(mp, 0, p); bsd_free(mp, M_MOUNT); return (error); } #if defined(__FreeBSD__) CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list); #else TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list); #endif mp->mnt_flag |= MNT_ROOTFS; mp->mnt_vnodecovered = NULLVP; ump = VFSTOUFS(mp); fs = ump->um_e2fs; bzero(fs->fs_fsmnt, sizeof(fs->fs_fsmnt)); fs->fs_fsmnt[0] = '/'; bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname, MNAMELEN); (void) copystr(ROOTNAME, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size); bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size); (void)ext2_statfs(mp, &mp->mnt_stat, p); vfs_unlock(mp); inittodr(fs->s_es->s_wtime); /* this helps to set the time */ return (0); }
/*
 * Setup a DMA channel's bounce buffer.
 *
 * Tries an ordinary bsd_malloc() first and falls back to contigmalloc()
 * if the cheap buffer straddles an ISA DMA boundary.  Returns 0 on
 * success (including the already-initialized shared-channel case) or
 * ENOMEM if no suitable buffer could be obtained.
 */
int isa_dma_init(int chan, u_int bouncebufsize, int flag)
{
	void *buf;
	int contig;

#ifdef DIAGNOSTIC
	if (chan & ~VALID_DMA_MASK)
		panic("isa_dma_init: channel out of range");
#endif

	/* Try malloc() first.  It works better if it works. */
	buf = bsd_malloc(bouncebufsize, M_DEVBUF, flag);
	if (buf != NULL) {
		/* Reject it if it crosses a DMA page boundary for this channel. */
		if (isa_dmarangecheck(buf, bouncebufsize, chan) != 0) {
			bsd_free(buf, M_DEVBUF);
			buf = NULL;
		}
		contig = 0;
	}
	if (buf == NULL) {
		/*
		 * Fall back to a physically contiguous buffer below 16MB,
		 * aligned so it cannot cross the channel's boundary
		 * (128K for the 16-bit channels 4-7, 64K otherwise).
		 */
		buf = contigmalloc(bouncebufsize, M_DEVBUF, flag,
		    0ul, 0xfffffful, 1ul,
		    chan & 4 ? 0x20000ul : 0x10000ul);
		contig = 1;
	}
	if (buf == NULL)
		return (ENOMEM);

	mtx_lock(&isa_dma_lock);
	/*
	 * If a DMA channel is shared, both drivers have to call isa_dma_init
	 * since they don't know that the other driver will do it.
	 * Just return if we're already set up good.
	 * XXX: this only works if they agree on the bouncebuf size.  This
	 * XXX: is typically the case since they are multiple instances of
	 * XXX: the same driver.
	 */
	if (dma_bouncebuf[chan] != NULL) {
		/* Lost the race; release our buffer with the matching free. */
		if (contig)
			contigfree(buf, bouncebufsize, M_DEVBUF);
		else
			bsd_free(buf, M_DEVBUF);
		mtx_unlock(&isa_dma_lock);
		return (0);
	}

	dma_bouncebufsize[chan] = bouncebufsize;
	dma_bouncebuf[chan] = buf;

	mtx_unlock(&isa_dma_lock);

	return (0);
}
/*
 * Return a freshly allocated duplicate of the NUL-terminated string.
 * The allocation is charged to 'type' and may sleep (M_WAITOK); the
 * caller owns the copy and must release it with bsd_free(copy, type).
 */
char *
strdup(const char *string, struct malloc_type *type)
{
	char *dup;
	size_t size;

	size = strlen(string) + 1;	/* include the terminating NUL */
	dup = bsd_malloc(size, type, M_WAITOK);
	bcopy(string, dup, size);
	return (dup);
}
/*
 * Create the single stf(4) cloned interface.
 *
 * Allocates the softc and ifnet, registers the IPv6-in-IPv4 encapsulation
 * hook and attaches the interface.  Returns 0 on success, ENOSPC if the
 * ifnet cannot be allocated, or ENOMEM if the encapsulation hook cannot
 * be registered.
 *
 * Fix vs. original: the encap_attach_func() failure path freed the softc
 * but leaked the ifnet obtained from if_alloc() and never destroyed the
 * just-initialized sc_ro_mtx; both are now released.
 */
static int stf_clone_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
{
	int err, unit;
	struct stf_softc *sc;
	struct ifnet *ifp;

	/*
	 * We can only have one unit, but since unit allocation is
	 * already locked, we use it to keep from allocating extra
	 * interfaces.
	 */
	unit = STFUNIT;
	err = ifc_alloc_unit(ifc, &unit);
	if (err != 0)
		return (err);

	sc = bsd_malloc(sizeof(struct stf_softc), M_STF, M_WAITOK | M_ZERO);
	ifp = STF2IFP(sc) = if_alloc(IFT_STF);
	if (ifp == NULL) {
		bsd_free(sc, M_STF);
		ifc_free_unit(ifc, unit);
		return (ENOSPC);
	}
	ifp->if_softc = sc;
	sc->sc_fibnum = curthread->td_proc->p_fibnum;

	/*
	 * Set the name manually rather then using if_initname because
	 * we don't conform to the default naming convention for interfaces.
	 */
	strlcpy(ifp->if_xname, name, IFNAMSIZ);
	ifp->if_dname = ifc->ifc_name;
	ifp->if_dunit = IF_DUNIT_NONE;

	mtx_init(&(sc)->sc_ro_mtx, "stf ro", NULL, MTX_DEF);
	sc->encap_cookie = encap_attach_func(AF_INET, IPPROTO_IPV6,
	    stf_encapcheck, &in_stf_protosw, sc);
	if (sc->encap_cookie == NULL) {
		if_printf(ifp, "attach failed\n");
		/* Unwind everything acquired above before bailing out. */
		mtx_destroy(&sc->sc_ro_mtx);
		if_free(ifp);
		bsd_free(sc, M_STF);
		ifc_free_unit(ifc, unit);
		return (ENOMEM);
	}

	ifp->if_mtu = IPV6_MMTU;
	ifp->if_ioctl = stf_ioctl;
	ifp->if_output = stf_output;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	if_attach(ifp);
	bpfattach(ifp, DLT_NULL, sizeof(u_int32_t));
	return (0);
}
void * alloc_fpusave(int flags) { struct pcb *res; struct savefpu_ymm *sf; res = bsd_malloc(cpu_max_ext_state_size, M_DEVBUF, flags); if (use_xsave) { sf = (struct savefpu_ymm *)res; bzero(&sf->sv_xstate.sx_hd, sizeof(sf->sv_xstate.sx_hd)); sf->sv_xstate.sx_hd.xstate_bv = xsave_mask; } return (res); }
static int ext2_mountroot() { register struct ext2_sb_info *fs; register struct mount *mp; struct proc *p = curproc; struct ufsmount *ump; u_int size; int error; if ((error = bdevvp(rootdev, &rootvp))) { printf("ext2_mountroot: can't find rootvp\n"); return (error); } mp = bsd_malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK); bzero((char *)mp, (u_long)sizeof(struct mount)); mp->mnt_op = &ext2fs_vfsops; mp->mnt_flag = MNT_RDONLY; if (error = ext2_mountfs(rootvp, mp, p)) { bsd_free(mp, M_MOUNT); return (error); } if (error = vfs_lock(mp)) { (void)ext2_unmount(mp, 0, p); bsd_free(mp, M_MOUNT); return (error); } TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list); mp->mnt_flag |= MNT_ROOTFS; mp->mnt_vnodecovered = NULLVP; ump = VFSTOUFS(mp); fs = ump->um_e2fs; bzero(fs->fs_fsmnt, sizeof(fs->fs_fsmnt)); fs->fs_fsmnt[0] = '/'; bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname, MNAMELEN); (void) copystr(ROOTNAME, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size); bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size); (void)ext2_statfs(mp, &mp->mnt_stat, p); vfs_unlock(mp); inittodr(fs->s_es->s_wtime); /* this helps to set the time */ return (0); }
static void msi_create_source(void) { struct msi_intsrc *msi; u_int irq; mtx_lock(&msi_lock); if (msi_last_irq >= NUM_MSI_INTS) { mtx_unlock(&msi_lock); return; } irq = msi_last_irq + FIRST_MSI_INT; msi_last_irq++; mtx_unlock(&msi_lock); msi = bsd_malloc(sizeof(struct msi_intsrc), M_MSI, M_WAITOK | M_ZERO); msi->msi_intsrc.is_pic = &msi_pic; msi->msi_irq = irq; intr_register_source(&msi->msi_intsrc); nexus_add_irq(irq); }
/*
 * Add a child device under the CPU bus, attaching a freshly allocated
 * cpu_device ivars structure (with an initialized resource list and the
 * matching pcpu pointer).  Returns the new child, or NULL if either the
 * ivars allocation or device_add_child_ordered() fails.
 */
static device_t cpu_add_child(device_t bus, u_int order, const char *name, int unit)
{
	struct cpu_device *ivars;
	struct pcpu *pcpu;
	device_t child;

	ivars = bsd_malloc(sizeof(*ivars), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (ivars == NULL)
		return (NULL);

	resource_list_init(&ivars->cd_rl);
	pcpu = pcpu_find(device_get_unit(bus));
	ivars->cd_pcpu = pcpu;

	child = device_add_child_ordered(bus, order, name, unit);
	if (child == NULL) {
		bsd_free(ivars, M_DEVBUF);
		return (child);
	}
	pcpu->pc_device = child;
	device_set_ivars(child, ivars);
	return (child);
}
/*
 * Look for a PCI bus with the specified bus address.  If one is found,
 * add a pcib device and return 0.  Otherwise, return an error code
 * (EEXIST if the bus is already claimed, ENOENT if nothing responds,
 * ENXIO if the device there is not an Intel part).
 */
static int qpi_probe_pcib(device_t dev, int bus)
{
	struct qpi_device *ivars;
	device_t pcib;
	uint32_t id;

	/* Fail if some other host bridge already owns this bus number. */
	if (pci_find_bsf(bus, 0, 0) != NULL)
		return (EEXIST);

	/*
	 * Read the device/vendor ID of device 0, function 0; all-ones
	 * means nothing is present at this bus address.
	 */
	id = pci_cfgregread(bus, 0, 0, PCIR_DEVVENDOR, 4);
	if (id == 0xffffffff)
		return (ENOENT);

	if ((id & 0xffff) != 0x8086) {
		device_printf(dev,
		    "Device at pci%d.0.0 has non-Intel vendor 0x%x\n", bus,
		    id & 0xffff);
		return (ENXIO);
	}

	pcib = BUS_ADD_CHILD(dev, 0, "pcib", -1);
	if (pcib == NULL)
		panic("%s: failed to add pci bus %d",
		    device_get_nameunit(dev), bus);
	ivars = bsd_malloc(sizeof(struct qpi_device), M_QPI, M_WAITOK);
	ivars->qd_pcibus = bus;
	device_set_ivars(pcib, ivars);
	return (0);
}
/*
 * Generic module event handler for accept filter modules: registers a
 * private copy of the filter descriptor on load and (optionally, when
 * 'unloadable' is set) deregisters it on unload.
 */
int accept_filt_generic_mod_event(module_t mod, int event, void *data)
{
	struct accept_filter *desc = (struct accept_filter *)data;
	struct accept_filter *copy;
	int error;

	switch (event) {
	case MOD_LOAD:
		/*
		 * Register a heap copy so the table does not point into
		 * module data.
		 * NOTE(review): ownership of 'copy' on an accept_filt_add()
		 * failure is not visible here — confirm the callee frees it.
		 */
		copy = bsd_malloc(sizeof(*copy), M_ACCF, M_WAITOK);
		bcopy(desc, copy, sizeof(*copy));
		error = accept_filt_add(copy);
		break;

	case MOD_UNLOAD:
		/*
		 * Do not support unloading yet. we don't keep track of
		 * refcounts and unloading an accept filter callback and then
		 * having it called is a bad thing.  A simple fix would be to
		 * track the refcount in the struct accept_filter.
		 */
		if (unloadable != 0)
			error = accept_filt_del(desc->accf_name);
		else
			error = EOPNOTSUPP;
		break;

	case MOD_SHUTDOWN:
		error = 0;
		break;

	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}
/*
 * Add a child device on the legacy bus, attaching legacy_device ivars
 * with the PCI bus/slot/function fields preset to -1 (unknown).
 * Returns the new child, or NULL if the ivars allocation or
 * device_add_child_ordered() fails.
 */
static device_t legacy_add_child(device_t bus, u_int order, const char *name, int unit)
{
	struct legacy_device *ivars;
	device_t child;

	ivars = bsd_malloc(sizeof(struct legacy_device), M_LEGACYDEV,
	    M_NOWAIT | M_ZERO);
	if (ivars == NULL)
		return (NULL);

	ivars->lg_pcibus = -1;
	ivars->lg_pcislot = -1;
	ivars->lg_pcifunc = -1;

	child = device_add_child_ordered(bus, order, name, unit);
	if (child == NULL) {
		bsd_free(ivars, M_LEGACYDEV);
	} else {
		/* should we free this in legacy_child_detached? */
		device_set_ivars(child, ivars);
	}
	return (child);
}
/*
 * Try to allocate 'count' interrupt sources with contiguous IDT values.
 *
 * On success the IRQ numbers are returned in 'irqs', the messages are
 * owned by 'dev', and the first source records the group (count,
 * maxcount and a private copy of the IRQ list).  Returns 0, ENXIO if
 * MSI is disabled or the MSI IRQ range is exhausted, or ENOSPC if no
 * contiguous block of IDT vectors is available.
 */
int msi_alloc(device_t dev, int count, int maxcount, int *irqs)
{
	struct msi_intsrc *msi, *fsrc;
	u_int cpu;
	int cnt, i, *mirqs, vector;

	if (!msi_enabled)
		return (ENXIO);

	/* For a group, keep a private copy of the IRQ list in the first source. */
	if (count > 1)
		mirqs = bsd_malloc(count * sizeof(*mirqs), M_MSI, M_WAITOK);
	else
		mirqs = NULL;
again:
	mtx_lock(&msi_lock);

	/* Try to find 'count' free IRQs. */
	cnt = 0;
	for (i = FIRST_MSI_INT; i < FIRST_MSI_INT + NUM_MSI_INTS; i++) {
		msi = (struct msi_intsrc *)intr_lookup_source(i);

		/* End of allocated sources, so break. */
		if (msi == NULL)
			break;

		/* If this is a free one, save its IRQ in the array. */
		if (msi->msi_dev == NULL) {
			irqs[cnt] = i;
			cnt++;
			if (cnt == count)
				break;
		}
	}

	/* Do we need to create some new sources? */
	if (cnt < count) {
		/* If we would exceed the max, give up. */
		if (i + (count - cnt) > FIRST_MSI_INT + NUM_MSI_INTS) {
			mtx_unlock(&msi_lock);
			bsd_free(mirqs, M_MSI);
			return (ENXIO);
		}
		mtx_unlock(&msi_lock);

		/* We need count - cnt more sources. */
		while (cnt < count) {
			msi_create_source();
			cnt++;
		}
		/* Rescan from scratch with the lock reacquired. */
		goto again;
	}

	/* Ok, we now have the IRQs allocated. */
	KASSERT(cnt == count, ("count mismatch"));

	/* Allocate 'count' IDT vectors. */
	cpu = intr_next_cpu();
	vector = apic_alloc_vectors(cpu, irqs, count, maxcount);
	if (vector == 0) {
		mtx_unlock(&msi_lock);
		bsd_free(mirqs, M_MSI);
		return (ENOSPC);
	}

	/* Assign IDT vectors and make these messages owned by 'dev'. */
	fsrc = (struct msi_intsrc *)intr_lookup_source(irqs[0]);
	for (i = 0; i < count; i++) {
		msi = (struct msi_intsrc *)intr_lookup_source(irqs[i]);
		msi->msi_cpu = cpu;
		msi->msi_dev = dev;
		msi->msi_vector = vector + i;
		if (bootverbose)
			printf(
		    "msi: routing MSI IRQ %d to local APIC %u vector %u\n",
			    msi->msi_irq, msi->msi_cpu, msi->msi_vector);
		/* Every member points back at the first source of the group. */
		msi->msi_first = fsrc;
		KASSERT(msi->msi_intsrc.is_handlers == 0,
		    ("dead MSI has handlers"));
	}
	fsrc->msi_count = count;
	fsrc->msi_maxcount = maxcount;
	if (count > 1)
		bcopy(irqs, mirqs, count * sizeof(*mirqs));
	fsrc->msi_irqs = mirqs;
	mtx_unlock(&msi_lock);

	return (0);
}
struct ifnet * iface_make(struct ssconfig *ifc) { register struct sana_softc *ssc = NULL; register struct IOSana2Req *req; struct Sana2DeviceQuery devicequery; /* Allocate the request for opening the device */ if ((req = CreateIOSana2Req(NULL)) == NULL) { __log(LOG_ERR, "iface_find(): CreateIOSana2Req failed\n"); } else { req->ios2_BufferManagement = buffermanagement; DSANA(__log(LOG_DEBUG,"Opening device %s unit %ld", ifc->args->a_dev, *ifc->args->a_unit);) if (OpenDevice(ifc->args->a_dev, *ifc->args->a_unit, (struct IORequest *)req, 0L)) { sana2perror("OpenDevice", req); /* Allocate the interface structure */ ssc = (struct sana_softc *) bsd_malloc(sizeof(*ssc) + strlen(ifc->args->a_dev) + 1, M_IFNET, M_WAITOK); if (!ssc) { __log(LOG_ERR, "iface_find: out of memory\n"); } else { aligned_bzero_const(ssc, sizeof(*ssc)); /* Save request pointers */ ssc->ss_dev = req->ios2_Req.io_Device; ssc->ss_unit = req->ios2_Req.io_Unit; ssc->ss_if.if_type = IFT_OTHER; ssc->ss_if.if_flags &= ~(IFF_RUNNING|IFF_UP); /* Initialize */ D(bug("[AROSTCP] if_sana.c: iface_make: Current IP from config = %s\n", ifc->args[0].a_ip)); ifc->args[0].a_ip = "0.0.0.0"; D(bug("[AROSTCP] if_sana.c: iface_make: IP set to 0.0.0.0\n")); ssconfig(ssc, ifc); NewList((struct List*)&ssc->ss_freereq); if_attach((struct ifnet*)ssc); ifinit(); ssc->ss_next = ssq; ssq = ssc; } } else { /* Ask for our type, address length, MTU * Obl. 
bitch: nobody tells, WHO is supplying * DevQueryFormat and DeviceLevel */ req->ios2_Req.io_Command = S2_DEVICEQUERY; req->ios2_StatData = &devicequery; devicequery.SizeAvailable = sizeof(devicequery); devicequery.DevQueryFormat = 0L; DoIO((struct IORequest *)req); if (req->ios2_Req.io_Error) { sana2perror("S2_DEVICEQUERY", req); } else { /* Get Our Station address */ req->ios2_StatData = NULL; req->ios2_Req.io_Command = S2_GETSTATIONADDRESS; DoIO((struct IORequest *)req); if (req->ios2_Req.io_Error) { sana2perror("S2_GETSTATIONADDRESS", req); } else { req->ios2_Req.io_Command = 0; /* Allocate the interface structure */ ssc = (struct sana_softc *) bsd_malloc(sizeof(*ssc) + strlen(ifc->args->a_dev) + 1, M_IFNET, M_WAITOK); if (!ssc) { __log(LOG_ERR, "iface_find: out of memory\n"); } else { aligned_bzero_const(ssc, sizeof(*ssc)); /* Save request pointers */ ssc->ss_dev = req->ios2_Req.io_Device; ssc->ss_unit = req->ios2_Req.io_Unit; ssc->ss_bufmgnt = req->ios2_BufferManagement; /* Address must be full bytes */ ssc->ss_if.if_addrlen = (devicequery.AddrFieldSize + 7) >> 3; bcopy(req->ios2_DstAddr, ssc->ss_hwaddr, ssc->ss_if.if_addrlen); ssc->ss_if.if_mtu = devicequery.MTU; ssc->ss_maxmtu = devicequery.MTU; ssc->ss_if.if_baudrate = devicequery.BPS; ssc->ss_hwtype = devicequery.HardwareType; /* These might be different on different hwtypes */ ssc->ss_if.if_output = sana_output; ssc->ss_if.if_ioctl = sana_ioctl; ssc->ss_if.if_query = sana_query; /* Map SANA-II hardware types to RFC1573 standard */ switch (ssc->ss_hwtype) { case S2WireType_Ethernet: ssc->ss_if.if_type = IFT_ETHER; break; case S2WireType_IEEE802: ssc->ss_if.if_type = IFT_IEEE80211; break; case S2WireType_Arcnet: ssc->ss_if.if_type = IFT_ARCNET; break; case S2WireType_LocalTalk: ssc->ss_if.if_type = IFT_LOCALTALK; break; case S2WireType_PPP: ssc->ss_if.if_type = IFT_PPP; break; case S2WireType_SLIP: case S2WireType_CSLIP: ssc->ss_if.if_type = IFT_SLIP; break; case S2WireType_PLIP: ssc->ss_if.if_type = 
IFT_PARA; break; default: ssc->ss_if.if_type = IFT_OTHER; } /* Initialize */ ssconfig(ssc, ifc); NewList((struct List*)&ssc->ss_freereq); if_attach((struct ifnet*)ssc); ifinit(); ssc->ss_next = ssq; ssq = ssc; } } } if (!ssc) CloseDevice((struct IORequest *)req); } DeleteIOSana2Req(req); }
/*
 * SO_ACCEPTFILTER setsockopt handler.
 *
 * A NULL option value removes any installed accept filter; otherwise the
 * named filter is looked up, instantiated and attached to the listening
 * socket.  All allocations happen before the socket lock is taken so
 * nothing sleeps while it is held.
 */
int do_setopt_accept_filter(struct socket *so, struct sockopt *sopt)
{
	struct accept_filter_arg *afap;
	struct accept_filter *afp;
	struct so_accf *newaf;
	int error = 0;

	/*
	 * Handle the simple delete case first.
	 */
	if (sopt == NULL || sopt->sopt_val == NULL) {
		SOCK_LOCK(so);
		if ((so->so_options & SO_ACCEPTCONN) == 0) {
			SOCK_UNLOCK(so);
			return (EINVAL);
		}
		if (so->so_accf != NULL) {
			struct so_accf *af = so->so_accf;
			/* Let the filter tear down its per-socket state first. */
			if (af->so_accept_filter != NULL &&
			    af->so_accept_filter->accf_destroy != NULL) {
				af->so_accept_filter->accf_destroy(so);
			}
			if (af->so_accept_filter_str != NULL)
				bsd_free(af->so_accept_filter_str, M_ACCF);
			bsd_free(af, M_ACCF);
			so->so_accf = NULL;
		}
		so->so_options &= ~SO_ACCEPTFILTER;
		SOCK_UNLOCK(so);
		return (0);
	}

	/*
	 * Pre-allocate any memory we may need later to avoid blocking at
	 * untimely moments.  This does not optimize for invalid arguments.
	 */
	afap = bsd_malloc(sizeof(*afap), M_TEMP, M_WAITOK);
	error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
	/* Force NUL termination on both user-supplied strings. */
	afap->af_name[sizeof(afap->af_name)-1] = '\0';
	afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
	if (error) {
		bsd_free(afap, M_TEMP);
		return (error);
	}
	afp = accept_filt_get(afap->af_name);
	if (afp == NULL) {
		bsd_free(afap, M_TEMP);
		return (ENOENT);
	}

	/*
	 * Allocate the new accept filter instance storage.  We may
	 * have to free it again later if we fail to attach it.  If
	 * attached properly, 'newaf' is NULLed to avoid a free()
	 * while in use.
	 */
	newaf = bsd_malloc(sizeof(*newaf), M_ACCF, M_WAITOK | M_ZERO);
	if (afp->accf_create != NULL && afap->af_name[0] != '\0') {
		int len = strlen(afap->af_name) + 1;

		newaf->so_accept_filter_str = bsd_malloc(len, M_ACCF,
		    M_WAITOK);
		strcpy(newaf->so_accept_filter_str, afap->af_name);
	}

	/*
	 * Require a listen socket; don't try to replace an existing filter
	 * without first removing it.
	 */
	SOCK_LOCK(so);
	if (((so->so_options & SO_ACCEPTCONN) == 0) ||
	    (so->so_accf != NULL)) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Invoke the accf_create() method of the filter if required.  The
	 * socket mutex is held over this call, so create methods for filters
	 * can't block.
	 */
	if (afp->accf_create != NULL) {
		newaf->so_accept_filter_arg =
		    afp->accf_create(so, afap->af_arg);
		if (newaf->so_accept_filter_arg == NULL) {
			error = EINVAL;
			goto out;
		}
	}
	newaf->so_accept_filter = afp;
	so->so_accf = newaf;
	so->so_options |= SO_ACCEPTFILTER;
	newaf = NULL;	/* ownership transferred to the socket */
out:
	SOCK_UNLOCK(so);
	/* On failure, release the unattached instance and its string. */
	if (newaf != NULL) {
		if (newaf->so_accept_filter_str != NULL)
			bsd_free(newaf->so_accept_filter_str, M_ACCF);
		bsd_free(newaf, M_ACCF);
	}
	if (afap != NULL)
		bsd_free(afap, M_TEMP);
	return (error);
}