static int ext2_mountroot() { #if !defined(__FreeBSD__) extern struct vnode *rootvp; #endif register struct ext2_sb_info *fs; register struct mount *mp; #if defined(__FreeBSD__) struct proc *p = curproc; #else struct proc *p = get_proc(); /* XXX */ #endif struct ufsmount *ump; u_int size; int error; /* * Get vnodes for swapdev and rootdev. */ if (bdevvp(swapdev, &swapdev_vp) || bdevvp(rootdev, &rootvp)) panic("ext2_mountroot: can't setup bdevvp's"); mp = bsd_malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK); bzero((char *)mp, (u_long)sizeof(struct mount)); mp->mnt_op = &ext2fs_vfsops; mp->mnt_flag = MNT_RDONLY; if (error = ext2_mountfs(rootvp, mp, p)) { bsd_free(mp, M_MOUNT); return (error); } if (error = vfs_lock(mp)) { (void)ext2_unmount(mp, 0, p); bsd_free(mp, M_MOUNT); return (error); } #if defined(__FreeBSD__) CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list); #else TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list); #endif mp->mnt_flag |= MNT_ROOTFS; mp->mnt_vnodecovered = NULLVP; ump = VFSTOUFS(mp); fs = ump->um_e2fs; bzero(fs->fs_fsmnt, sizeof(fs->fs_fsmnt)); fs->fs_fsmnt[0] = '/'; bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname, MNAMELEN); (void) copystr(ROOTNAME, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size); bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size); (void)ext2_statfs(mp, &mp->mnt_stat, p); vfs_unlock(mp); inittodr(fs->s_es->s_wtime); /* this helps to set the time */ return (0); }
/*
 * Setup a DMA channel's bounce buffer.
 *
 * Tries a plain kernel malloc first; if the resulting memory is not
 * usable for ISA DMA (isa_dmarangecheck() fails), it falls back to
 * contigmalloc() with explicit ISA constraints.  Returns 0 on success
 * or ENOMEM if no suitable buffer could be allocated.
 */
int
isa_dma_init(int chan, u_int bouncebufsize, int flag)
{
	void *buf;
	int contig;		/* 1 if buf came from contigmalloc() */

#ifdef DIAGNOSTIC
	if (chan & ~VALID_DMA_MASK)
		panic("isa_dma_init: channel out of range");
#endif

	/* Try malloc() first.  It works better if it works. */
	buf = bsd_malloc(bouncebufsize, M_DEVBUF, flag);
	if (buf != NULL) {
		/*
		 * Reject the buffer if it is not reachable/contiguous
		 * for ISA DMA; we fall through to contigmalloc below.
		 */
		if (isa_dmarangecheck(buf, bouncebufsize, chan) != 0) {
			bsd_free(buf, M_DEVBUF);
			buf = NULL;
		}
		contig = 0;
	}

	if (buf == NULL) {
		/*
		 * Constrained allocation: below 16 MB (0xffffff, the ISA
		 * address limit), page-aligned-by-1, and not crossing a
		 * 128 KB boundary for the 16-bit channels (chan & 4) or
		 * a 64 KB boundary for the 8-bit ones.
		 */
		buf = contigmalloc(bouncebufsize, M_DEVBUF, flag,
		    0ul, 0xfffffful, 1ul,
		    chan & 4 ? 0x20000ul : 0x10000ul);
		contig = 1;
	}
	if (buf == NULL)
		return (ENOMEM);

	mtx_lock(&isa_dma_lock);
	/*
	 * If a DMA channel is shared, both drivers have to call
	 * isa_dma_init since they don't know that the other driver will
	 * do it.  Just return if we're already set up good.
	 * XXX: this only works if they agree on the bouncebuf size.
	 * XXX: this is typically the case since they are multiple
	 * XXX: instances of the same driver.
	 */
	if (dma_bouncebuf[chan] != NULL) {
		/* Lost the race: free our buffer with the matching API. */
		if (contig)
			contigfree(buf, bouncebufsize, M_DEVBUF);
		else
			bsd_free(buf, M_DEVBUF);
		mtx_unlock(&isa_dma_lock);
		return (0);
	}

	dma_bouncebufsize[chan] = bouncebufsize;
	dma_bouncebuf[chan] = buf;

	mtx_unlock(&isa_dma_lock);

	return (0);
}
/*
 * Destroy an async. logging queue: quiesce outstanding writes first,
 * then release the queue's lock, entry buffer, and the alq itself.
 * The ordering matters: the mutex may not be destroyed while I/O can
 * still be in flight.
 */
void
alq_destroy(struct alq *alq)
{
	/* Drain all pending IO. */
	alq_shutdown(alq);

	mtx_destroy(&alq->aq_mtx);
	bsd_free(alq->aq_entbuf, M_ALD);
	bsd_free(alq, M_ALD);
}
/*
 * Create the (single) stf(4) cloned interface.
 *
 * Allocates the softc and ifnet, registers the IPv6-in-IPv4 encap
 * handler, and attaches the interface.  Returns 0 on success, or an
 * errno after fully unwinding (the original leaked the ifnet and the
 * sc_ro_mtx mutex on the encap_attach_func() failure path).
 */
static int
stf_clone_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
{
	int err, unit;
	struct stf_softc *sc;
	struct ifnet *ifp;

	/*
	 * We can only have one unit, but since unit allocation is
	 * already locked, we use it to keep from allocating extra
	 * interfaces.
	 */
	unit = STFUNIT;
	err = ifc_alloc_unit(ifc, &unit);
	if (err != 0)
		return (err);

	sc = bsd_malloc(sizeof(struct stf_softc), M_STF, M_WAITOK | M_ZERO);
	ifp = STF2IFP(sc) = if_alloc(IFT_STF);
	if (ifp == NULL) {
		bsd_free(sc, M_STF);
		ifc_free_unit(ifc, unit);
		return (ENOSPC);
	}
	ifp->if_softc = sc;
	sc->sc_fibnum = curthread->td_proc->p_fibnum;

	/*
	 * Set the name manually rather then using if_initname because
	 * we don't conform to the default naming convention for interfaces.
	 */
	strlcpy(ifp->if_xname, name, IFNAMSIZ);
	ifp->if_dname = ifc->ifc_name;
	ifp->if_dunit = IF_DUNIT_NONE;

	mtx_init(&(sc)->sc_ro_mtx, "stf ro", NULL, MTX_DEF);
	sc->encap_cookie = encap_attach_func(AF_INET, IPPROTO_IPV6,
	    stf_encapcheck, &in_stf_protosw, sc);
	if (sc->encap_cookie == NULL) {
		if_printf(ifp, "attach failed\n");
		/*
		 * Unwind everything acquired above: destroy the mutex
		 * and release the ifnet before freeing the softc
		 * (previously leaked on this path).
		 */
		mtx_destroy(&(sc)->sc_ro_mtx);
		if_free(ifp);
		bsd_free(sc, M_STF);
		ifc_free_unit(ifc, unit);
		return (ENOMEM);
	}

	ifp->if_mtu = IPV6_MMTU;
	ifp->if_ioctl = stf_ioctl;
	ifp->if_output = stf_output;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	if_attach(ifp);
	bpfattach(ifp, DLT_NULL, sizeof(u_int32_t));
	return (0);
}
/* * Free BPF kernel buffers on device close. */ void bpf_buffer_free(struct bpf_d *d) { if (d->bd_sbuf != NULL) bsd_free(d->bd_sbuf, M_BPF); if (d->bd_hbuf != NULL) bsd_free(d->bd_hbuf, M_BPF); if (d->bd_fbuf != NULL) bsd_free(d->bd_fbuf, M_BPF); #ifdef INVARIANTS d->bd_sbuf = d->bd_hbuf = d->bd_fbuf = (caddr_t)~0; #endif }
/*
 * Return the accept filter currently installed on a listen socket
 * (getsockopt(SO_ACCEPTFILTER) backend).
 *
 * The argument structure is allocated and filled under the socket
 * lock, but copied out to userland only after the lock is dropped,
 * since sooptcopyout() may fault/sleep.  Returns EINVAL if the socket
 * is not listening or has no filter installed.
 */
int
do_getopt_accept_filter(struct socket *so, struct sockopt *sopt)
{
	struct accept_filter_arg *afap;
	int error;

	error = 0;
	/* M_ZERO: unset af_arg reads back as an empty string. */
	afap = bsd_malloc(sizeof(*afap), M_TEMP,
	    M_WAITOK | M_ZERO);
	SOCK_LOCK(so);
	if ((so->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto out;
	}
	if ((so->so_options & SO_ACCEPTFILTER) == 0) {
		error = EINVAL;
		goto out;
	}
	strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name);
	if (so->so_accf->so_accept_filter_str != NULL)
		strcpy(afap->af_arg, so->so_accf->so_accept_filter_str);
out:
	SOCK_UNLOCK(so);
	/* Copy out only on success; copyout may sleep, so lock is dropped. */
	if (error == 0)
		error = sooptcopyout(sopt, afap, sizeof(*afap));
	bsd_free(afap, M_TEMP);
	return (error);
}
/*
 * Release a previously allocated group of MSI messages.
 *
 * 'irqs' must name the entire group in order: the first IRQ must be
 * the group leader (msi_first == itself) and 'count' must equal the
 * group size, otherwise EINVAL.  All bookkeeping is done under
 * msi_lock; each message's APIC vector is freed and its ownership
 * cleared, and finally the leader's saved IRQ array is freed.
 */
int
msi_release(int *irqs, int count)
{
	struct msi_intsrc *msi, *first;
	int i;

	mtx_lock(&msi_lock);
	first = (struct msi_intsrc *)intr_lookup_source(irqs[0]);
	if (first == NULL) {
		mtx_unlock(&msi_lock);
		return (ENOENT);
	}

	/* Make sure this isn't an MSI-X message. */
	if (first->msi_msix) {
		mtx_unlock(&msi_lock);
		return (EINVAL);
	}

	/* Make sure this message is allocated to a group. */
	if (first->msi_first == NULL) {
		mtx_unlock(&msi_lock);
		return (ENXIO);
	}

	/*
	 * Make sure this is the start of a group and that we are releasing
	 * the entire group.
	 */
	if (first->msi_first != first || first->msi_count != count) {
		mtx_unlock(&msi_lock);
		return (EINVAL);
	}
	KASSERT(first->msi_dev != NULL, ("unowned group"));

	/* Clear all the extra messages in the group. */
	for (i = 1; i < count; i++) {
		msi = (struct msi_intsrc *)intr_lookup_source(irqs[i]);
		KASSERT(msi->msi_first == first, ("message not in group"));
		KASSERT(msi->msi_dev == first->msi_dev, ("owner mismatch"));
		msi->msi_first = NULL;
		msi->msi_dev = NULL;
		apic_free_vector(msi->msi_cpu, msi->msi_vector, msi->msi_irq);
		msi->msi_vector = 0;
	}

	/* Clear out the first message. */
	first->msi_first = NULL;
	first->msi_dev = NULL;
	apic_free_vector(first->msi_cpu, first->msi_vector, first->msi_irq);
	first->msi_vector = 0;
	first->msi_count = 0;
	first->msi_maxcount = 0;
	/* msi_irqs was allocated by msi_alloc() for groups of > 1. */
	bsd_free(first->msi_irqs, M_MSI);
	first->msi_irqs = NULL;

	mtx_unlock(&msi_lock);
	return (0);
}
/*
 * Allocate or resize buffers (BIOCSBLEN backend).
 *
 * Clamps the requested size to [BPF_MINBUFSIZE, bpf_maxbufsize] and
 * writes the clamped value back through 'i'.  The new buffers are
 * allocated before taking the descriptor lock so the M_WAITOK sleeps
 * happen unlocked.  Fails with EINVAL once an interface is attached,
 * since the buffers are then in active use.
 */
int
bpf_buffer_ioctl_sblen(struct bpf_d *d, u_int *i)
{
	u_int size;
	caddr_t fbuf, sbuf;

	size = *i;
	if (size > bpf_maxbufsize)
		*i = size = bpf_maxbufsize;
	else if (size < BPF_MINBUFSIZE)
		*i = size = BPF_MINBUFSIZE;

	/* Allocate buffers immediately */
	fbuf = (caddr_t)bsd_malloc(size, M_BPF, M_WAITOK);
	sbuf = (caddr_t)bsd_malloc(size, M_BPF, M_WAITOK);

	BPFD_LOCK(d);
	if (d->bd_bif != NULL) {
		/* Interface already attached, unable to change buffers */
		BPFD_UNLOCK(d);
		bsd_free(fbuf, M_BPF);
		bsd_free(sbuf, M_BPF);
		return (EINVAL);
	}
	/* Wait until any reader is done with the hold buffer. */
	while (d->bd_hbuf_in_use)
		mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
		    PRINET, "bd_hbuf", 0);

	/* Free old buffers if set */
	if (d->bd_fbuf != NULL)
		bsd_free(d->bd_fbuf, M_BPF);
	if (d->bd_sbuf != NULL)
		bsd_free(d->bd_sbuf, M_BPF);
	/*
	 * Also release any lingering hold buffer before clobbering the
	 * pointer below; the original code set bd_hbuf = NULL without
	 * freeing it, leaking the buffer if one was still present
	 * (possible after a detach).
	 */
	if (d->bd_hbuf != NULL)
		bsd_free(d->bd_hbuf, M_BPF);

	/* Fill in new data */
	d->bd_bufsize = size;
	d->bd_fbuf = fbuf;
	d->bd_sbuf = sbuf;
	d->bd_hbuf = NULL;
	d->bd_slen = 0;
	d->bd_hlen = 0;

	BPFD_UNLOCK(d);
	return (0);
}
static int ext2_mountroot() { register struct ext2_sb_info *fs; register struct mount *mp; struct proc *p = curproc; struct ufsmount *ump; u_int size; int error; if ((error = bdevvp(rootdev, &rootvp))) { printf("ext2_mountroot: can't find rootvp\n"); return (error); } mp = bsd_malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK); bzero((char *)mp, (u_long)sizeof(struct mount)); mp->mnt_op = &ext2fs_vfsops; mp->mnt_flag = MNT_RDONLY; if (error = ext2_mountfs(rootvp, mp, p)) { bsd_free(mp, M_MOUNT); return (error); } if (error = vfs_lock(mp)) { (void)ext2_unmount(mp, 0, p); bsd_free(mp, M_MOUNT); return (error); } TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list); mp->mnt_flag |= MNT_ROOTFS; mp->mnt_vnodecovered = NULLVP; ump = VFSTOUFS(mp); fs = ump->um_e2fs; bzero(fs->fs_fsmnt, sizeof(fs->fs_fsmnt)); fs->fs_fsmnt[0] = '/'; bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname, MNAMELEN); (void) copystr(ROOTNAME, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size); bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size); (void)ext2_statfs(mp, &mp->mnt_stat, p); vfs_unlock(mp); inittodr(fs->s_es->s_wtime); /* this helps to set the time */ return (0); }
static int stf_clone_destroy(struct if_clone *ifc, struct ifnet *ifp) { struct stf_softc *sc = ifp->if_softc; int err; err = encap_detach(sc->encap_cookie); KASSERT(err == 0, ("Unexpected error detaching encap_cookie")); mtx_destroy(&(sc)->sc_ro_mtx); bpfdetach(ifp); if_detach(ifp); if_free(ifp); bsd_free(sc, M_STF); ifc_free_unit(ifc, STFUNIT); return (0); }
/*
 * Add a child device under the CPU bus.
 *
 * Allocates the per-device ivars (resource list plus a pointer to the
 * corresponding struct pcpu), creates the ordered child, and links the
 * ivars to it.  Returns the new device, or NULL if either allocation
 * or child creation fails (the ivars are freed in the latter case).
 */
static device_t
cpu_add_child(device_t bus, u_int order, const char *name, int unit)
{
	struct cpu_device *ivars;
	struct pcpu *pc;
	device_t child;

	ivars = bsd_malloc(sizeof(*ivars), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (ivars == NULL)
		return (NULL);

	resource_list_init(&ivars->cd_rl);
	pc = pcpu_find(device_get_unit(bus));
	ivars->cd_pcpu = pc;

	child = device_add_child_ordered(bus, order, name, unit);
	if (child == NULL) {
		/* No child to own the ivars; release them. */
		bsd_free(ivars, M_DEVBUF);
		return (NULL);
	}
	pc->pc_device = child;
	device_set_ivars(child, ivars);
	return (child);
}
/*
 * Add a child device under the legacy ISA/PCI compatibility bus.
 *
 * Allocates per-device ivars with the PCI location fields marked as
 * unknown (-1), creates the ordered child, and attaches the ivars to
 * it.  Returns the new device, or NULL on allocation/creation failure.
 */
static device_t
legacy_add_child(device_t bus, u_int order, const char *name, int unit)
{
	struct legacy_device *ivars;
	device_t child;

	ivars = bsd_malloc(sizeof(struct legacy_device), M_LEGACYDEV,
	    M_NOWAIT | M_ZERO);
	if (ivars == NULL)
		return (NULL);

	/* PCI location unknown until discovered later. */
	ivars->lg_pcibus = -1;
	ivars->lg_pcislot = -1;
	ivars->lg_pcifunc = -1;

	child = device_add_child_ordered(bus, order, name, unit);
	if (child != NULL) {
		/* NOTE: should we free this in legacy_child_detached? */
		device_set_ivars(child, ivars);
	} else {
		bsd_free(ivars, M_LEGACYDEV);
	}
	return (child);
}
/*
 * Must be passed a malloc'd structure so we don't explode if the kld is
 * unloaded, we leak the struct on deallocation to deal with this, but if a
 * filter is loaded with the same name as a leaked one we re-use the entry.
 */
int
accept_filt_add(struct accept_filter *filt)
{
	struct accept_filter *p;

	ACCEPT_FILTER_LOCK();
	BSD_SLIST_FOREACH(p, &accept_filtlsthd, accf_next)
		if (strcmp(p->accf_name, filt->accf_name) == 0)  {
			if (p->accf_callback != NULL) {
				/* Live filter with this name already exists. */
				ACCEPT_FILTER_UNLOCK();
				return (EEXIST);
			} else { /* Do not allow to change the name! */
				/*
				 * Re-animate a leaked entry: take over its
				 * callback and discard the caller's struct.
				 */
				p->accf_callback = filt->accf_callback;
				ACCEPT_FILTER_UNLOCK();
				bsd_free(filt, M_ACCF);
				return (0);
			}
		}
	/*
	 * The loop above only falls through with p == NULL (every other
	 * outcome returns), so this inserts exactly when no entry with
	 * this name was found.
	 */
	if (p == NULL)
		BSD_SLIST_INSERT_HEAD(&accept_filtlsthd, filt, accf_next);
	ACCEPT_FILTER_UNLOCK();
	return (0);
}
/*
 * Install or remove an accept filter on a listen socket
 * (setsockopt(SO_ACCEPTFILTER) backend).
 *
 * A NULL sopt/sopt_val removes any installed filter.  Otherwise the
 * named filter is looked up and attached.  All memory is allocated
 * with M_WAITOK *before* the socket lock is taken, so nothing sleeps
 * while the (mutex) lock is held.  Returns 0 or an errno (EINVAL for
 * non-listen sockets or an already-installed filter, ENOENT for an
 * unknown filter name).
 */
int
do_setopt_accept_filter(struct socket *so, struct sockopt *sopt)
{
	struct accept_filter_arg *afap;
	struct accept_filter *afp;
	struct so_accf *newaf;
	int error = 0;

	/*
	 * Handle the simple delete case first.
	 */
	if (sopt == NULL || sopt->sopt_val == NULL) {
		SOCK_LOCK(so);
		if ((so->so_options & SO_ACCEPTCONN) == 0) {
			SOCK_UNLOCK(so);
			return (EINVAL);
		}
		if (so->so_accf != NULL) {
			struct so_accf *af = so->so_accf;
			/* Let the filter tear down its per-socket state. */
			if (af->so_accept_filter != NULL &&
			    af->so_accept_filter->accf_destroy != NULL) {
				af->so_accept_filter->accf_destroy(so);
			}
			if (af->so_accept_filter_str != NULL)
				bsd_free(af->so_accept_filter_str, M_ACCF);
			bsd_free(af, M_ACCF);
			so->so_accf = NULL;
		}
		so->so_options &= ~SO_ACCEPTFILTER;
		SOCK_UNLOCK(so);
		return (0);
	}

	/*
	 * Pre-allocate any memory we may need later to avoid blocking at
	 * untimely moments.  This does not optimize for invalid arguments.
	 */
	afap = bsd_malloc(sizeof(*afap), M_TEMP,
	    M_WAITOK);
	error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
	/* Force NUL termination on both user-supplied strings. */
	afap->af_name[sizeof(afap->af_name)-1] = '\0';
	afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
	if (error) {
		bsd_free(afap, M_TEMP);
		return (error);
	}

	afp = accept_filt_get(afap->af_name);
	if (afp == NULL) {
		bsd_free(afap, M_TEMP);
		return (ENOENT);
	}

	/*
	 * Allocate the new accept filter instance storage.  We may
	 * have to free it again later if we fail to attach it.  If
	 * attached properly, 'newaf' is NULLed to avoid a free()
	 * while in use.
	 */
	newaf = bsd_malloc(sizeof(*newaf), M_ACCF, M_WAITOK |
	    M_ZERO);
	if (afp->accf_create != NULL && afap->af_name[0] != '\0') {
		/*
		 * NOTE(review): this stores af_name (not af_arg) as the
		 * filter string — matches the visible getsockopt side,
		 * which copies so_accept_filter_str back into af_arg;
		 * confirm against the filter modules before changing.
		 */
		int len = strlen(afap->af_name) + 1;
		newaf->so_accept_filter_str = bsd_malloc(len, M_ACCF,
		    M_WAITOK);
		strcpy(newaf->so_accept_filter_str, afap->af_name);
	}

	/*
	 * Require a listen socket; don't try to replace an existing filter
	 * without first removing it.
	 */
	SOCK_LOCK(so);
	if (((so->so_options & SO_ACCEPTCONN) == 0) ||
	    (so->so_accf != NULL)) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Invoke the accf_create() method of the filter if required.  The
	 * socket mutex is held over this call, so create methods for filters
	 * can't block.
	 */
	if (afp->accf_create != NULL) {
		newaf->so_accept_filter_arg =
		    afp->accf_create(so, afap->af_arg);
		if (newaf->so_accept_filter_arg == NULL) {
			error = EINVAL;
			goto out;
		}
	}
	newaf->so_accept_filter = afp;
	so->so_accf = newaf;
	so->so_options |= SO_ACCEPTFILTER;
	/* Ownership transferred to the socket; don't free below. */
	newaf = NULL;
out:
	SOCK_UNLOCK(so);
	if (newaf != NULL) {
		/* Attach failed: release the unused instance storage. */
		if (newaf->so_accept_filter_str != NULL)
			bsd_free(newaf->so_accept_filter_str, M_ACCF);
		bsd_free(newaf, M_ACCF);
	}
	if (afap != NULL)
		bsd_free(afap, M_TEMP);
	return (error);
}
/*
 * Try to allocate 'count' interrupt sources with contiguous IDT values.
 *
 * Scans the MSI IRQ range for free sources, creating new ones (with
 * the lock dropped) and retrying if there aren't enough, then binds
 * 'count' contiguous APIC vectors on one CPU to the group.  The first
 * source becomes the group leader and records the member IRQs in a
 * malloc'd array (freed later by msi_release()).  Returns 0 and fills
 * 'irqs' on success; ENXIO if MSI is disabled or the IRQ range is
 * exhausted, ENOSPC if no contiguous vector block is available.
 */
int
msi_alloc(device_t dev, int count, int maxcount, int *irqs)
{
	struct msi_intsrc *msi, *fsrc;
	u_int cpu;
	int cnt, i, *mirqs, vector;

	if (!msi_enabled)
		return (ENXIO);
	/* Group bookkeeping array only needed for multi-message groups. */
	if (count > 1)
		mirqs = bsd_malloc(count * sizeof(*mirqs), M_MSI, M_WAITOK);
	else
		mirqs = NULL;
again:
	mtx_lock(&msi_lock);
	/* Try to find 'count' free IRQs. */
	cnt = 0;
	for (i = FIRST_MSI_INT; i < FIRST_MSI_INT + NUM_MSI_INTS; i++) {
		msi = (struct msi_intsrc *)intr_lookup_source(i);

		/* End of allocated sources, so break. */
		if (msi == NULL)
			break;

		/* If this is a free one, save its IRQ in the array. */
		if (msi->msi_dev == NULL) {
			irqs[cnt] = i;
			cnt++;
			if (cnt == count)
				break;
		}
	}

	/* Do we need to create some new sources? */
	if (cnt < count) {
		/* If we would exceed the max, give up. */
		if (i + (count - cnt) > FIRST_MSI_INT + NUM_MSI_INTS) {
			mtx_unlock(&msi_lock);
			bsd_free(mirqs, M_MSI);
			return (ENXIO);
		}
		mtx_unlock(&msi_lock);

		/*
		 * We need count - cnt more sources.  Created unlocked;
		 * the 'again' rescan picks them up under the lock.
		 */
		while (cnt < count) {
			msi_create_source();
			cnt++;
		}
		goto again;
	}

	/* Ok, we now have the IRQs allocated. */
	KASSERT(cnt == count, ("count mismatch"));

	/* Allocate 'count' IDT vectors. */
	cpu = intr_next_cpu();
	vector = apic_alloc_vectors(cpu, irqs, count, maxcount);
	if (vector == 0) {
		mtx_unlock(&msi_lock);
		bsd_free(mirqs, M_MSI);
		return (ENOSPC);
	}

	/* Assign IDT vectors and make these messages owned by 'dev'. */
	fsrc = (struct msi_intsrc *)intr_lookup_source(irqs[0]);
	for (i = 0; i < count; i++) {
		msi = (struct msi_intsrc *)intr_lookup_source(irqs[i]);
		msi->msi_cpu = cpu;
		msi->msi_dev = dev;
		msi->msi_vector = vector + i;
		if (bootverbose)
			printf(
		    "msi: routing MSI IRQ %d to local APIC %u vector %u\n",
			    msi->msi_irq, msi->msi_cpu, msi->msi_vector);
		/* Every member points at the group leader. */
		msi->msi_first = fsrc;
		KASSERT(msi->msi_intsrc.is_handlers == 0,
		    ("dead MSI has handlers"));
	}
	fsrc->msi_count = count;
	fsrc->msi_maxcount = maxcount;
	if (count > 1)
		bcopy(irqs, mirqs, count * sizeof(*mirqs));
	fsrc->msi_irqs = mirqs;
	mtx_unlock(&msi_lock);

	return (0);
}