Example 1
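/*
 * Walk the handler chain of an interrupt source, taking the big
 * kernel lock around IPL_VM handlers (which are not MP-safe) and
 * running all other handlers unlocked.
 */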
static void
intr_deliver(struct intr_source *is, int virq)
{
	bool locked = false;
	for (struct intrhand *ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
		KASSERTMSG(ih->ih_fun != NULL,
		    "%s: irq %d, hwirq %d, is %p ih %p: "
		     "NULL interrupt handler!\n", __func__,
		     virq, is->is_hwirq, is, ih);
		if (ih->ih_ipl == IPL_VM) {
			if (!locked) {
				KERNEL_LOCK(1, NULL);
				locked = true;
			}
		} else if (locked) {
			KERNEL_UNLOCK_ONE(NULL);
			locked = false;
		}
		(*ih->ih_fun)(ih->ih_arg);
	}
	if (locked) {
		KERNEL_UNLOCK_ONE(NULL);
	}
	is->is_ev.ev_count++;
}
Example 2
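/*
 * Attach a knote to the tap device: pick the filter operations that
 * match the requested event and insert the note into the device's
 * selection list under sc_kqlock.
 */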
static int
tap_dev_kqfilter(int unit, struct knote *kn)
{
	struct tap_softc *sc =
	    device_lookup_private(&tap_cd, unit);

	if (sc == NULL)
		return (ENXIO);

	KERNEL_LOCK(1, NULL);
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &tap_read_filterops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &tap_seltrue_filterops;
		break;
	default:
		KERNEL_UNLOCK_ONE(NULL);
		return (EINVAL);
	}

	kn->kn_hook = sc;
	mutex_spin_enter(&sc->sc_kqlock);
	SLIST_INSERT_HEAD(&sc->sc_rsel.sel_klist, kn, kn_selnext);
	mutex_spin_exit(&sc->sc_kqlock);
	KERNEL_UNLOCK_ONE(NULL);
	return (0);
}
Example 3
/*
 * It might happen that the administrator used ifconfig to externally destroy
 * the interface.  In that case, tap_fops_close will be called while
 * tap_detach is already happening.  If we called it again from here, we
 * would deadlock.  TAP_GOING ensures that this situation doesn't happen.
 */
static int
tap_fops_close(file_t *fp)
{
	int unit = fp->f_devunit;
	struct tap_softc *sc;
	int error;

	sc = device_lookup_private(&tap_cd, unit);
	if (sc == NULL)
		return (ENXIO);

	/* tap_dev_close currently always succeeds, but it might not
	 * always be the case. */
	KERNEL_LOCK(1, NULL);
	if ((error = tap_dev_close(sc)) != 0) {
		KERNEL_UNLOCK_ONE(NULL);
		return (error);
	}

	/* Destroy the device now that it is no longer useful,
	 * unless it's already being destroyed. */
	if ((sc->sc_flags & TAP_GOING) != 0) {
		KERNEL_UNLOCK_ONE(NULL);
		return (0);
	}

	error = tap_clone_destroyer(sc->sc_dev);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
Example 4
static bool
npf_log(npf_cache_t *npc, nbuf_t *nbuf, void *meta, int *decision)
{
	struct mbuf *m = nbuf_head_mbuf(nbuf);
	const npf_ext_log_t *log = meta;
	ifnet_t *ifp;
	int family;

	/* Set the address family. */
	if (npf_iscached(npc, NPC_IP4)) {
		family = AF_INET;
	} else if (npf_iscached(npc, NPC_IP6)) {
		family = AF_INET6;
	} else {
		family = AF_UNSPEC;
	}

	KERNEL_LOCK(1, NULL);

	/* Find a pseudo-interface to log. */
	ifp = if_byindex(log->if_idx);
	if (ifp == NULL) {
		/* No interface. */
		KERNEL_UNLOCK_ONE(NULL);
		return true;
	}

	/* Pass through BPF. */
	ifp->if_opackets++;
	ifp->if_obytes += m->m_pkthdr.len;
	bpf_mtap_af(ifp, family, m);
	KERNEL_UNLOCK_ONE(NULL);

	return true;
}
Example 5
void
svr4_delete_socket(struct proc *p, struct file *fp)
{
    struct svr4_sockcache_entry *e;
    void *cookie = ((struct socket *) fp->f_data)->so_internal;

    KERNEL_LOCK(1, NULL);

    if (!initialized) {
        TAILQ_INIT(&svr4_head);
        initialized = 1;
        KERNEL_UNLOCK_ONE(NULL);
        return;
    }

    for (e = svr4_head.tqh_first; e != NULL; e = e->entries.tqe_next)
        if (e->p == p && e->cookie == cookie) {
            TAILQ_REMOVE(&svr4_head, e, entries);
            DPRINTF(("svr4_delete_socket: %s [%p,%"PRId64",%lu]\n",
                     e->sock.sun_path, p, e->dev, e->ino));
            free(e, M_TEMP);
            break;
        }

    KERNEL_UNLOCK_ONE(NULL);
}
Example 6
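/*
 * Vnode-operation wrapper: for file systems that are not marked
 * MP-safe (no VV_MPSAFE on the vnode), take the big kernel lock
 * around the call into the file system.
 */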
int
VOP_GETPAGES(struct vnode *vp,
    voff_t offset,
    struct vm_page **m,
    int *count,
    int centeridx,
    vm_prot_t access_type,
    int advice,
    int flags)
{
	int error;
	bool mpsafe;
	struct vop_getpages_args a;
	a.a_desc = VDESC(vop_getpages);
	a.a_vp = vp;
	a.a_offset = offset;
	a.a_m = m;
	a.a_count = count;
	a.a_centeridx = centeridx;
	a.a_access_type = access_type;
	a.a_advice = advice;
	a.a_flags = flags;
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_getpages), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}
Example 7
/*
 * npf_pfil_unregister: unregister pfil(9) hooks.
 */
void
npf_pfil_unregister(bool fini)
{
	npf_t *npf = npf_getkernctx();

	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);

	if (fini && npf_ph_if) {
		(void)pfil_remove_hook(npf_ifhook, NULL,
		    PFIL_IFADDR | PFIL_IFNET, npf_ph_if);
	}
	if (npf_ph_inet) {
		(void)pfil_remove_hook(npf_packet_handler, npf,
		    PFIL_ALL, npf_ph_inet);
	}
	if (npf_ph_inet6) {
		(void)pfil_remove_hook(npf_packet_handler, npf,
		    PFIL_ALL, npf_ph_inet6);
	}
	pfil_registered = false;

	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
}
Example 8
/*
 * Callout to process delayed ACKs for a TCPCB.
 */
void
tcp_delack(void *arg)
{
	struct tcpcb *tp = arg;

	/*
	 * If tcp_output() wasn't able to transmit the ACK
	 * for whatever reason, it will restart the delayed
	 * ACK callout.
	 */

	mutex_enter(softnet_lock);
	if ((tp->t_flags & (TF_DEAD | TF_DELACK)) != TF_DELACK) {
		mutex_exit(softnet_lock);
		return;
	}
	if (!callout_expired(&tp->t_delack_ch)) {
		mutex_exit(softnet_lock);
		return;
	}

	tp->t_flags |= TF_ACKNOW;
	KERNEL_LOCK(1, NULL);
	(void) tcp_output(tp);
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
}
Example 9
int
VOP_SYMLINK(struct vnode *dvp,
    struct vnode **vpp,
    struct componentname *cnp,
    struct vattr *vap,
    char *target)
{
	int error;
	bool mpsafe;
	struct vop_symlink_v3_args a;
	a.a_desc = VDESC(vop_symlink);
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	a.a_vap = vap;
	a.a_target = target;
	mpsafe = (dvp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(dvp, VOFFSET(vop_symlink), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
#ifdef DIAGNOSTIC
	if (error == 0)
		KASSERT((*vpp)->v_size != VSIZENOTSET
		    && (*vpp)->v_writesize != VSIZENOTSET);
#endif /* DIAGNOSTIC */
	return error;
}
Example 10
/*
 * npf_pfil_unregister: unregister pfil(9) hooks.
 */
void
npf_pfil_unregister(void)
{

	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);

	if (npf_ph_if) {
		(void)pfil_remove_hook(npf_ifhook, NULL,
		    PFIL_IFADDR | PFIL_IFNET, npf_ph_if);
	}
	if (npf_ph_inet) {
		(void)pfil_remove_hook(npf_packet_handler, NULL,
		    PFIL_ALL, npf_ph_inet);
	}
	if (npf_ph_inet6) {
		(void)pfil_remove_hook(npf_packet_handler, NULL,
		    PFIL_ALL, npf_ph_inet6);
	}

	npf_ph_if = NULL;

	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
}
Example 11
/*
 * IP software interrupt routine
 */
void
ipintr(void)
{
	int s;
	struct mbuf *m;
	struct ifqueue lcl_intrq;

	memset(&lcl_intrq, 0, sizeof(lcl_intrq));

	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);
	if (!IF_IS_EMPTY(&ipintrq)) {
		s = splnet();

		/* Take existing queue onto stack */
		lcl_intrq = ipintrq;

		/* Zero out global queue, preserving maxlen and drops */
		ipintrq.ifq_head = NULL;
		ipintrq.ifq_tail = NULL;
		ipintrq.ifq_len = 0;
		ipintrq.ifq_maxlen = lcl_intrq.ifq_maxlen;
		ipintrq.ifq_drops = lcl_intrq.ifq_drops;

		splx(s);
	}
	KERNEL_UNLOCK_ONE(NULL);
	while (!IF_IS_EMPTY(&lcl_intrq)) {
		IF_DEQUEUE(&lcl_intrq, m);
		if (m == NULL)
			break;
		ip_input(m);
	}
	mutex_exit(softnet_lock);
}
Example 12
static int
tap_fops_stat(file_t *fp, struct stat *st)
{
	int error = 0;
	struct tap_softc *sc;
	int unit = fp->f_devunit;

	(void)memset(st, 0, sizeof(*st));

	KERNEL_LOCK(1, NULL);
	sc = device_lookup_private(&tap_cd, unit);
	if (sc == NULL) {
		error = ENXIO;
		goto out;
	}

	st->st_dev = makedev(cdevsw_lookup_major(&tap_cdevsw), unit);
	st->st_atimespec = sc->sc_atime;
	st->st_mtimespec = sc->sc_mtime;
	st->st_ctimespec = st->st_birthtimespec = sc->sc_btime;
	st->st_uid = kauth_cred_geteuid(fp->f_cred);
	st->st_gid = kauth_cred_getegid(fp->f_cred);
out:
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
Example 13
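/*
 * Timer callback: re-arm the callout and regenerate the random
 * interface identifiers used for IPv6 temporary (privacy) addresses
 * on every interface that has one.
 */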
void
in6_tmpaddrtimer(void *ignored_arg)
{
	struct nd_ifinfo *ndi;
	u_int8_t nullbuf[8];
	struct ifnet *ifp;

	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);

	callout_reset(&in6_tmpaddrtimer_ch,
	    (ip6_temp_preferred_lifetime - ip6_desync_factor -
	    ip6_temp_regen_advance) * hz, in6_tmpaddrtimer, NULL);

	memset(nullbuf, 0, sizeof(nullbuf));
	IFNET_FOREACH(ifp) {
		ndi = ND_IFINFO(ifp);
		if (memcmp(ndi->randomid, nullbuf, sizeof(nullbuf)) != 0) {
			/*
			 * We've been generating a random ID on this interface.
			 * Create a new one.
			 */
			(void)generate_tmp_ifid(ndi->randomseed0,
			    ndi->randomseed1, ndi->randomid);
		}
	}

	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
}
Example 14
/*
 * npf_pfil_register: register pfil(9) hooks.
 */
int
npf_pfil_register(bool init)
{
	npf_t *npf = npf_getkernctx();
	int error = 0;

	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);

	/* Init: interface re-config and attach/detach hook. */
	if (!npf_ph_if) {
		npf_ph_if = pfil_head_get(PFIL_TYPE_IFNET, 0);
		if (!npf_ph_if) {
			error = ENOENT;
			goto out;
		}
		error = pfil_add_hook(npf_ifhook, NULL,
		    PFIL_IFADDR | PFIL_IFNET, npf_ph_if);
		KASSERT(error == 0);
	}
	if (init) {
		goto out;
	}

	/* Check if pfil hooks are not already registered. */
	if (pfil_registered) {
		error = EEXIST;
		goto out;
	}

	/* Capture points of the activity in the IP layer. */
	npf_ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
	npf_ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
	if (!npf_ph_inet && !npf_ph_inet6) {
		error = ENOENT;
		goto out;
	}

	/* Packet IN/OUT handlers for IP layer. */
	if (npf_ph_inet) {
		error = pfil_add_hook(npf_packet_handler, npf,
		    PFIL_ALL, npf_ph_inet);
		KASSERT(error == 0);
	}
	if (npf_ph_inet6) {
		error = pfil_add_hook(npf_packet_handler, npf,
		    PFIL_ALL, npf_ph_inet6);
		KASSERT(error == 0);
	}
	pfil_registered = true;
out:
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);

	return error;
}
Example 15
static int
tap_fops_read(file_t *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	int error;

	KERNEL_LOCK(1, NULL);
	error = tap_dev_read(fp->f_devunit, uio, flags);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
Example 16
static int
tap_fops_write(file_t *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	int error;

	KERNEL_LOCK(1, NULL);
	error = tap_dev_write((intptr_t)fp->f_data, uio, flags);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
Example 17
static int
wrapifioctl(struct socket *so, u_long cmd, void *data)
{
	int rv;

	KERNEL_LOCK(1, NULL);
	rv = ifioctl(so, cmd, data, curlwp);
	KERNEL_UNLOCK_ONE(NULL);

	return rv;
}
Example 18
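/*
 * Detach a knote from the tap device's selection list.
 */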
static void
tap_kqdetach(struct knote *kn)
{
	struct tap_softc *sc = (struct tap_softc *)kn->kn_hook;

	KERNEL_LOCK(1, NULL);
	mutex_spin_enter(&sc->sc_kqlock);
	SLIST_REMOVE(&sc->sc_rsel.sel_klist, kn, knote, kn_selnext);
	mutex_spin_exit(&sc->sc_kqlock);
	KERNEL_UNLOCK_ONE(NULL);
}
Example 19
int
svr4_add_socket(struct proc *p, const char *path, struct stat *st)
{
    struct svr4_sockcache_entry *e;
    size_t len;
    int error;

    KERNEL_LOCK(1, NULL);

    if (!initialized) {
        TAILQ_INIT(&svr4_head);
        initialized = 1;
    }

    e = malloc(sizeof(*e), M_TEMP, M_WAITOK);
    e->cookie = NULL;
    e->dev = st->st_dev;
    e->ino = st->st_ino;
    e->p = p;

    if ((error = copyinstr(path, e->sock.sun_path,
                           sizeof(e->sock.sun_path), &len)) != 0) {
        DPRINTF(("svr4_add_socket: copyinstr failed %d\n", error));
        free(e, M_TEMP);
        KERNEL_UNLOCK_ONE(NULL);
        return error;
    }

    e->sock.sun_family = AF_LOCAL;
    e->sock.sun_len = len;

    TAILQ_INSERT_HEAD(&svr4_head, e, entries);
    DPRINTF(("svr4_add_socket: %s [%p,%"PRId64",%lu]\n", e->sock.sun_path,
             p, e->dev, e->ino));

    KERNEL_UNLOCK_ONE(NULL);
    return 0;
}
Example 20
int
VOP_ISLOCKED(struct vnode *vp)
{
	int error;
	bool mpsafe;
	struct vop_islocked_args a;
	a.a_desc = VDESC(vop_islocked);
	a.a_vp = vp;
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_islocked), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}
Example 21
int
VOP_ABORTOP(struct vnode *dvp,
    struct componentname *cnp)
{
	int error;
	bool mpsafe;
	struct vop_abortop_args a;
	a.a_desc = VDESC(vop_abortop);
	a.a_dvp = dvp;
	a.a_cnp = cnp;
	mpsafe = (dvp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(dvp, VOFFSET(vop_abortop), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}
Example 22
int
VOP_INACTIVE(struct vnode *vp,
    bool *recycle)
{
	int error;
	bool mpsafe;
	struct vop_inactive_args a;
	a.a_desc = VDESC(vop_inactive);
	a.a_vp = vp;
	a.a_recycle = recycle;
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_inactive), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}
Example 23
int
VOP_KQFILTER(struct vnode *vp,
    struct knote *kn)
{
	int error;
	bool mpsafe;
	struct vop_kqfilter_args a;
	a.a_desc = VDESC(vop_kqfilter);
	a.a_vp = vp;
	a.a_kn = kn;
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_kqfilter), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}
Example 24
int
VOP_POLL(struct vnode *vp,
    int events)
{
	int error;
	bool mpsafe;
	struct vop_poll_args a;
	a.a_desc = VDESC(vop_poll);
	a.a_vp = vp;
	a.a_events = events;
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_poll), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}
Example 25
int
VOP_REVOKE(struct vnode *vp,
    int flags)
{
	int error;
	bool mpsafe;
	struct vop_revoke_args a;
	a.a_desc = VDESC(vop_revoke);
	a.a_vp = vp;
	a.a_flags = flags;
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_revoke), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}
Example 26
int
VOP_BWRITE(struct vnode *vp,
    struct buf *bp)
{
	int error;
	bool mpsafe;
	struct vop_bwrite_args a;
	a.a_desc = VDESC(vop_bwrite);
	a.a_vp = vp;
	a.a_bp = bp;
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_bwrite), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}
Example 27
int
VOP_OPENEXTATTR(struct vnode *vp,
    kauth_cred_t cred)
{
	int error;
	bool mpsafe;
	struct vop_openextattr_args a;
	a.a_desc = VDESC(vop_openextattr);
	a.a_vp = vp;
	a.a_cred = cred;
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_openextattr), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}
Example 28
/*
 * npf_pfil_register: register pfil(9) hooks.
 */
int
npf_pfil_register(void)
{
	int error;

	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);

	/* Check if pfil hooks are not already registered. */
	if (npf_ph_if) {
		error = EEXIST;
		goto fail;
	}

	/* Capture point of any activity in interfaces and IP layer. */
	npf_ph_if = pfil_head_get(PFIL_TYPE_IFNET, 0);
	npf_ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	npf_ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (!npf_ph_if || (!npf_ph_inet && !npf_ph_inet6)) {
		npf_ph_if = NULL;
		error = ENOENT;
		goto fail;
	}

	/* Interface re-config or attach/detach hook. */
	error = pfil_add_hook(npf_ifhook, NULL,
	    PFIL_WAITOK | PFIL_IFADDR | PFIL_IFNET, npf_ph_if);
	KASSERT(error == 0);

	/* Packet IN/OUT handler on all interfaces and IP layer. */
	if (npf_ph_inet) {
		error = pfil_add_hook(npf_packet_handler, NULL,
		    PFIL_WAITOK | PFIL_ALL, npf_ph_inet);
		KASSERT(error == 0);
	}
	if (npf_ph_inet6) {
		error = pfil_add_hook(npf_packet_handler, NULL,
		    PFIL_WAITOK | PFIL_ALL, npf_ph_inet6);
		KASSERT(error == 0);
	}
fail:
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);

	return error;
}
Example 29
int
VOP_FDISCARD(struct vnode *vp,
    off_t pos,
    off_t len)
{
	int error;
	bool mpsafe;
	struct vop_fdiscard_args a;
	a.a_desc = VDESC(vop_fdiscard);
	a.a_vp = vp;
	a.a_pos = pos;
	a.a_len = len;
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_fdiscard), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}
Example 30
int
VOP_MMAP(struct vnode *vp,
    vm_prot_t prot,
    kauth_cred_t cred)
{
	int error;
	bool mpsafe;
	struct vop_mmap_args a;
	a.a_desc = VDESC(vop_mmap);
	a.a_vp = vp;
	a.a_prot = prot;
	a.a_cred = cred;
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_mmap), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}