Example #1
/*
 * Handle an AST for the current process.
 */
void
ast()
{
	struct cpu_info *ci = curcpu();
	struct proc *p = ci->ci_curproc;

	uvmexp.softs++;

	p->p_md.md_astpending = 0;
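	/* Charge pending profiling ticks; ADDUPROF() is not MP-safe, hence the big lock. */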
	if (p->p_flag & P_OWEUPC) {
		KERNEL_LOCK();
		ADDUPROF(p);
		KERNEL_UNLOCK();
	}
	if (ci->ci_want_resched)
		preempt(NULL);

	userret(p);
}
Example #2
int
sys_lfs_bmapv(struct lwp *l, const struct sys_lfs_bmapv_args *uap, register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */
	BLOCK_INFO *blkiov;
	int blkcnt, error;
	fsid_t fsid;
	struct lfs *fs;
	struct mount *mntp;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_BMAPV, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	if ((mntp = vfs_getvfs(&fsid)) == NULL) 
		return (ENOENT);
	fs = VFSTOULFS(mntp)->um_lfs;

	blkcnt = SCARG(uap, blkcnt);
	if ((u_int) blkcnt > SIZE_T_MAX / sizeof(BLOCK_INFO))
		return (EINVAL);
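	/* The LFS cleaner code is not MP-safe; copy in and translate under the big lock. */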
	KERNEL_LOCK(1, NULL);
	blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
	if ((error = copyin(SCARG(uap, blkiov), blkiov,
			    blkcnt * sizeof(BLOCK_INFO))) != 0)
		goto out;

	if ((error = lfs_bmapv(l, &fsid, blkiov, blkcnt)) == 0)
		copyout(blkiov, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO));
    out:
	lfs_free(fs, blkiov, LFS_NB_BLKIOV);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
Example #3
void
vnet_rx_vio_rdx(struct vnet_softc *sc, struct vio_msg_tag *tag)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	switch (tag->stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("CTRL/INFO/RDX\n"));

		tag->stype = VIO_SUBTYPE_ACK;
		tag->sid = sc->sc_local_sid;
		vnet_sendmsg(sc, tag, sizeof(*tag));
		sc->sc_vio_state |= VIO_RCV_RDX;
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("CTRL/ACK/RDX\n"));
		if (!ISSET(sc->sc_vio_state, VIO_SND_RDX)) {
			ldc_reset(&sc->sc_lc);
			break;
		}
		sc->sc_vio_state |= VIO_ACK_RDX;
		break;

	default:
		DPRINTF(("CTRL/0x%02x/RDX (VIO)\n", tag->stype));
		break;
	}

	if (ISSET(sc->sc_vio_state, VIO_RCV_RDX) &&
	    ISSET(sc->sc_vio_state, VIO_ACK_RDX)) {
		/* Link is up! */
		vnet_link_state(sc);

		/* Configure multicast now that we can. */
		vnet_setmulti(sc, 1);

		KERNEL_LOCK();
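		/* Clear OACTIVE and kick the transmit queue now that the link is up. */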
		ifp->if_flags &= ~IFF_OACTIVE;
		vnet_start(ifp);
		KERNEL_UNLOCK();
	}
}
Example #4
static int
emdtv_dtv_stop_transfer(void *priv)
{
	struct emdtv_softc *sc = priv;

	aprint_debug_dev(sc->sc_dev, "stopping stream\n");

	sc->sc_streaming = false;

	KERNEL_LOCK(1, curlwp);
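	/* Abort any in-flight isochronous transfers before clearing the callbacks. */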
	if (sc->sc_isoc_pipe != NULL)
		usbd_abort_pipe(sc->sc_isoc_pipe);
	KERNEL_UNLOCK_ONE(curlwp);

	sc->sc_dtvsubmitcb = NULL;
	sc->sc_dtvsubmitarg = NULL;

	return 0;
}
Example #5
static usbd_status
usbd_attachwholedevice(device_t parent, struct usbd_device *dev, int port,
	int usegeneric)
{
	struct usb_attach_arg uaa;
	usb_device_descriptor_t *dd = &dev->ud_ddesc;
	device_t dv;
	int dlocs[USBDEVIFCF_NLOCS];

	uaa.uaa_device = dev;
	uaa.uaa_usegeneric = usegeneric;
	uaa.uaa_port = port;
	uaa.uaa_vendor = UGETW(dd->idVendor);
	uaa.uaa_product = UGETW(dd->idProduct);
	uaa.uaa_release = UGETW(dd->bcdDevice);
	uaa.uaa_class = dd->bDeviceClass;
	uaa.uaa_subclass = dd->bDeviceSubClass;
	uaa.uaa_proto = dd->bDeviceProtocol;

	dlocs[USBDEVIFCF_PORT] = uaa.uaa_port;
	dlocs[USBDEVIFCF_VENDOR] = uaa.uaa_vendor;
	dlocs[USBDEVIFCF_PRODUCT] = uaa.uaa_product;
	dlocs[USBDEVIFCF_RELEASE] = uaa.uaa_release;
	/* the rest is historical ballast */
	dlocs[USBDEVIFCF_CONFIGURATION] = -1;
	dlocs[USBDEVIFCF_INTERFACE] = -1;

	KERNEL_LOCK(1, NULL);
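	/* Autoconfiguration is not MP-safe; attach the child device under the big lock. */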
	dv = config_found_sm_loc(parent, "usbdevif", dlocs, &uaa, usbd_print,
				 config_stdsubmatch);
	KERNEL_UNLOCK_ONE(NULL);
	if (dv) {
		dev->ud_subdevs = kmem_alloc(sizeof(dv), KM_SLEEP);
		if (dev->ud_subdevs == NULL)
			return USBD_NOMEM;
		dev->ud_subdevs[0] = dv;
		dev->ud_subdevlen = 1;
		dev->ud_nifaces_claimed = 1; /* XXX */
		usbd_serialnumber(dv, dev);
	}
	return USBD_NORMAL_COMPLETION;
}
Example #6
int
VOP_PUTPAGES(struct vnode *vp,
    voff_t offlo,
    voff_t offhi,
    int flags)
{
	int error;
	bool mpsafe;
	struct vop_putpages_args a;
	a.a_desc = VDESC(vop_putpages);
	a.a_vp = vp;
	a.a_offlo = offlo;
	a.a_offhi = offhi;
	a.a_flags = flags;
	mpsafe = (vp->v_vflag & VV_MPSAFE);
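	/* Common pattern in all the VOP_*() wrappers below: take the big lock unless the file system is marked MP-safe. */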
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_putpages), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}
Example #7
int
VOP_WRITE(struct vnode *vp,
    struct uio *uio,
    int ioflag,
    kauth_cred_t cred)
{
	int error;
	bool mpsafe;
	struct vop_write_args a;
	a.a_desc = VDESC(vop_write);
	a.a_vp = vp;
	a.a_uio = uio;
	a.a_ioflag = ioflag;
	a.a_cred = cred;
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_write), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}
Example #8
int
VOP_SEEK(struct vnode *vp,
    off_t oldoff,
    off_t newoff,
    kauth_cred_t cred)
{
	int error;
	bool mpsafe;
	struct vop_seek_args a;
	a.a_desc = VDESC(vop_seek);
	a.a_vp = vp;
	a.a_oldoff = oldoff;
	a.a_newoff = newoff;
	a.a_cred = cred;
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_seek), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}
Example #9
int
VOP_DELETEEXTATTR(struct vnode *vp,
    int attrnamespace,
    const char *name,
    kauth_cred_t cred)
{
	int error;
	bool mpsafe;
	struct vop_deleteextattr_args a;
	a.a_desc = VDESC(vop_deleteextattr);
	a.a_vp = vp;
	a.a_attrnamespace = attrnamespace;
	a.a_name = name;
	a.a_cred = cred;
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_deleteextattr), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}
Example #10
void
vnet_setmulti(struct vnet_softc *sc, int set)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct vnet_mcast_info mi;
	int count = 0;

	if (!ISSET(sc->sc_vio_state, VIO_RCV_RDX) ||
	    !ISSET(sc->sc_vio_state, VIO_ACK_RDX))
		return;

	bzero(&mi, sizeof(mi));
	mi.tag.type = VIO_TYPE_CTRL;
	mi.tag.stype = VIO_SUBTYPE_INFO;
	mi.tag.stype_env = VNET_MCAST_INFO;
	mi.tag.sid = sc->sc_local_sid;
	mi.set = set ? 1 : 0;
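	/* Walk the multicast list under the big lock, batching VNET_NUM_MCAST addresses per message. */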
	KERNEL_LOCK();
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		/* XXX What about multicast ranges? */
		bcopy(enm->enm_addrlo, mi.mcast_addr[count], ETHER_ADDR_LEN);
		ETHER_NEXT_MULTI(step, enm);

		count++;
		if (count < VNET_NUM_MCAST)
			continue;

		mi.count = VNET_NUM_MCAST;
		vnet_sendmsg(sc, &mi, sizeof(mi));
		count = 0;
	}

	if (count > 0) {
		mi.count = count;
		vnet_sendmsg(sc, &mi, sizeof(mi));
	}
	KERNEL_UNLOCK();
}
Example #11
static int
tap_kqread(struct knote *kn, long hint)
{
	struct tap_softc *sc = (struct tap_softc *)kn->kn_hook;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mbuf *m;
	int s, rv;

	KERNEL_LOCK(1, NULL);
	s = splnet();
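	/* IFQ_POLL() peeks at the head of the send queue without dequeuing it. */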
	IFQ_POLL(&ifp->if_snd, m);

	if (m == NULL)
		kn->kn_data = 0;
	else
		kn->kn_data = m->m_pkthdr.len;
	splx(s);
	rv = (kn->kn_data != 0 ? 1 : 0);
	KERNEL_UNLOCK_ONE(NULL);
	return rv;
}
Example #12
int
VOP_FSYNC(struct vnode *vp,
    kauth_cred_t cred,
    int flags,
    off_t offlo,
    off_t offhi)
{
	int error;
	bool mpsafe;
	struct vop_fsync_args a;
	a.a_desc = VDESC(vop_fsync);
	a.a_vp = vp;
	a.a_cred = cred;
	a.a_flags = flags;
	a.a_offlo = offlo;
	a.a_offhi = offhi;
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_fsync), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}
Example #13
/*
 * Interrupt handler.
 */
int
virtio_pci_intr(void *arg)
{
	struct virtio_pci_softc *sc = arg;
	struct virtio_softc *vsc = &sc->sc_sc;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS);
	if (isr == 0)
		return 0;
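	/* Dispatch the config-change and per-device interrupt handlers under the big lock. */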
	KERNEL_LOCK();
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (vsc->sc_config_change != NULL))
		r = (vsc->sc_config_change)(vsc);
	if (vsc->sc_intrhand != NULL)
		r |= (vsc->sc_intrhand)(vsc);
	KERNEL_UNLOCK();

	return r;
}
Example #14
/*
 * This will block until a segment in file system fsid is written.  A timeout
 * in milliseconds may be specified, which will wake the cleaner automatically.
 * An fsid of -1 means any file system, and a timeout of 0 means forever.
 */
int
lfs_segwait(fsid_t *fsidp, struct timeval *tv)
{
	struct mount *mntp;
	void *addr;
	u_long timeout;
	int error;

	KERNEL_LOCK(1, NULL);
	if (fsidp == NULL || (mntp = vfs_getvfs(fsidp)) == NULL)
		addr = &lfs_allclean_wakeup;
	else
		addr = &VFSTOULFS(mntp)->um_lfs->lfs_nextseg;
	/*
	 * XXX THIS COULD SLEEP FOREVER IF TIMEOUT IS {0,0}!
	 * XXX IS THAT WHAT IS INTENDED?
	 */
	timeout = tvtohz(tv);
	error = tsleep(addr, PCATCH | PVFS, "segment", timeout);
	KERNEL_UNLOCK_ONE(NULL);
	return (error == ERESTART ? EINTR : 0);
}
Example #15
int
VOP_FCNTL(struct vnode *vp,
    u_int command,
    void *data,
    int fflag,
    kauth_cred_t cred)
{
	int error;
	bool mpsafe;
	struct vop_fcntl_args a;
	a.a_desc = VDESC(vop_fcntl);
	a.a_vp = vp;
	a.a_command = command;
	a.a_data = data;
	a.a_fflag = fflag;
	a.a_cred = cred;
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_fcntl), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}
Example #16
int
VOP_ADVLOCK(struct vnode *vp,
    void *id,
    int op,
    struct flock *fl,
    int flags)
{
	int error;
	bool mpsafe;
	struct vop_advlock_args a;
	a.a_desc = VDESC(vop_advlock);
	a.a_vp = vp;
	a.a_id = id;
	a.a_op = op;
	a.a_fl = fl;
	a.a_flags = flags;
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_advlock), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}
Example #17
int
VOP_BMAP(struct vnode *vp,
    daddr_t bn,
    struct vnode **vpp,
    daddr_t *bnp,
    int *runp)
{
	int error;
	bool mpsafe;
	struct vop_bmap_args a;
	a.a_desc = VDESC(vop_bmap);
	a.a_vp = vp;
	a.a_bn = bn;
	a.a_vpp = vpp;
	a.a_bnp = bnp;
	a.a_runp = runp;
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_bmap), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}
Example #18
void
dosoftint(int pcpl)
{
	struct cpu_info *ci = curcpu();
	int sir, q, mask;

	ppc_intr_enable(1);
	KERNEL_LOCK();
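	/* Run pending soft interrupt handlers, highest-priority queue first. */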

	while ((sir = (ci->ci_ipending & ppc_smask[pcpl])) != 0) {
		atomic_clearbits_int(&ci->ci_ipending, sir);

		for (q = SI_NQUEUES - 1; q >= 0; q--) {
			mask = SI_TO_IRQBIT(q);
			if (sir & mask)
				softintr_dispatch(q);
		}
	}

	KERNEL_UNLOCK();
	(void)ppc_intr_disable();
}
Example #19
int
sys_lfs_markv(struct lwp *l, const struct sys_lfs_markv_args *uap, register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */
	BLOCK_INFO *blkiov;
	int blkcnt, error;
	fsid_t fsid;
	struct lfs *fs;
	struct mount *mntp;

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	if ((mntp = vfs_getvfs(&fsid)) == NULL) 
		return (ENOENT);
	fs = VFSTOULFS(mntp)->um_lfs;

	blkcnt = SCARG(uap, blkcnt);
	if ((u_int) blkcnt > LFS_MARKV_MAXBLKCNT)
		return (EINVAL);

	KERNEL_LOCK(1, NULL);
	blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
	if ((error = copyin(SCARG(uap, blkiov), blkiov,
			    blkcnt * sizeof(BLOCK_INFO))) != 0)
		goto out;

	if ((error = lfs_markv(l, &fsid, blkiov, blkcnt)) == 0)
		copyout(blkiov, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO));
    out:
	lfs_free(fs, blkiov, LFS_NB_BLKIOV);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
Example #20
/*
 * Remember a newly bound local-domain socket in the SVR4 socket cache.
 */
int
svr4_add_socket(struct proc *p, const char *path, struct stat *st)
{
	struct svr4_sockcache_entry *e;
	size_t len;
	int error;

	KERNEL_LOCK(1, NULL);

	if (!initialized) {
		TAILQ_INIT(&svr4_head);
		initialized = 1;
	}

	e = malloc(sizeof(*e), M_TEMP, M_WAITOK);
	e->cookie = NULL;
	e->dev = st->st_dev;
	e->ino = st->st_ino;
	e->p = p;

	if ((error = copyinstr(path, e->sock.sun_path,
	    sizeof(e->sock.sun_path), &len)) != 0) {
		DPRINTF(("svr4_add_socket: copyinstr failed %d\n", error));
		free(e, M_TEMP);
		KERNEL_UNLOCK_ONE(NULL);
		return error;
	}

	e->sock.sun_family = AF_LOCAL;
	e->sock.sun_len = len;

	TAILQ_INSERT_HEAD(&svr4_head, e, entries);
	DPRINTF(("svr4_add_socket: %s [%p,%"PRId64",%lu]\n", e->sock.sun_path,
	    p, e->dev, e->ino));

	KERNEL_UNLOCK_ONE(NULL);
	return 0;
}
Example #21
/*
 * npf_pfil_unregister: unregister pfil(9) hooks.
 */
void
npf_pfil_unregister(bool fini)
{
	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);

	if (fini && npf_ph_if) {
		(void)pfil_remove_hook(npf_ifhook, NULL,
		    PFIL_IFADDR | PFIL_IFNET, npf_ph_if);
	}
	if (npf_ph_inet) {
		(void)pfil_remove_hook(npf_packet_handler, NULL,
		    PFIL_ALL, npf_ph_inet);
	}
	if (npf_ph_inet6) {
		(void)pfil_remove_hook(npf_packet_handler, NULL,
		    PFIL_ALL, npf_ph_inet6);
	}
	pfil_registered = false;

	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
}
Example #22
int
VOP_LOOKUP(struct vnode *dvp,
    struct vnode **vpp,
    struct componentname *cnp)
{
	int error;
	bool mpsafe;
	struct vop_lookup_v2_args a;
	a.a_desc = VDESC(vop_lookup);
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	mpsafe = (dvp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(dvp, VOFFSET(vop_lookup), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
#ifdef DIAGNOSTIC
	if (error == 0)
		KASSERT((*vpp)->v_size != VSIZENOTSET
		    && (*vpp)->v_writesize != VSIZENOTSET);
#endif /* DIAGNOSTIC */
	return error;
}
Example #23
/*
 * sys_lfs_segclean:
 *
 * Mark the segment clean.
 *
 * Returns 0 on success, or an errno on failure.
 */
int
sys_lfs_segclean(struct lwp *l, const struct sys_lfs_segclean_args *uap, register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(u_long) segment;
	} */
	struct lfs *fs;
	struct mount *mntp;
	fsid_t fsid;
	int error;
	unsigned long segnum;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_SEGCLEAN, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);
	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (ENOENT);

	fs = VFSTOULFS(mntp)->um_lfs;
	segnum = SCARG(uap, segment);

	if ((error = vfs_busy(mntp, NULL)) != 0)
		return (error);

	KERNEL_LOCK(1, NULL);
	lfs_seglock(fs, SEGM_PROT);
	error = lfs_do_segclean(fs, segnum);
	lfs_segunlock(fs);
	KERNEL_UNLOCK_ONE(NULL);
	vfs_unbusy(mntp, false, NULL);
	return error;
}
Example #24
/*
 * Finalise a completed autosense operation
 */
Static void
umass_scsipi_sense_cb(struct umass_softc *sc, void *priv, int residue,
		      int status)
{
	struct scsipi_xfer *xs = priv;
	int s;

	DPRINTF(UDMASS_CMD,("umass_scsipi_sense_cb: xs=%p residue=%d "
		"status=%d\n", xs, residue, status));

	sc->sc_sense = 0;
	switch (status) {
	case STATUS_CMD_OK:
	case STATUS_CMD_UNKNOWN:
		/* getting sense data succeeded */
		if (residue == 0 || residue == 14)/* XXX */
			xs->error = XS_SENSE;
		else
			xs->error = XS_SHORTSENSE;
		break;
	default:
		DPRINTF(UDMASS_SCSI, ("%s: Autosense failed, status %d\n",
			device_xname(sc->sc_dev), status));
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	DPRINTF(UDMASS_CMD,("umass_scsipi_sense_cb: return xs->error=%d, "
		"xs->xs_status=0x%x xs->resid=%d\n", xs->error, xs->xs_status,
		xs->resid));

	s = splbio();
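	/* scsipi_done() completes the xfer; call it at splbio() with the big lock held. */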
	KERNEL_LOCK(1, curlwp);
	scsipi_done(xs);
	KERNEL_UNLOCK_ONE(curlwp);
	splx(s);
}
Example #25
static int
emdtv_dtv_isoc_start(struct emdtv_softc *sc, struct emdtv_isoc_xfer *ix)
{
	int i;

	if (sc->sc_isoc_pipe == NULL)
		return EIO;

	for (i = 0; i < EMDTV_NFRAMES; i++)
		ix->ix_frlengths[i] = sc->sc_isoc_maxpacketsize;

	usbd_setup_isoc_xfer(ix->ix_xfer,
			     ix,
			     ix->ix_frlengths,
			     EMDTV_NFRAMES,
			     USBD_SHORT_XFER_OK,
			     emdtv_dtv_isoc);

	KERNEL_LOCK(1, curlwp);
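	/* Submit the transfer with the big lock held; this USB path is not MP-safe. */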
	usbd_transfer(ix->ix_xfer);
	KERNEL_UNLOCK_ONE(curlwp);

	return 0;
}
Example #26
int
VOP_LISTEXTATTR(struct vnode *vp,
    int attrnamespace,
    struct uio *uio,
    size_t *size,
    int flag,
    kauth_cred_t cred)
{
	int error;
	bool mpsafe;
	struct vop_listextattr_args a;
	a.a_desc = VDESC(vop_listextattr);
	a.a_vp = vp;
	a.a_attrnamespace = attrnamespace;
	a.a_uio = uio;
	a.a_size = size;
	a.a_flag = flag;
	a.a_cred = cred;
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_listextattr), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}
Example #27
int
VOP_READDIR(struct vnode *vp,
    struct uio *uio,
    kauth_cred_t cred,
    int *eofflag,
    off_t **cookies,
    int *ncookies)
{
	int error;
	bool mpsafe;
	struct vop_readdir_args a;
	a.a_desc = VDESC(vop_readdir);
	a.a_vp = vp;
	a.a_uio = uio;
	a.a_cred = cred;
	a.a_eofflag = eofflag;
	a.a_cookies = cookies;
	a.a_ncookies = ncookies;
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_readdir), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}
Example #28
void
taskq_thread(void *xtq)
{
	sleepfn tqsleep = msleep;
	struct taskq *tq = xtq;
	struct task work;
	int last;

	/* MP-safe task queues run without the big lock, which this thread starts out holding. */
	if (ISSET(tq->tq_flags, TASKQ_MPSAFE))
		KERNEL_UNLOCK();

	if (ISSET(tq->tq_flags, TASKQ_CANTSLEEP)) {
		tqsleep = taskq_sleep;
		atomic_setbits_int(&curproc->p_flag, P_CANTSLEEP);
	}

	while (taskq_next_work(tq, &work, tqsleep)) {
		(*work.t_func)(work.t_arg);
		sched_pause();
	}

	mtx_enter(&tq->tq_mtx);
	last = (--tq->tq_running == 0);
	mtx_leave(&tq->tq_mtx);

	/* Reacquire the big lock before tearing the thread down. */
	if (ISSET(tq->tq_flags, TASKQ_MPSAFE))
		KERNEL_LOCK();

	if (ISSET(tq->tq_flags, TASKQ_CANTSLEEP))
		atomic_clearbits_int(&curproc->p_flag, P_CANTSLEEP);

	if (last)
		wakeup_one(&tq->tq_running);

	kthread_exit(0);
}
Example #29
int
VOP_RENAME(struct vnode *fdvp,
    struct vnode *fvp,
    struct componentname *fcnp,
    struct vnode *tdvp,
    struct vnode *tvp,
    struct componentname *tcnp)
{
	int error;
	bool mpsafe;
	struct vop_rename_args a;
	a.a_desc = VDESC(vop_rename);
	a.a_fdvp = fdvp;
	a.a_fvp = fvp;
	a.a_fcnp = fcnp;
	a.a_tdvp = tdvp;
	a.a_tvp = tvp;
	a.a_tcnp = tcnp;
	mpsafe = (fdvp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(fdvp, VOFFSET(vop_rename), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}
Example #30
/*ARGSUSED*/
void
trap(struct trapframe *frame)
{
	struct proc *p = curproc;
	int type = (int)frame->tf_trapno;
	struct pcb *pcb;
	extern char doreti_iret[], resume_iret[];
	caddr_t onfault;
	int error;
	uint64_t cr2;
	union sigval sv;

	uvmexp.traps++;

	pcb = (p != NULL && p->p_addr != NULL) ? &p->p_addr->u_pcb : NULL;

#ifdef DEBUG
	if (trapdebug) {
		printf("trap %d code %lx rip %lx cs %lx rflags %lx cr2 %lx "
		       "cpl %x\n",
		    type, frame->tf_err, frame->tf_rip, frame->tf_cs,
		    frame->tf_rflags, rcr2(), curcpu()->ci_ilevel);
		printf("curproc %p\n", curproc);
		if (curproc)
			printf("pid %d\n", p->p_pid);
	}
#endif

	if (!KERNELMODE(frame->tf_cs, frame->tf_rflags)) {
		type |= T_USER;
		p->p_md.md_regs = frame;
	}

	switch (type) {

	default:
	we_re_toast:
#ifdef KGDB
		if (kgdb_trap(type, frame))
			return;
		else {
			/*
			 * If this is a breakpoint, don't panic
			 * if we're not connected.
			 */
			if (type == T_BPTFLT) {
				printf("kgdb: ignored %s\n", trap_type[type]);
				return;
			}
		}
#endif
#ifdef DDB
		if (kdb_trap(type, 0, frame))
			return;
#endif
		if (frame->tf_trapno < trap_types)
			printf("fatal %s", trap_type[frame->tf_trapno]);
		else
			printf("unknown trap %ld", (u_long)frame->tf_trapno);
		printf(" in %s mode\n", (type & T_USER) ? "user" : "supervisor");
		printf("trap type %d code %lx rip %lx cs %lx rflags %lx cr2 "
		       " %lx cpl %x rsp %lx\n",
		    type, frame->tf_err, (u_long)frame->tf_rip, frame->tf_cs,
		    frame->tf_rflags, rcr2(), curcpu()->ci_ilevel, frame->tf_rsp);

		panic("trap type %d, code=%lx, pc=%lx",
		    type, frame->tf_err, frame->tf_rip);
		/*NOTREACHED*/

	case T_PROTFLT:
	case T_SEGNPFLT:
	case T_ALIGNFLT:
	case T_TSSFLT:
		if (p == NULL)
			goto we_re_toast;
		/* Check for copyin/copyout fault. */
		if (pcb->pcb_onfault != 0) {
			error = EFAULT;
copyfault:
			frame->tf_rip = (u_int64_t)pcb->pcb_onfault;
			frame->tf_rax = error;
			return;
		}

		/*
		 * Check for failure during return to user mode.
		 * We do this by looking at the address of the
		 * instruction that faulted.
		 */
		if (frame->tf_rip == (u_int64_t)doreti_iret) {
			frame->tf_rip = (u_int64_t)resume_iret;
			return;
		}
		goto we_re_toast;

	case T_PROTFLT|T_USER:		/* protection fault */
	case T_TSSFLT|T_USER:
	case T_SEGNPFLT|T_USER:
	case T_STKFLT|T_USER:
	case T_NMI|T_USER:
#ifdef TRAP_SIGDEBUG
		printf("pid %d (%s): BUS at rip %lx addr %lx\n",
		    p->p_pid, p->p_comm, frame->tf_rip, rcr2());
		frame_dump(frame);
#endif
		sv.sival_ptr = (void *)frame->tf_rip;
		KERNEL_LOCK();
		trapsignal(p, SIGBUS, type & ~T_USER, BUS_OBJERR, sv);
		KERNEL_UNLOCK();
		goto out;
	case T_ALIGNFLT|T_USER:
		sv.sival_ptr = (void *)frame->tf_rip;
		KERNEL_LOCK();
		trapsignal(p, SIGBUS, type & ~T_USER, BUS_ADRALN, sv);
		KERNEL_UNLOCK();
		goto out;

	case T_PRIVINFLT|T_USER:	/* privileged instruction fault */
		sv.sival_ptr = (void *)frame->tf_rip;
		KERNEL_LOCK();
		trapsignal(p, SIGILL, type & ~T_USER, ILL_PRVOPC, sv);
		KERNEL_UNLOCK();
		goto out;
	case T_FPOPFLT|T_USER:		/* coprocessor operand fault */
#ifdef TRAP_SIGDEBUG
		printf("pid %d (%s): ILL at rip %lx addr %lx\n",
		    p->p_pid, p->p_comm, frame->tf_rip, rcr2());
		frame_dump(frame);
#endif
		sv.sival_ptr = (void *)frame->tf_rip;
		KERNEL_LOCK();
		trapsignal(p, SIGILL, type & ~T_USER, ILL_COPROC, sv);
		KERNEL_UNLOCK();
		goto out;

	case T_ASTFLT|T_USER:		/* Allow process switch */
		uvmexp.softs++;
		if (p->p_flag & P_OWEUPC) {
			KERNEL_LOCK();
			ADDUPROF(p);
			KERNEL_UNLOCK();
		}
		/* Allow a forced task switch. */
		if (curcpu()->ci_want_resched)
			preempt(NULL);
		goto out;

	case T_BOUND|T_USER:
		sv.sival_ptr = (void *)frame->tf_rip;
		KERNEL_LOCK();
		trapsignal(p, SIGFPE, type &~ T_USER, FPE_FLTSUB, sv);
		KERNEL_UNLOCK();
		goto out;
	case T_OFLOW|T_USER:
		sv.sival_ptr = (void *)frame->tf_rip;
		KERNEL_LOCK();
		trapsignal(p, SIGFPE, type &~ T_USER, FPE_INTOVF, sv);
		KERNEL_UNLOCK();
		goto out;
	case T_DIVIDE|T_USER:
		sv.sival_ptr = (void *)frame->tf_rip;
		KERNEL_LOCK();
		trapsignal(p, SIGFPE, type &~ T_USER, FPE_INTDIV, sv);
		KERNEL_UNLOCK();
		goto out;

	case T_ARITHTRAP|T_USER:
	case T_XMM|T_USER:
		fputrap(frame);
		goto out;

	case T_PAGEFLT:			/* allow page faults in kernel mode */
		if (p == NULL)
			goto we_re_toast;
		cr2 = rcr2();
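		/* Fall into the shared fault path below with the big lock held. */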
		KERNEL_LOCK();
		goto faultcommon;

	case T_PAGEFLT|T_USER: {	/* page fault */
		vaddr_t va, fa;
		struct vmspace *vm;
		struct vm_map *map;
		vm_prot_t ftype;
		extern struct vm_map *kernel_map;

		cr2 = rcr2();
		KERNEL_LOCK();
faultcommon:
		vm = p->p_vmspace;
		if (vm == NULL)
			goto we_re_toast;
		fa = cr2;
		va = trunc_page((vaddr_t)cr2);
		/*
		 * It is only a kernel address space fault iff:
		 *	1. (type & T_USER) == 0  and
		 *	2. pcb_onfault not set or
		 *	3. pcb_onfault set but supervisor space fault
		 * The last can occur during an exec() copyin where the
		 * argument space is lazy-allocated.
		 */
		if (type == T_PAGEFLT && va >= VM_MIN_KERNEL_ADDRESS)
			map = kernel_map;
		else
			map = &vm->vm_map;
		if (frame->tf_err & PGEX_W)
			ftype = VM_PROT_WRITE;
		else if (frame->tf_err & PGEX_I)
			ftype = VM_PROT_EXECUTE;
		else
			ftype = VM_PROT_READ;

#ifdef DIAGNOSTIC
		if (map == kernel_map && va == 0) {
			printf("trap: bad kernel access at %lx\n", va);
			goto we_re_toast;
		}
#endif

		/* Fault the original page in. */
		onfault = pcb->pcb_onfault;
		pcb->pcb_onfault = NULL;
		error = uvm_fault(map, va, frame->tf_err & PGEX_P ?
		    VM_FAULT_PROTECT : VM_FAULT_INVALID, ftype);
		pcb->pcb_onfault = onfault;
		if (error == 0) {
			if (map != kernel_map)
				uvm_grow(p, va);

			if (type == T_PAGEFLT) {
				KERNEL_UNLOCK();
				return;
			}
			KERNEL_UNLOCK();
			goto out;
		}
		if (error == EACCES) {
			error = EFAULT;
		}

		if (type == T_PAGEFLT) {
			if (pcb->pcb_onfault != 0) {
				KERNEL_UNLOCK();
				goto copyfault;
			}
			printf("uvm_fault(%p, 0x%lx, 0, %d) -> %x\n",
			    map, va, ftype, error);
			goto we_re_toast;
		}
		if (error == ENOMEM) {
			printf("UVM: pid %d (%s), uid %d killed: out of swap\n",
			       p->p_pid, p->p_comm,
			       p->p_cred && p->p_ucred ?
			       (int)p->p_ucred->cr_uid : -1);
			sv.sival_ptr = (void *)fa;
			trapsignal(p, SIGKILL, T_PAGEFLT, SEGV_MAPERR, sv);
		} else {
#ifdef TRAP_SIGDEBUG
			printf("pid %d (%s): SEGV at rip %lx addr %lx\n",
			    p->p_pid, p->p_comm, frame->tf_rip, va);
			frame_dump(frame);
#endif
			sv.sival_ptr = (void *)fa;
			trapsignal(p, SIGSEGV, T_PAGEFLT, SEGV_MAPERR, sv);
		}
		KERNEL_UNLOCK();
		break;
	}

	case T_TRCTRAP:
		goto we_re_toast;

	case T_BPTFLT|T_USER:		/* bpt instruction fault */
	case T_TRCTRAP|T_USER:		/* trace trap */
#ifdef MATH_EMULATE
	trace:
#endif
		KERNEL_LOCK();
		trapsignal(p, SIGTRAP, type &~ T_USER, TRAP_BRKPT, sv);
		KERNEL_UNLOCK();
		break;

#if	NISA > 0
	case T_NMI:
#if defined(KGDB) || defined(DDB)
		/* NMI can be hooked up to a pushbutton for debugging */
		printf ("NMI ... going to debugger\n");
#ifdef KGDB

		if (kgdb_trap(type, frame))
			return;
#endif
#ifdef DDB
		if (kdb_trap(type, 0, frame))
			return;
#endif
#endif /* KGDB || DDB */
		/* machine/parity/power fail/"kitchen sink" faults */

		if (x86_nmi() != 0)
			goto we_re_toast;
		else
			return;
#endif /* NISA > 0 */
	}

	if ((type & T_USER) == 0)
		return;
out:
	userret(p);
}