Example #1
/**
 * mrsas_passthru:        Handle pass-through commands 
 * input:                 Adapter instance soft state
 *                        argument pointer
 *
 * This function is called from mrsas_ioctl() to handle pass-through and 
 * ioctl commands to Firmware.  
 */
int mrsas_passthru( struct mrsas_softc *sc, void *arg )
{
    struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
    union  mrsas_frame *in_cmd = (union mrsas_frame *) &(user_ioc->frame.raw);
    struct mrsas_mfi_cmd *cmd = NULL;
    bus_dma_tag_t ioctl_data_tag[MAX_IOCTL_SGE];
    bus_dmamap_t ioctl_data_dmamap[MAX_IOCTL_SGE];
    void *ioctl_data_mem[MAX_IOCTL_SGE];  // ioctl data virtual addr
    bus_addr_t ioctl_data_phys_addr[MAX_IOCTL_SGE]; // ioctl data phys addr
    bus_dma_tag_t ioctl_sense_tag = 0;
    bus_dmamap_t ioctl_sense_dmamap = 0;
    void *ioctl_sense_mem = 0;  
    bus_addr_t ioctl_sense_phys_addr = 0; 
    int i, adapter, ioctl_data_size, ioctl_sense_size, ret = 0;
    struct mrsas_sge32 *kern_sge32;
    unsigned long *sense_ptr;

    /* For debug - uncomment the following line for debug output */
    //mrsas_dump_ioctl(sc, user_ioc); 

    /* 
     * Check for NOP from MegaCli... MegaCli can issue a DCMD of 0.  In this 
     * case do nothing and return 0 to it as status.
     */
    if (in_cmd->dcmd.opcode == 0) {
        device_printf(sc->mrsas_dev, "In %s() Got a NOP\n", __func__);
        user_ioc->frame.hdr.cmd_status = MFI_STAT_OK;
        return (0);
    }

    /* Validate host_no */
    adapter = user_ioc->host_no;
    if (adapter != device_get_unit(sc->mrsas_dev)) {
        device_printf(sc->mrsas_dev, "In %s() IOCTL not for me!\n", __func__);
        return(ENOENT);
    }

    /* Validate SGL length */
    if (user_ioc->sge_count > MAX_IOCTL_SGE) { 
        device_printf(sc->mrsas_dev, "In %s() SGL is too long (%d > 8).\n", 
            __func__, user_ioc->sge_count);
        return(ENOENT);
    }

    /* Get a command */
    cmd = mrsas_get_mfi_cmd(sc);
    if (!cmd) {
        device_printf(sc->mrsas_dev, "Failed to get a free cmd for IOCTL\n");
        return(ENOMEM);
    }

    /*
     * The user's IOCTL packet has at most two frames. Copy them into our
     * cmd's frames. cmd->frame's context is overwritten by the copy from
     * the user's frames, so restore that one field separately.
     */
    memcpy(cmd->frame, user_ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
    cmd->frame->hdr.context = cmd->index;
    cmd->frame->hdr.pad_0 = 0;
    cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 |
                               MFI_FRAME_SENSE64);

    /*
     * The management interface between applications and the fw uses
     * MFI frames. E.g., RAID configuration changes, LD property changes,
     * etc. are accomplished through different kinds of MFI frames. The
     * driver needs to care only about substituting user buffers with
     * kernel buffers in SGLs. The location of the SGL is embedded in the
     * struct iocpacket itself.
     */
    kern_sge32 = (struct mrsas_sge32 *)
        ((unsigned long)cmd->frame + user_ioc->sgl_off);

    /*
     * For each user buffer, create a mirror buffer and copy in
     */
    for (i=0; i < user_ioc->sge_count; i++) {
        if (!user_ioc->sgl[i].iov_len)
            continue;
        ioctl_data_size = user_ioc->sgl[i].iov_len;
        if (bus_dma_tag_create( sc->mrsas_parent_tag,   // parent
                                1, 0,                   // algnmnt, boundary
                                BUS_SPACE_MAXADDR_32BIT,// lowaddr
                                BUS_SPACE_MAXADDR,      // highaddr
                                NULL, NULL,             // filter, filterarg
                                ioctl_data_size,        // maxsize
                                1,                      // nsegments
                                ioctl_data_size,        // maxsegsize
                                BUS_DMA_ALLOCNOW,       // flags
                                NULL, NULL,             // lockfunc, lockarg
                                &ioctl_data_tag[i])) {
            device_printf(sc->mrsas_dev, "Cannot allocate ioctl data tag\n");
            return (ENOMEM);
        }
        if (bus_dmamem_alloc(ioctl_data_tag[i], (void **)&ioctl_data_mem[i],
                (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &ioctl_data_dmamap[i])) {
            device_printf(sc->mrsas_dev, "Cannot allocate ioctl data mem\n");
            return (ENOMEM);
        }
        if (bus_dmamap_load(ioctl_data_tag[i], ioctl_data_dmamap[i],
                ioctl_data_mem[i], ioctl_data_size, mrsas_alloc_cb,
                &ioctl_data_phys_addr[i], BUS_DMA_NOWAIT)) {
            device_printf(sc->mrsas_dev, "Cannot load ioctl data mem\n");
            return (ENOMEM);
        }

        /* Save the physical address and length */
        kern_sge32[i].phys_addr = (u_int32_t)ioctl_data_phys_addr[i];
        kern_sge32[i].length = user_ioc->sgl[i].iov_len;

        /* Copy in data from user space */
        ret = copyin(user_ioc->sgl[i].iov_base, ioctl_data_mem[i], 
                        user_ioc->sgl[i].iov_len);
        if (ret) {
            device_printf(sc->mrsas_dev, "IOCTL copyin failed!\n");
            goto out;
        }
    }

    ioctl_sense_size = user_ioc->sense_len;
    if (user_ioc->sense_len) {
        if (bus_dma_tag_create( sc->mrsas_parent_tag,   // parent
                                1, 0,                   // algnmnt, boundary
                                BUS_SPACE_MAXADDR_32BIT,// lowaddr
                                BUS_SPACE_MAXADDR,      // highaddr
                                NULL, NULL,             // filter, filterarg
                                ioctl_sense_size,       // maxsize
                                1,                      // nsegments
                                ioctl_sense_size,       // maxsegsize
                                BUS_DMA_ALLOCNOW,       // flags
                                NULL, NULL,             // lockfunc, lockarg
                                &ioctl_sense_tag)) {
            device_printf(sc->mrsas_dev, "Cannot allocate ioctl sense tag\n");
            return (ENOMEM);
        }
        if (bus_dmamem_alloc(ioctl_sense_tag, (void **)&ioctl_sense_mem,
                (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &ioctl_sense_dmamap)) {
            device_printf(sc->mrsas_dev, "Cannot allocate ioctl data mem\n");
            return (ENOMEM);
        }
        if (bus_dmamap_load(ioctl_sense_tag, ioctl_sense_dmamap,
                ioctl_sense_mem, ioctl_sense_size, mrsas_alloc_cb,
                &ioctl_sense_phys_addr, BUS_DMA_NOWAIT)) {
            device_printf(sc->mrsas_dev, "Cannot load ioctl sense mem\n");
            return (ENOMEM);
        }
        sense_ptr = (unsigned long *)
            ((unsigned long)cmd->frame + user_ioc->sense_off);
        *sense_ptr = ioctl_sense_phys_addr;
    }

    /*
     * Set the sync_cmd flag so that the ISR knows not to complete this
     * cmd to the SCSI mid-layer
     */
    cmd->sync_cmd = 1;
    mrsas_issue_blocked_cmd(sc, cmd);
    cmd->sync_cmd = 0;

    /*
     * copy out the kernel buffers to user buffers
     */
    for (i = 0; i < user_ioc->sge_count; i++) {
        ret = copyout(ioctl_data_mem[i], user_ioc->sgl[i].iov_base, 
            user_ioc->sgl[i].iov_len);
        if (ret) {
            device_printf(sc->mrsas_dev, "IOCTL copyout failed!\n");
            goto out;
        }
    }

    /*
     * copy out the sense
     */
    if (user_ioc->sense_len) {
        /*
         * sense_buff points to the location that has the user
         * sense buffer address
         */
        sense_ptr = (unsigned long *) ((unsigned long)user_ioc->frame.raw +
                      user_ioc->sense_off);
        ret = copyout(ioctl_sense_mem, (unsigned long*)*sense_ptr, 
                      user_ioc->sense_len); 
        if (ret) {
            device_printf(sc->mrsas_dev, "IOCTL sense copyout failed!\n");
            goto out;
        }
    }

    /*
     * Return command status to user space
     */
    memcpy(&user_ioc->frame.hdr.cmd_status, &cmd->frame->hdr.cmd_status, 
            sizeof(u_int8_t));

out:
    /* 
     * Release sense buffer 
     */
    if (ioctl_sense_phys_addr)
        bus_dmamap_unload(ioctl_sense_tag, ioctl_sense_dmamap);
    if (ioctl_sense_mem)
        bus_dmamem_free(ioctl_sense_tag, ioctl_sense_mem, ioctl_sense_dmamap);
    if (ioctl_sense_tag)
        bus_dma_tag_destroy(ioctl_sense_tag);

    /* 
     * Release data buffers 
     */
    for (i = 0; i < user_ioc->sge_count; i++) {
        if (!user_ioc->sgl[i].iov_len)
            continue;
        if (ioctl_data_phys_addr[i])
            bus_dmamap_unload(ioctl_data_tag[i], ioctl_data_dmamap[i]);
        if (ioctl_data_mem[i] != NULL)
            bus_dmamem_free(ioctl_data_tag[i], ioctl_data_mem[i], 
                ioctl_data_dmamap[i]);
        if (ioctl_data_tag[i] != NULL)
            bus_dma_tag_destroy(ioctl_data_tag[i]);
    }

    /* Free command */
    mrsas_release_mfi_cmd(cmd);

    return(ret);
}
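The function above repeats one four-step busdma sequence per SGE and once
more for the sense buffer: create a tag, allocate DMA-safe memory, load it
to obtain a bus address, then unload/free/destroy in reverse order at "out:".
Below is a minimal sketch of that lifecycle with explicit unwinding on error;
the helper name and callback are illustrative, not part of the mrsas driver.
Note the early return (ENOMEM) paths above skip this unwinding and also leak
the MFI command; a helper like this makes the reverse-order cleanup explicit.

/* Sketch only: FreeBSD busdma allocate/load lifecycle with cleanup. */
static void
sketch_alloc_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error == 0)
		*(bus_addr_t *)arg = segs[0].ds_addr;	/* save bus address */
}

static int
sketch_alloc_dma(bus_dma_tag_t parent, bus_size_t size, bus_dma_tag_t *tag,
    bus_dmamap_t *map, void **vaddr, bus_addr_t *paddr)
{
	if (bus_dma_tag_create(parent,
	    1, 0,					/* align, boundary */
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,	/* low, high addr */
	    NULL, NULL,					/* filter, filterarg */
	    size, 1, size,				/* maxsize, nseg, maxseg */
	    BUS_DMA_ALLOCNOW, NULL, NULL, tag))
		return (ENOMEM);
	if (bus_dmamem_alloc(*tag, vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    map)) {
		bus_dma_tag_destroy(*tag);
		return (ENOMEM);
	}
	if (bus_dmamap_load(*tag, *map, *vaddr, size, sketch_alloc_cb,
	    paddr, BUS_DMA_NOWAIT)) {
		bus_dmamem_free(*tag, *vaddr, *map);
		bus_dma_tag_destroy(*tag);
		return (ENOMEM);
	}
	return (0);
}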
Example #2
static int
freebsd32_ioctl_sg(struct thread *td,
    struct freebsd32_ioctl_args *uap, struct file *fp)
{
	struct sg_io_hdr io;
	struct sg_io_hdr32 io32;
	int error;

	if ((error = copyin(uap->data, &io32, sizeof(io32))) != 0)
		return (error);

	CP(io32, io, interface_id);
	CP(io32, io, dxfer_direction);
	CP(io32, io, cmd_len);
	CP(io32, io, mx_sb_len);
	CP(io32, io, iovec_count);
	CP(io32, io, dxfer_len);
	PTRIN_CP(io32, io, dxferp);
	PTRIN_CP(io32, io, cmdp);
	PTRIN_CP(io32, io, sbp);
	CP(io32, io, timeout);
	CP(io32, io, flags);
	CP(io32, io, pack_id);
	PTRIN_CP(io32, io, usr_ptr);
	CP(io32, io, status);
	CP(io32, io, masked_status);
	CP(io32, io, msg_status);
	CP(io32, io, sb_len_wr);
	CP(io32, io, host_status);
	CP(io32, io, driver_status);
	CP(io32, io, resid);
	CP(io32, io, duration);
	CP(io32, io, info);

	if ((error = fo_ioctl(fp, SG_IO, (caddr_t)&io, td->td_ucred, td)) != 0)
		return (error);

	CP(io, io32, interface_id);
	CP(io, io32, dxfer_direction);
	CP(io, io32, cmd_len);
	CP(io, io32, mx_sb_len);
	CP(io, io32, iovec_count);
	CP(io, io32, dxfer_len);
	PTROUT_CP(io, io32, dxferp);
	PTROUT_CP(io, io32, cmdp);
	PTROUT_CP(io, io32, sbp);
	CP(io, io32, timeout);
	CP(io, io32, flags);
	CP(io, io32, pack_id);
	PTROUT_CP(io, io32, usr_ptr);
	CP(io, io32, status);
	CP(io, io32, masked_status);
	CP(io, io32, msg_status);
	CP(io, io32, sb_len_wr);
	CP(io, io32, host_status);
	CP(io, io32, driver_status);
	CP(io, io32, resid);
	CP(io, io32, duration);
	CP(io, io32, info);

	error = copyout(&io32, uap->data, sizeof(io32));

	return (error);
}
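Each CP()/PTRIN_CP() line above copies one field between the 32-bit and
native layouts of sg_io_hdr, with the PTR variants widening or narrowing the
embedded pointers. The definitions below are an approximate sketch of those
helpers (FreeBSD keeps the real ones in sys/compat/freebsd32/freebsd32.h):

/* Approximate field-copy helpers; see freebsd32.h for the real ones. */
#define CP(src, dst, fld)	do { (dst).fld = (src).fld; } while (0)
#define PTRIN(v)		(void *)(uintptr_t)(v)	 /* 32-bit -> native */
#define PTROUT(v)		(uint32_t)(uintptr_t)(v) /* native -> 32-bit */
#define PTRIN_CP(src, dst, fld) \
	do { (dst).fld = PTRIN((src).fld); } while (0)
#define PTROUT_CP(src, dst, fld) \
	do { (dst).fld = PTROUT((src).fld); } while (0)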
Example #3
/*
 * path - addr in user space of mount point (ie /usr or whatever)
 * data - addr in user space of mount params including the name of the block
 * special file to treat as a filesystem.
 */
static int
msdosfs_mount(struct mount *mp, char *path, caddr_t data, struct ucred *cred)
{
	struct vnode *devvp;	  /* vnode for blk device to mount */
	struct msdosfs_args args; /* will hold data from mount request */
	/* msdosfs specific mount control block */
	struct msdosfsmount *pmp = NULL;
	size_t size;
	int error, flags;
	mode_t accessmode;
	struct nlookupdata nd;

	error = copyin(data, (caddr_t)&args, sizeof(struct msdosfs_args));
	if (error)
		return (error);
	if (args.magic != MSDOSFS_ARGSMAGIC)
		args.flags = 0;
	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		pmp = VFSTOMSDOSFS(mp);
		error = 0;
		if (!(pmp->pm_flags & MSDOSFSMNT_RONLY) && (mp->mnt_flag & MNT_RDONLY)) {
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = vflush(mp, 0, flags);
			if (error == 0) {
				devvp = pmp->pm_devvp;
				vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
				VOP_OPEN(devvp, FREAD, FSCRED, NULL);
				VOP_CLOSE(devvp, FREAD|FWRITE);
				vn_unlock(devvp);
				pmp->pm_flags |= MSDOSFSMNT_RONLY;
			}
		}
		if (!error && (mp->mnt_flag & MNT_RELOAD))
			/* not yet implemented */
			error = EOPNOTSUPP;
		if (error)
			return (error);
		if ((pmp->pm_flags & MSDOSFSMNT_RONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			devvp = pmp->pm_devvp;
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			if (cred->cr_uid != 0) {
				error = VOP_EACCESS(devvp, VREAD | VWRITE, cred);
				if (error) {
					vn_unlock(devvp);
					return (error);
				}
			}
			VOP_OPEN(devvp, FREAD|FWRITE, FSCRED, NULL);
			VOP_CLOSE(devvp, FREAD);
			vn_unlock(devvp);
			pmp->pm_flags &= ~MSDOSFSMNT_RONLY;
		}
		if (args.fspec == NULL) {
#ifdef	__notyet__		/* doesn't work correctly with current mountd	XXX */
			if (args.flags & MSDOSFSMNT_MNTOPT) {
				pmp->pm_flags &= ~MSDOSFSMNT_MNTOPT;
				pmp->pm_flags |= args.flags & MSDOSFSMNT_MNTOPT;
				if (pmp->pm_flags & MSDOSFSMNT_NOWIN95)
					pmp->pm_flags |= MSDOSFSMNT_SHORTNAME;
			}
#endif
			/*
			 * Process export requests.
			 */
			return (vfs_export(mp, &pmp->pm_export, &args.export));
		}
	}
Example #4
int
svr4_sys_fcntl(struct lwp *l, const struct svr4_sys_fcntl_args *uap, register_t *retval)
{
	struct sys_fcntl_args	fa;
	register_t		flags;
	struct svr4_flock64	ifl64;
	struct svr4_flock	ifl;
	struct flock		fl;
	int error;
	int cmd;

	SCARG(&fa, fd) = SCARG(uap, fd);
	SCARG(&fa, arg) = SCARG(uap, arg);

	switch (SCARG(uap, cmd)) {
	case SVR4_F_DUPFD:
		cmd = F_DUPFD;
		break;
	case SVR4_F_GETFD:
		cmd = F_GETFD;
		break;
	case SVR4_F_SETFD:
		cmd = F_SETFD;
		break;

	case SVR4_F_GETFL:
		cmd = F_GETFL;
		break;

	case SVR4_F_SETFL:
		/*
		 * we must save the O_ASYNC flag, as that is
		 * handled by ioctl(_, I_SETSIG, _) emulation.
		 */
		SCARG(&fa, cmd) = F_GETFL;
		if ((error = sys_fcntl(l, &fa, &flags)) != 0)
			return error;
		flags &= O_ASYNC;
		flags |= svr4_to_bsd_flags((u_long) SCARG(uap, arg));
		cmd = F_SETFL;
		SCARG(&fa, arg) = (void *) flags;
		break;

	case SVR4_F_GETLK:
		cmd = F_GETLK;
		goto lock32;
	case SVR4_F_SETLK:
		cmd = F_SETLK;
		goto lock32;
	case SVR4_F_SETLKW:
		cmd = F_SETLKW;
	    lock32:
		error = copyin(SCARG(uap, arg), &ifl, sizeof ifl);
		if (error)
			return error;
		svr4_to_bsd_flock(&ifl, &fl);

		error = do_fcntl_lock(SCARG(uap, fd), cmd, &fl);
		if (cmd != F_GETLK || error != 0)
			return error;

		bsd_to_svr4_flock(&fl, &ifl);
		return copyout(&ifl, SCARG(uap, arg), sizeof ifl);

	case SVR4_F_DUP2FD:
		{
			struct sys_dup2_args du;

			SCARG(&du, from) = SCARG(uap, fd);
			SCARG(&du, to) = (int)(u_long)SCARG(uap, arg);
			error = sys_dup2(l, &du, retval);
			if (error)
				return error;
			*retval = SCARG(&du, to);
			return 0;
		}

	case SVR4_F_FREESP:
		error = copyin(SCARG(uap, arg), &ifl, sizeof ifl);
		if (error)
			return error;
		svr4_to_bsd_flock(&ifl, &fl);
		return fd_truncate(l, SCARG(uap, fd), &fl, retval);

	case SVR4_F_GETLK64:
		cmd = F_GETLK;
		goto lock64;
	case SVR4_F_SETLK64:
		cmd = F_SETLK;
		goto lock64;
	case SVR4_F_SETLKW64:
		cmd = F_SETLKW;
	    lock64:
		error = copyin(SCARG(uap, arg), &ifl64, sizeof ifl64);
		if (error)
			return error;
		svr4_to_bsd_flock64(&ifl64, &fl);

		error = do_fcntl_lock(SCARG(uap, fd), cmd, &fl);
		if (cmd != F_GETLK || error != 0)
			return error;

		bsd_to_svr4_flock64(&fl, &ifl64);
		return copyout(&ifl64, SCARG(uap, arg), sizeof ifl64);

	case SVR4_F_FREESP64:
		error = copyin(SCARG(uap, arg), &ifl64, sizeof ifl64);
		if (error)
			return error;
		svr4_to_bsd_flock64(&ifl64, &fl);
		return fd_truncate(l, SCARG(uap, fd), &fl, retval);

	case SVR4_F_REVOKE:
		return fd_revoke(l, SCARG(uap, fd), retval);

	default:
		return ENOSYS;
	}

	SCARG(&fa, cmd) = cmd;

	error = sys_fcntl(l, &fa, retval);
	if (error != 0)
		return error;

	switch (SCARG(uap, cmd)) {

	case SVR4_F_GETFL:
		*retval = bsd_to_svr4_flags(*retval);
		break;
	}

	return 0;
}
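All six lock cases above funnel through svr4_to_bsd_flock() or its 64-bit
sibling before reaching do_fcntl_lock(). The sketch below shows what such a
converter looks like; the svr4_flock field and constant names are inferred
from the calls above rather than copied from NetBSD's headers:

/* Sketch: translate an SVR4 flock into the native BSD flock. */
static void
sketch_svr4_to_bsd_flock(const struct svr4_flock *iflp, struct flock *flp)
{
	switch (iflp->l_type) {
	case SVR4_F_RDLCK:
		flp->l_type = F_RDLCK;
		break;
	case SVR4_F_WRLCK:
		flp->l_type = F_WRLCK;
		break;
	case SVR4_F_UNLCK:
		flp->l_type = F_UNLCK;
		break;
	default:
		flp->l_type = -1;	/* invalid; rejected downstream */
		break;
	}
	flp->l_whence = (short)iflp->l_whence;
	flp->l_start = (off_t)iflp->l_start;
	flp->l_len = (off_t)iflp->l_len;
	flp->l_pid = (pid_t)iflp->l_pid;
}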
Example #5
int
sti_ioctl(void *v, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct sti_screen *scr = (struct sti_screen *)v;
	struct sti_rom *rom = scr->scr_rom;
	struct wsdisplay_fbinfo *wdf;
	struct wsdisplay_cmap *cmapp;
	u_int mode, idx, count;
	int i, ret;

	ret = 0;
	switch (cmd) {
	case WSDISPLAYIO_SMODE:
		mode = *(u_int *)data;
		if (scr->scr_wsmode == WSDISPLAYIO_MODE_EMUL &&
		    mode == WSDISPLAYIO_MODE_DUMBFB)
			ret = sti_init(scr, 0);
		else if (scr->scr_wsmode == WSDISPLAYIO_MODE_DUMBFB &&
		    mode == WSDISPLAYIO_MODE_EMUL)
			ret = sti_init(scr, STI_TEXTMODE);
		scr->scr_wsmode = mode;
		break;

	case WSDISPLAYIO_GTYPE:
		*(u_int *)data = WSDISPLAY_TYPE_STI;
		break;

	case WSDISPLAYIO_GINFO:
		wdf = (struct wsdisplay_fbinfo *)data;
		wdf->height = scr->scr_cfg.scr_height;
		wdf->width  = scr->scr_cfg.scr_width;
		wdf->depth  = scr->scr_bpp;
		if (rom->scment == NULL)
			wdf->cmsize = 0;
		else
			wdf->cmsize = STI_NCMAP;
		break;

	case WSDISPLAYIO_LINEBYTES:
		*(u_int *)data = scr->scr_cfg.fb_width;
		break;

	case WSDISPLAYIO_GETCMAP:
		if (rom->scment == NULL)
			return ENODEV;
		cmapp = (struct wsdisplay_cmap *)data;
		idx = cmapp->index;
		count = cmapp->count;
		if (idx >= STI_NCMAP || idx + count > STI_NCMAP)
			return EINVAL;
		if ((ret = copyout(&scr->scr_rcmap[idx], cmapp->red, count)))
			break;
		if ((ret = copyout(&scr->scr_gcmap[idx], cmapp->green, count)))
			break;
		if ((ret = copyout(&scr->scr_bcmap[idx], cmapp->blue, count)))
			break;
		break;

	case WSDISPLAYIO_PUTCMAP:
		if (rom->scment == NULL)
			return ENODEV;
		cmapp = (struct wsdisplay_cmap *)data;
		idx = cmapp->index;
		count = cmapp->count;
		if (idx >= STI_NCMAP || idx + count > STI_NCMAP)
			return EINVAL;
		if ((ret = copyin(cmapp->red, &scr->scr_rcmap[idx], count)))
			break;
		if ((ret = copyin(cmapp->green, &scr->scr_gcmap[idx], count)))
			break;
		if ((ret = copyin(cmapp->blue, &scr->scr_bcmap[idx], count)))
			break;
		for (i = idx + count - 1; i >= idx; i--)
			if ((ret = sti_setcment(scr, i, scr->scr_rcmap[i],
			    scr->scr_gcmap[i], scr->scr_bcmap[i]))) {
#ifdef STIDEBUG
				printf("sti_ioctl: "
				    "sti_setcment(%d, %u, %u, %u): %d\n", i,
				    (u_int)scr->scr_rcmap[i],
				    (u_int)scr->scr_gcmap[i],
				    (u_int)scr->scr_bcmap[i], ret);
#endif
				ret = EINVAL;
				break;
			}
		break;

	case WSDISPLAYIO_SVIDEO:
	case WSDISPLAYIO_GVIDEO:
		break;

	default:
		return (-1);		/* not supported yet */
	}

	return (ret);
}
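From userland, the requests handled above are issued with plain ioctl(2)
against the wsdisplay device. A hedged example that reads part of the
colormap through the WSDISPLAYIO_GETCMAP path follows; the device path is
an assumption, and the call fails with ENODEV when the ROM lacks scment
support, exactly as in the handler above.

/* Sketch: read 16 colormap entries via WSDISPLAYIO_GETCMAP. */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <dev/wscons/wsconsio.h>
#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
	u_char r[16], g[16], b[16];
	struct wsdisplay_cmap cmap;
	int fd;

	cmap.index = 0;
	cmap.count = 16;
	cmap.red = r;
	cmap.green = g;
	cmap.blue = b;
	fd = open("/dev/ttyC0", O_RDWR);	/* assumed console device */
	if (fd == -1 || ioctl(fd, WSDISPLAYIO_GETCMAP, &cmap) == -1) {
		perror("WSDISPLAYIO_GETCMAP");
		return 1;
	}
	printf("entry 0: r=%u g=%u b=%u\n", r[0], g[0], b[0]);
	return 0;
}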
Example #6
/*
 * Handle an exception.
 * In the case of a kernel trap, we return the pc where to resume if
 * pcb_onfault is set, otherwise, return old pc.
 */
void
trap(struct trap_frame *trapframe)
{
	struct cpu_info *ci = curcpu();
	int type, i;
	unsigned ucode = 0;
	struct proc *p = ci->ci_curproc;
	vm_prot_t ftype;
	extern vaddr_t onfault_table[];
	int onfault;
	int typ = 0;
	union sigval sv;

	trapdebug_enter(ci, trapframe, -1);

	type = (trapframe->cause & CR_EXC_CODE) >> CR_EXC_CODE_SHIFT;
	if (USERMODE(trapframe->sr)) {
		type |= T_USER;
	}

	/*
	 * Enable hardware interrupts if they were on before the trap;
	 * enable IPI interrupts only otherwise.
	 */
	if (type != T_BREAK) {
		if (ISSET(trapframe->sr, SR_INT_ENAB))
			enableintr();
		else {
#ifdef MULTIPROCESSOR
			ENABLEIPI();
#endif
		}
	}

	switch (type) {
	case T_TLB_MOD:
		/* check for kernel address */
		if (trapframe->badvaddr < 0) {
			pt_entry_t *pte, entry;
			paddr_t pa;
			vm_page_t pg;

			pte = kvtopte(trapframe->badvaddr);
			entry = *pte;
#ifdef DIAGNOSTIC
			if (!(entry & PG_V) || (entry & PG_M))
				panic("trap: ktlbmod: invalid pte");
#endif
			if (pmap_is_page_ro(pmap_kernel(),
			    trunc_page(trapframe->badvaddr), entry)) {
				/* write to read only page in the kernel */
				ftype = VM_PROT_WRITE;
				goto kernel_fault;
			}
			entry |= PG_M;
			*pte = entry;
			KERNEL_LOCK();
			pmap_update_kernel_page(trapframe->badvaddr & ~PGOFSET,
			    entry);
			pa = pfn_to_pad(entry);
			pg = PHYS_TO_VM_PAGE(pa);
			if (pg == NULL)
				panic("trap: ktlbmod: unmanaged page");
			pmap_set_modify(pg);
			KERNEL_UNLOCK();
			return;
		}
		/* FALLTHROUGH */

	case T_TLB_MOD+T_USER:
	    {
		pt_entry_t *pte, entry;
		paddr_t pa;
		vm_page_t pg;
		pmap_t pmap = p->p_vmspace->vm_map.pmap;

		if (!(pte = pmap_segmap(pmap, trapframe->badvaddr)))
			panic("trap: utlbmod: invalid segmap");
		pte += uvtopte(trapframe->badvaddr);
		entry = *pte;
#ifdef DIAGNOSTIC
		if (!(entry & PG_V) || (entry & PG_M))
			panic("trap: utlbmod: invalid pte");
#endif
		if (pmap_is_page_ro(pmap,
		    trunc_page(trapframe->badvaddr), entry)) {
			/* write to read only page */
			ftype = VM_PROT_WRITE;
			goto fault_common;
		}
		entry |= PG_M;
		*pte = entry;
		KERNEL_LOCK();
		pmap_update_user_page(pmap, (trapframe->badvaddr & ~PGOFSET), 
		    entry);
		pa = pfn_to_pad(entry);
		pg = PHYS_TO_VM_PAGE(pa);
		if (pg == NULL)
			panic("trap: utlbmod: unmanaged page");
		pmap_set_modify(pg);
		KERNEL_UNLOCK();
		if (!USERMODE(trapframe->sr))
			return;
		goto out;
	    }

	case T_TLB_LD_MISS:
	case T_TLB_ST_MISS:
		ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
		/* check for kernel address */
		if (trapframe->badvaddr < 0) {
			vaddr_t va;
			int rv;

	kernel_fault:
			va = trunc_page((vaddr_t)trapframe->badvaddr);
			onfault = p->p_addr->u_pcb.pcb_onfault;
			p->p_addr->u_pcb.pcb_onfault = 0;
			KERNEL_LOCK();
			rv = uvm_fault(kernel_map, trunc_page(va), 0, ftype);
			KERNEL_UNLOCK();
			p->p_addr->u_pcb.pcb_onfault = onfault;
			if (rv == 0)
				return;
			if (onfault != 0) {
				p->p_addr->u_pcb.pcb_onfault = 0;
				trapframe->pc = onfault_table[onfault];
				return;
			}
			goto err;
		}
		/*
		 * It is an error for the kernel to access user space except
		 * through the copyin/copyout routines.
		 */
		if (p->p_addr->u_pcb.pcb_onfault != 0) {
			/*
			 * We want to resolve the TLB fault before invoking
			 * pcb_onfault if necessary.
			 */
			goto fault_common;
		} else {
			goto err;
		}

	case T_TLB_LD_MISS+T_USER:
		ftype = VM_PROT_READ;
		goto fault_common;

	case T_TLB_ST_MISS+T_USER:
		ftype = VM_PROT_WRITE;
fault_common:
	    {
		vaddr_t va;
		struct vmspace *vm;
		vm_map_t map;
		int rv;

		vm = p->p_vmspace;
		map = &vm->vm_map;
		va = trunc_page((vaddr_t)trapframe->badvaddr);

		onfault = p->p_addr->u_pcb.pcb_onfault;
		p->p_addr->u_pcb.pcb_onfault = 0;
		KERNEL_LOCK();

		rv = uvm_fault(map, trunc_page(va), 0, ftype);
		p->p_addr->u_pcb.pcb_onfault = onfault;

		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (rv == 0)
				uvm_grow(p, va);
			else if (rv == EACCES)
				rv = EFAULT;
		}
		KERNEL_UNLOCK();
		if (rv == 0) {
			if (!USERMODE(trapframe->sr))
				return;
			goto out;
		}
		if (!USERMODE(trapframe->sr)) {
			if (onfault != 0) {
				p->p_addr->u_pcb.pcb_onfault = 0;
				trapframe->pc =  onfault_table[onfault];
				return;
			}
			goto err;
		}

#ifdef ADEBUG
printf("SIG-SEGV @%p pc %p, ra %p\n", trapframe->badvaddr, trapframe->pc, trapframe->ra);
#endif
		ucode = ftype;
		i = SIGSEGV;
		typ = SEGV_MAPERR;
		break;
	    }

	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
		ucode = 0;		/* XXX should be VM_PROT_something */
		i = SIGBUS;
		typ = BUS_ADRALN;
#ifdef ADEBUG
printf("SIG-BUSA @%p pc %p, ra %p\n", trapframe->badvaddr, trapframe->pc, trapframe->ra);
#endif
		break;
	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to cpu */
	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to cpu */
		ucode = 0;		/* XXX should be VM_PROT_something */
		i = SIGBUS;
		typ = BUS_OBJERR;
#ifdef ADEBUG
printf("SIG-BUSB @%p pc %p, ra %p\n", trapframe->badvaddr, trapframe->pc, trapframe->ra);
#endif
		break;

	case T_SYSCALL+T_USER:
	    {
		struct trap_frame *locr0 = p->p_md.md_regs;
		struct sysent *callp;
		unsigned int code;
		unsigned long tpc;
		int numsys;
		struct args {
			register_t i[8];
		} args;
		register_t rval[2];

		uvmexp.syscalls++;

		/* compute next PC after syscall instruction */
		tpc = trapframe->pc; /* Remember if restart */
		if (trapframe->cause & CR_BR_DELAY)
			locr0->pc = MipsEmulateBranch(locr0,
			    trapframe->pc, 0, 0);
		else
			locr0->pc += 4;
		callp = p->p_emul->e_sysent;
		numsys = p->p_emul->e_nsysent;
		code = locr0->v0;
		switch (code) {
		case SYS_syscall:
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = locr0->a0;
			if (code >= numsys)
				callp += p->p_emul->e_nosys; /* (illegal) */
			else
				callp += code;
			i = callp->sy_argsize / sizeof(register_t);
			args.i[0] = locr0->a1;
			args.i[1] = locr0->a2;
			args.i[2] = locr0->a3;
			if (i > 3) {
				args.i[3] = locr0->a4;
				args.i[4] = locr0->a5;
				args.i[5] = locr0->a6;
				args.i[6] = locr0->a7;
				i = copyin((void *)locr0->sp,
				    &args.i[7], sizeof(register_t));
			}
			break;

		case SYS___syscall:
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = locr0->a0;
			args.i[0] = locr0->a1;
			args.i[1] = locr0->a2;
			args.i[2] = locr0->a3;

			if (code >= numsys)
				callp += p->p_emul->e_nosys; /* (illegal) */
			else
				callp += code;
			i = callp->sy_argsize / sizeof(register_t);
			if (i > 3) {
				args.i[3] = locr0->a4;
				args.i[4] = locr0->a5;
				args.i[5] = locr0->a6;
				args.i[6] = locr0->a7;
				i = copyin((void *)locr0->sp, &args.i[7],
				    sizeof(register_t));
			}
			break;

		default:
			if (code >= numsys)
				callp += p->p_emul->e_nosys; /* (illegal) */
			else
				callp += code;

			i = callp->sy_narg;
			args.i[0] = locr0->a0;
			args.i[1] = locr0->a1;
			args.i[2] = locr0->a2;
			args.i[3] = locr0->a3;
			if (i > 4) {
				args.i[4] = locr0->a4;
				args.i[5] = locr0->a5;
				args.i[6] = locr0->a6;
				args.i[7] = locr0->a7;
			}
		}
#ifdef SYSCALL_DEBUG
		KERNEL_LOCK();
		scdebug_call(p, code, args.i);
		KERNEL_UNLOCK();
#endif
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSCALL)) {
			KERNEL_LOCK();
			ktrsyscall(p, code, callp->sy_argsize, args.i);
			KERNEL_UNLOCK();
		}
#endif
		rval[0] = 0;
		rval[1] = locr0->v1;
#if defined(DDB) || defined(DEBUG)
		trapdebug[TRAPSIZE * ci->ci_cpuid + (trppos[ci->ci_cpuid] == 0 ?
		    TRAPSIZE : trppos[ci->ci_cpuid]) - 1].code = code;
#endif

#if NSYSTRACE > 0
		if (ISSET(p->p_flag, P_SYSTRACE)) {
			KERNEL_LOCK();
			i = systrace_redirect(code, p, args.i, rval);
			KERNEL_UNLOCK();
		} else
#endif
		{
			int nolock = (callp->sy_flags & SY_NOLOCK);
			if (!nolock)
				KERNEL_LOCK();
			i = (*callp->sy_call)(p, &args, rval);
			if (!nolock)
				KERNEL_UNLOCK();
		}
		switch (i) {
		case 0:
			locr0->v0 = rval[0];
			locr0->v1 = rval[1];
			locr0->a3 = 0;
			break;

		case ERESTART:
			locr0->pc = tpc;
			break;

		case EJUSTRETURN:
			break;	/* nothing to do */

		default:
			locr0->v0 = i;
			locr0->a3 = 1;
		}
#ifdef SYSCALL_DEBUG
		KERNEL_LOCK();
		scdebug_ret(p, code, i, rval);
		KERNEL_UNLOCK();
#endif
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSRET)) {
			KERNEL_LOCK();
			ktrsysret(p, code, i, rval[0]);
			KERNEL_UNLOCK();
		}
#endif
		goto out;
	    }

	case T_BREAK:
#ifdef DDB
		kdb_trap(type, trapframe);
#endif
		/* Reenable interrupts if necessary */
		if (trapframe->sr & SR_INT_ENAB) {
			enableintr();
		}
		return;

	case T_BREAK+T_USER:
	    {
		caddr_t va;
		u_int32_t instr;
		struct trap_frame *locr0 = p->p_md.md_regs;

		/* compute address of break instruction */
		va = (caddr_t)trapframe->pc;
		if (trapframe->cause & CR_BR_DELAY)
			va += 4;

		/* read break instruction */
		copyin(va, &instr, sizeof(int32_t));

		switch ((instr & BREAK_VAL_MASK) >> BREAK_VAL_SHIFT) {
		case 6:	/* gcc range error */
			i = SIGFPE;
			typ = FPE_FLTSUB;
			/* skip instruction */
			if (trapframe->cause & CR_BR_DELAY)
				locr0->pc = MipsEmulateBranch(locr0,
				    trapframe->pc, 0, 0);
			else
				locr0->pc += 4;
			break;
		case 7:	/* gcc3 divide by zero */
			i = SIGFPE;
			typ = FPE_INTDIV;
			/* skip instruction */
			if (trapframe->cause & CR_BR_DELAY)
				locr0->pc = MipsEmulateBranch(locr0,
				    trapframe->pc, 0, 0);
			else
				locr0->pc += 4;
			break;
#ifdef PTRACE
		case BREAK_SSTEP_VAL:
			if (p->p_md.md_ss_addr == (long)va) {
#ifdef DEBUG
				printf("trap: %s (%d): breakpoint at %p "
				    "(insn %08x)\n",
				    p->p_comm, p->p_pid,
				    p->p_md.md_ss_addr, p->p_md.md_ss_instr);
#endif

				/* Restore original instruction and clear BP */
				process_sstep(p, 0);
				typ = TRAP_BRKPT;
			} else {
				typ = TRAP_TRACE;
			}
			i = SIGTRAP;
			break;
#endif
#ifdef FPUEMUL
		case BREAK_FPUEMUL_VAL:
			/*
			 * If this is a genuine FP emulation break,
			 * resume execution to our branch destination.
			 */
			if ((p->p_md.md_flags & MDP_FPUSED) != 0 &&
			    p->p_md.md_fppgva + 4 == (vaddr_t)va) {
				struct vm_map *map = &p->p_vmspace->vm_map;

				p->p_md.md_flags &= ~MDP_FPUSED;
				locr0->pc = p->p_md.md_fpbranchva;

				/*
				 * Prevent access to the relocation page.
				 * XXX needs to be fixed to work with rthreads
				 */
				uvm_fault_unwire(map, p->p_md.md_fppgva,
				    p->p_md.md_fppgva + PAGE_SIZE);
				(void)uvm_map_protect(map, p->p_md.md_fppgva,
				    p->p_md.md_fppgva + PAGE_SIZE,
				    UVM_PROT_NONE, FALSE);
				goto out;
			}
			/* FALLTHROUGH */
#endif
		default:
			typ = TRAP_TRACE;
			i = SIGTRAP;
			break;
		}

		break;
	    }

	case T_IWATCH+T_USER:
	case T_DWATCH+T_USER:
	    {
		caddr_t va;
		/* compute address of trapped instruction */
		va = (caddr_t)trapframe->pc;
		if (trapframe->cause & CR_BR_DELAY)
			va += 4;
		printf("watch exception @ %p\n", va);
#ifdef RM7K_PERFCNTR
		if (rm7k_watchintr(trapframe)) {
			/* Return to user, don't add any more overhead */
			goto out;
		}
#endif
		i = SIGTRAP;
		typ = TRAP_BRKPT;
		break;
	    }

	case T_TRAP+T_USER:
	    {
		caddr_t va;
		u_int32_t instr;
		struct trap_frame *locr0 = p->p_md.md_regs;

		/* compute address of trap instruction */
		va = (caddr_t)trapframe->pc;
		if (trapframe->cause & CR_BR_DELAY)
			va += 4;
		/* read break instruction */
		copyin(va, &instr, sizeof(int32_t));

		if (trapframe->cause & CR_BR_DELAY)
			locr0->pc = MipsEmulateBranch(locr0,
			    trapframe->pc, 0, 0);
		else
			locr0->pc += 4;
#ifdef RM7K_PERFCNTR
		if (instr == 0x040c0000) { /* Performance cntr trap */
			int result;

			result = rm7k_perfcntr(trapframe->a0, trapframe->a1,
						trapframe->a2, trapframe->a3);
			locr0->v0 = -result;
			/* Return to user, don't add any more overhead */
			goto out;
		} else
#endif
		/*
		 * GCC 4 uses teq with code 7 to signal divide by
		 * zero at runtime. This is one instruction shorter
		 * than the BEQ + BREAK combination used by gcc 3.
		 */
		if ((instr & 0xfc00003f) == 0x00000034 /* teq */ &&
		    (instr & 0x001fffc0) == ((ZERO << 16) | (7 << 6))) {
			i = SIGFPE;
			typ = FPE_INTDIV;
		} else {
			i = SIGEMT;	/* Stuff it with something for now */
			typ = 0;
		}
		break;
	    }

	case T_RES_INST+T_USER:
		i = SIGILL;
		typ = ILL_ILLOPC;
		break;

	case T_COP_UNUSABLE+T_USER:
		/*
		 * Note MIPS IV COP1X instructions issued with FPU
		 * disabled correctly report coprocessor 1 as the
		 * unusable coprocessor number.
		 */
		if ((trapframe->cause & CR_COP_ERR) != 0x10000000) {
			i = SIGILL;	/* only FPU instructions allowed */
			typ = ILL_ILLOPC;
			break;
		}
#ifdef FPUEMUL
		MipsFPTrap(trapframe);
#else
		enable_fpu(p);
#endif
		goto out;

	case T_FPE:
		printf("FPU Trap: PC %x CR %x SR %x\n",
			trapframe->pc, trapframe->cause, trapframe->sr);
		goto err;

	case T_FPE+T_USER:
		MipsFPTrap(trapframe);
		goto out;

	case T_OVFLOW+T_USER:
		i = SIGFPE;
		typ = FPE_FLTOVF;
		break;

	case T_ADDR_ERR_LD:	/* misaligned access */
	case T_ADDR_ERR_ST:	/* misaligned access */
	case T_BUS_ERR_LD_ST:	/* BERR asserted to cpu */
		if ((onfault = p->p_addr->u_pcb.pcb_onfault) != 0) {
			p->p_addr->u_pcb.pcb_onfault = 0;
			trapframe->pc = onfault_table[onfault];
			return;
		}
		goto err;

	default:
	err:
		disableintr();
#if !defined(DDB) && defined(DEBUG)
		trapDump("trap");
#endif
		printf("\nTrap cause = %d Frame %p\n", type, trapframe);
		printf("Trap PC %p RA %p fault %p\n",
		    trapframe->pc, trapframe->ra, trapframe->badvaddr);
#ifdef DDB
		stacktrace(!USERMODE(trapframe->sr) ? trapframe : p->p_md.md_regs);
		kdb_trap(type, trapframe);
#endif
		panic("trap");
	}
#ifdef FPUEMUL
	/*
	 * If a relocated delay slot causes an exception, blame the
	 * original delay slot address - userland is not supposed to
	 * know anything about emulation bowels.
	 */
	if ((p->p_md.md_flags & MDP_FPUSED) != 0 &&
	    trapframe->badvaddr == p->p_md.md_fppgva)
		trapframe->badvaddr = p->p_md.md_fpslotva;
#endif
	p->p_md.md_regs->pc = trapframe->pc;
	p->p_md.md_regs->cause = trapframe->cause;
	p->p_md.md_regs->badvaddr = trapframe->badvaddr;
	sv.sival_ptr = (void *)trapframe->badvaddr;
	KERNEL_LOCK();
	trapsignal(p, i, ucode, typ, sv);
	KERNEL_UNLOCK();
out:
	/*
	 * Note: we should only get here if returning to user mode.
	 */
	userret(p);
}
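The SYS_syscall and SYS___syscall cases above implement the indirect
syscall convention: syscall(2) passes the real system call number as its
first argument, so the kernel re-reads the number from a0 and shifts every
argument down one register slot. A small userland illustration (portable C,
nothing MIPS-specific):

/* syscall(2) demo: the kernel's SYS_syscall case unpacks this. */
#include <sys/syscall.h>
#include <unistd.h>

int
main(void)
{
	/* Equivalent to write(1, "hi\n", 3): SYS_write travels in the
	 * first argument slot and the real arguments follow it. */
	syscall(SYS_write, 1, "hi\n", 3);
	return 0;
}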
Example #7
void
m88100_trap(u_int type, struct trapframe *frame)
{
	struct proc *p;
	struct vm_map *map;
	vaddr_t va, pcb_onfault;
	vm_prot_t ftype;
	int fault_type, pbus_type;
	u_long fault_code;
	vaddr_t fault_addr;
	struct vmspace *vm;
	union sigval sv;
	int result;
#ifdef DDB
	int s;
	u_int psr;
#endif
	int sig = 0;

	uvmexp.traps++;
	if ((p = curproc) == NULL)
		p = &proc0;

	if (USERMODE(frame->tf_epsr)) {
		type += T_USER;
		p->p_md.md_tf = frame;	/* for ptrace/signals */
	}
	fault_type = SI_NOINFO;
	fault_code = 0;
	fault_addr = frame->tf_sxip & XIP_ADDR;

	switch (type) {
	default:
	case T_ILLFLT:
lose:
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/

#if defined(DDB)
	case T_KDB_BREAK:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
	case T_KDB_ENTRY:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
#endif /* DDB */
	case T_MISALGNFLT:
		printf("kernel misaligned access exception @%p\n",
		    frame->tf_sxip);
		goto lose;
	case T_INSTFLT:
		/* kernel mode instruction access fault.
		 * Should never, never happen for a non-paged kernel.
		 */
#ifdef TRAPDEBUG
		pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
		printf("Kernel Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
		    pbus_type, pbus_exception_type[pbus_type],
		    fault_addr, frame, frame->tf_cpu);
#endif
		goto lose;
	case T_DATAFLT:
		/* kernel mode data fault */

		/* data fault on the user address? */
		if ((frame->tf_dmt0 & DMT_DAS) == 0) {
			KERNEL_LOCK();
			goto user_fault;
		}

		fault_addr = frame->tf_dma0;
		if (frame->tf_dmt0 & (DMT_WRITE|DMT_LOCKBAR)) {
			ftype = VM_PROT_READ|VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		} else {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		}

		va = trunc_page((vaddr_t)fault_addr);

		KERNEL_LOCK();
		vm = p->p_vmspace;
		map = kernel_map;

		pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
#ifdef TRAPDEBUG
		printf("Kernel Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
		    pbus_type, pbus_exception_type[pbus_type],
		    fault_addr, frame, frame->tf_cpu);
#endif

		switch (pbus_type) {
		case CMMU_PFSR_SUCCESS:
			/*
			 * The fault was resolved. Call data_access_emulation
			 * to drain the data unit pipe line and reset dmt0
			 * so that trap won't get called again.
			 */
			data_access_emulation((u_int *)frame);
			frame->tf_dpfsr = 0;
			frame->tf_dmt0 = 0;
			KERNEL_UNLOCK();
			return;
		case CMMU_PFSR_SFAULT:
		case CMMU_PFSR_PFAULT:
			if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
				p->p_addr->u_pcb.pcb_onfault = 0;
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
			/*
			 * This could be a fault caused in copyout*()
			 * while accessing kernel space.
			 */
			if (result != 0 && pcb_onfault != 0) {
				frame->tf_snip = pcb_onfault | NIP_V;
				frame->tf_sfip = (pcb_onfault + 4) | FIP_V;
				frame->tf_sxip = 0;
				/*
				 * Continue as if the fault had been resolved,
				 * but do not try to complete the faulting
				 * access.
				 */
				frame->tf_dmt0 |= DMT_SKIP;
				result = 0;
			}
			if (result == 0) {
				/*
				 * We could resolve the fault. Call
				 * data_access_emulation to drain the data
				 * unit pipe line and reset dmt0 so that trap
				 * won't get called again.
				 */
				data_access_emulation((u_int *)frame);
				frame->tf_dpfsr = 0;
				frame->tf_dmt0 = 0;
				KERNEL_UNLOCK();
				return;
			}
			break;
		}
#ifdef TRAPDEBUG
		printf("PBUS Fault %d (%s) va = 0x%x\n", pbus_type,
		    pbus_exception_type[pbus_type], va);
#endif
		KERNEL_UNLOCK();
		goto lose;
		/* NOTREACHED */
	case T_INSTFLT+T_USER:
		/* User mode instruction access fault */
		/* FALLTHROUGH */
	case T_DATAFLT+T_USER:
		KERNEL_LOCK();
user_fault:
		if (type == T_INSTFLT + T_USER) {
			pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
#ifdef TRAPDEBUG
			printf("User Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
			    pbus_type, pbus_exception_type[pbus_type],
			    fault_addr, frame, frame->tf_cpu);
#endif
		} else {
			fault_addr = frame->tf_dma0;
			pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
#ifdef TRAPDEBUG
			printf("User Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
			    pbus_type, pbus_exception_type[pbus_type],
			    fault_addr, frame, frame->tf_cpu);
#endif
		}

		if (frame->tf_dmt0 & (DMT_WRITE | DMT_LOCKBAR)) {
			ftype = VM_PROT_READ | VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		} else {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		}

		va = trunc_page((vaddr_t)fault_addr);

		vm = p->p_vmspace;
		map = &vm->vm_map;
		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
			p->p_addr->u_pcb.pcb_onfault = 0;

		/* Call uvm_fault() to resolve non-bus error faults */
		switch (pbus_type) {
		case CMMU_PFSR_SUCCESS:
			result = 0;
			break;
		case CMMU_PFSR_BERROR:
			result = EACCES;
			break;
		default:
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			break;
		}

		p->p_addr->u_pcb.pcb_onfault = pcb_onfault;

		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (result == 0)
				uvm_grow(p, va);
			else if (result == EACCES)
				result = EFAULT;
		}

		/*
		 * This could be a fault caused in copyin*()
		 * while accessing user space.
		 */
		if (result != 0 && pcb_onfault != 0) {
			frame->tf_snip = pcb_onfault | NIP_V;
			frame->tf_sfip = (pcb_onfault + 4) | FIP_V;
			frame->tf_sxip = 0;
			/*
			 * Continue as if the fault had been resolved, but
			 * do not try to complete the faulting access.
			 */
			frame->tf_dmt0 |= DMT_SKIP;
			result = 0;
		}

		if (result == 0) {
			if (type == T_INSTFLT + T_USER) {
				/*
				 * back up SXIP, SNIP,
				 * clearing the Error bit
				 */
				frame->tf_sfip = frame->tf_snip & ~FIP_E;
				frame->tf_snip = frame->tf_sxip & ~NIP_E;
				frame->tf_ipfsr = 0;
			} else {
				/*
			 	 * We could resolve the fault. Call
			 	 * data_access_emulation to drain the data unit
			 	 * pipe line and reset dmt0 so that trap won't
			 	 * get called again.
			 	 */
				data_access_emulation((u_int *)frame);
				frame->tf_dpfsr = 0;
				frame->tf_dmt0 = 0;
			}
		} else {
			sig = result == EACCES ? SIGBUS : SIGSEGV;
			fault_type = result == EACCES ?
			    BUS_ADRERR : SEGV_MAPERR;
		}
		KERNEL_UNLOCK();
		break;
	case T_MISALGNFLT+T_USER:
		/* Fix any misaligned ld.d or st.d instructions */
		sig = double_reg_fixup(frame);
		fault_type = BUS_ADRALN;
		break;
	case T_PRIVINFLT+T_USER:
	case T_ILLFLT+T_USER:
#ifndef DDB
	case T_KDB_BREAK:
	case T_KDB_ENTRY:
#endif
	case T_KDB_BREAK+T_USER:
	case T_KDB_ENTRY+T_USER:
	case T_KDB_TRACE:
	case T_KDB_TRACE+T_USER:
		sig = SIGILL;
		break;
	case T_BNDFLT+T_USER:
		sig = SIGFPE;
		break;
	case T_ZERODIV+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTDIV;
		break;
	case T_OVFFLT+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTOVF;
		break;
	case T_FPEPFLT+T_USER:
		sig = SIGFPE;
		break;
	case T_SIGSYS+T_USER:
		sig = SIGSYS;
		break;
	case T_STEPBPT+T_USER:
#ifdef PTRACE
		/*
		 * This trap is used by the kernel to support single-step
		 * debugging (although any user could generate this trap
		 * which should probably be handled differently). When a
		 * process is continued by a debugger with the PT_STEP
		 * function of ptrace (single step), the kernel inserts
		 * one or two breakpoints in the user process so that only
		 * one instruction (or two in the case of a delayed branch)
		 * is executed.  When this breakpoint is hit, we get the
		 * T_STEPBPT trap.
		 */
		{
			u_int instr;
			vaddr_t pc = PC_REGS(&frame->tf_regs);

			/* read break instruction */
			copyin((caddr_t)pc, &instr, sizeof(u_int));

			/* check and see if we got here by accident */
			if ((p->p_md.md_bp0va != pc &&
			     p->p_md.md_bp1va != pc) ||
			    instr != SSBREAKPOINT) {
				sig = SIGTRAP;
				fault_type = TRAP_TRACE;
				break;
			}

			/* restore original instruction and clear breakpoint */
			if (p->p_md.md_bp0va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp0save);
				p->p_md.md_bp0va = 0;
			}
			if (p->p_md.md_bp1va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp1save);
				p->p_md.md_bp1va = 0;
			}

#if 1
			frame->tf_sfip = frame->tf_snip;
			frame->tf_snip = pc | NIP_V;
#endif
			sig = SIGTRAP;
			fault_type = TRAP_BRKPT;
		}
#else
		sig = SIGTRAP;
		fault_type = TRAP_TRACE;
#endif
		break;

	case T_USERBPT+T_USER:
		/*
		 * This trap is meant to be used by debuggers to implement
		 * breakpoint debugging.  When we get this trap, we just
		 * return a signal which gets caught by the debugger.
		 */
		frame->tf_sfip = frame->tf_snip;
		frame->tf_snip = frame->tf_sxip;
		sig = SIGTRAP;
		fault_type = TRAP_BRKPT;
		break;

	}

	/*
	 * If trap from supervisor mode, just return
	 */
	if (type < T_USER)
		return;

	if (sig) {
		sv.sival_ptr = (void *)fault_addr;
		KERNEL_LOCK();
		trapsignal(p, sig, fault_code, fault_type, sv);
		KERNEL_UNLOCK();
		/*
		 * don't want multiple faults - we are going to
		 * deliver signal.
		 */
		frame->tf_dmt0 = 0;
		frame->tf_ipfsr = frame->tf_dpfsr = 0;
	}

	userret(p);
}
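The T_STEPBPT+T_USER case above is the kernel half of single-step
debugging: ptrace(PT_STEP) plants the breakpoints, and this handler removes
them and converts the trap into SIGTRAP for the debugger. A hedged userland
sketch of the other half (BSD-style ptrace; error checking trimmed):

/* Sketch: single-step a traced child a few times via ptrace(PT_STEP). */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int
main(void)
{
	int i, status;
	pid_t pid = fork();

	if (pid == 0) {
		ptrace(PT_TRACE_ME, 0, NULL, 0);
		execl("/bin/echo", "echo", "hi", (char *)NULL);
		_exit(1);
	}
	waitpid(pid, &status, 0);		/* child stops at exec */
	for (i = 0; i < 100 && WIFSTOPPED(status); i++) {
		/* each step ends in the T_STEPBPT trap handled above */
		if (ptrace(PT_STEP, pid, (caddr_t)1, 0) == -1)
			break;
		waitpid(pid, &status, 0);
	}
	ptrace(PT_CONTINUE, pid, (caddr_t)1, 0);
	waitpid(pid, &status, 0);
	return 0;
}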
/*
 * Post-syscall processing.  Perform abnormal system call completion
 * actions such as /proc tracing, profiling, signals, preemption, etc.
 *
 * This routine is called only if t_post_sys, t_sig_check, or t_astflag is set.
 * Any condition requiring post-syscall handling must set one of these.
 * If the condition is persistent, this routine will repost t_post_sys.
 */
void
post_syscall(long rval1, long rval2)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	struct regs *rp = lwptoregs(lwp);
	uint_t	error;
	uint_t	code = t->t_sysnum;
	int	repost = 0;
	int	proc_stop = 0;		/* non-zero if stopping */
	int	sigprof = 0;		/* non-zero if sending SIGPROF */

	t->t_post_sys = 0;

	error = lwp->lwp_errno;

	/*
	 * Code can be zero if this is a new LWP returning after a forkall(),
	 * other than the one which matches the one in the parent which called
	 * forkall().  In these LWPs, skip most of post-syscall activity.
	 */
	if (code == 0)
		goto sig_check;
	/*
	 * If the trace flag is set, mark the lwp to take a single-step trap
	 * on return to user level (below). The x86 lcall interface and
	 * sysenter have already done this, and turned off the flag, but
	 * the amd64 syscall interface has not.
	 */
	if (rp->r_ps & PS_T) {
		lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
		rp->r_ps &= ~PS_T;
		aston(curthread);
	}
	if (audit_active) {	/* put out audit record for this syscall */
		rval_t	rval;

		/* XX64 -- truncation of 64-bit return values? */
		rval.r_val1 = (int)rval1;
		rval.r_val2 = (int)rval2;
		audit_finish(T_SYSCALL, code, error, &rval);
		repost = 1;
	}

	if (curthread->t_pdmsg != NULL) {
		char *m = curthread->t_pdmsg;

		uprintf("%s", m);
		kmem_free(m, strlen(m) + 1);
		curthread->t_pdmsg = NULL;
	}

	/*
	 * If we're going to stop for /proc tracing, set the flag and
	 * save the arguments so that the return values don't smash them.
	 */
	if (PTOU(p)->u_systrap) {
		if (prismember(&PTOU(p)->u_exitmask, code)) {
			if (lwp_getdatamodel(lwp) == DATAMODEL_LP64)
				(void) save_syscall_args();
			proc_stop = 1;
		}
		repost = 1;
	}

	/*
	 * Similarly check to see if SIGPROF might be sent.
	 */
	if (curthread->t_rprof != NULL &&
	    curthread->t_rprof->rp_anystate != 0) {
		if (lwp_getdatamodel(lwp) == DATAMODEL_LP64)
			(void) save_syscall_args();
		sigprof = 1;
	}

	if (lwp->lwp_eosys == NORMALRETURN) {
		if (error == 0) {
#ifdef SYSCALLTRACE
			if (syscalltrace) {
				mutex_enter(&systrace_lock);
				printf(
				    "%d: r_val1=0x%lx, r_val2=0x%lx, id 0x%p\n",
				    p->p_pid, rval1, rval2, curthread);
				mutex_exit(&systrace_lock);
			}
#endif /* SYSCALLTRACE */
			rp->r_ps &= ~PS_C;
			rp->r_r0 = rval1;
			rp->r_r1 = rval2;
		} else {
			int sig;
#ifdef SYSCALLTRACE
			if (syscalltrace) {
				mutex_enter(&systrace_lock);
				printf("%d: error=%d, id 0x%p\n",
				    p->p_pid, error, curthread);
				mutex_exit(&systrace_lock);
			}
#endif /* SYSCALLTRACE */
			if (error == EINTR && t->t_activefd.a_stale)
				error = EBADF;
			if (error == EINTR &&
			    (sig = lwp->lwp_cursig) != 0 &&
			    sigismember(&PTOU(p)->u_sigrestart, sig) &&
			    PTOU(p)->u_signal[sig - 1] != SIG_DFL &&
			    PTOU(p)->u_signal[sig - 1] != SIG_IGN)
				error = ERESTART;
			rp->r_r0 = error;
			rp->r_ps |= PS_C;
		}
	}

	/*
	 * From the proc(4) manual page:
	 * When exit from a system call is being traced, the traced process
	 * stops on completion of the system call just prior to checking for
	 * signals and returning to user level.  At this point all return
	 * values have been stored into the traced process's saved registers.
	 */
	if (proc_stop) {
		mutex_enter(&p->p_lock);
		if (PTOU(p)->u_systrap &&
		    prismember(&PTOU(p)->u_exitmask, code))
			stop(PR_SYSEXIT, code);
		mutex_exit(&p->p_lock);
	}

	/*
	 * If we are the parent returning from a successful
	 * vfork, wait for the child to exec or exit.
	 * This code must be here and not in the bowels of the system
	 * so that /proc can intercept exit from vfork in a timely way.
	 */
	if (t->t_flag & T_VFPARENT) {
		ASSERT(code == SYS_vfork || code == SYS_forksys);
		ASSERT(rp->r_r1 == 0 && error == 0);
		vfwait((pid_t)rval1);
		t->t_flag &= ~T_VFPARENT;
	}

	/*
	 * If profiling is active, bill the current PC in user-land
	 * and keep reposting until profiling is disabled.
	 */
	if (p->p_prof.pr_scale) {
		if (lwp->lwp_oweupc)
			profil_tick(rp->r_pc);
		repost = 1;
	}

sig_check:
	/*
	 * Reset flag for next time.
	 * We must do this after stopping on PR_SYSEXIT
	 * because /proc uses the information in lwp_eosys.
	 */
	lwp->lwp_eosys = NORMALRETURN;
	clear_stale_fd();
	t->t_flag &= ~T_FORKALL;

	if (t->t_astflag | t->t_sig_check) {
		/*
		 * Turn off the AST flag before checking all the conditions that
		 * may have caused an AST.  This flag is on whenever a signal or
		 * unusual condition should be handled after the next trap or
		 * syscall.
		 */
		astoff(t);
		/*
		 * If a single-step trap occurred on a syscall (see trap())
		 * recognize it now.  Do this before checking for signals
		 * because deferred_singlestep_trap() may generate a SIGTRAP to
		 * the LWP or may otherwise mark the LWP to call issig(FORREAL).
		 */
		if (lwp->lwp_pcb.pcb_flags & DEBUG_PENDING)
			deferred_singlestep_trap((caddr_t)rp->r_pc);

		t->t_sig_check = 0;

		/*
		 * The following check is legal for the following reasons:
		 *	1) The thread we are checking, is ourselves, so there is
		 *	   no way the proc can go away.
		 *	2) The only time we need to be protected by the
		 *	   lock is if the binding is changed.
		 *
		 *	Note we will still take the lock and check the binding
		 *	if the condition was true without the lock held.  This
		 *	prevents lock contention among threads owned by the
		 * 	same proc.
		 */

		if (curthread->t_proc_flag & TP_CHANGEBIND) {
			mutex_enter(&p->p_lock);
			if (curthread->t_proc_flag & TP_CHANGEBIND) {
				timer_lwpbind();
				curthread->t_proc_flag &= ~TP_CHANGEBIND;
			}
			mutex_exit(&p->p_lock);
		}

		/*
		 * for kaio requests on the special kaio poll queue,
		 * copyout their results to user memory.
		 */
		if (p->p_aio)
			aio_cleanup(0);
		/*
		 * If this LWP was asked to hold, call holdlwp(), which will
		 * stop.  holdlwps() sets this up and calls pokelwps() which
		 * sets the AST flag.
		 *
		 * Also check TP_EXITLWP, since this is used by fresh new LWPs
		 * through lwp_rtt().  That flag is set if the lwp_create(2)
		 * syscall failed after creating the LWP.
		 */
		if (ISHOLD(p) || (t->t_proc_flag & TP_EXITLWP))
			holdlwp();

		/*
		 * All code that sets signals and makes ISSIG_PENDING
		 * evaluate true must set t_sig_check afterwards.
		 */
		if (ISSIG_PENDING(t, lwp, p)) {
			if (issig(FORREAL))
				psig();
			t->t_sig_check = 1;	/* recheck next time */
		}

		if (sigprof) {
			int nargs = (code > 0 && code < NSYSCALL)?
			    LWP_GETSYSENT(lwp)[code].sy_narg : 0;
			realsigprof(code, nargs, error);
			t->t_sig_check = 1;	/* recheck next time */
		}

		/*
		 * If a performance counter overflow interrupt was
		 * delivered *during* the syscall, then re-enable the
		 * AST so that we take a trip through trap() to cause
		 * the SIGEMT to be delivered.
		 */
		if (lwp->lwp_pcb.pcb_flags & CPC_OVERFLOW)
			aston(t);

		/*
		 * /proc can't enable/disable the trace bit itself
		 * because that could race with the call gate used by
		 * system calls via "lcall". If that happened, an
		 * invalid EFLAGS would result. prstep()/prnostep()
		 * therefore schedule an AST for the purpose.
		 */
		if (lwp->lwp_pcb.pcb_flags & REQUEST_STEP) {
			lwp->lwp_pcb.pcb_flags &= ~REQUEST_STEP;
			rp->r_ps |= PS_T;
		}
		if (lwp->lwp_pcb.pcb_flags & REQUEST_NOSTEP) {
			lwp->lwp_pcb.pcb_flags &= ~REQUEST_NOSTEP;
			rp->r_ps &= ~PS_T;
		}
	}

	lwp->lwp_errno = 0;		/* clear error for next time */

#ifndef NPROBE
	/* Kernel probe */
	if (tnf_tracing_active) {
		TNF_PROBE_3(syscall_end, "syscall thread", /* CSTYLED */,
		    tnf_long,	rval1,		rval1,
		    tnf_long,	rval2,		rval2,
		    tnf_long,	errno,		(long)error);
		repost = 1;
	}
#endif /* NPROBE */

	/*
	 * Set state to LWP_USER here so preempt won't give us a kernel
	 * priority if it occurs after this point.  Call CL_TRAPRET() to
	 * restore the user-level priority.
	 *
	 * It is important that no locks (other than spinlocks) be entered
	 * after this point before returning to user mode (unless lwp_state
	 * is set back to LWP_SYS).
	 *
	 * XXX Sampled times past this point are charged to the user.
	 */
	lwp->lwp_state = LWP_USER;

	if (t->t_trapret) {
		t->t_trapret = 0;
		thread_lock(t);
		CL_TRAPRET(t);
		thread_unlock(t);
	}
	if (CPU->cpu_runrun || t->t_schedflag & TS_ANYWAITQ)
		preempt();
	prunstop();

	lwp->lwp_errno = 0;		/* clear error for next time */

	/*
	 * The thread lock must be held in order to clear sysnum and reset
	 * lwp_ap atomically with respect to other threads in the system that
	 * may be looking at the args via lwp_ap from get_syscall_args().
	 */

	thread_lock(t);
	t->t_sysnum = 0;		/* no longer in a system call */

	if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE) {
#if defined(_LP64)
		/*
		 * In case the args were copied to the lwp, reset the
		 * pointer so the next syscall will have the right
		 * lwp_ap pointer.
		 */
		lwp->lwp_ap = (long *)&rp->r_rdi;
	} else {
#endif
		lwp->lwp_ap = NULL;	/* reset on every syscall entry */
	}
	thread_unlock(t);

	lwp->lwp_argsaved = 0;

	/*
	 * If there was a continuing reason for post-syscall processing,
	 * set the t_post_sys flag for the next system call.
	 */
	if (repost)
		t->t_post_sys = 1;

	/*
	 * If there is a ustack registered for this lwp, and the stack rlimit
	 * has been altered, read in the ustack. If the saved stack rlimit
	 * matches the bounds of the ustack, update the ustack to reflect
	 * the new rlimit. If the new stack rlimit is RLIM_INFINITY, disable
	 * stack checking by setting the size to 0.
	 */
	if (lwp->lwp_ustack != 0 && lwp->lwp_old_stk_ctl != 0) {
		rlim64_t new_size;
		caddr_t top;
		stack_t stk;
		struct rlimit64 rl;

		mutex_enter(&p->p_lock);
		new_size = p->p_stk_ctl;
		top = p->p_usrstack;
		(void) rctl_rlimit_get(rctlproc_legacy[RLIMIT_STACK], p, &rl);
		mutex_exit(&p->p_lock);

		if (rl.rlim_cur == RLIM64_INFINITY)
			new_size = 0;

		if (copyin((stack_t *)lwp->lwp_ustack, &stk,
		    sizeof (stack_t)) == 0 &&
		    (stk.ss_size == lwp->lwp_old_stk_ctl ||
		    stk.ss_size == 0) &&
		    stk.ss_sp == top - stk.ss_size) {
			stk.ss_sp = (void *)((uintptr_t)stk.ss_sp +
			    stk.ss_size - (uintptr_t)new_size);
			stk.ss_size = new_size;

			(void) copyout(&stk, (stack_t *)lwp->lwp_ustack,
			    sizeof (stack_t));
		}

		lwp->lwp_old_stk_ctl = 0;
	}
}
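The NORMALRETURN error block above is where EINTR is rewritten to ERESTART
when the interrupting signal's disposition allows restarting; the syscall is
then transparently reissued instead of failing back to the application. A
userland demonstration of that contract:

/* With SA_RESTART, a read(2) interrupted by SIGALRM is restarted. */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void
on_alarm(int sig)
{
	(void)sig;	/* handler exists only to interrupt the read */
}

int
main(void)
{
	struct sigaction sa;
	char buf[64];
	ssize_t n;

	sa.sa_handler = on_alarm;
	sa.sa_flags = SA_RESTART;	/* drop this flag to observe EINTR */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGALRM, &sa, NULL);
	alarm(1);
	/* With SA_RESTART the kernel turns EINTR into ERESTART and the
	 * read resumes; without it, read fails with EINTR after ~1s. */
	n = read(STDIN_FILENO, buf, sizeof(buf));
	printf("read returned %zd\n", n);
	return 0;
}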
/*
 * Set various fields of the dqblk according to the command.
 * Q_SETQUOTA - assign an entire dqblk structure.
 * Q_SETQLIM - assign a dqblk structure except for the usage.
 */
static int
setquota(int cmd, uid_t uid, struct ufsvfs *ufsvfsp,
    caddr_t addr, struct cred *cr)
{
	struct dquot *dqp;
	struct inode	*qip;
	struct dquot *xdqp;
	struct dqblk newlim;
	int error;
	int scan_type = SQD_TYPE_NONE;
	daddr_t bn;
	int contig;

	if (secpolicy_fs_quota(cr, ufsvfsp->vfs_vfs) != 0)
		return (EPERM);

	rw_enter(&ufsvfsp->vfs_dqrwlock, RW_WRITER);

	/*
	 * Quotas are not enabled on this file system so there is
	 * nothing more to do.
	 */
	if ((ufsvfsp->vfs_qflags & MQ_ENABLED) == 0) {
		rw_exit(&ufsvfsp->vfs_dqrwlock);
		return (ESRCH);
	}

	/*
	 * At this point, the quota subsystem is quiescent on this file
	 * system so we can do all the work necessary to modify the quota
	 * information for this user.
	 */

	if (copyin(addr, (caddr_t)&newlim, sizeof (struct dqblk)) != 0) {
		rw_exit(&ufsvfsp->vfs_dqrwlock);
		return (EFAULT);
	}
	error = getdiskquota(uid, ufsvfsp, 0, &xdqp);
	if (error) {
		rw_exit(&ufsvfsp->vfs_dqrwlock);
		return (error);
	}
	dqp = xdqp;
	/*
	 * Don't change disk usage on Q_SETQLIM
	 */
	mutex_enter(&dqp->dq_lock);
	if (cmd == Q_SETQLIM) {
		newlim.dqb_curblocks = dqp->dq_curblocks;
		newlim.dqb_curfiles = dqp->dq_curfiles;
	}
	if (uid == 0) {
		/*
		 * Timelimits for uid 0 set the relative time
		 * the other users can be over quota for this file system.
		 * If it is zero a default is used (see quota.h).
		 */
		ufsvfsp->vfs_btimelimit =
		    newlim.dqb_btimelimit? newlim.dqb_btimelimit: DQ_BTIMELIMIT;
		ufsvfsp->vfs_ftimelimit =
		    newlim.dqb_ftimelimit? newlim.dqb_ftimelimit: DQ_FTIMELIMIT;
	} else {
		if (newlim.dqb_bsoftlimit &&
		    newlim.dqb_curblocks >= newlim.dqb_bsoftlimit) {
			if (dqp->dq_bsoftlimit == 0 ||
			    dqp->dq_curblocks < dqp->dq_bsoftlimit) {
				/* If we're suddenly over the limit(s),	*/
				/* start the timer(s)			*/
				newlim.dqb_btimelimit =
				    (uint32_t)gethrestime_sec() +
				    ufsvfsp->vfs_btimelimit;
				dqp->dq_flags &= ~DQ_BLKS;
			} else {
				/* If we're currently over the soft	*/
				/* limit and were previously over the	*/
				/* soft limit then preserve the old	*/
				/* time limit but make sure the DQ_BLKS	*/
				/* flag is set since we must have been	*/
				/* previously warned.			*/
				newlim.dqb_btimelimit = dqp->dq_btimelimit;
				dqp->dq_flags |= DQ_BLKS;
			}
		} else {
			/* Either no quota or under quota, clear time limit */
			newlim.dqb_btimelimit = 0;
			dqp->dq_flags &= ~DQ_BLKS;
		}

		if (newlim.dqb_fsoftlimit &&
		    newlim.dqb_curfiles >= newlim.dqb_fsoftlimit) {
			if (dqp->dq_fsoftlimit == 0 ||
			    dqp->dq_curfiles < dqp->dq_fsoftlimit) {
				/* If we're suddenly over the limit(s),	*/
				/* start the timer(s)			*/
				newlim.dqb_ftimelimit =
				    (uint32_t)gethrestime_sec() +
				    ufsvfsp->vfs_ftimelimit;
				dqp->dq_flags &= ~DQ_FILES;
			} else {
				/* If we're currently over the soft	*/
				/* limit and were previously over the	*/
				/* soft limit then preserve the old	*/
				/* time limit but make sure the		*/
				/* DQ_FILES flag is set since we must	*/
				/* have been previously warned.		*/
				newlim.dqb_ftimelimit = dqp->dq_ftimelimit;
				dqp->dq_flags |= DQ_FILES;
			}
		} else {
			/* Either no quota or under quota, clear time limit */
			newlim.dqb_ftimelimit = 0;
			dqp->dq_flags &= ~DQ_FILES;
		}
	}

	/*
	 * If there was previously no limit and there is now at least
	 * one limit, then any inodes in the cache have NULL d_iquot
	 * fields (getinoquota() returns NULL when there are no limits).
	 */
	if ((dqp->dq_fhardlimit == 0 && dqp->dq_fsoftlimit == 0 &&
	    dqp->dq_bhardlimit == 0 && dqp->dq_bsoftlimit == 0) &&
	    (newlim.dqb_fhardlimit || newlim.dqb_fsoftlimit ||
	    newlim.dqb_bhardlimit || newlim.dqb_bsoftlimit)) {
		scan_type = SQD_TYPE_LIMIT;
	}

	/*
	 * If there was previously at least one limit and there is now
	 * no limit, then any inodes in the cache with non-NULL d_iquot
	 * fields need to have them reset to NULL.
	 */
	else if ((dqp->dq_fhardlimit || dqp->dq_fsoftlimit ||
	    dqp->dq_bhardlimit || dqp->dq_bsoftlimit) &&
	    (newlim.dqb_fhardlimit == 0 && newlim.dqb_fsoftlimit == 0 &&
	    newlim.dqb_bhardlimit == 0 && newlim.dqb_bsoftlimit == 0)) {
		scan_type = SQD_TYPE_NO_LIMIT;
	}

	dqp->dq_dqb = newlim;
	dqp->dq_flags |= DQ_MOD;

	/*
	 *  push the new quota to disk now.  If this is a trans device
	 *  then force the page out with ufs_putpage so it will be deltaed
	 *  by ufs_startio.
	 */
	qip = ufsvfsp->vfs_qinod;
	rw_enter(&qip->i_contents, RW_WRITER);
	(void) ufs_rdwri(UIO_WRITE, FWRITE | FSYNC, qip, (caddr_t)&dqp->dq_dqb,
	    sizeof (struct dqblk), dqoff(uid), UIO_SYSSPACE,
	    (int *)NULL, kcred);
	rw_exit(&qip->i_contents);

	(void) VOP_PUTPAGE(ITOV(qip), dqoff(dqp->dq_uid) & ~qip->i_fs->fs_bmask,
	    qip->i_fs->fs_bsize, B_INVAL, kcred, NULL);

	/*
	 * We must set dq_mof even if we are not logging, in case the
	 * file system is later remounted with logging enabled.
	 */
	contig = 0;
	rw_enter(&qip->i_contents, RW_WRITER);
	error = bmap_read(qip, dqoff(dqp->dq_uid), &bn, &contig);
	rw_exit(&qip->i_contents);
	if (error || (bn == UFS_HOLE)) {
		dqp->dq_mof = UFS_HOLE;
	} else {
		dqp->dq_mof = ldbtob(bn) +
		    (offset_t)((dqoff(dqp->dq_uid)) & (DEV_BSIZE - 1));
	}

	dqp->dq_flags &= ~DQ_MOD;
	dqput(dqp);
	mutex_exit(&dqp->dq_lock);
	if (scan_type) {
		struct setquota_data sqd;

		sqd.sqd_type = scan_type;
		sqd.sqd_ufsvfsp = ufsvfsp;
		sqd.sqd_uid = uid;
		(void) ufs_scan_inodes(0, setquota_scan_inode, &sqd, ufsvfsp);
	}
	rw_exit(&ufsvfsp->vfs_dqrwlock);
	return (0);
}
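
A note on the soft-limit bookkeeping above: a grace timer starts only on the transition from under to over the soft limit, and the old expiry is preserved while the user stays over it. A minimal user-space sketch of that rule (field and macro names are hypothetical stand-ins for the dqblk fields and DQ_BTIMELIMIT):

#include <stdint.h>
#include <time.h>

/* Hypothetical grace period, standing in for DQ_BTIMELIMIT. */
#define GRACE_SECS	(7 * 24 * 60 * 60)

struct limit_state {
	uint64_t cur;		/* current usage */
	uint64_t soft;		/* soft limit; 0 means no limit */
	uint32_t timelimit;	/* absolute expiry; 0 means no timer */
};

/*
 * Apply the same transition rules setquota() uses: start the timer
 * only when newly over the soft limit, keep the old expiry while
 * still over, and clear it when under quota or unlimited.
 */
static void
update_timer(const struct limit_state *old, struct limit_state *new)
{
	if (new->soft && new->cur >= new->soft) {
		if (old->soft == 0 || old->cur < old->soft)
			new->timelimit = (uint32_t)time(NULL) + GRACE_SECS;
		else
			new->timelimit = old->timelimit;
	} else {
		new->timelimit = 0;
	}
}
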
int
signotify(int cmd, siginfo_t *siginfo, signotify_id_t *sn_id)
{
	k_siginfo_t	info;
	signotify_id_t	id;
	proc_t		*p;
	proc_t		*cp = curproc;
	signotifyq_t	*snqp;
	struct cred	*cr;
	sigqueue_t	*sqp;
	sigqhdr_t	*sqh;
	u_longlong_t	sid;
	model_t 	datamodel = get_udatamodel();

	if (copyin(sn_id, &id, sizeof (signotify_id_t)))
		return (set_errno(EFAULT));

	if (id.sn_index >= _SIGNOTIFY_MAX || id.sn_index < 0)
		return (set_errno(EINVAL));

	switch (cmd) {
	case SN_PROC:
		/* get snid for the given user address of signotify_id_t */
		sid = get_sigid(cp, (caddr_t)sn_id);

		if (id.sn_pid > 0) {
			mutex_enter(&pidlock);
			if ((p = prfind(id.sn_pid)) != NULL) {
				mutex_enter(&p->p_lock);
				if (p->p_signhdr != NULL) {
					snqp = SIGN_PTR(p, id.sn_index);
					if (snqp->sn_snid == sid) {
						mutex_exit(&p->p_lock);
						mutex_exit(&pidlock);
						return (set_errno(EBUSY));
					}
				}
				mutex_exit(&p->p_lock);
			}
			mutex_exit(&pidlock);
		}

		if (copyin_siginfo(datamodel, siginfo, &info))
			return (set_errno(EFAULT));

		/* The si_code value must indicate the signal will be queued */
		if (!sigwillqueue(info.si_signo, info.si_code))
			return (set_errno(EINVAL));

		if (cp->p_signhdr == NULL) {
			/* Allocate signotify pool first time */
			sqh = sigqhdralloc(sizeof (signotifyq_t),
			    _SIGNOTIFY_MAX);
			mutex_enter(&cp->p_lock);
			if (cp->p_signhdr == NULL) {
				/* hang the pool head on proc */
				cp->p_signhdr = sqh;
			} else {
				/* another lwp allocated the pool, free ours */
				sigqhdrfree(sqh);
			}
		} else {
			mutex_enter(&cp->p_lock);
		}

		sqp = sigqalloc(cp->p_signhdr);
		if (sqp == NULL) {
			mutex_exit(&cp->p_lock);
			return (set_errno(EAGAIN));
		}
		cr = CRED();
		sqp->sq_info = info;
		sqp->sq_info.si_pid = cp->p_pid;
		sqp->sq_info.si_ctid = PRCTID(cp);
		sqp->sq_info.si_zoneid = getzoneid();
		sqp->sq_info.si_uid = crgetruid(cr);

		/* fill the signotifyq_t fields */
		((signotifyq_t *)sqp)->sn_snid = sid;

		mutex_exit(&cp->p_lock);

		/* complete the signotify_id_t fields */
		id.sn_index = (signotifyq_t *)sqp - SIGN_PTR(cp, 0);
		id.sn_pid = cp->p_pid;

		break;

	case SN_CANCEL:
	case SN_SEND:

		sid = get_sigid(cp, (caddr_t)sn_id);
		mutex_enter(&pidlock);
		if ((id.sn_pid <= 0) || ((p = prfind(id.sn_pid)) == NULL)) {
			mutex_exit(&pidlock);
			return (set_errno(EINVAL));
		}
		mutex_enter(&p->p_lock);
		mutex_exit(&pidlock);

		if (p->p_signhdr == NULL) {
			mutex_exit(&p->p_lock);
			return (set_errno(EINVAL));
		}

		snqp = SIGN_PTR(p, id.sn_index);

		if (snqp->sn_snid == 0) {
			mutex_exit(&p->p_lock);
			return (set_errno(EINVAL));
		}

		if (snqp->sn_snid != sid) {
			mutex_exit(&p->p_lock);
			return (set_errno(EINVAL));
		}

		snqp->sn_snid = 0;

		/* cmd == SN_CANCEL or signo == 0 (SIGEV_NONE) */
		if (((sigqueue_t *)snqp)->sq_info.si_signo <= 0)
			cmd = SN_CANCEL;

		sigqsend(cmd, p, 0, (sigqueue_t *)snqp);
		mutex_exit(&p->p_lock);

		id.sn_pid = 0;
		id.sn_index = 0;

		break;

	default:
		return (set_errno(EINVAL));
	}

	if (copyout(&id, sn_id, sizeof (signotify_id_t)))
		return (set_errno(EFAULT));

	return (0);
}
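
The SN_PROC branch allocates the signotify pool before taking p_lock, re-checks under the lock, and frees its copy if another lwp won the race. The same double-checked allocation pattern in portable C, sketched with pthreads (names hypothetical, not the kernel API):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static void *pool_head;		/* plays the role of p->p_signhdr */

/* Allocate outside the lock, publish under it, discard ours if we raced. */
static void *
get_pool(size_t size)
{
	if (pool_head == NULL) {	/* unlocked peek, as in signotify() */
		void *mine = malloc(size);	/* may block; done unlocked */
		pthread_mutex_lock(&pool_lock);
		if (pool_head == NULL)
			pool_head = mine;	/* we won: hang our pool */
		else
			free(mine);		/* lost the race: free ours */
		pthread_mutex_unlock(&pool_lock);
	}
	return pool_head;
}
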
Example #11
/*
 * Ioctl routine for generic ppp devices.
 */
int
pppioctl(struct ppp_softc *sc, u_long cmd, caddr_t data,
         int flag, struct ucred *cred)
{
    int error, flags, mru, npx;
    u_int nb;
    struct ppp_option_data *odp;
    struct compressor **cp;
    struct npioctl *npi;
    time_t t;
#ifdef PPP_FILTER
    struct bpf_program *bp, *nbp;
    struct bpf_insn *newcode, *oldcode;
    int newcodelen;
#endif /* PPP_FILTER */
#ifdef	PPP_COMPRESS
    u_char ccp_option[CCP_MAX_OPTION_LENGTH];
#endif

    switch (cmd) {
    case FIONREAD:
        *(int *)data = sc->sc_inq.ifq_len;
        break;

    case PPPIOCGUNIT:
        *(int *)data = sc->sc_if.if_dunit;
        break;

    case PPPIOCGFLAGS:
        *(u_int *)data = sc->sc_flags;
        break;

    case PPPIOCSFLAGS:
        if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
            return (error);
        flags = *(int *)data & SC_MASK;
        crit_enter();
#ifdef PPP_COMPRESS
        if (sc->sc_flags & SC_CCP_OPEN && !(flags & SC_CCP_OPEN))
            ppp_ccp_closed(sc);
#endif
        sc->sc_flags = (sc->sc_flags & ~SC_MASK) | flags;
        crit_exit();
        break;

    case PPPIOCSMRU:
        if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
            return (error);
        mru = *(int *)data;
        if (mru >= PPP_MRU && mru <= PPP_MAXMRU)
            sc->sc_mru = mru;
        break;

    case PPPIOCGMRU:
        *(int *)data = sc->sc_mru;
        break;

#ifdef VJC
    case PPPIOCSMAXCID:
        if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
            return (error);
        if (sc->sc_comp) {
            crit_enter();
            sl_compress_init(sc->sc_comp, *(int *)data);
            crit_exit();
        }
        break;
#endif

    case PPPIOCXFERUNIT:
        if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
            return (error);
        sc->sc_xfer = curthread;
        break;

#ifdef PPP_COMPRESS
    case PPPIOCSCOMPRESS:
        if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
            return (error);
        odp = (struct ppp_option_data *) data;
        nb = odp->length;
        if (nb > sizeof(ccp_option))
            nb = sizeof(ccp_option);
        if ((error = copyin(odp->ptr, ccp_option, nb)) != 0)
            return (error);
        if (ccp_option[1] < 2)	/* preliminary check on the length byte */
            return (EINVAL);
        for (cp = ppp_compressors; *cp != NULL; ++cp)
            if ((*cp)->compress_proto == ccp_option[0]) {
                /*
                 * Found a handler for the protocol - try to allocate
                 * a compressor or decompressor.
                 */
                error = 0;
                if (odp->transmit) {
                    crit_enter();
                    if (sc->sc_xc_state != NULL)
                        (*sc->sc_xcomp->comp_free)(sc->sc_xc_state);
                    sc->sc_xcomp = *cp;
                    sc->sc_xc_state = (*cp)->comp_alloc(ccp_option, nb);
                    if (sc->sc_xc_state == NULL) {
                        if (sc->sc_flags & SC_DEBUG)
                            kprintf("%s: comp_alloc failed\n",
                                    sc->sc_if.if_xname);
                        error = ENOBUFS;
                    }
                    sc->sc_flags &= ~SC_COMP_RUN;
                    crit_exit();
                } else {
                    crit_enter();
                    if (sc->sc_rc_state != NULL)
                        (*sc->sc_rcomp->decomp_free)(sc->sc_rc_state);
                    sc->sc_rcomp = *cp;
                    sc->sc_rc_state = (*cp)->decomp_alloc(ccp_option, nb);
                    if (sc->sc_rc_state == NULL) {
                        if (sc->sc_flags & SC_DEBUG)
                            kprintf("%s: decomp_alloc failed\n",
                                    sc->sc_if.if_xname);
                        error = ENOBUFS;
                    }
                    sc->sc_flags &= ~SC_DECOMP_RUN;
                    crit_exit();
                }
                return (error);
            }
        if (sc->sc_flags & SC_DEBUG)
            kprintf("%s: no compressor for [%x %x %x], %x\n",
                    sc->sc_if.if_xname, ccp_option[0], ccp_option[1],
                    ccp_option[2], nb);
        return (EINVAL);	/* no handler found */
#endif /* PPP_COMPRESS */

    case PPPIOCGNPMODE:
    case PPPIOCSNPMODE:
        npi = (struct npioctl *) data;
        switch (npi->protocol) {
        case PPP_IP:
            npx = NP_IP;
            break;
        default:
            return EINVAL;
        }
        if (cmd == PPPIOCGNPMODE) {
            npi->mode = sc->sc_npmode[npx];
        } else {
            if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
                return (error);
            if (npi->mode != sc->sc_npmode[npx]) {
                crit_enter();
                sc->sc_npmode[npx] = npi->mode;
                if (npi->mode != NPMODE_QUEUE) {
                    ppp_requeue(sc);
                    (*sc->sc_start)(sc);
                }
                crit_exit();
            }
        }
        break;

    case PPPIOCGIDLE:
        crit_enter();
        t = time_second;
        ((struct ppp_idle *)data)->xmit_idle = t - sc->sc_last_sent;
        ((struct ppp_idle *)data)->recv_idle = t - sc->sc_last_recv;
        crit_exit();
        break;

#ifdef PPP_FILTER
    case PPPIOCSPASS:
    case PPPIOCSACTIVE:
        nbp = (struct bpf_program *) data;
        if ((unsigned) nbp->bf_len > BPF_MAXINSNS)
            return EINVAL;
        newcodelen = nbp->bf_len * sizeof(struct bpf_insn);
        if (newcodelen != 0) {
            MALLOC(newcode, struct bpf_insn *, newcodelen, M_DEVBUF, M_WAITOK);
            if ((error = copyin((caddr_t)nbp->bf_insns, (caddr_t)newcode,
                                newcodelen)) != 0) {
                kfree(newcode, M_DEVBUF);
                return error;
            }
            if (!bpf_validate(newcode, nbp->bf_len)) {
                kfree(newcode, M_DEVBUF);
                return EINVAL;
            }
        } else
            newcode = 0;
        bp = (cmd == PPPIOCSPASS)? &sc->sc_pass_filt: &sc->sc_active_filt;
        oldcode = bp->bf_insns;
        crit_enter();
        bp->bf_len = nbp->bf_len;
        bp->bf_insns = newcode;
        crit_exit();
        if (oldcode != 0)
            kfree(oldcode, M_DEVBUF);
        break;
#endif

    default:
        return (ENOIOCTL);
    }
    return (0);
}
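
PPPIOCSPASS and PPPIOCSACTIVE copy in and validate the replacement BPF program first, swap the pointers inside the crit_enter()/crit_exit() window, and free the old program only afterwards, when no reader can still hold it. A condensed sketch of that ordering (hypothetical names; error handling trimmed):

#include <stdlib.h>
#include <string.h>

struct prog { size_t len; int *insns; };

/* Swap in a validated replacement; free the old one outside the window. */
static int
replace_prog(struct prog *installed, const int *newcode, size_t len,
    void (*enter)(void), void (*leave)(void))
{
	int *copy = NULL, *old;

	if (len != 0) {
		if ((copy = malloc(len * sizeof(*copy))) == NULL)
			return -1;
		memcpy(copy, newcode, len * sizeof(*copy)); /* copyin() */
	}
	old = installed->insns;
	enter();			/* begin no-preemption window */
	installed->len = len;
	installed->insns = copy;
	leave();			/* end window */
	free(old);			/* no reader sees the old program */
	return 0;
}
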
Example #12
int
linux32_sys___sysctl(struct lwp *l, const struct linux32_sys___sysctl_args *uap, register_t *retval)
{
	/* {
		syscallarg(linux32___sysctlp_t) lsp;
	} */
	struct linux32_sysctl ls32;
	int name[CTL_MAXNAME];
	size_t savelen;
	netbsd32_size_t oldlen32;
	size_t oldlen;
	int error;

	/*
	 * Read sysctl arguments 
	 */
	if ((error = copyin(SCARG_P32(uap, lsp), &ls32, sizeof(ls32))) != 0)
		return error;

	/*
	 * Read oldlen
	 */
	if (NETBSD32PTR64(ls32.oldlenp) != NULL) {
		if ((error = copyin(NETBSD32PTR64(ls32.oldlenp), 
		    &oldlen32, sizeof(oldlen32))) != 0)
			return error;
	} else {
		oldlen32 = 0;
	}

	savelen = (size_t)oldlen32;

	/* 
	 * Sanity check nlen
	 */
	if ((ls32.nlen > CTL_MAXNAME) || (ls32.nlen < 1))
		return ENOTDIR;

	/*
	 * Read the sysctl name
	 */
	if ((error = copyin(NETBSD32PTR64(ls32.name), &name, 
	   ls32.nlen * sizeof(int))) != 0)
		return error;

	ktrmib(name, ls32.nlen);
	/*
	 * First try linux32 tree, then linux tree
	 */
	oldlen = (size_t)oldlen32;
	sysctl_lock(NETBSD32PTR64(ls32.newval) != NULL);
	error = sysctl_dispatch(name, ls32.nlen,
				NETBSD32PTR64(ls32.oldval), &oldlen,
				NETBSD32PTR64(ls32.newval), ls32.newlen,
				name, l, &linux32_sysctl_root);
	oldlen32 = (netbsd32_size_t)oldlen;
	sysctl_unlock();

	/*
	 * Check for oldlen overflow (not likely, but who knows...)
	 */
	if (oldlen != oldlen32) {
#ifdef DEBUG_LINUX
		printf("%s: oldlen32 = %d, oldlen = %ld\n", 
		    __func__, oldlen32, oldlen);
#endif
		return EINVAL;
	}

	/*
	 * set caller's oldlen, even if we got an error
	 */
	if (NETBSD32PTR64(ls32.oldlenp)) {
		int nerror;

		nerror = copyout(&oldlen32, 
		    NETBSD32PTR64(ls32.oldlenp), sizeof(oldlen32));

		if (error == 0)
			error = nerror;
	}

	/*
	 * oldlen was too short
	 */
	if ((error == 0) && 
	    (NETBSD32PTR64(ls32.oldval) != NULL) &&
	    (savelen < oldlen32))
		error = ENOMEM;

	return error;
}
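
The emulation works with a native size_t internally but must hand a 32-bit length back to the caller, so it widens oldlen on the way in and verifies that narrowing it back loses nothing. That check in isolation (a sketch, not the NetBSD source):

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

/* Narrow a native length back to a 32-bit field, rejecting overflow. */
static int
narrow_len(size_t oldlen, uint32_t *out32)
{
	uint32_t v = (uint32_t)oldlen;

	if ((size_t)v != oldlen)	/* high bits were lost */
		return EINVAL;
	*out32 = v;
	return 0;
}
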
Example #13
static int
uhid_ioctl(struct usb_fifo *fifo, u_long cmd, void *addr,
    int fflags)
{
	struct uhid_softc *sc = usb_fifo_softc(fifo);
	struct usb_gen_descriptor *ugd;
	uint32_t size;
	int error = 0;
	uint8_t id;

	switch (cmd) {
	case USB_GET_REPORT_DESC:
		ugd = addr;
		if (sc->sc_repdesc_size > ugd->ugd_maxlen) {
			size = ugd->ugd_maxlen;
		} else {
			size = sc->sc_repdesc_size;
		}
		ugd->ugd_actlen = size;
		if (ugd->ugd_data == NULL)
			break;		/* descriptor length only */
		error = copyout(sc->sc_repdesc_ptr, ugd->ugd_data, size);
		break;

	case USB_SET_IMMED:
		if (!(fflags & FREAD)) {
			error = EPERM;
			break;
		}
		if (*(int *)addr) {

			/* do a test read */

			error = uhid_get_report(sc, UHID_INPUT_REPORT,
			    sc->sc_iid, NULL, NULL, sc->sc_isize);
			if (error) {
				break;
			}
			mtx_lock(&sc->sc_mtx);
			sc->sc_flags |= UHID_FLAG_IMMED;
			mtx_unlock(&sc->sc_mtx);
		} else {
			mtx_lock(&sc->sc_mtx);
			sc->sc_flags &= ~UHID_FLAG_IMMED;
			mtx_unlock(&sc->sc_mtx);
		}
		break;

	case USB_GET_REPORT:
		if (!(fflags & FREAD)) {
			error = EPERM;
			break;
		}
		ugd = addr;
		switch (ugd->ugd_report_type) {
		case UHID_INPUT_REPORT:
			size = sc->sc_isize;
			id = sc->sc_iid;
			break;
		case UHID_OUTPUT_REPORT:
			size = sc->sc_osize;
			id = sc->sc_oid;
			break;
		case UHID_FEATURE_REPORT:
			size = sc->sc_fsize;
			id = sc->sc_fid;
			break;
		default:
			return (EINVAL);
		}
		if (id != 0)
			copyin(ugd->ugd_data, &id, 1);
		error = uhid_get_report(sc, ugd->ugd_report_type, id,
		    NULL, ugd->ugd_data, imin(ugd->ugd_maxlen, size));
		break;

	case USB_SET_REPORT:
		if (!(fflags & FWRITE)) {
			error = EPERM;
			break;
		}
		ugd = addr;
		switch (ugd->ugd_report_type) {
		case UHID_INPUT_REPORT:
			size = sc->sc_isize;
			id = sc->sc_iid;
			break;
		case UHID_OUTPUT_REPORT:
			size = sc->sc_osize;
			id = sc->sc_oid;
			break;
		case UHID_FEATURE_REPORT:
			size = sc->sc_fsize;
			id = sc->sc_fid;
			break;
		default:
			return (EINVAL);
		}
		if (id != 0)
			copyin(ugd->ugd_data, &id, 1);
		error = uhid_set_report(sc, ugd->ugd_report_type, id,
		    NULL, ugd->ugd_data, imin(ugd->ugd_maxlen, size));
		break;

	case USB_GET_REPORT_ID:
		*(int *)addr = 0;	/* XXX: we only support reportid 0? */
		break;

	default:
		error = EINVAL;
		break;
	}
	return (error);
}
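
USB_GET_REPORT_DESC clamps the copyout to the caller's buffer, records the clamped size in ugd_actlen, and treats a NULL data pointer as a pure length query. The clamp pattern as a standalone sketch (copyout replaced by memcpy for illustration):

#include <stddef.h>
#include <string.h>

/* Copy at most dst_max bytes of the descriptor; NULL dst asks length only. */
static size_t
get_desc(const void *desc, size_t desc_len,
    void *dst, size_t dst_max, size_t *actlen)
{
	size_t n = desc_len < dst_max ? desc_len : dst_max;

	*actlen = n;			/* ugd_actlen in the driver */
	if (dst != NULL)
		memcpy(dst, desc, n);	/* copyout() in the kernel */
	return n;
}
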
Example #14
static int
smbfs_mount(struct mount *mp, char *path, caddr_t data, struct ucred *cred)
{
	struct smbfs_args args;		/* will hold data from mount request */
	struct smbmount *smp = NULL;
	struct smb_vc *vcp;
	struct smb_share *ssp = NULL;
	struct vnode *vp;
	struct smb_cred scred;
	int error;
	char *pc, *pe;

	if (data == NULL) {
		kprintf("missing data argument\n");
		return EINVAL;
	}
	if (mp->mnt_flag & MNT_UPDATE) {
		kprintf("MNT_UPDATE not implemented");
		return EOPNOTSUPP;
	}
	error = copyin(data, (caddr_t)&args, sizeof(struct smbfs_args));
	if (error)
		return error;
	if (args.version != SMBFS_VERSION) {
		kprintf("mount version mismatch: kernel=%d, mount=%d\n",
		    SMBFS_VERSION, args.version);
		return EINVAL;
	}
	smb_makescred(&scred, curthread, cred);
	error = smb_dev2share(args.dev, SMBM_EXEC, &scred, &ssp);
	if (error) {
		kprintf("invalid device handle %d (%d)\n", args.dev, error);
		return error;
	}
	vcp = SSTOVC(ssp);
	smb_share_unlock(ssp, 0);
	mp->mnt_stat.f_iosize = SSTOVC(ssp)->vc_txmax;

#ifdef SMBFS_USEZONE
	smp = zalloc(smbfsmount_zone);
#else
	MALLOC(smp, struct smbmount *, sizeof(*smp), M_SMBFSDATA,
	    M_WAITOK | M_USE_RESERVE);
#endif
	if (smp == NULL) {
		kprintf("could not alloc smbmount\n");
		error = ENOMEM;
		goto bad;
	}
	bzero(smp, sizeof(*smp));
	mp->mnt_data = (qaddr_t)smp;
	smp->sm_cred = crhold(cred);
	smp->sm_hash = hashinit(desiredvnodes, M_SMBFSHASH, &smp->sm_hashlen);
	if (smp->sm_hash == NULL)
		goto bad;
	lockinit(&smp->sm_hashlock, "smbfsh", 0, 0);
	smp->sm_share = ssp;
	smp->sm_root = NULL;
	smp->sm_args = args;
	smp->sm_caseopt = args.caseopt;
	smp->sm_args.file_mode = (smp->sm_args.file_mode &
			    (S_IRWXU|S_IRWXG|S_IRWXO)) | S_IFREG;
	smp->sm_args.dir_mode  = (smp->sm_args.dir_mode &
			    (S_IRWXU|S_IRWXG|S_IRWXO)) | S_IFDIR;

/*	simple_lock_init(&smp->sm_npslock);*/
	pc = mp->mnt_stat.f_mntfromname;
	pe = pc + sizeof(mp->mnt_stat.f_mntfromname);
	bzero(pc, MNAMELEN);
	*pc++ = '/';
	*pc++ = '/';
	pc = index(strncpy(pc, vcp->vc_username, pe - pc - 2), 0);
	if (pc < pe-1) {
		*(pc++) = '@';
		pc = index(strncpy(pc, vcp->vc_srvname, pe - pc - 2), 0);
		if (pc < pe - 1) {
			*(pc++) = '/';
			strncpy(pc, ssp->ss_name, pe - pc - 2);
		}
	}
	/* protect against invalid mount points */
	smp->sm_args.mount_point[sizeof(smp->sm_args.mount_point) - 1] = '\0';
	vfs_getnewfsid(mp);

	vfs_add_vnodeops(mp, &smbfs_vnode_vops, &mp->mnt_vn_norm_ops);

	error = smbfs_root(mp, &vp);
	if (error)
		goto bad;
	vn_unlock(vp);
	SMBVDEBUG("root.v_sysrefs = %d\n", vp->v_sysref.refcnt);

#ifdef DIAGNOSTICS
	SMBERROR("mp=%p\n", mp);
#endif
	return error;
bad:
	if (smp) {
		if (smp->sm_cred)
			crfree(smp->sm_cred);
		if (smp->sm_hash)
			kfree(smp->sm_hash, M_SMBFSHASH);
		lockdestroy(&smp->sm_hashlock);
#ifdef SMBFS_USEZONE
		zfree(smbfsmount_zone, smp);
#else
		kfree(smp, M_SMBFSDATA);
#endif
	}
	if (ssp)
		smb_share_put(ssp, &scred);
	return error;
}
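
The f_mntfromname block above assembles "//user@server/share" with an index()/strncpy() chain so each component is cut to the remaining room. Assuming plain tail truncation is acceptable, snprintf() produces the same shape in one call (a sketch, not the driver's code):

#include <stdio.h>

/* Build "//user@server/share", truncated to fit the fixed-size field. */
static void
make_mntfromname(char *buf, size_t buflen,
    const char *user, const char *server, const char *share)
{
	(void)snprintf(buf, buflen, "//%s@%s/%s", user, server, share);
}

The difference is only where truncation lands: the strncpy chain may drop a whole trailing component, while snprintf cuts mid-string.
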
/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap.  The MP lock is not
 * held on entry or return.  We are responsible for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * MPSAFE
 */
void
syscall2(struct trapframe *frame)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	caddr_t params;
	struct sysent *callp;
	register_t orig_tf_eflags;
	int sticks;
	int error;
	int narg;
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
#endif
	int have_mplock = 0;
	u_int code;
	union sysunion args;

#ifdef DIAGNOSTIC
	if (ISPL(frame->tf_cs) != SEL_UPL) {
		get_mplock();
		panic("syscall");
		/* NOT REACHED */
	}
#endif

	KTR_LOG(kernentry_syscall, p->p_pid, lp->lwp_tid,
		frame->tf_eax);

	userenter(td, p);	/* lazy raise our priority */

	/*
	 * Misc
	 */
	sticks = (int)td->td_sticks;
	orig_tf_eflags = frame->tf_eflags;

	/*
	 * Virtual kernel intercept - if a VM context managed by a virtual
	 * kernel issues a system call the virtual kernel handles it, not us.
	 * Restore the virtual kernel context and return from its system
	 * call.  The current frame is copied out to the virtual kernel.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		error = EJUSTRETURN;
		callp = NULL;
		goto out;
	}

	/*
	 * Get the system call parameters and account for time
	 */
	lp->lwp_md.md_regs = frame;
	params = (caddr_t)frame->tf_esp + sizeof(int);
	code = frame->tf_eax;

	if (p->p_sysent->sv_prepsyscall) {
		(*p->p_sysent->sv_prepsyscall)(
			frame, (int *)(&args.nosys.sysmsg + 1),
			&code, &params);
	} else {
		/*
		 * Need to check if this is a 32 bit or 64 bit syscall.
		 * fuword is MP aware.
		 */
		if (code == SYS_syscall) {
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = fuword(params);
			params += sizeof(int);
		} else if (code == SYS___syscall) {
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = fuword(params);
			params += sizeof(quad_t);
		}
	}

	code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

#if 0
	if (p->p_sysent->sv_name[0] == 'L')
		kprintf("Linux syscall, code = %d\n", code);
#endif

	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (narg && params) {
		error = copyin(params, (caddr_t)(&args.nosys.sysmsg + 1),
				narg * sizeof(register_t));
		if (error) {
#ifdef KTRACE
			if (KTRPOINT(td, KTR_SYSCALL)) {
				MAKEMPSAFE(have_mplock);
				ktrsyscall(lp, code, narg,
					(void *)(&args.nosys.sysmsg + 1));
			}
#endif
			goto bad;
		}
	}

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSCALL)) {
		MAKEMPSAFE(have_mplock);
		ktrsyscall(lp, code, narg, (void *)(&args.nosys.sysmsg + 1));
	}
#endif

	/*
	 * For traditional syscall code edx is left untouched when 32 bit
	 * results are returned.  Since edx is loaded from fds[1] when the 
	 * system call returns we pre-set it here.
	 */
	args.sysmsg_fds[0] = 0;
	args.sysmsg_fds[1] = frame->tf_edx;

	/*
	 * The syscall might manipulate the trap frame. If it does it
	 * will probably return EJUSTRETURN.
	 */
	args.sysmsg_frame = frame;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	/*
	 * NOTE: All system calls run MPSAFE now.  The system call itself
	 *	 is responsible for getting the MP lock.
	 */
	error = (*callp->sy_call)(&args);

out:
	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		lp = curthread->td_lwp;
		frame->tf_eax = args.sysmsg_fds[0];
		frame->tf_edx = args.sysmsg_fds[1];
		frame->tf_eflags &= ~PSL_C;
		break;
	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
		 * int 0x80 is 2 bytes. We saved this in tf_err.
		 */
		frame->tf_eip -= frame->tf_err;
		break;
	case EJUSTRETURN:
		break;
	case EASYNC:
		panic("Unexpected EASYNC return value (for now)");
	default:
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame->tf_eax = error;
		frame->tf_eflags |= PSL_C;
		break;
	}

	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if ((orig_tf_eflags & PSL_T) && !(orig_tf_eflags & PSL_VM)) {
		MAKEMPSAFE(have_mplock);
		frame->tf_eflags &= ~PSL_T;
		trapsignal(lp, SIGTRAP, TRAP_TRACE);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(lp, frame, sticks);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET)) {
		MAKEMPSAFE(have_mplock);
		ktrsysret(lp, code, error, args.sysmsg_result);
	}
#endif

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	userexit(lp);
	/*
	 * Release the MP lock if we had to get it
	 */
	if (have_mplock)
		rel_mplock();
	KTR_LOG(kernentry_syscall_ret, p->p_pid, lp->lwp_tid, error);
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
		("syscall: critical section count mismatch! %d/%d",
		crit_count, td->td_pri));
	KASSERT(&td->td_toks_base == td->td_toks_stop,
		("syscall: extra tokens held after trap! %zd",
		td->td_toks_stop - &td->td_toks_base));
#endif
}
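
The bad: label funnels every native error through the emulation's translation table when one exists, clamping out-of-range values to -1. That mapping in isolation (a sketch of the same logic):

/* Map a native errno through an emulation's table, as the bad: path does. */
static int
xlate_errno(int error, const int *errtbl, int errsize)
{
	if (errsize == 0)
		return error;	/* native emulation: no translation */
	if (error >= errsize)
		return -1;	/* outside the table (XXX in the source) */
	return errtbl[error];
}
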
/*ARGSUSED*/
int
quotactl(struct vnode *vp, intptr_t arg, int flag, struct cred *cr)
{
	struct quotctl quot;
	struct ufsvfs *ufsvfsp;
	int error = 0;

	if ((flag & DATAMODEL_MASK) == DATAMODEL_NATIVE) {
		if (copyin((caddr_t)arg, &quot, sizeof (struct quotctl)))
			return (EFAULT);
	}
#ifdef _SYSCALL32_IMPL
	else {
		/* quotctl struct from ILP32 callers */
		struct quotctl32 quot32;
		if (copyin((caddr_t)arg, &quot32, sizeof (struct quotctl32)))
			return (EFAULT);
		quot.op = quot32.op;
		quot.uid = quot32.uid;
		quot.addr = (caddr_t)(uintptr_t)quot32.addr;
	}
#endif /* _SYSCALL32_IMPL */

	if (quot.uid < 0)
		quot.uid = crgetruid(cr);
	if (quot.op == Q_SYNC && vp == NULL) {
		ufsvfsp = NULL;
	} else if (quot.op != Q_ALLSYNC) {
		ufsvfsp = (struct ufsvfs *)(vp->v_vfsp->vfs_data);
	}
	switch (quot.op) {

	case Q_QUOTAON:
		rw_enter(&dq_rwlock, RW_WRITER);
		if (quotas_initialized == 0) {
			qtinit2();
			quotas_initialized = 1;
		}
		rw_exit(&dq_rwlock);
		error = opendq(ufsvfsp, vp, cr);
		break;

	case Q_QUOTAOFF:
		error = closedq(ufsvfsp, cr);
		if (!error) {
			invalidatedq(ufsvfsp);
		}
		break;

	case Q_SETQUOTA:
	case Q_SETQLIM:
		error = setquota(quot.op, (uid_t)quot.uid, ufsvfsp,
		    quot.addr, cr);
		break;

	case Q_GETQUOTA:
		error = getquota((uid_t)quot.uid, ufsvfsp, (caddr_t)quot.addr,
		    cr);
		break;

	case Q_SYNC:
		error = qsync(ufsvfsp);
		break;

	case Q_ALLSYNC:
		(void) qsync(NULL);
		break;

	default:
		error = EINVAL;
		break;
	}
	return (error);
}
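
The _SYSCALL32_IMPL branch widens the ILP32 quotctl into the native layout field by field, casting the user address through uintptr_t so the 32-bit value is zero-extended rather than sign-extended. A minimal sketch of that thunk (struct names hypothetical):

#include <stdint.h>

struct quot32 { int op; int uid; uint32_t addr; };	/* ILP32 layout */
struct quotn  { int op; int uid; char *addr; };		/* native layout */

/* Widen field by field; (uintptr_t) first so the address zero-extends. */
static void
widen_quotctl(const struct quot32 *q32, struct quotn *q)
{
	q->op = q32->op;
	q->uid = q32->uid;
	q->addr = (char *)(uintptr_t)q32->addr;
}
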
/*
 * Function name:	tw_osli_fw_passthru
 * Description:		Builds a fw passthru cmd pkt, and submits it to CL.
 *
 * Input:		sc	-- ptr to OSL internal ctlr context
 *			buf	-- ptr to ioctl pkt understood by CL
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_osli_fw_passthru(struct twa_softc *sc, TW_INT8 *buf)
{
	struct tw_osli_req_context		*req;
	struct tw_osli_ioctl_no_data_buf	*user_buf =
		(struct tw_osli_ioctl_no_data_buf *)buf;
	TW_TIME					end_time;
	TW_UINT32				timeout = 60;
	TW_UINT32				data_buf_size_adjusted;
	struct tw_cl_req_packet			*req_pkt;
	struct tw_cl_passthru_req_packet	*pt_req;
	TW_INT32				error;

	tw_osli_dbg_dprintf(5, sc, "ioctl: passthru");

	if ((req = tw_osli_get_request(sc)) == NULL)
		return(EBUSY);

	req->req_handle.osl_req_ctxt = req;
	req->orig_req = buf;
	req->flags |= TW_OSLI_REQ_FLAGS_PASSTHRU;

	req_pkt = &(req->req_pkt);
	req_pkt->status = 0;
	req_pkt->tw_osl_callback = tw_osl_complete_passthru;
	/* Let the Common Layer retry the request on cmd queue full. */
	req_pkt->flags |= TW_CL_REQ_RETRY_ON_BUSY;

	pt_req = &(req_pkt->gen_req_pkt.pt_req);
	/*
	 * Make sure that the data buffer sent to firmware is a 
	 * 512 byte multiple in size.
	 */
	data_buf_size_adjusted =
		(user_buf->driver_pkt.buffer_length +
		(sc->sg_size_factor - 1)) & ~(sc->sg_size_factor - 1);
	if ((req->length = data_buf_size_adjusted)) {
		if ((req->data = malloc(data_buf_size_adjusted,
			TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
			error = ENOMEM;
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2016,
				"Could not alloc mem for "
				"fw_passthru data_buf",
				error);
			goto fw_passthru_err;
		}
		/* Copy the payload. */
		if ((error = copyin((TW_VOID *)(user_buf->pdata), 
			req->data,
			user_buf->driver_pkt.buffer_length)) != 0) {
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2017,
				"Could not copyin fw_passthru data_buf",
				error);
			goto fw_passthru_err;
		}
		pt_req->sgl_entries = 1; /* will be updated during mapping */
		req->flags |= (TW_OSLI_REQ_FLAGS_DATA_IN |
			TW_OSLI_REQ_FLAGS_DATA_OUT);
	} else
		pt_req->sgl_entries = 0; /* no payload */

	pt_req->cmd_pkt = (TW_VOID *)(&(user_buf->cmd_pkt));
	pt_req->cmd_pkt_length = sizeof(struct tw_cl_command_packet);

	if ((error = tw_osli_map_request(req)))
		goto fw_passthru_err;

	end_time = tw_osl_get_local_time() + timeout;
	while (req->state != TW_OSLI_REQ_STATE_COMPLETE) {
		mtx_lock(req->ioctl_wake_timeout_lock);
		req->flags |= TW_OSLI_REQ_FLAGS_SLEEPING;

		error = mtx_sleep(req, req->ioctl_wake_timeout_lock, 0,
			    "twa_passthru", timeout*hz);
		mtx_unlock(req->ioctl_wake_timeout_lock);

		if (!(req->flags & TW_OSLI_REQ_FLAGS_SLEEPING))
			error = 0;
		req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;

		if (!error) {
			if (((error = req->error_code)) ||
				((error = (req->state !=
				TW_OSLI_REQ_STATE_COMPLETE))) ||
				((error = req_pkt->status)))
				goto fw_passthru_err;
			break;
		}

		if (req_pkt->status) {
			error = req_pkt->status;
			goto fw_passthru_err;
		}

		if (error == EWOULDBLOCK) {
			/* Time out! */
			if ((!(req->error_code))                       &&
			    (req->state == TW_OSLI_REQ_STATE_COMPLETE) &&
			    (!(req_pkt->status))			  ) {
#ifdef    TW_OSL_DEBUG
				tw_osli_printf(sc, "request = %p",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x7777,
					"FALSE Passthru timeout!",
					req);
#endif /* TW_OSL_DEBUG */
				error = 0; /* False error */
				break;
			}
			if (!(tw_cl_is_reset_needed(&(req->ctlr->ctlr_handle)))) {
#ifdef    TW_OSL_DEBUG
				tw_osli_printf(sc, "request = %p",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x2018,
					"Passthru request timed out!",
					req);
#else  /* TW_OSL_DEBUG */
			device_printf((sc)->bus_dev, "Passthru request timed out!\n");
#endif /* TW_OSL_DEBUG */
				tw_cl_reset_ctlr(&(req->ctlr->ctlr_handle));
			}

			error = 0;
			end_time = tw_osl_get_local_time() + timeout;
			continue;
			/*
			 * Don't touch req after a reset.  It (and any
			 * associated data) will be
			 * unmapped by the callback.
			 */
		}
		/* 
		 * Either the request got completed, or we were woken up by a
		 * signal.  Calculate the new timeout, in case it was the latter.
		 */
		timeout = (end_time - tw_osl_get_local_time());
	} /* End of while loop */

	/* If there was a payload, copy it back. */
	if ((!error) && (req->length))
		if ((error = copyout(req->data, user_buf->pdata,
			user_buf->driver_pkt.buffer_length)))
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2019,
				"Could not copyout fw_passthru data_buf",
				error);

fw_passthru_err:

	if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET)
		error = EBUSY;

	user_buf->driver_pkt.os_status = error;
	/* Free resources. */
	if (req->data)
		free(req->data, TW_OSLI_MALLOC_CLASS);
	tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	return(error);
}
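
The buffer-length adjustment uses the usual power-of-two round-up identity: add factor - 1, then mask off the low bits. Assuming sg_size_factor is a power of two (512 here), the arithmetic works out as:

#include <assert.h>
#include <stdint.h>

/* Round len up to the next multiple of f; f must be a power of two. */
static uint32_t
roundup_pow2(uint32_t len, uint32_t f)
{
	assert(f != 0 && (f & (f - 1)) == 0);
	return (len + (f - 1)) & ~(f - 1);
}

/* roundup_pow2(1, 512) == 512, roundup_pow2(512, 512) == 512,
 * roundup_pow2(513, 512) == 1024 */
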
int
abort2(struct thread *td, struct abort2_args *uap)
{
	struct proc *p = td->td_proc;
	struct sbuf *sb;
	void *uargs[16];
	int error, i, sig;

	/*
	 * Do it right now so we can log either a proper call of abort2()
	 * or note that an invalid argument was passed. 512 is big enough
	 * to handle 16 arguments' descriptions with additional comments.
	 */
	sb = sbuf_new(NULL, NULL, 512, SBUF_FIXEDLEN);
	sbuf_clear(sb);
	sbuf_printf(sb, "%s(pid %d uid %d) aborted: ",
	    p->p_comm, p->p_pid, td->td_ucred->cr_uid);
	/*
	 * Since we can't return from abort2(), send SIGKILL in cases
	 * where abort2() was called improperly.
	 */
	sig = SIGKILL;
	/* Prevent DoS attacks from user space. */
	if (uap->nargs < 0 || uap->nargs > 16)
		goto out;
	if (uap->nargs > 0) {
		if (uap->args == NULL)
			goto out;
		error = copyin(uap->args, uargs, uap->nargs * sizeof(void *));
		if (error != 0)
			goto out;
	}
	/*
	 * Limit size of 'reason' string to 128. Will fit even when
	 * maximal number of arguments was chosen to be logged.
	 */
	if (uap->why != NULL) {
		error = sbuf_copyin(sb, uap->why, 128);
		if (error < 0)
			goto out;
	} else {
		sbuf_printf(sb, "(null)");
	}
	if (uap->nargs > 0) {
		sbuf_printf(sb, "(");
		for (i = 0; i < uap->nargs; i++)
			sbuf_printf(sb, "%s%p", i == 0 ? "" : ", ", uargs[i]);
		sbuf_printf(sb, ")");
	}
	/*
	 * Final stage: the arguments were proper, the string was
	 * successfully copied from user space, and copying the
	 * pointers from user space succeeded.
	 */
	sig = SIGABRT;
out:
	if (sig == SIGKILL) {
		sbuf_trim(sb);
		sbuf_printf(sb, " (Reason text inaccessible)");
	}
	sbuf_cat(sb, "\n");
	sbuf_finish(sb);
	log(LOG_INFO, "%s", sbuf_data(sb));
	sbuf_delete(sb);
	exit1(td, W_EXITCODE(0, sig));
	return (0);
}
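
From userland, abort2() is the matching FreeBSD libc call: it logs the reason and up to 16 pointer arguments, then terminates the process. A small usage sketch:

#include <stdlib.h>	/* abort2() on FreeBSD */

/* Terminate with a logged reason and two pointer arguments. */
static void
die(void *a, void *b)
{
	void *args[] = { a, b };

	abort2("invariant violated", 2, args);	/* does not return */
}
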
Example #19
/* Instruction pointers operate differently on mc88110 */
void
m88110_syscall(register_t code, struct trapframe *tf)
{
	int i, nsys, nap;
	struct sysent *callp;
	struct proc *p = curproc;
	int error;
	register_t args[8], rval[2], *ap;
	int nolock;

	uvmexp.syscalls++;

	callp = p->p_emul->e_sysent;
	nsys  = p->p_emul->e_nsysent;

	p->p_md.md_tf = tf;

	/*
	 * For 88k, all the arguments are passed in the registers (r2-r9),
	 * and further arguments (if any) on stack.
	 * For syscall (and __syscall), r2 (and r3) has the actual code.
	 * __syscall  takes a quad syscall number, so that other
	 * arguments are at their natural alignments.
	 */
	ap = &tf->tf_r[2];
	nap = 8;	/* r2-r9 */

	switch (code) {
	case SYS_syscall:
		code = *ap++;
		nap--;
		break;
	case SYS___syscall:
		if (callp != sysent)
			break;
		code = ap[_QUAD_LOWWORD];
		ap += 2;
		nap -= 2;
		break;
	}

	if (code < 0 || code >= nsys)
		callp += p->p_emul->e_nosys;
	else
		callp += code;

	i = callp->sy_argsize / sizeof(register_t);
	if (i > sizeof(args) / sizeof(register_t))
		panic("syscall nargs");
	if (i > nap) {
		bcopy((caddr_t)ap, (caddr_t)args, nap * sizeof(register_t));
		error = copyin((caddr_t)tf->tf_r[31], (caddr_t)(args + nap),
		    (i - nap) * sizeof(register_t));
	} else {
		bcopy((caddr_t)ap, (caddr_t)args, i * sizeof(register_t));
		error = 0;
	}

	if (error != 0)
		goto bad;

#ifdef SYSCALL_DEBUG
	KERNEL_LOCK();
	scdebug_call(p, code, args);
	KERNEL_UNLOCK();
#endif
#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSCALL)) {
		KERNEL_LOCK();
		ktrsyscall(p, code, callp->sy_argsize, args);
		KERNEL_UNLOCK();
	}
#endif
	rval[0] = 0;
	rval[1] = tf->tf_r[3];
#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE)) {
		KERNEL_LOCK();
		error = systrace_redirect(code, p, args, rval);
		KERNEL_UNLOCK();
	} else
#endif
	{
		nolock = (callp->sy_flags & SY_NOLOCK);
		if (!nolock)
			KERNEL_LOCK();
		error = (*callp->sy_call)(p, args, rval);
		if (!nolock)
			KERNEL_UNLOCK();
	}

	/*
	 * system call will look like:
	 *	 or r13, r0, <code>
	 *       tb0 0, r0, <128> <- exip
	 *	 br err 	  <- enip
	 *       jmp r1
	 *  err: or.u r3, r0, hi16(errno)
	 *	 st r2, r3, lo16(errno)
	 *	 subu r2, r0, 1
	 *	 jmp r1
	 *
	 * So, when we take syscall trap, exip/enip will be as
	 * shown above.
	 * Given this,
	 * 1. If the system call returned 0, need to jmp r1.
	 *    exip += 8
	 * 2. If the system call returned an errno > 0, increment
	 *    exip += 4 and plug the value in r2. This will have us
	 *    executing "br err" on return to user space.
	 * 3. If the system call code returned ERESTART,
	 *    we need to reexecute the trap instruction. Leave exip as is.
	 * 4. If the system call returned EJUSTRETURN, just return.
	 *    exip += 4
	 */

	switch (error) {
	case 0:
		tf->tf_r[2] = rval[0];
		tf->tf_r[3] = rval[1];
		tf->tf_epsr &= ~PSR_C;
		/* skip two instructions */
		m88110_skip_insn(tf);
		m88110_skip_insn(tf);
		break;
	case ERESTART:
		/*
		 * Reexecute the trap.
		 * exip is already at the trap instruction, so
		 * there is nothing to do.
		 */
		tf->tf_epsr &= ~PSR_C;
		break;
	case EJUSTRETURN:
		tf->tf_epsr &= ~PSR_C;
		/* skip one instruction */
		m88110_skip_insn(tf);
		break;
	default:
bad:
		if (p->p_emul->e_errno)
			error = p->p_emul->e_errno[error];
		tf->tf_r[2] = error;
		tf->tf_epsr |= PSR_C;   /* fail */
		/* skip one instruction */
		m88110_skip_insn(tf);
		break;
	}

#ifdef SYSCALL_DEBUG
	KERNEL_LOCK();
	scdebug_ret(p, code, error, rval);
	KERNEL_UNLOCK();
#endif
	userret(p);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSRET)) {
		KERNEL_LOCK();
		ktrsysret(p, code, error, rval[0]);
		KERNEL_UNLOCK();
	}
#endif
}
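
Argument gathering above takes up to eight arguments from r2-r9 and spills any remainder from the user stack. The same split as a standalone helper (copyin replaced by memcpy for illustration):

#include <string.h>

typedef unsigned long register_t;

/*
 * Gather nargs arguments: the first min(nargs, nreg) from the register
 * array, the remainder from the (user) stack image.
 */
static void
gather_args(register_t *dst, int nargs,
    const register_t *regs, int nreg, const register_t *ustack)
{
	int nfromreg = nargs < nreg ? nargs : nreg;

	memcpy(dst, regs, nfromreg * sizeof(register_t));
	if (nargs > nfromreg)	/* copyin() from tf_r[31] in the kernel */
		memcpy(dst + nfromreg, ustack,
		    (nargs - nfromreg) * sizeof(register_t));
}
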
Example #20
/* ARGSUSED */
int
sys_execve(struct proc *p, void *v, register_t *retval)
{
	struct sys_execve_args /* {
		syscallarg(const char *) path;
		syscallarg(char *const *) argp;
		syscallarg(char *const *) envp;
	} */ *uap = v;
	int error;
	struct exec_package pack;
	struct nameidata nid;
	struct vattr attr;
	struct ucred *cred = p->p_ucred;
	char *argp;
	char * const *cpp, *dp, *sp;
	struct process *pr = p->p_p;
	long argc, envc;
	size_t len, sgap;
#ifdef MACHINE_STACK_GROWS_UP
	size_t slen;
#endif
	char *stack;
	struct ps_strings arginfo;
	struct vmspace *vm = p->p_vmspace;
	char **tmpfap;
	extern struct emul emul_native;
#if NSYSTRACE > 0
	int wassugid = ISSET(pr->ps_flags, PS_SUGID | PS_SUGIDEXEC);
	size_t pathbuflen;
#endif
	char *pathbuf = NULL;

	/*
	 * Cheap solution to complicated problems.
	 * Mark this process as "leave me alone, I'm execing".
	 */
	atomic_setbits_int(&p->p_flag, P_INEXEC);

#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE)) {
		systrace_execve0(p);
		pathbuf = pool_get(&namei_pool, PR_WAITOK);
		error = copyinstr(SCARG(uap, path), pathbuf, MAXPATHLEN,
		    &pathbuflen);
		if (error != 0)
			goto clrflag;
	}
#endif
	if (pathbuf != NULL) {
		NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_SYSSPACE, pathbuf, p);
	} else {
		NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_USERSPACE,
		    SCARG(uap, path), p);
	}

	/*
	 * initialize the fields of the exec package.
	 */
	if (pathbuf != NULL)
		pack.ep_name = pathbuf;
	else
		pack.ep_name = (char *)SCARG(uap, path);
	pack.ep_hdr = malloc(exec_maxhdrsz, M_EXEC, M_WAITOK);
	pack.ep_hdrlen = exec_maxhdrsz;
	pack.ep_hdrvalid = 0;
	pack.ep_ndp = &nid;
	pack.ep_interp = NULL;
	pack.ep_emul_arg = NULL;
	VMCMDSET_INIT(&pack.ep_vmcmds);
	pack.ep_vap = &attr;
	pack.ep_emul = &emul_native;
	pack.ep_flags = 0;

	/* see if we can run it. */
	if ((error = check_exec(p, &pack)) != 0) {
		goto freehdr;
	}

	/* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */

	/* allocate an argument buffer */
	argp = (char *) uvm_km_valloc_wait(exec_map, NCARGS);
#ifdef DIAGNOSTIC
	if (argp == NULL)
		panic("execve: argp == NULL");
#endif
	dp = argp;
	argc = 0;

	/* copy the fake args list, if there's one, freeing it as we go */
	if (pack.ep_flags & EXEC_HASARGL) {
		tmpfap = pack.ep_fa;
		while (*tmpfap != NULL) {
			char *cp;

			cp = *tmpfap;
			while (*cp)
				*dp++ = *cp++;
			*dp++ = '\0';

			free(*tmpfap, M_EXEC);
			tmpfap++; argc++;
		}
		free(pack.ep_fa, M_EXEC);
		pack.ep_flags &= ~EXEC_HASARGL;
	}

	/* Now get argv & environment */
	if (!(cpp = SCARG(uap, argp))) {
		error = EFAULT;
		goto bad;
	}

	if (pack.ep_flags & EXEC_SKIPARG)
		cpp++;

	while (1) {
		len = argp + ARG_MAX - dp;
		if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
			goto bad;
		if (!sp)
			break;
		if ((error = copyinstr(sp, dp, len, &len)) != 0) {
			if (error == ENAMETOOLONG)
				error = E2BIG;
			goto bad;
		}
		dp += len;
		cpp++;
		argc++;
	}

	envc = 0;
	/* environment does not need to be there */
	if ((cpp = SCARG(uap, envp)) != NULL ) {
		while (1) {
			len = argp + ARG_MAX - dp;
			if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
				goto bad;
			if (!sp)
				break;
			if ((error = copyinstr(sp, dp, len, &len)) != 0) {
				if (error == ENAMETOOLONG)
					error = E2BIG;
				goto bad;
			}
			dp += len;
			cpp++;
			envc++;
		}
	}

	dp = (char *)ALIGN(dp);

	sgap = STACKGAPLEN;
	if (stackgap_random != 0)
		sgap += (arc4random() * ALIGNBYTES) & (stackgap_random - 1);
#ifdef MACHINE_STACK_GROWS_UP
	sgap = ALIGN(sgap);
#endif
	/* Now check if args & environ fit into new stack */
	len = ((argc + envc + 2 + pack.ep_emul->e_arglen) * sizeof(char *) +
	    sizeof(long) + dp + sgap + sizeof(struct ps_strings)) - argp;

	len = ALIGN(len);	/* make the stack "safely" aligned */

	if (len > pack.ep_ssize) { /* in effect, compare to initial limit */
		error = ENOMEM;
		goto bad;
	}

	/* adjust "active stack depth" for process VSZ */
	pack.ep_ssize = len;	/* maybe should go elsewhere, but... */

	/*
	 * Prepare vmspace for remapping. Note that uvmspace_exec can replace
	 * p_vmspace!
	 */
	uvmspace_exec(p, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);

	vm = p->p_vmspace;
	/* Now map address space */
	vm->vm_taddr = (char *)pack.ep_taddr;
	vm->vm_tsize = atop(round_page(pack.ep_tsize));
	vm->vm_daddr = (char *)pack.ep_daddr;
	vm->vm_dsize = atop(round_page(pack.ep_dsize));
	vm->vm_dused = 0;
	vm->vm_ssize = atop(round_page(pack.ep_ssize));
	vm->vm_maxsaddr = (char *)pack.ep_maxsaddr;
	vm->vm_minsaddr = (char *)pack.ep_minsaddr;

	/* create the new process's VM space by running the vmcmds */
#ifdef DIAGNOSTIC
	if (pack.ep_vmcmds.evs_used == 0)
		panic("execve: no vmcmds");
#endif
	error = exec_process_vmcmds(p, &pack);

	/* if an error happened, deallocate and punt */
	if (error)
		goto exec_abort;

	/* remember information about the process */
	arginfo.ps_nargvstr = argc;
	arginfo.ps_nenvstr = envc;

#ifdef MACHINE_STACK_GROWS_UP
	stack = (char *)USRSTACK + sizeof(arginfo) + sgap;
	slen = len - sizeof(arginfo) - sgap;
#else
	stack = (char *)(USRSTACK - len);
#endif
	/* Now copy argc, args & environ to new stack */
	if (!(*pack.ep_emul->e_copyargs)(&pack, &arginfo, stack, argp))
		goto exec_abort;

	/* copy out the process's ps_strings structure */
	if (copyout(&arginfo, (char *)PS_STRINGS, sizeof(arginfo)))
		goto exec_abort;

	stopprofclock(p);	/* stop profiling */
	fdcloseexec(p);		/* handle close on exec */
	execsigs(p);		/* reset caught signals */
	TCB_SET(p, NULL);	/* reset the TCB address */

	/* set command name & other accounting info */
	len = min(nid.ni_cnd.cn_namelen, MAXCOMLEN);
	bcopy(nid.ni_cnd.cn_nameptr, p->p_comm, len);
	p->p_comm[len] = 0;
	p->p_acflag &= ~AFORK;

	/* record proc's vnode, for use by procfs and others */
	if (p->p_textvp)
		vrele(p->p_textvp);
	vref(pack.ep_vp);
	p->p_textvp = pack.ep_vp;

	atomic_setbits_int(&pr->ps_flags, PS_EXEC);
	if (pr->ps_flags & PS_PPWAIT) {
		atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
		atomic_clearbits_int(&pr->ps_pptr->ps_flags, PS_ISPWAIT);
		wakeup(pr->ps_pptr);
	}

	/*
	 * If process does execve() while it has a mismatched real,
	 * effective, or saved uid/gid, we set PS_SUGIDEXEC.
	 */
	if (p->p_ucred->cr_uid != p->p_cred->p_ruid ||
	    p->p_ucred->cr_uid != p->p_cred->p_svuid ||
	    p->p_ucred->cr_gid != p->p_cred->p_rgid ||
	    p->p_ucred->cr_gid != p->p_cred->p_svgid)
		atomic_setbits_int(&pr->ps_flags, PS_SUGIDEXEC);
	else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGIDEXEC);

	/*
	 * deal with set[ug]id.
	 * MNT_NOEXEC has already been used to disable s[ug]id.
	 */
	if ((attr.va_mode & (VSUID | VSGID)) && proc_cansugid(p)) {
		int i;

		atomic_setbits_int(&pr->ps_flags, PS_SUGID|PS_SUGIDEXEC);

#ifdef KTRACE
		/*
		 * If process is being ktraced, turn off - unless
		 * root set it.
		 */
		if (p->p_tracep && !(p->p_traceflag & KTRFAC_ROOT)) {
			p->p_traceflag = 0;
			ktrsettracevnode(p, NULL);
		}
#endif
		p->p_ucred = crcopy(cred);
		if (attr.va_mode & VSUID)
			p->p_ucred->cr_uid = attr.va_uid;
		if (attr.va_mode & VSGID)
			p->p_ucred->cr_gid = attr.va_gid;

		/*
		 * For set[ug]id processes, a few caveats apply to
		 * stdin, stdout, and stderr.
		 */
		for (i = 0; i < 3; i++) {
			struct file *fp = NULL;

			/*
			 * NOTE - This will never return NULL because of
			 * immature fds. The file descriptor table is not
			 * shared because we're suid.
			 */
			fp = fd_getfile(p->p_fd, i);
#ifdef PROCFS
			/*
			 * Close descriptors that are writing to procfs.
			 */
			if (fp && fp->f_type == DTYPE_VNODE &&
			    ((struct vnode *)(fp->f_data))->v_tag == VT_PROCFS &&
			    (fp->f_flag & FWRITE)) {
				fdrelease(p, i);
				fp = NULL;
			}
#endif

			/*
			 * Ensure that stdin, stdout, and stderr are already
			 * allocated.  We do not want userland to accidentally
			 * allocate descriptors in this range which has implied
			 * meaning to libc.
			 */
			if (fp == NULL) {
				short flags = FREAD | (i == 0 ? 0 : FWRITE);
				struct vnode *vp;
				int indx;

				if ((error = falloc(p, &fp, &indx)) != 0)
					goto exec_abort;
#ifdef DIAGNOSTIC
				if (indx != i)
					panic("sys_execve: falloc indx != i");
#endif
				if ((error = cdevvp(getnulldev(), &vp)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					goto exec_abort;
				}
				if ((error = VOP_OPEN(vp, flags, p->p_ucred, p)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					vrele(vp);
					goto exec_abort;
				}
				if (flags & FWRITE)
					vp->v_writecount++;
				fp->f_flag = flags;
				fp->f_type = DTYPE_VNODE;
				fp->f_ops = &vnops;
				fp->f_data = (caddr_t)vp;
				FILE_SET_MATURE(fp);
			}
		}
	} else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGID);
	p->p_cred->p_svuid = p->p_ucred->cr_uid;
	p->p_cred->p_svgid = p->p_ucred->cr_gid;

	if (pr->ps_flags & PS_SUGIDEXEC) {
		int i, s = splclock();

		timeout_del(&p->p_realit_to);
		timerclear(&p->p_realtimer.it_interval);
		timerclear(&p->p_realtimer.it_value);
		for (i = 0; i < sizeof(p->p_stats->p_timer) /
		    sizeof(p->p_stats->p_timer[0]); i++) {
			timerclear(&p->p_stats->p_timer[i].it_interval);
			timerclear(&p->p_stats->p_timer[i].it_value);
		}
		splx(s);
	}

	uvm_km_free_wakeup(exec_map, (vaddr_t) argp, NCARGS);

	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);

	/*
	 * notify others that we exec'd
	 */
	KNOTE(&pr->ps_klist, NOTE_EXEC);

	/* setup new registers and do misc. setup. */
	if (pack.ep_emul->e_fixup != NULL) {
		if ((*pack.ep_emul->e_fixup)(p, &pack) != 0)
			goto free_pack_abort;
	}
#ifdef MACHINE_STACK_GROWS_UP
	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack + slen, retval);
#else
	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack, retval);
#endif

	/* map the process's signal trampoline code */
	if (exec_sigcode_map(p, pack.ep_emul))
		goto free_pack_abort;

#ifdef __HAVE_EXEC_MD_MAP
	/* perform md specific mappings that process might need */
	if (exec_md_map(p, &pack))
		goto free_pack_abort;
#endif

	if (p->p_flag & P_TRACED)
		psignal(p, SIGTRAP);

	free(pack.ep_hdr, M_EXEC);

	/*
	 * Call emulation specific exec hook. This can setup per-process
	 * p->p_emuldata or do any other per-process stuff an emulation needs.
	 *
	 * If we are executing process of different emulation than the
	 * original forked process, call e_proc_exit() of the old emulation
	 * first, then e_proc_exec() of new emulation. If the emulation is
	 * same, the exec hook code should deallocate any old emulation
	 * resources held previously by this process.
	 */
	if (p->p_emul && p->p_emul->e_proc_exit &&
	    p->p_emul != pack.ep_emul)
		(*p->p_emul->e_proc_exit)(p);

	p->p_descfd = 255;
	if ((pack.ep_flags & EXEC_HASFD) && pack.ep_fd < 255)
		p->p_descfd = pack.ep_fd;

	/*
	 * Call exec hook. Emulation code may NOT store reference to anything
	 * from &pack.
	 */
	if (pack.ep_emul->e_proc_exec)
		(*pack.ep_emul->e_proc_exec)(p, &pack);

	/* update p_emul, the old value is no longer needed */
	p->p_emul = pack.ep_emul;

#ifdef KTRACE
	if (KTRPOINT(p, KTR_EMUL))
		ktremul(p, p->p_emul->e_name);
#endif

	atomic_clearbits_int(&p->p_flag, P_INEXEC);

#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE) &&
	    wassugid && !ISSET(pr->ps_flags, PS_SUGID | PS_SUGIDEXEC))
		systrace_execve1(pathbuf, p);
#endif

	if (pathbuf != NULL)
		pool_put(&namei_pool, pathbuf);

	return (0);

bad:
	/* free the vmspace-creation commands, and release their references */
	kill_vmcmds(&pack.ep_vmcmds);
	/* kill any opened file descriptor, if necessary */
	if (pack.ep_flags & EXEC_HASFD) {
		pack.ep_flags &= ~EXEC_HASFD;
		(void) fdrelease(p, pack.ep_fd);
	}
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	if (pack.ep_emul_arg != NULL)
		free(pack.ep_emul_arg, M_TEMP);
	/* close and put the exec'd file */
	vn_close(pack.ep_vp, FREAD, cred, p);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	uvm_km_free_wakeup(exec_map, (vaddr_t) argp, NCARGS);

 freehdr:
	free(pack.ep_hdr, M_EXEC);
#if NSYSTRACE > 0
 clrflag:
#endif
	atomic_clearbits_int(&p->p_flag, P_INEXEC);

	if (pathbuf != NULL)
		pool_put(&namei_pool, pathbuf);

	return (error);

exec_abort:
	/*
	 * the old process doesn't exist anymore.  exit gracefully.
	 * get rid of the (new) address space we have created, if any, get rid
	 * of our namei data and vnode, and exit noting failure
	 */
	uvm_deallocate(&vm->vm_map, VM_MIN_ADDRESS,
		VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS);
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	if (pack.ep_emul_arg != NULL)
		free(pack.ep_emul_arg, M_TEMP);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);
	uvm_km_free_wakeup(exec_map, (vaddr_t) argp, NCARGS);

free_pack_abort:
	free(pack.ep_hdr, M_EXEC);
	exit1(p, W_EXITCODE(0, SIGABRT), EXIT_NORMAL);

	/* NOTREACHED */
	atomic_clearbits_int(&p->p_flag, P_INEXEC);
	if (pathbuf != NULL)
		pool_put(&namei_pool, pathbuf);

	return (0);
}
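
The argv and envp loops above follow one recipe: copy in the next user pointer, copyinstr the string it names into the remaining room, and turn ENAMETOOLONG into E2BIG once the argument area fills. The shape of that loop, with the kernel primitives declared as hypothetical stand-ins:

#include <errno.h>
#include <stddef.h>

/* Hypothetical stand-ins for the kernel copy primitives. */
int copyin(const void *uaddr, void *kaddr, size_t len);
int copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done);

/* Copy a NULL-terminated user string vector, as the argv loop does. */
static int
copy_strvec(char *const *ucpp, char *buf, char *bufend, long *countp)
{
	char *dp = buf, *sp;
	size_t len;
	int error;

	for (*countp = 0;; ucpp++, (*countp)++) {
		if ((error = copyin(ucpp, &sp, sizeof(sp))) != 0)
			return error;
		if (sp == NULL)
			return 0;		/* end of vector */
		error = copyinstr(sp, dp, bufend - dp, &len);
		if (error != 0)
			return error == ENAMETOOLONG ? E2BIG : error;
		dp += len;			/* len includes the NUL */
	}
}
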
Example #21
void
m88110_trap(u_int type, struct trapframe *frame)
{
	struct proc *p;
	struct vm_map *map;
	vaddr_t va, pcb_onfault;
	vm_prot_t ftype;
	int fault_type;
	u_long fault_code;
	vaddr_t fault_addr;
	struct vmspace *vm;
	union sigval sv;
	int result;
#ifdef DDB
	int s;
	u_int psr;
#endif
	int sig = 0;

	uvmexp.traps++;
	if ((p = curproc) == NULL)
		p = &proc0;

	fault_type = SI_NOINFO;
	fault_code = 0;
	fault_addr = frame->tf_exip & XIP_ADDR;

	/*
	 * 88110 errata #16 (4.2) or #3 (5.1.1):
	 * ``bsr, br, bcnd, jsr and jmp instructions with the .n extension
	 *   can cause the enip value to be incremented by 4 incorrectly
	 *   if the instruction in the delay slot is the first word of a
	 *   page which misses in the mmu and results in a hardware
	 *   tablewalk which encounters an exception or an invalid
	 *   descriptor.  The exip value in this case will point to the
	 *   first word of the page, and the D bit will be set.
	 *
	 *   Note: if the instruction is a jsr.n r1, r1 will be overwritten
	 *   with erroneous data.  Therefore, no recovery is possible. Do
	 *   not allow this instruction to occupy the last word of a page.
	 *
	 *   Suggested fix: recover in general by backing up the exip by 4
	 *   and clearing the delay bit before an rte when the lower 3 hex
	 *   digits of the exip are 001.''
	 */
	if ((frame->tf_exip & PAGE_MASK) == 0x00000001 && type == T_INSTFLT) {
		u_int instr;

		/*
		 * Note that we have initialized fault_addr above, so that
		 * signals provide the correct address if necessary.
		 */
		frame->tf_exip = (frame->tf_exip & ~1) - 4;

		/*
		 * Check the instruction at the (backed up) exip.
		 * If it is a jsr.n, abort.
		 */
		if (!USERMODE(frame->tf_epsr)) {
			instr = *(u_int *)fault_addr;
			if (instr == 0xf400cc01)
				panic("mc88110 errata #16, exip %p enip %p",
				    (frame->tf_exip + 4) | 1, frame->tf_enip);
		} else {
			/* copyin here should not fail */
			if (copyin((const void *)frame->tf_exip, &instr,
			    sizeof instr) == 0 &&
			    instr == 0xf400cc01) {
				uprintf("mc88110 errata #16, exip %p enip %p",
				    (frame->tf_exip + 4) | 1, frame->tf_enip);
				sig = SIGILL;
			}
		}
	}

	if (USERMODE(frame->tf_epsr)) {
		type += T_USER;
		p->p_md.md_tf = frame;	/* for ptrace/signals */
	}

	if (sig != 0)
		goto deliver;

	switch (type) {
	default:
lose:
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/

#ifdef DEBUG
	case T_110_DRM+T_USER:
	case T_110_DRM:
		printf("DMMU read miss: Hardware Table Searches should be enabled!\n");
		goto lose;
	case T_110_DWM+T_USER:
	case T_110_DWM:
		printf("DMMU write miss: Hardware Table Searches should be enabled!\n");
		goto lose;
	case T_110_IAM+T_USER:
	case T_110_IAM:
		printf("IMMU miss: Hardware Table Searches should be enabled!\n");
		goto lose;
#endif

#ifdef DDB
	case T_KDB_TRACE:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_break_trap(T_KDB_TRACE, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
	case T_KDB_BREAK:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
	case T_KDB_ENTRY:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
		set_psr(psr);
		/* skip trap instruction */
		m88110_skip_insn(frame);
		splx(s);
		return;
#endif /* DDB */
	case T_ILLFLT:
		/*
		 * The 88110 seems to trigger an instruction fault in
		 * supervisor mode when running the following sequence:
		 *
		 *	bcnd.n cond, reg, 1f
		 *	arithmetic insn
		 *	...
		 *  	the same exact arithmetic insn
		 *  1:	another arithmetic insn stalled by the previous one
		 *	...
		 *
		 * The exception is reported with exip pointing to the
		 * branch address. I don't know, at this point, if there
		 * is any better workaround than the aggressive one
		 * implemented below; I don't see how this could relate to
		 * any of the 88110 errata (although it might be related to
		 * branch prediction).
		 *
		 * For the record, the exact sequence triggering the
		 * spurious exception is:
		 *
		 *	bcnd.n	eq0, r2,  1f
		 *	 or	r25, r0,  r22
		 *	bsr	somewhere
		 *	or	r25, r0,  r22
		 *  1:	cmp	r13, r25, r20
		 *
		 * within the same cache line.
		 *
		 * Simply ignoring the exception and returning does not
		 * cause the exception to disappear. Clearing the
		 * instruction cache works, but on 88110+88410 systems,
		 * the 88410 needs to be invalidated as well. (note that
		 * the size passed to the flush routines does not matter
		 * since there is no way to flush a subset of the 88110
		 * I$ anyway)
		 */
	    {
		extern void *kernel_text, *etext;

		if (fault_addr >= (vaddr_t)&kernel_text &&
		    fault_addr < (vaddr_t)&etext) {
			cmmu_icache_inv(curcpu()->ci_cpuid,
			    trunc_page(fault_addr), PAGE_SIZE);
			cmmu_cache_wbinv(curcpu()->ci_cpuid,
			    trunc_page(fault_addr), PAGE_SIZE);
			return;
		}
	    }
		goto lose;
	case T_MISALGNFLT:
		printf("kernel misaligned access exception @%p\n",
		    frame->tf_exip);
		goto lose;
	case T_INSTFLT:
		/* kernel mode instruction access fault.
		 * Should never, never happen for a non-paged kernel.
		 */
#ifdef TRAPDEBUG
		printf("Kernel Instruction fault exip %x isr %x ilar %x\n",
		    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
#endif
		goto lose;

	case T_DATAFLT:
		/* kernel mode data fault */

		/* data fault on the user address? */
		if ((frame->tf_dsr & CMMU_DSR_SU) == 0) {
			KERNEL_LOCK();
			goto m88110_user_fault;
		}

#ifdef TRAPDEBUG
		printf("Kernel Data access fault exip %x dsr %x dlar %x\n",
		    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
#endif

		fault_addr = frame->tf_dlar;
		if (frame->tf_dsr & CMMU_DSR_RW) {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		} else {
			ftype = VM_PROT_READ|VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		}

		va = trunc_page((vaddr_t)fault_addr);

		KERNEL_LOCK();
		vm = p->p_vmspace;
		map = kernel_map;

		if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
			/*
			 * On a segment or a page fault, call uvm_fault() to
			 * resolve the fault.
			 */
			if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
				p->p_addr->u_pcb.pcb_onfault = 0;
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
			/*
			 * This could be a fault caused in copyout*()
			 * while accessing kernel space.
			 */
			if (result != 0 && pcb_onfault != 0) {
				frame->tf_exip = pcb_onfault;
				/*
				 * Continue as if the fault had been resolved.
				 */
				result = 0;
			}
			if (result == 0) {
				KERNEL_UNLOCK();
				return;
			}
		}
		KERNEL_UNLOCK();
		goto lose;
	case T_INSTFLT+T_USER:
		/* User mode instruction access fault */
		/* FALLTHROUGH */
	case T_DATAFLT+T_USER:
		KERNEL_LOCK();
m88110_user_fault:
		if (type == T_INSTFLT+T_USER) {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
#ifdef TRAPDEBUG
			printf("User Instruction fault exip %x isr %x ilar %x\n",
			    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
#endif
		} else {
			fault_addr = frame->tf_dlar;
			if (frame->tf_dsr & CMMU_DSR_RW) {
				ftype = VM_PROT_READ;
				fault_code = VM_PROT_READ;
			} else {
				ftype = VM_PROT_READ|VM_PROT_WRITE;
				fault_code = VM_PROT_WRITE;
			}
#ifdef TRAPDEBUG
			printf("User Data access fault exip %x dsr %x dlar %x\n",
			    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
#endif
		}

		va = trunc_page((vaddr_t)fault_addr);

		vm = p->p_vmspace;
		map = &vm->vm_map;
		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
			p->p_addr->u_pcb.pcb_onfault = 0;

		/*
		 * Call uvm_fault() to resolve non-bus error faults
		 * whenever possible.
		 */
		if (type == T_INSTFLT+T_USER) {
			/* instruction faults */
			if (frame->tf_isr &
			    (CMMU_ISR_BE | CMMU_ISR_SP | CMMU_ISR_TBE)) {
				/* bus error, supervisor protection */
				result = EACCES;
			} else
			if (frame->tf_isr & (CMMU_ISR_SI | CMMU_ISR_PI)) {
				/* segment or page fault */
				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			} else {
#ifdef TRAPDEBUG
				printf("Unexpected Instruction fault isr %x\n",
				    frame->tf_isr);
#endif
				KERNEL_UNLOCK();
				goto lose;
			}
		} else {
			/* data faults */
			if (frame->tf_dsr & CMMU_DSR_BE) {
				/* bus error */
				result = EACCES;
			} else
			if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
				/* segment or page fault */
				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			} else
			if (frame->tf_dsr & (CMMU_DSR_CP | CMMU_DSR_WA)) {
				/* copyback or write allocate error */
				result = EACCES;
			} else
			if (frame->tf_dsr & CMMU_DSR_WE) {
				/* write fault  */
				/* This could be a write protection fault or an
				 * exception to set the used and modified bits
				 * in the pte. Basically, if we got a write
				 * error, then we already have a pte entry that
				 * faulted in from a previous seg fault or page
				 * fault.
				 * Get the pte and check the status of the
				 * modified and valid bits to determine if this
				 * is indeed a real write fault.  XXX smurph
				 */
				if (pmap_set_modify(map->pmap, va)) {
#ifdef TRAPDEBUG
					printf("Corrected userland write fault, pmap %p va %p\n",
					    map->pmap, va);
#endif
					result = 0;
				} else {
					/* must be a real wp fault */
#ifdef TRAPDEBUG
					printf("Uncorrected userland write fault, pmap %p va %p\n",
					    map->pmap, va);
#endif
					result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
				}
			} else {
#ifdef TRAPDEBUG
				printf("Unexpected Data access fault dsr %x\n",
				    frame->tf_dsr);
#endif
				KERNEL_UNLOCK();
				goto lose;
			}
		}
		p->p_addr->u_pcb.pcb_onfault = pcb_onfault;

		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (result == 0)
				uvm_grow(p, va);
			else if (result == EACCES)
				result = EFAULT;
		}
		KERNEL_UNLOCK();

		/*
		 * This could be a fault caused in copyin*()
		 * while accessing user space.
		 */
		if (result != 0 && pcb_onfault != 0) {
			frame->tf_exip = pcb_onfault;
			/*
			 * Continue as if the fault had been resolved.
			 */
			result = 0;
		}

		if (result != 0) {
			sig = result == EACCES ? SIGBUS : SIGSEGV;
			fault_type = result == EACCES ?
			    BUS_ADRERR : SEGV_MAPERR;
		}
		break;
	case T_MISALGNFLT+T_USER:
		/* Fix any misaligned ld.d or st.d instructions */
		sig = double_reg_fixup(frame);
		fault_type = BUS_ADRALN;
		if (sig == 0) {
			/* skip recovered instruction */
			m88110_skip_insn(frame);
			goto userexit;
		}
		break;
	case T_PRIVINFLT+T_USER:
		fault_type = ILL_PRVREG;
		/* FALLTHROUGH */
	case T_ILLFLT+T_USER:
#ifndef DDB
	case T_KDB_BREAK:
	case T_KDB_ENTRY:
	case T_KDB_TRACE:
#endif
	case T_KDB_BREAK+T_USER:
	case T_KDB_ENTRY+T_USER:
	case T_KDB_TRACE+T_USER:
		sig = SIGILL;
		break;
	case T_BNDFLT+T_USER:
		sig = SIGFPE;
		/* skip trap instruction */
		m88110_skip_insn(frame);
		break;
	case T_ZERODIV+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTDIV;
		/* skip trap instruction */
		m88110_skip_insn(frame);
		break;
	case T_OVFFLT+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTOVF;
		/* skip trap instruction */
		m88110_skip_insn(frame);
		break;
	case T_FPEPFLT+T_USER:
		m88110_fpu_exception(frame);
		goto userexit;
	case T_SIGSYS+T_USER:
		sig = SIGSYS;
		break;
	case T_STEPBPT+T_USER:
#ifdef PTRACE
		/*
		 * This trap is used by the kernel to support single-step
		 * debugging (although any user could generate this trap
		 * which should probably be handled differently). When a
		 * process is continued by a debugger with the PT_STEP
		 * function of ptrace (single step), the kernel inserts
		 * one or two breakpoints in the user process so that only
		 * one instruction (or two in the case of a delayed branch)
		 * is executed.  When this breakpoint is hit, we get the
		 * T_STEPBPT trap.
		 */
		{
			u_int instr;
			vaddr_t pc = PC_REGS(&frame->tf_regs);

			/* read break instruction */
			copyin((caddr_t)pc, &instr, sizeof(u_int));

			/* check and see if we got here by accident */
			if ((p->p_md.md_bp0va != pc &&
			     p->p_md.md_bp1va != pc) ||
			    instr != SSBREAKPOINT) {
				sig = SIGTRAP;
				fault_type = TRAP_TRACE;
				break;
			}

			/* restore original instruction and clear breakpoint */
			if (p->p_md.md_bp0va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp0save);
				p->p_md.md_bp0va = 0;
			}
			if (p->p_md.md_bp1va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp1save);
				p->p_md.md_bp1va = 0;
			}

			sig = SIGTRAP;
			fault_type = TRAP_BRKPT;
		}
#else
		sig = SIGTRAP;
		fault_type = TRAP_TRACE;
#endif
		break;
	case T_USERBPT+T_USER:
		/*
		 * This trap is meant to be used by debuggers to implement
		 * breakpoint debugging.  When we get this trap, we just
		 * return a signal which gets caught by the debugger.
		 */
		sig = SIGTRAP;
		fault_type = TRAP_BRKPT;
		break;
	}

	/*
	 * If trap from supervisor mode, just return
	 */
	if (type < T_USER)
		return;

	if (sig) {
deliver:
		sv.sival_ptr = (void *)fault_addr;
		KERNEL_LOCK();
		trapsignal(p, sig, fault_code, fault_type, sv);
		KERNEL_UNLOCK();
	}

userexit:
	userret(p);
}
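
The CMMU_DSR_WE path above hinges on one test: if the faulting page's PTE is
already valid and writable, the write fault only means the modified bit needs
to be set; otherwise it is a genuine protection fault. Here is a minimal
user-space model of that decision, with a hypothetical toy_set_modify()
standing in for pmap_set_modify() and a made-up PTE layout:

#include <stdbool.h>
#include <stdio.h>

#define PTE_V	0x1	/* valid */
#define PTE_W	0x2	/* writable */
#define PTE_M	0x4	/* modified */

/* Returns true if the fault was only a missing M bit (now set). */
static bool
toy_set_modify(unsigned *pte)
{
	if ((*pte & (PTE_V | PTE_W)) != (PTE_V | PTE_W))
		return false;		/* real write-protection fault */
	*pte |= PTE_M;			/* emulate the hardware M-bit update */
	return true;
}

int
main(void)
{
	unsigned rw = PTE_V | PTE_W, ro = PTE_V;

	printf("rw page: %s\n", toy_set_modify(&rw) ? "corrected" : "wp fault");
	printf("ro page: %s\n", toy_set_modify(&ro) ? "corrected" : "wp fault");
	return 0;
}
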
Example #22
static int
pci_ioctl(struct dev_ioctl_args *ap)
{
	device_t pcidev, brdev;
	void *confdata;
	const char *name;
	struct devlist *devlist_head;
	struct pci_conf_io *cio;
	struct pci_devinfo *dinfo;
	struct pci_io *io;
	struct pci_bar_io *bio;
	struct pci_match_conf *pattern_buf;
	struct resource_list_entry *rle;
	uint32_t value;
	size_t confsz, iolen, pbufsz;
	int error, ionum, i, num_patterns;
#ifdef PRE7_COMPAT
	struct pci_conf_old conf_old;
	struct pci_io iodata;
	struct pci_io_old *io_old;
	struct pci_match_conf_old *pattern_buf_old;

	io_old = NULL;
	pattern_buf_old = NULL;

	if (!(ap->a_fflag & FWRITE) && ap->a_cmd != PCIOCGETBAR &&
	    ap->a_cmd != PCIOCGETCONF && ap->a_cmd != PCIOCGETCONF_OLD)
		return EPERM;
#else
	if (!(ap->a_fflag & FWRITE) && ap->a_cmd != PCIOCGETBAR && ap->a_cmd != PCIOCGETCONF)
		return EPERM;
#endif

	switch(ap->a_cmd) {
#ifdef PRE7_COMPAT
	case PCIOCGETCONF_OLD:
		/* FALLTHROUGH */
#endif
	case PCIOCGETCONF:
		cio = (struct pci_conf_io *)ap->a_data;

		pattern_buf = NULL;
		num_patterns = 0;
		dinfo = NULL;

		cio->num_matches = 0;

		/*
		 * If the user specified an offset into the device list,
		 * but the list has changed since they last called this
		 * ioctl, tell them that the list has changed.  They will
		 * have to get the list from the beginning.
		 */
		if ((cio->offset != 0)
		 && (cio->generation != pci_generation)) {
			cio->status = PCI_GETCONF_LIST_CHANGED;
			error = 0;
			break;
		}

		/*
		 * Check to see whether the user has asked for an offset
		 * past the end of our list.
		 */
		if (cio->offset >= pci_numdevs) {
			cio->status = PCI_GETCONF_LAST_DEVICE;
			error = 0;
			break;
		}

		/* get the head of the device queue */
		devlist_head = &pci_devq;

		/*
		 * Determine how much room we have for pci_conf structures.
		 * Round the user's buffer size down to the nearest
		 * multiple of sizeof(struct pci_conf) in case the user
		 * didn't specify a multiple of that size.
		 */
#ifdef PRE7_COMPAT
		if (ap->a_cmd == PCIOCGETCONF_OLD)
			confsz = sizeof(struct pci_conf_old);
		else
#endif
			confsz = sizeof(struct pci_conf);
		iolen = min(cio->match_buf_len - (cio->match_buf_len % confsz),
		    pci_numdevs * confsz);

		/*
		 * Since we know that iolen is a multiple of the size of
		 * the pci_conf structure, it's okay to do this.
		 */
		ionum = iolen / confsz;

		/*
		 * If this test is true, the user wants the pci_conf
		 * structures returned to match the supplied entries.
		 */
		if ((cio->num_patterns > 0) && (cio->num_patterns < pci_numdevs)
		 && (cio->pat_buf_len > 0)) {
			/*
			 * pat_buf_len needs to be:
			 * num_patterns * sizeof(struct pci_match_conf)
			 * While it is certainly possible that the user just
			 * allocated a large buffer and set the number of
			 * matches correctly, it is far more likely that
			 * their kernel doesn't match the userland utility
			 * they're using.  It's also possible that the user
			 * forgot to initialize some variables.  Yes, this
			 * may be overly picky, but I would hazard that it's
			 * far more likely to just catch folks who updated
			 * their kernel but not their userland.
			 */
#ifdef PRE7_COMPAT
			if (ap->a_cmd == PCIOCGETCONF_OLD)
				pbufsz = sizeof(struct pci_match_conf_old);
			else
#endif
				pbufsz = sizeof(struct pci_match_conf);
			if (cio->num_patterns * pbufsz != cio->pat_buf_len) {
				/* The user made a mistake, return an error. */
				cio->status = PCI_GETCONF_ERROR;
				error = EINVAL;
				break;
			}

			/*
			 * Allocate a buffer to hold the patterns.
			 */
#ifdef PRE7_COMPAT
			if (ap->a_cmd == PCIOCGETCONF_OLD) {
				pattern_buf_old = kmalloc(cio->pat_buf_len,
				    M_TEMP, M_WAITOK);
				error = copyin(cio->patterns,
				    pattern_buf_old, cio->pat_buf_len);
			} else
#endif
			{
				pattern_buf = kmalloc(cio->pat_buf_len, M_TEMP,
				    M_WAITOK);
				error = copyin(cio->patterns, pattern_buf,
				    cio->pat_buf_len);
			}
			if (error != 0) {
				error = EINVAL;
				goto getconfexit;
			}
			num_patterns = cio->num_patterns;
		} else if ((cio->num_patterns > 0)
			|| (cio->pat_buf_len > 0)) {
			/*
			 * The user made a mistake, spit out an error.
			 */
			cio->status = PCI_GETCONF_ERROR;
			error = EINVAL;
			break;
		}

		/*
		 * Go through the list of devices and copy out the devices
		 * that match the user's criteria.
		 */
		for (cio->num_matches = 0, error = 0, i = 0,
		     dinfo = STAILQ_FIRST(devlist_head);
		     (dinfo != NULL) && (cio->num_matches < ionum)
		     && (error == 0) && (i < pci_numdevs);
		     dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {

			if (i < cio->offset)
				continue;

			/* Populate pd_name and pd_unit */
			name = NULL;
			if (dinfo->cfg.dev)
				name = device_get_name(dinfo->cfg.dev);
			if (name) {
				strncpy(dinfo->conf.pd_name, name,
					sizeof(dinfo->conf.pd_name));
				dinfo->conf.pd_name[PCI_MAXNAMELEN] = 0;
				dinfo->conf.pd_unit =
					device_get_unit(dinfo->cfg.dev);
			} else {
				dinfo->conf.pd_name[0] = '\0';
				dinfo->conf.pd_unit = 0;
			}

#ifdef PRE7_COMPAT
			if ((ap->a_cmd == PCIOCGETCONF_OLD &&
			    (pattern_buf_old == NULL ||
			    pci_conf_match_old(pattern_buf_old, num_patterns,
			    &dinfo->conf) == 0)) ||
			    (ap->a_cmd == PCIOCGETCONF &&
			    (pattern_buf == NULL ||
			    pci_conf_match(pattern_buf, num_patterns,
			    &dinfo->conf) == 0))) {
#else
			if (pattern_buf == NULL ||
			    pci_conf_match(pattern_buf, num_patterns,
			    &dinfo->conf) == 0) {
#endif
				/*
				 * If we've filled up the user's buffer,
				 * break out at this point.  Since we've
				 * got a match here, we'll pick right back
				 * up at the matching entry.  We can also
				 * tell the user that there are more matches
				 * left.
				 */
				if (cio->num_matches >= ionum)
					break;

#ifdef PRE7_COMPAT
				if (ap->a_cmd == PCIOCGETCONF_OLD) {
					conf_old.pc_sel.pc_bus =
					    dinfo->conf.pc_sel.pc_bus;
					conf_old.pc_sel.pc_dev =
					    dinfo->conf.pc_sel.pc_dev;
					conf_old.pc_sel.pc_func =
					    dinfo->conf.pc_sel.pc_func;
					conf_old.pc_hdr = dinfo->conf.pc_hdr;
					conf_old.pc_subvendor =
					    dinfo->conf.pc_subvendor;
					conf_old.pc_subdevice =
					    dinfo->conf.pc_subdevice;
					conf_old.pc_vendor =
					    dinfo->conf.pc_vendor;
					conf_old.pc_device =
					    dinfo->conf.pc_device;
					conf_old.pc_class =
					    dinfo->conf.pc_class;
					conf_old.pc_subclass =
					    dinfo->conf.pc_subclass;
					conf_old.pc_progif =
					    dinfo->conf.pc_progif;
					conf_old.pc_revid =
					    dinfo->conf.pc_revid;
					strncpy(conf_old.pd_name,
					    dinfo->conf.pd_name,
					    sizeof(conf_old.pd_name));
					conf_old.pd_name[PCI_MAXNAMELEN] = 0;
					conf_old.pd_unit =
					    dinfo->conf.pd_unit;
					confdata = &conf_old;
				} else
#endif
					confdata = &dinfo->conf;
				/* Only if we can copy it out do we count it. */
				if (!(error = copyout(confdata,
				    (caddr_t)cio->matches +
				    confsz * cio->num_matches, confsz)))
					cio->num_matches++;
			}
		}

		/*
		 * Set the pointer into the list, so if the user is getting
		 * n records at a time, where n < pci_numdevs, they can pick
		 * up the traversal where they left off on the next call.
		 */
		cio->offset = i;

		/*
		 * Set the generation, the user will need this if they make
		 * another ioctl call with offset != 0.
		 */
		cio->generation = pci_generation;

		/*
		 * If this is the last device, inform the user so he won't
		 * bother asking for more devices.  If dinfo isn't NULL, we
		 * know that there are more matches in the list because of
		 * the way the traversal is done.
		 */
		if (dinfo == NULL)
			cio->status = PCI_GETCONF_LAST_DEVICE;
		else
			cio->status = PCI_GETCONF_MORE_DEVS;

getconfexit:
		if (pattern_buf != NULL)
			kfree(pattern_buf, M_TEMP);
#ifdef PRE7_COMPAT
		if (pattern_buf_old != NULL)
			kfree(pattern_buf_old, M_TEMP);
#endif

		break;

#ifdef PRE7_COMPAT
	case PCIOCREAD_OLD:
	case PCIOCWRITE_OLD:
		io_old = (struct pci_io_old *)ap->a_data;
		iodata.pi_sel.pc_domain = 0;
		iodata.pi_sel.pc_bus = io_old->pi_sel.pc_bus;
		iodata.pi_sel.pc_dev = io_old->pi_sel.pc_dev;
		iodata.pi_sel.pc_func = io_old->pi_sel.pc_func;
		iodata.pi_reg = io_old->pi_reg;
		iodata.pi_width = io_old->pi_width;
		iodata.pi_data = io_old->pi_data;
		ap->a_data = (caddr_t)&iodata;
		/* FALLTHROUGH */
#endif
	case PCIOCREAD:
	case PCIOCWRITE:
		io = (struct pci_io *)ap->a_data;
		switch(io->pi_width) {
		case 4:
		case 2:
		case 1:
			/* Make sure the register is non-negative and aligned. */
			if (io->pi_reg < 0 ||
			    io->pi_reg & (io->pi_width - 1)) {
				error = EINVAL;
				break;
			}
			/*
			 * Assume that the user-level bus number is
			 * in fact the physical PCI bus number.
			 * Look up the grandparent, i.e. the bridge device,
			 * so that we can issue configuration space cycles.
			 */
			pcidev = pci_find_dbsf(io->pi_sel.pc_domain,
			    io->pi_sel.pc_bus, io->pi_sel.pc_dev,
			    io->pi_sel.pc_func);
			if (pcidev) {
				brdev = device_get_parent(
				    device_get_parent(pcidev));

#ifdef PRE7_COMPAT
				if (ap->a_cmd == PCIOCWRITE || ap->a_cmd == PCIOCWRITE_OLD)
#else
				if (ap->a_cmd == PCIOCWRITE)
#endif
					PCIB_WRITE_CONFIG(brdev,
							  io->pi_sel.pc_bus,
							  io->pi_sel.pc_dev,
							  io->pi_sel.pc_func,
							  io->pi_reg,
							  io->pi_data,
							  io->pi_width);
#ifdef PRE7_COMPAT
				else if (ap->a_cmd == PCIOCREAD_OLD)
					io_old->pi_data =
						PCIB_READ_CONFIG(brdev,
							  io->pi_sel.pc_bus,
							  io->pi_sel.pc_dev,
							  io->pi_sel.pc_func,
							  io->pi_reg,
							  io->pi_width);
#endif
				else
					io->pi_data =
						PCIB_READ_CONFIG(brdev,
							  io->pi_sel.pc_bus,
							  io->pi_sel.pc_dev,
							  io->pi_sel.pc_func,
							  io->pi_reg,
							  io->pi_width);
				error = 0;
			} else {
#ifdef PRE7_COMPAT
				if (ap->a_cmd == PCIOCREAD_OLD) {
					io_old->pi_data = -1;
					error = 0;
				} else
#endif
					error = ENODEV;
			}
			break;
		default:
			error = EINVAL;
			break;
		}
		break;

	case PCIOCGETBAR:
		bio = (struct pci_bar_io *)ap->a_data;

		/*
		 * Assume that the user-level bus number is
		 * in fact the physical PCI bus number.
		 */
		pcidev = pci_find_dbsf(bio->pbi_sel.pc_domain,
		    bio->pbi_sel.pc_bus, bio->pbi_sel.pc_dev,
		    bio->pbi_sel.pc_func);
		if (pcidev == NULL) {
			error = ENODEV;
			break;
		}
		dinfo = device_get_ivars(pcidev);
		
		/*
		 * Look for a resource list entry matching the requested BAR.
		 *
		 * XXX: This will not find BARs that are not initialized, but
		 * maybe that is ok?
		 */
		rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
		    bio->pbi_reg);
		if (rle == NULL)
			rle = resource_list_find(&dinfo->resources,
			    SYS_RES_IOPORT, bio->pbi_reg);
		if (rle == NULL || rle->res == NULL) {
			error = EINVAL;
			break;
		}

		/*
		 * Ok, we have a resource for this BAR.  Read the lower
		 * 32 bits to get any flags.
		 */
		value = pci_read_config(pcidev, bio->pbi_reg, 4);
		if (PCI_BAR_MEM(value)) {
			if (rle->type != SYS_RES_MEMORY) {
				error = EINVAL;
				break;
			}
			value &= ~PCIM_BAR_MEM_BASE;
		} else {
			if (rle->type != SYS_RES_IOPORT) {
				error = EINVAL;
				break;
			}
			value &= ~PCIM_BAR_IO_BASE;
		}
		bio->pbi_base = rman_get_start(rle->res) | value;
		bio->pbi_length = rman_get_size(rle->res);

		/*
		 * Check the command register to determine if this BAR
		 * is enabled.
		 */
		value = pci_read_config(pcidev, PCIR_COMMAND, 2);
		if (rle->type == SYS_RES_MEMORY)
			bio->pbi_enabled = (value & PCIM_CMD_MEMEN) != 0;
		else
			bio->pbi_enabled = (value & PCIM_CMD_PORTEN) != 0;
		error = 0;
		break;
	case PCIOCATTACHED:
		error = 0;
		io = (struct pci_io *)ap->a_data;
		pcidev = pci_find_dbsf(io->pi_sel.pc_domain, io->pi_sel.pc_bus,
				       io->pi_sel.pc_dev, io->pi_sel.pc_func);
		if (pcidev != NULL)
			io->pi_data = device_is_attached(pcidev);
		else
			error = ENODEV;
		break;
	default:
		error = ENOTTY;
		break;
	}

	return (error);
}
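
The offset/generation protocol in the PCIOCGETCONF case above is driven from
userland roughly as follows. This is a minimal sketch assuming the
FreeBSD/DragonFly <sys/pciio.h> definitions: the caller hands the
kernel-updated pci_conf_io straight back in until status reports
PCI_GETCONF_LAST_DEVICE, and restarts from offset 0 if the kernel signals
PCI_GETCONF_LIST_CHANGED.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/pciio.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct pci_conf matches[32];
	struct pci_conf_io cio;
	u_int i;
	int fd;

	if ((fd = open("/dev/pci", O_RDONLY)) < 0)
		err(1, "open(/dev/pci)");
restart:
	memset(&cio, 0, sizeof(cio));	/* no patterns: match every device */
	cio.match_buf_len = sizeof(matches);
	cio.matches = matches;
	do {
		if (ioctl(fd, PCIOCGETCONF, &cio) == -1)
			err(1, "PCIOCGETCONF");
		if (cio.status == PCI_GETCONF_LIST_CHANGED)
			goto restart;	/* list moved underneath us */
		if (cio.status == PCI_GETCONF_ERROR)
			errx(1, "PCI_GETCONF_ERROR");
		for (i = 0; i < cio.num_matches; i++)
			printf("%s%ld @ %d:%d:%d\n", matches[i].pd_name,
			    (long)matches[i].pd_unit,
			    matches[i].pc_sel.pc_bus,
			    matches[i].pc_sel.pc_dev,
			    matches[i].pc_sel.pc_func);
	} while (cio.status == PCI_GETCONF_MORE_DEVS);
	close(fd);
	return 0;
}
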
Example #23
/*
 * VFS Operations.
 *
 * mount system call
 *
 * Parameters:
 *	data:	this is actually a (struct ext2_args *)
 */
static int
ext2_mount(struct mount *mp, char *path, caddr_t data, struct ucred *cred)
{
	struct vnode *devvp;
	struct ext2_args args;
	struct ext2mount *ump = NULL;
	struct ext2_sb_info *fs;
	size_t size;
	int error, flags;
	mode_t accessmode;
	struct nlookupdata nd;

	if ((error = copyin(data, (caddr_t)&args, sizeof (struct ext2_args))) != 0)
		return (error);

	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOEXT2(mp);
		fs = ump->um_e2fs;
		devvp = ump->um_devvp;
		error = 0;
		if (fs->s_rd_only == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (vfs_busy(mp, LK_NOWAIT))
				return (EBUSY);
			error = ext2_flushfiles(mp, flags);
			vfs_unbusy(mp);
			if (!error && fs->s_wasvalid) {
				fs->s_es->s_state |= EXT2_VALID_FS;
				ext2_sbupdate(ump, MNT_WAIT);
			}
			fs->s_rd_only = 1;
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			VOP_OPEN(devvp, FREAD, FSCRED, NULL);
			VOP_CLOSE(devvp, FREAD|FWRITE, NULL);
			vn_unlock(devvp);
		}
		if (!error && (mp->mnt_flag & MNT_RELOAD))
			error = ext2_reload(mp, cred);
		if (error)
			return (error);
		if (ext2_check_sb_compat(fs->s_es, devvp->v_rdev,
		    (mp->mnt_kern_flag & MNTK_WANTRDWR) == 0) != 0)
			return (EPERM);
		if (fs->s_rd_only && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
			/*
			 * If upgrading to read-write as non-root, verify
			 * that the user has the necessary permissions on
			 * the device.
			 */
			if (cred->cr_uid != 0) {
				vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
				error = VOP_EACCESS(devvp, VREAD | VWRITE, cred);
				if (error) {
					vn_unlock(devvp);
					return (error);
				}
				vn_unlock(devvp);
			}

			if ((fs->s_es->s_state & EXT2_VALID_FS) == 0 ||
			    (fs->s_es->s_state & EXT2_ERROR_FS)) {
				if (mp->mnt_flag & MNT_FORCE) {
					kprintf(
"WARNING: %s was not properly dismounted\n",
					    fs->fs_fsmnt);
				} else {
					kprintf(
"WARNING: R/W mount of %s denied.  Filesystem is not clean - run fsck\n",
					    fs->fs_fsmnt);
					return (EPERM);
				}
			}
			fs->s_es->s_state &= ~EXT2_VALID_FS;
			ext2_sbupdate(ump, MNT_WAIT);
			fs->s_rd_only = 0;
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			VOP_OPEN(devvp, FREAD|FWRITE, FSCRED, NULL);
			VOP_CLOSE(devvp, FREAD, NULL);
			vn_unlock(devvp);
		}
		if (args.fspec == NULL) {
			/*
			 * Process export requests.
			 */
			return (vfs_export(mp, &ump->um_export, &args.export));
		}
	}
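
The read-only/read-write transitions above implement the usual ext2 clean-bit
protocol: EXT2_VALID_FS is set in the on-disk superblock while the filesystem
is read-only (or cleanly unmounted) and cleared while it is writable, so a
crash leaves the bit clear and forces fsck. A toy stand-alone model of that
invariant (the bit values match the classic ext2 definitions, but the helper
functions are invented for illustration):

#include <stdio.h>

#define EXT2_VALID_FS	0x0001	/* cleanly unmounted / read-only */
#define EXT2_ERROR_FS	0x0002	/* errors detected */

static unsigned s_state = EXT2_VALID_FS;	/* toy superblock state */

static void
downgrade_to_ro(void)
{
	s_state |= EXT2_VALID_FS;	/* clean again, as in ext2_mount() */
}

static int
upgrade_to_rw(int force)
{
	if ((!(s_state & EXT2_VALID_FS) || (s_state & EXT2_ERROR_FS)) &&
	    !force) {
		printf("not clean - run fsck\n");
		return 1;		/* EPERM in the real code */
	}
	s_state &= ~EXT2_VALID_FS;	/* dirty while writable */
	return 0;
}

int
main(void)
{
	upgrade_to_rw(0);	/* clean: allowed, bit cleared */
	upgrade_to_rw(0);	/* no longer clean: refused */
	downgrade_to_ro();
	return 0;
}
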
Example #24
static int
nfssvc_call(struct thread *p, struct nfssvc_args *uap, struct ucred *cred)
{
	int error = EINVAL;
	struct nfsd_idargs nid;

	if (uap->flag & NFSSVC_IDNAME) {
		error = copyin(uap->argp, (caddr_t)&nid, sizeof (nid));
		if (error)
			goto out;
		error = nfssvc_idname(&nid);
		goto out;
	} else if (uap->flag & NFSSVC_GETSTATS) {
		error = copyout(&newnfsstats,
		    CAST_USER_ADDR_T(uap->argp), sizeof (newnfsstats));
		if (error == 0) {
			if ((uap->flag & NFSSVC_ZEROCLTSTATS) != 0) {
				newnfsstats.attrcache_hits = 0;
				newnfsstats.attrcache_misses = 0;
				newnfsstats.lookupcache_hits = 0;
				newnfsstats.lookupcache_misses = 0;
				newnfsstats.direofcache_hits = 0;
				newnfsstats.direofcache_misses = 0;
				newnfsstats.accesscache_hits = 0;
				newnfsstats.accesscache_misses = 0;
				newnfsstats.biocache_reads = 0;
				newnfsstats.read_bios = 0;
				newnfsstats.read_physios = 0;
				newnfsstats.biocache_writes = 0;
				newnfsstats.write_bios = 0;
				newnfsstats.write_physios = 0;
				newnfsstats.biocache_readlinks = 0;
				newnfsstats.readlink_bios = 0;
				newnfsstats.biocache_readdirs = 0;
				newnfsstats.readdir_bios = 0;
				newnfsstats.rpcretries = 0;
				newnfsstats.rpcrequests = 0;
				newnfsstats.rpctimeouts = 0;
				newnfsstats.rpcunexpected = 0;
				newnfsstats.rpcinvalid = 0;
				bzero(newnfsstats.rpccnt,
				    sizeof(newnfsstats.rpccnt));
			}
			if ((uap->flag & NFSSVC_ZEROSRVSTATS) != 0) {
				newnfsstats.srvrpc_errs = 0;
				newnfsstats.srv_errs = 0;
				newnfsstats.srvcache_inproghits = 0;
				newnfsstats.srvcache_idemdonehits = 0;
				newnfsstats.srvcache_nonidemdonehits = 0;
				newnfsstats.srvcache_misses = 0;
				newnfsstats.srvcache_tcppeak = 0;
				newnfsstats.srvclients = 0;
				newnfsstats.srvopenowners = 0;
				newnfsstats.srvopens = 0;
				newnfsstats.srvlockowners = 0;
				newnfsstats.srvlocks = 0;
				newnfsstats.srvdelegates = 0;
				newnfsstats.clopenowners = 0;
				newnfsstats.clopens = 0;
				newnfsstats.cllockowners = 0;
				newnfsstats.cllocks = 0;
				newnfsstats.cldelegates = 0;
				newnfsstats.cllocalopenowners = 0;
				newnfsstats.cllocalopens = 0;
				newnfsstats.cllocallockowners = 0;
				newnfsstats.cllocallocks = 0;
				bzero(newnfsstats.srvrpccnt,
				    sizeof(newnfsstats.srvrpccnt));
				bzero(newnfsstats.cbrpccnt,
				    sizeof(newnfsstats.cbrpccnt));
			}
		}
		goto out;
	} else if (uap->flag & NFSSVC_NFSUSERDPORT) {
		u_short sockport;

		error = copyin(uap->argp, (caddr_t)&sockport,
		    sizeof (u_short));
		if (!error)
			error = nfsrv_nfsuserdport(sockport, p);
	} else if (uap->flag & NFSSVC_NFSUSERDDELPORT) {
		nfsrv_nfsuserddelport();
		error = 0;
	}

out:
	NFSEXITCODE(error);
	return (error);
}
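
Note the ordering in the NFSSVC_GETSTATS branch above: the statistics are
copied out before the zeroing flags are examined, so a caller combining the
flags gets an atomic read-and-reset. A small stand-alone model of that
pattern (GETSTATS and ZEROCLTSTATS are toy stand-ins for the NFSSVC_* flags):

#include <stdio.h>
#include <string.h>

#define GETSTATS	0x1
#define ZEROCLTSTATS	0x2

struct stats { unsigned attrcache_hits, attrcache_misses; };
static struct stats kstats = { 42, 7 };	/* the "kernel" counters */

/* Snapshot first, then clear, exactly like the kernel path above. */
static void
getstats(unsigned flags, struct stats *out)
{
	if (flags & GETSTATS)
		*out = kstats;		/* caller sees pre-reset values */
	if (flags & ZEROCLTSTATS)
		memset(&kstats, 0, sizeof(kstats));
}

int
main(void)
{
	struct stats s;

	getstats(GETSTATS | ZEROCLTSTATS, &s);
	printf("hits %u misses %u\n", s.attrcache_hits, s.attrcache_misses);
	return 0;
}
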
Example #25
static int
freebsd32_ioctl_pciocgetconf(struct thread *td,
    struct freebsd32_ioctl_args *uap, struct file *fp)
{
	struct pci_conf_io pci;
	struct pci_conf_io32 pci32;
	struct pci_match_conf32 pmc32;
	struct pci_match_conf32 *pmc32p;
	struct pci_match_conf pmc;
	struct pci_match_conf *pmcp;
	struct pci_conf32 pc32;
	struct pci_conf32 *pc32p;
	struct pci_conf pc;
	struct pci_conf *pcp;
	u_int32_t i;
	u_int32_t npat_to_convert;
	u_int32_t nmatch_to_convert;
	vm_offset_t addr;
	int error;

	if ((error = copyin(uap->data, &pci32, sizeof(pci32))) != 0)
		return (error);

	CP(pci32, pci, num_patterns);
	CP(pci32, pci, offset);
	CP(pci32, pci, generation);

	npat_to_convert = pci32.pat_buf_len / sizeof(struct pci_match_conf32);
	pci.pat_buf_len = npat_to_convert * sizeof(struct pci_match_conf);
	pci.patterns = NULL;
	nmatch_to_convert = pci32.match_buf_len / sizeof(struct pci_conf32);
	pci.match_buf_len = nmatch_to_convert * sizeof(struct pci_conf);
	pci.matches = NULL;

	if ((error = copyout_map(td, &addr, pci.pat_buf_len)) != 0)
		goto cleanup;
	pci.patterns = (struct pci_match_conf *)addr;
	if ((error = copyout_map(td, &addr, pci.match_buf_len)) != 0)
		goto cleanup;
	pci.matches = (struct pci_conf *)addr;

	npat_to_convert = min(npat_to_convert, pci.num_patterns);

	for (i = 0, pmc32p = (struct pci_match_conf32 *)PTRIN(pci32.patterns),
	     pmcp = pci.patterns;
	     i < npat_to_convert; i++, pmc32p++, pmcp++) {
		if ((error = copyin(pmc32p, &pmc32, sizeof(pmc32))) != 0)
			goto cleanup;
		CP(pmc32,pmc,pc_sel);
		strlcpy(pmc.pd_name, pmc32.pd_name, sizeof(pmc.pd_name));
		CP(pmc32,pmc,pd_unit);
		CP(pmc32,pmc,pc_vendor);
		CP(pmc32,pmc,pc_device);
		CP(pmc32,pmc,pc_class);
		CP(pmc32,pmc,flags);
		if ((error = copyout(&pmc, pmcp, sizeof(pmc))) != 0)
			goto cleanup;
	}

	if ((error = fo_ioctl(fp, PCIOCGETCONF, (caddr_t)&pci,
			      td->td_ucred, td)) != 0)
		goto cleanup;

	nmatch_to_convert = min(nmatch_to_convert, pci.num_matches);

	for (i = 0, pcp = pci.matches,
	     pc32p = (struct pci_conf32 *)PTRIN(pci32.matches);
	     i < nmatch_to_convert; i++, pcp++, pc32p++) {
		if ((error = copyin(pcp, &pc, sizeof(pc))) != 0)
			goto cleanup;
		CP(pc,pc32,pc_sel);
		CP(pc,pc32,pc_hdr);
		CP(pc,pc32,pc_subvendor);
		CP(pc,pc32,pc_subdevice);
		CP(pc,pc32,pc_vendor);
		CP(pc,pc32,pc_device);
		CP(pc,pc32,pc_class);
		CP(pc,pc32,pc_subclass);
		CP(pc,pc32,pc_progif);
		CP(pc,pc32,pc_revid);
		strlcpy(pc32.pd_name, pc.pd_name, sizeof(pc32.pd_name));
		CP(pc,pc32,pd_unit);
		if ((error = copyout(&pc32, pc32p, sizeof(pc32))) != 0)
			goto cleanup;
	}

	CP(pci, pci32, num_matches);
	CP(pci, pci32, offset);
	CP(pci, pci32, generation);
	CP(pci, pci32, status);

	error = copyout(&pci32, uap->data, sizeof(pci32));

cleanup:
	if (pci.patterns)
		copyout_unmap(td, (vm_offset_t)pci.patterns, pci.pat_buf_len);
	if (pci.matches)
		copyout_unmap(td, (vm_offset_t)pci.matches, pci.match_buf_len);

	return (error);
}
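
The CP()/PTRIN() idiom above is the whole trick of these compat thunks:
same-width fields are copied one by one, 32-bit user pointers are widened,
and strings are re-terminated. A self-contained sketch with toy structs
standing in for pci_conf/pci_conf32 (on FreeBSD the real macros live in the
freebsd32 compat headers; the definitions below are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CP(src, dst, fld)	do { (dst).fld = (src).fld; } while (0)
#define PTRIN(v)		((void *)(uintptr_t)(v))

struct toy_conf32 { uint32_t pd_unit; uint32_t pat_ptr; char pd_name[16]; };
struct toy_conf   { uint32_t pd_unit; void *pat_ptr; char pd_name[16]; };

int
main(void)
{
	struct toy_conf32 c32 = { 3, 0, "em" };
	struct toy_conf c;

	CP(c32, c, pd_unit);			/* same-width field copy */
	c.pat_ptr = PTRIN(c32.pat_ptr);		/* widen a 32-bit pointer */
	strlcpy(c.pd_name, c32.pd_name, sizeof(c.pd_name));
	printf("%s%u\n", c.pd_name, c.pd_unit);
	return 0;
}
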
Example #26
static int
ncp_conn_handler(struct proc *p, struct sncp_request_args *uap,
	struct ncp_conn *conn, struct ncp_handle *hp)
{
	int error=0, rqsize, subfn;
	struct ucred *cred;
	
	char *pdata;

	cred = p->p_ucred;
	error = copyin(&uap->ncpbuf->rqsize, &rqsize, sizeof(int));
	if (error) return(error);
	error = 0;
	pdata = uap->ncpbuf->packet;
	subfn = *(pdata++) & 0xff;
	rqsize--;
	switch (subfn) {
	    case NCP_CONN_READ: case NCP_CONN_WRITE: {
		struct ncp_rw rwrq;
		struct uio auio;
		struct iovec iov;
	
		if (rqsize != sizeof(rwrq)) return (EBADRPC);	
		error = copyin(pdata,&rwrq,rqsize);
		if (error) return (error);
		iov.iov_base = rwrq.nrw_base;
		iov.iov_len = rwrq.nrw_cnt;
		auio.uio_iov = &iov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = rwrq.nrw_offset;
		auio.uio_resid = rwrq.nrw_cnt;
		auio.uio_segflg = UIO_USERSPACE;
		auio.uio_rw = (subfn == NCP_CONN_READ) ? UIO_READ : UIO_WRITE;
		auio.uio_procp = p;
		error = ncp_conn_lock(conn,p,cred,NCPM_EXECUTE);
		if (error) return(error);
		if (subfn == NCP_CONN_READ)
			error = ncp_read(conn, &rwrq.nrw_fh, &auio, cred);
		else
			error = ncp_write(conn, &rwrq.nrw_fh, &auio, cred);
		rwrq.nrw_cnt -= auio.uio_resid;
		ncp_conn_unlock(conn,p);
		p->p_retval[0] = rwrq.nrw_cnt;
		break;
	    } /* case int_read/write */
	    case NCP_CONN_SETFLAGS: {
		u_int16_t mask, flags;

		error = copyin(pdata,&mask, sizeof(mask));
		if (error) return error;
		pdata += sizeof(mask);
		error = copyin(pdata,&flags,sizeof(flags));
		if (error) return error;
		error = ncp_conn_lock(conn,p,cred,NCPM_WRITE);
		if (error) return error;
		if (mask & NCPFL_PERMANENT) {
			conn->flags &= ~NCPFL_PERMANENT;
			conn->flags |= (flags & NCPFL_PERMANENT);
		}
		if (mask & NCPFL_PRIMARY) {
			error = ncp_conn_setprimary(conn, flags & NCPFL_PRIMARY);
			if (error) {
				ncp_conn_unlock(conn,p);
				break;
			}
		}
		ncp_conn_unlock(conn,p);
		break;
	    }
	    case NCP_CONN_LOGIN: {
		struct ncp_conn_login la;

		if (rqsize != sizeof(la)) return (EBADRPC);	
		if ((error = copyin(pdata,&la,rqsize)) != 0) break;
		error = ncp_conn_lock(conn, p, cred, NCPM_EXECUTE | NCPM_WRITE);
		if (error) return error;
		error = ncp_login(conn, la.username, la.objtype, la.password, p, p->p_ucred);
		ncp_conn_unlock(conn, p);
		p->p_retval[0] = error;
		break;
	    }
	    case NCP_CONN_GETINFO: {
		struct ncp_conn_stat ncs;
		int len = sizeof(ncs);

		error = ncp_conn_lock(conn, p, p->p_ucred, NCPM_READ);
		if (error) return error;
		ncp_conn_getinfo(conn, &ncs);
		copyout(&len, &uap->ncpbuf->rpsize, sizeof(int));
		error = copyout(&ncs, &uap->ncpbuf->packet, len);
		ncp_conn_unlock(conn, p);
		break;
	    }
	    case NCP_CONN_GETUSER: {
		int len;

		error = ncp_conn_lock(conn, p, p->p_ucred, NCPM_READ);
		if (error) return error;
		len = (conn->li.user) ? strlen(conn->li.user) + 1 : 0;
		copyout(&len, &uap->ncpbuf->rpsize, sizeof(int));
		if (len) {
			error = copyout(conn->li.user, &uap->ncpbuf->packet, len);
		}
		ncp_conn_unlock(conn, p);
		break;
	    }
	    case NCP_CONN_CONN2REF: {
		int len = sizeof(int);

		error = ncp_conn_lock(conn, p, p->p_ucred, NCPM_READ);
		if (error) return error;
		copyout(&len, &uap->ncpbuf->rpsize, sizeof(int));
		if (len) {
			error = copyout(&conn->nc_id, &uap->ncpbuf->packet, len);
		}
		ncp_conn_unlock(conn, p);
		break;
	    }
	    case NCP_CONN_FRAG: {
		struct ncp_conn_frag nf;

		if (rqsize != sizeof(nf)) return (EBADRPC);	
		if ((error = copyin(pdata, &nf, rqsize)) != 0) break;
		error = ncp_conn_lock(conn, p, cred, NCPM_EXECUTE);
		if (error) return error;
		error = ncp_conn_frag_rq(conn, p, &nf);
		ncp_conn_unlock(conn, p);
		copyout(&nf, pdata, sizeof(nf));
		p->p_retval[0] = error;
		break;
	    }
	    case NCP_CONN_DUP: {
		struct ncp_handle *newhp;
		int len = sizeof(NWCONN_HANDLE);

		error = ncp_conn_lock(conn, p, cred, NCPM_READ);
		if (error) break;
		copyout(&len, &uap->ncpbuf->rpsize, sizeof(int));
		error = ncp_conn_gethandle(conn, p, &newhp);
		if (!error)
			error = copyout(&newhp->nh_id, uap->ncpbuf->packet, len);
		ncp_conn_unlock(conn,p);
		break;
	    }
	    case NCP_CONN_CONNCLOSE: {
		error = ncp_conn_lock(conn, p, cred, NCPM_EXECUTE);
		if (error) break;
		ncp_conn_puthandle(hp, p, 0);
		error = ncp_disconnect(conn);
		if (error)
			ncp_conn_unlock(conn, p);
		break;
	    }
	    default:
		    error = EOPNOTSUPP;
	}
	return error;
}
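
The handler above expects each request framed as an int rqsize followed by a
packet whose first byte selects the subfunction. Below is a caller-side
sketch of that framing for the SETFLAGS case; the struct layout and the
subfunction value are illustrative assumptions, not the canonical <netncp>
definitions:

#include <stdint.h>
#include <string.h>

#define TOY_CONN_SETFLAGS	2	/* illustrative subfunction number */

struct toy_ncpbuf {
	int	rqsize;		/* length of packet[], incl. subfn byte */
	int	rpsize;		/* filled in by the kernel on return */
	char	packet[64];
};

static void
build_setflags(struct toy_ncpbuf *nb, uint16_t mask, uint16_t flags)
{
	char *p = nb->packet;

	*p++ = TOY_CONN_SETFLAGS;	/* subfunction byte, stripped first */
	memcpy(p, &mask, sizeof(mask)); p += sizeof(mask);
	memcpy(p, &flags, sizeof(flags)); p += sizeof(flags);
	nb->rqsize = (int)(p - nb->packet);
}

int
main(void)
{
	struct toy_ncpbuf nb;

	build_setflags(&nb, 0x0001, 0x0001);
	return nb.rqsize == 5 ? 0 : 1;
}
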
Example #27
static int
freebsd32_ioctl_md(struct thread *td, struct freebsd32_ioctl_args *uap,
    struct file *fp)
{
	struct md_ioctl mdv;
	struct md_ioctl32 md32;
	u_long com = 0;
	int i, error;

	if (uap->com & IOC_IN) {
		if ((error = copyin(uap->data, &md32, sizeof(md32)))) {
			return (error);
		}
		CP(md32, mdv, md_version);
		CP(md32, mdv, md_unit);
		CP(md32, mdv, md_type);
		PTRIN_CP(md32, mdv, md_file);
		CP(md32, mdv, md_mediasize);
		CP(md32, mdv, md_sectorsize);
		CP(md32, mdv, md_options);
		CP(md32, mdv, md_base);
		CP(md32, mdv, md_fwheads);
		CP(md32, mdv, md_fwsectors);
	} else if (uap->com & IOC_OUT) {
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		bzero(&mdv, sizeof mdv);
	}

	switch (uap->com) {
	case MDIOCATTACH_32:
		com = MDIOCATTACH;
		break;
	case MDIOCDETACH_32:
		com = MDIOCDETACH;
		break;
	case MDIOCQUERY_32:
		com = MDIOCQUERY;
		break;
	case MDIOCLIST_32:
		com = MDIOCLIST;
		break;
	default:
		panic("%s: unknown MDIOC %#x", __func__, uap->com);
	}
	error = fo_ioctl(fp, com, (caddr_t)&mdv, td->td_ucred, td);
	if (error == 0 && (com & IOC_OUT)) {
		CP(mdv, md32, md_version);
		CP(mdv, md32, md_unit);
		CP(mdv, md32, md_type);
		PTROUT_CP(mdv, md32, md_file);
		CP(mdv, md32, md_mediasize);
		CP(mdv, md32, md_sectorsize);
		CP(mdv, md32, md_options);
		CP(mdv, md32, md_base);
		CP(mdv, md32, md_fwheads);
		CP(mdv, md32, md_fwsectors);
		if (com == MDIOCLIST) {
			/*
			 * Use MDNPAD, and not MDNPAD32.  Padding is
			 * allocated and used by the compat32 ABI.
			 */
			for (i = 0; i < MDNPAD; i++)
				CP(mdv, md32, md_pad[i]);
		}
		error = copyout(&md32, uap->data, sizeof(md32));
	}
	return error;
}
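
MDIOCATTACH_32 and friends exist because a BSD ioctl number encodes the size
of its argument: _IOWR() folds sizeof(arg) into the command word, so the
32-bit layout of struct md_ioctl yields a different command value than the
native one and the kernel can dispatch on it. A self-contained demonstration
on an LP64 host; the encoding macros are re-derived from <sys/ioccom.h> and
the toy structs are illustrative:

#include <stdio.h>

#define IOCPARM_SHIFT	13
#define IOCPARM_MASK	((1 << IOCPARM_SHIFT) - 1)
#define IOC_INOUT	0xc0000000U
#define _IOC(inout, g, n, len) \
	((unsigned)(inout) | (((len) & IOCPARM_MASK) << 16) | ((g) << 8) | (n))
#define _IOWR(g, n, t)	_IOC(IOC_INOUT, (g), (n), sizeof(t))
#define IOCPARM_LEN(x)	(((x) >> 16) & IOCPARM_MASK)

struct toy_md_ioctl   { char *md_file; };	/* 8 bytes on LP64 */
struct toy_md_ioctl32 { unsigned md_file; };	/* 4 bytes: pointer shrinks */

int
main(void)
{
	unsigned nat = _IOWR('m', 0, struct toy_md_ioctl);
	unsigned c32 = _IOWR('m', 0, struct toy_md_ioctl32);

	printf("native %#x (len %u) vs 32-bit %#x (len %u)\n",
	    nat, IOCPARM_LEN(nat), c32, IOCPARM_LEN(c32));
	return 0;
}
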
Example #28
/**
 * Worker for VbgdDarwinIOCtl that takes the slow IOCtl functions.
 *
 * @returns Darwin errno.
 *
 * @param pSession  The session.
 * @param iCmd      The IOCtl command.
 * @param pData     Pointer to the kernel copy of the data buffer.
 * @param pProcess  The calling process.
 */
static int VbgdDarwinIOCtlSlow(PVBOXGUESTSESSION pSession, u_long iCmd, caddr_t pData, struct proc *pProcess)
{
    LogFlow(("VbgdDarwinIOCtlSlow: pSession=%p iCmd=%p pData=%p pProcess=%p\n", pSession, iCmd, pData, pProcess));


    /*
     * Buffered or unbuffered?
     */
    void *pvReqData;
    user_addr_t pUser = 0;
    void *pvPageBuf = NULL;
    uint32_t cbReq = IOCPARM_LEN(iCmd);
    if ((IOC_DIRMASK & iCmd) == IOC_INOUT)
    {
        /*
         * Raw buffered request data, common code validates it.
         */
        pvReqData = pData;
    }
    else if ((IOC_DIRMASK & iCmd) == IOC_VOID && !cbReq)
    {
        /*
         * Get the header and figure out how much we're gonna have to read.
         */
        VBGLBIGREQ Hdr;
        pUser = (user_addr_t)*(void **)pData;
        int rc = copyin(pUser, &Hdr, sizeof(Hdr));
        if (RT_UNLIKELY(rc))
        {
            Log(("VbgdDarwinIOCtlSlow: copyin(%llx,Hdr,) -> %#x; iCmd=%#lx\n", (unsigned long long)pUser, rc, iCmd));
            return rc;
        }
        if (RT_UNLIKELY(Hdr.u32Magic != VBGLBIGREQ_MAGIC))
        {
            Log(("VbgdDarwinIOCtlSlow: bad magic u32Magic=%#x; iCmd=%#lx\n", Hdr.u32Magic, iCmd));
            return EINVAL;
        }
        cbReq = Hdr.cbData;
        if (RT_UNLIKELY(cbReq > _1M*16))
        {
            Log(("VbgdDarwinIOCtlSlow: %#x; iCmd=%#lx\n", Hdr.cbData, iCmd));
            return EINVAL;
        }
        pUser = Hdr.pvDataR3;

        /*
         * Allocate buffer and copy in the data.
         */
        pvReqData = RTMemTmpAlloc(cbReq);
        if (!pvReqData)
            pvPageBuf = pvReqData = IOMallocAligned(RT_ALIGN_Z(cbReq, PAGE_SIZE), 8);
        if (RT_UNLIKELY(!pvReqData))
        {
            Log(("VbgdDarwinIOCtlSlow: failed to allocate buffer of %d bytes; iCmd=%#lx\n", cbReq, iCmd));
            return ENOMEM;
        }
        rc = copyin(pUser, pvReqData, Hdr.cbData);
        if (RT_UNLIKELY(rc))
        {
            Log(("VbgdDarwinIOCtlSlow: copyin(%llx,%p,%#x) -> %#x; iCmd=%#lx\n",
                 (unsigned long long)pUser, pvReqData, Hdr.cbData, rc, iCmd));
            if (pvPageBuf)
                IOFreeAligned(pvPageBuf, RT_ALIGN_Z(cbReq, PAGE_SIZE));
            else
                RTMemTmpFree(pvReqData);
            return rc;
        }
    }
    else
    {
        Log(("VbgdDarwinIOCtlSlow: huh? cbReq=%#x iCmd=%#lx\n", cbReq, iCmd));
        return EINVAL;
    }

    /*
     * Process the IOCtl.
     */
    size_t cbReqRet = 0;
    int rc = VbgdCommonIoCtl(iCmd, &g_DevExt, pSession, pvReqData, cbReq, &cbReqRet);
    if (RT_SUCCESS(rc))
    {
        /*
         * If not buffered, copy back the buffer before returning.
         */
        if (pUser)
        {
            if (cbReqRet > cbReq)
            {
                Log(("VbgdDarwinIOCtlSlow: too much output! %#x > %#x; uCmd=%#lx!\n", cbReqRet, cbReq, iCmd));
                cbReqRet = cbReq;
            }
            rc = copyout(pvReqData, pUser, cbReqRet);
            if (RT_UNLIKELY(rc))
                Log(("VbgdDarwinIOCtlSlow: copyout(%p,%llx,%#x) -> %d; uCmd=%#lx!\n",
                     pvReqData, (unsigned long long)pUser, cbReqRet, rc, iCmd));

            /* cleanup */
            if (pvPageBuf)
                IOFreeAligned(pvPageBuf, RT_ALIGN_Z(cbReq, PAGE_SIZE));
            else
                RTMemTmpFree(pvReqData);
        }
        else
            rc = 0;
    }
    else
    {
        /*
         * The request failed, just clean up.
         */
        if (pUser)
        {
            if (pvPageBuf)
                IOFreeAligned(pvPageBuf, RT_ALIGN_Z(cbReq, PAGE_SIZE));
            else
                RTMemTmpFree(pvReqData);
        }

        Log(("VbgdDarwinIOCtlSlow: pid=%d iCmd=%lx pData=%p failed, rc=%d\n", proc_pid(pProcess), iCmd, (void *)pData, rc));
        rc = EINVAL;
    }

    Log2(("VbgdDarwinIOCtlSlow: returns %d\n", rc));
    return rc;
}
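
For IOC_VOID commands the driver above receives only a pointer-sized value,
so big payloads travel through a small header naming the real user buffer.
Here is a caller-side sketch of that shape; the field names follow the
snippet, but the layout and the magic value are assumptions, not the
canonical VBoxGuest definitions:

#include <stdint.h>

#define VBGLBIGREQ_MAGIC 0x42000000	/* placeholder; see VBoxGuest headers */

typedef struct VBGLBIGREQ {
	uint32_t	u32Magic;	/* must be VBGLBIGREQ_MAGIC */
	uint32_t	cbData;		/* size of the real payload */
	uint64_t	pvDataR3;	/* user address of the payload */
} VBGLBIGREQ;

static void
init_bigreq(VBGLBIGREQ *hdr, void *buf, uint32_t cb)
{
	hdr->u32Magic = VBGLBIGREQ_MAGIC;
	hdr->cbData = cb;		/* the kernel caps this at 16 MB */
	hdr->pvDataR3 = (uintptr_t)buf;
}

int
main(void)
{
	static char payload[256];
	VBGLBIGREQ hdr;

	init_bigreq(&hdr, payload, sizeof(payload));
	return hdr.u32Magic == VBGLBIGREQ_MAGIC ? 0 : 1;
}
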
Example #29
/*
 * sysi86
 */
int
svr4_sys_sysarch(struct proc *p, void *v, register_t *retval)
{
	struct svr4_sys_sysarch_args *uap = v;
	int error;
#ifdef USER_LDT
	caddr_t sg = stackgap_init(p->p_emul);
#endif
	*retval = 0;	/* XXX: What to do */

	switch (SCARG(uap, op)) {
	case SVR4_SYSARCH_FPHW:
		return 0;

	case SVR4_SYSARCH_DSCR:
#ifdef USER_LDT
		if (user_ldt_enable == 0)
			return (ENOSYS);
		else {
			struct i386_set_ldt_args sa, *sap;
			struct sys_sysarch_args ua;

			struct svr4_ssd ssd;
			union descriptor bsd;

			if ((error = copyin(SCARG(uap, a1), &ssd,
					    sizeof(ssd))) != 0) {
				printf("Cannot copy arg1\n");
				return error;
			}

			printf("s=%x, b=%x, l=%x, a1=%x a2=%x\n",
			       ssd.selector, ssd.base, ssd.limit,
			       ssd.access1, ssd.access2);

			/* We can only set ldt's for now. */
			if (!ISLDT(ssd.selector)) {
				printf("Not an ldt\n");
				return EPERM;
			}

			/* Oh, well we don't cleanup either */
			if (ssd.access1 == 0)
				return 0;

			bsd.sd.sd_lobase = ssd.base & 0xffffff;
			bsd.sd.sd_hibase = (ssd.base >> 24) & 0xff;

			bsd.sd.sd_lolimit = ssd.limit & 0xffff;
			bsd.sd.sd_hilimit = (ssd.limit >> 16) & 0xf;

			bsd.sd.sd_type = ssd.access1 & 0x1f;
			bsd.sd.sd_dpl =  (ssd.access1 >> 5) & 0x3;
			bsd.sd.sd_p = (ssd.access1 >> 7) & 0x1;

			bsd.sd.sd_xx = ssd.access2 & 0x3;
			bsd.sd.sd_def32 = (ssd.access2 >> 2) & 0x1;
			bsd.sd.sd_gran = (ssd.access2 >> 3)& 0x1;

			sa.start = IDXSEL(ssd.selector);
			sa.desc = stackgap_alloc(&sg, sizeof(union descriptor));
			sa.num = 1;
			sap = stackgap_alloc(&sg,
					     sizeof(struct i386_set_ldt_args));

			if ((error = copyout(&sa, sap, sizeof(sa))) != 0) {
				printf("Cannot copyout args\n");
				return error;
			}

			SCARG(&ua, op) = I386_SET_LDT;
			SCARG(&ua, parms) = (char *) sap;

			if ((error = copyout(&bsd, sa.desc, sizeof(bsd))) != 0) {
				printf("Cannot copyout desc\n");
				return error;
			}

			return sys_sysarch(p, &ua, retval);
		}
#endif
	case SVR4_SYSARCH_GOSF:
		{
				/* just as SCO Openserver 5.0 says */
			char features[] = {1,1,1,1,1,1,1,1,2,1,1,1};

			if ((error = copyout(features, SCARG(uap, a1),
					     sizeof(features))) != 0) {
				printf("Cannot copyout vector\n");
				return error;
			}

			return 0;
		}

	default:
		printf("svr4_sysarch(%d), a1 %p\n", SCARG(uap, op),
		       SCARG(uap, a1));
		return 0;
	}
}
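
The base/limit bit surgery above is easy to get wrong, so here is a quick
round-trip check of the same packing with plain integers instead of the
union descriptor bitfields:

#include <assert.h>
#include <stdio.h>

int
main(void)
{
	unsigned base = 0x12345678, limit = 0xfffff;	/* 20-bit limit */
	unsigned lobase = base & 0xffffff;
	unsigned hibase = (base >> 24) & 0xff;
	unsigned lolimit = limit & 0xffff;
	unsigned hilimit = (limit >> 16) & 0xf;

	assert(((hibase << 24) | lobase) == base);
	assert(((hilimit << 16) | lolimit) == limit);
	printf("base %#x limit %#x survive the round trip\n", base, limit);
	return 0;
}
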
Example #30
static int
x86_64_set_mtrr32(struct lwp *l, void *args, register_t *retval)
{
	struct x86_64_set_mtrr_args32 args32;
	struct mtrr32 *m32p, m32;
	struct mtrr *m64p, *mp;
	int error, i;
	int32_t n;
	size_t size;

	m64p = NULL;

	if (mtrr_funcs == NULL)
		return ENOSYS;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_SET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	error = copyin(args, &args32, sizeof args32);
	if (error != 0)
		return error;

	error = copyin((void *)(uintptr_t)args32.n, &n, sizeof n);
	if (error != 0)
		return error;

	if (n <= 0 || n > (MTRR_I686_NFIXED_SOFT + MTRR_I686_NVAR_MAX)) {
		error = EINVAL;
		goto fail;
	}

	size = n * sizeof(struct mtrr);
	m64p = kmem_zalloc(size, KM_SLEEP);
	if (m64p == NULL) {
		error = ENOMEM;
		goto fail;
	}
	m32p = (struct mtrr32 *)(uintptr_t)args32.mtrrp;
	mp = m64p;
	for (i = 0; i < n; i++) {
		error = copyin(m32p, &m32, sizeof m32);
		if (error != 0)
			goto fail;
		mp->base = m32.base;
		mp->len = m32.len;
		mp->type = m32.type;
		mp->flags = m32.flags;
		mp->owner = m32.owner;
		m32p++;
		mp++;
	}

	error = mtrr_set(m64p, &n, l->l_proc, 0);
fail:
	if (m64p != NULL)
		kmem_free(m64p, size);
	if (error != 0)
		n = 0;
	copyout(&n, (void *)(uintptr_t)args32.n, sizeof n);
	return error;
}
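
For completeness, the 32-bit caller side that the thunk above consumes looks
roughly like this: an array of mtrr32 records plus an argument block holding
32-bit user pointers. Toy definitions with assumed field types; the real
ones live in NetBSD's x86 and netbsd32 compat headers:

#include <stdint.h>
#include <string.h>

struct toy_mtrr32 {
	uint64_t base;		/* physical base address */
	uint64_t len;		/* length of the range */
	uint8_t	 type;		/* caching type */
	int	 flags;
	int32_t	 owner;
};

struct toy_set_mtrr_args32 {
	uint32_t mtrrp;		/* 32-bit user pointer to the array */
	uint32_t n;		/* 32-bit user pointer to the count */
};

/* In a real 32-bit process the pointer casts below are lossless. */
static void
fill_args(struct toy_set_mtrr_args32 *a, struct toy_mtrr32 *m, int32_t *n)
{
	memset(m, 0, sizeof(*m));
	m->base = 0xd0000000ULL;	/* illustrative frame-buffer base */
	m->len = 0x01000000ULL;		/* 16 MB */
	m->type = 1;			/* write-combining on x86 MTRRs */
	*n = 1;
	a->mtrrp = (uint32_t)(uintptr_t)m;
	a->n = (uint32_t)(uintptr_t)n;
}

int
main(void)
{
	static struct toy_mtrr32 m;
	static int32_t n;
	struct toy_set_mtrr_args32 a;

	fill_args(&a, &m, &n);
	return 0;
}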