Example 1
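/*
 * Synchronize the instruction cache with the data cache over the range
 * [addr, addr + len): write back the D-cache and invalidate the I-cache
 * one page at a time, skipping pages that are valid but not currently
 * mapped, then invalidate the branch predictor.  Returns 1 on success.
 */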
static int
sync_icache(uintptr_t addr, size_t len)
{
	size_t size;
	vm_offset_t rv;

	/*
	 * Align the starting address to an even number because the value "1"
	 * is used as the return value for success.
	 */
	len += addr & 1;
	addr &= ~1;

	/* Break whole range to pages. */
	do {
		size = PAGE_SIZE - (addr & PAGE_MASK);
		size = min(size, len);
		rv = dcache_wb_pou_checked(addr, size);
		if (rv == 1) /* see dcache_wb_pou_checked() */
			rv = icache_inv_pou_checked(addr, size);
		if (rv != 1) {
			if (!useracc((void *)addr, size, VM_PROT_READ)) {
				/* Invalid access */
				return (rv);
			}
			/* Valid but unmapped page - skip it. */
		}
		len -= size;
		addr += size;
	} while (len > 0);

	/* Invalidate branch predictor buffer. */
	bpb_inv_all();
	return (1);
}
Example 2
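/*
 * Dispatch an ioctl request to the vnode backing the given file descriptor.
 * For "get" requests the user buffer is validated with useracc() and the
 * result is copied out afterwards; for "set" requests the argument data is
 * copied in before the call.
 */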
static int sys_ioctl(__user void * user_args)
{
    struct _ioctl_get_args args;
    file_t * file;
    void * ioargs = NULL;
    int err, retval = -1;

    err = copyin(user_args, &args, sizeof(args));
    if (err) {
        set_errno(EFAULT);
        return -1;
    }

    file = fs_fildes_ref(curproc->files, args.fd, 1);
    if (!file) {
        set_errno(EBADF);
        return -1;
    }

    if (args.arg) {
        ioargs = kmalloc(args.arg_len);
        if (!ioargs) {
            set_errno(ENOMEM);
            goto out;
        }

        if (args.request & 1) { /* Get request */
            if (!useracc((__user void *)args.arg, args.arg_len,
                         VM_PROT_WRITE)) {
                set_errno(EFAULT);
                goto out;
            }
            /* Get request doesn't need copyin */
        } else { /* Set request */
            /* Set operation needs copyin */
            err = copyin((__user void *)args.arg, ioargs, args.arg_len);
            if (err) {
                set_errno(EFAULT);
                goto out;
            }
        }
    }

    /* Actual ioctl call */
    retval = file->vnode->vnode_ops->ioctl(file, args.request,
                                           ioargs, args.arg_len);
    if (retval < 0)
        set_errno(-retval);

    /* Copyout if request type was get. */
    if (args.request & 1)
        copyout(ioargs, (__user void *)args.arg, args.arg_len);

out:
    kfree(ioargs);
    fs_fildes_ref(curproc->files, args.fd, -1);
    return retval;
}
Example 3
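/*
 * Reposition the seek offset of an open file according to SEEK_SET,
 * SEEK_CUR or SEEK_END and copy the resulting offset back to user space.
 */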
static int sys_lseek(void * user_args)
{
    struct _fs_lseek_args args;
    file_t * file;
    int retval = 0;

    if (!useracc(user_args, sizeof(args), VM_PROT_WRITE)) {
        /* No permission to read/write */
        set_errno(EFAULT);
        return -1;
    }
    copyin(user_args, &args, sizeof(args));

    /* Increment refcount for the file pointed by fd */
    file = fs_fildes_ref(curproc->files, args.fd, 1);
    if (!file) {
        set_errno(EBADF);
        return -1;
    }

    if (args.whence == SEEK_SET)
        file->seek_pos = args.offset;
    else if (args.whence == SEEK_CUR)
        file->seek_pos += args.offset;
    else if (args.whence == SEEK_END) {
        struct stat stat_buf;
        int err;

        err = file->vnode->vnode_ops->stat(file->vnode, &stat_buf);
        if (!err) {
            const off_t new_offset = stat_buf.st_size + args.offset;

            if (new_offset >= stat_buf.st_size)
                file->seek_pos = new_offset;
            else {
                set_errno(EOVERFLOW);
                retval = -1;
            }
        } else {
            set_errno(EBADF);
            retval = -1;
        }
    } else {
        set_errno(EINVAL);
        retval = -1;
    }

    /* Resulting offset is stored to args */
    args.offset = file->seek_pos;

    /* Decrement refcount for the file pointed by fd */
    fs_fildes_ref(curproc->files, args.fd, -1);

    copyout(&args, user_args, sizeof(args));
    return retval;
}
Example 4
/*
 * Implement the software portion of an FBIOPUTCMAP-like ioctl.
 */
int
bt_putcmap(struct fbcmap *p, union bt_cmap *cm, int cmsize)
{
    u_int i;
    int start;
    int count;
    u_char *cp;

    start = p->index;
    count = p->count;
    if (start >= cmsize || start + count > cmsize)
	return (EINVAL);
    if (!useracc(p->red, count, B_READ) ||
	!useracc(p->green, count, B_READ) ||
	!useracc(p->blue, count, B_READ))
	return (EFAULT);
    for (cp = &cm->cm_map[start][0], i = 0; i < count; cp += 3, i++) {
	cp[0] = p->red[i];
	cp[1] = p->green[i];
	cp[2] = p->blue[i];
    }
    return (0);
}
Example 5
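/*
 * Replace the file mode creation mask of the current process and return
 * the previous mask to user space.
 */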
static int sys_umask(void * user_args)
{
    struct _fs_umask_args args;

    if (!useracc(user_args, sizeof(args), VM_PROT_WRITE)) {
        set_errno(EFAULT);
        return -1;
    }

    copyin(user_args, &args, sizeof(args));
    args.oldumask = curproc->files->umask;
    curproc->files->umask = args.newumask;
    copyout(&args, user_args, sizeof(args));

    return 0;
}
Example 6
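/*
 * Create a pipe for the current process and copy the argument struct,
 * which carries the new descriptor pair, back to user space.
 */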
static intptr_t sys_pipe(__user void * user_args)
{
    struct _ipc_pipe_args args;
    int err;

    if (!useracc(user_args, sizeof(args), VM_PROT_WRITE)) {
        set_errno(EFAULT);
        return -1;
    }
    copyin(user_args, &args, sizeof(args));

    err = fs_pipe_curproc_creat(curproc->files, args.fildes, args.len);
    if (err) {
        set_errno(-err);
        return -1;
    }

    copyout(&args, user_args, sizeof(args));

    return 0;
}
Example 7
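/*
 * Read from a file descriptor into a kernel bounce buffer and copy the
 * data out to the user buffer, which is validated with useracc() first.
 */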
static int sys_read(void * user_args)
{
    struct _fs_readwrite_args args;
    char * buf = 0;
    int err, retval;

    err = copyin(user_args, &args, sizeof(args));
    if (err) {
        set_errno(EFAULT);
        retval = -1;
        goto out;
    }

    /* Buffer */
    if (!useracc(args.buf, args.nbytes, VM_PROT_WRITE)) {
        /* No permission to read/write */
        set_errno(EFAULT);
        retval = -1;
        goto out;
    }
    buf = kmalloc(args.nbytes);
    if (!buf) {
        set_errno(ENOMEM);
        retval = -1;
        goto out;
    }

    retval = fs_readwrite_cproc(args.fildes, buf, args.nbytes, O_RDONLY);
    if (retval < 0) {
        set_errno(-retval);
        retval = -1;
    }

    copyout(buf, args.buf, args.nbytes);
out:
    kfree(buf);
    return retval;
}
Example 8
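/*
 * Raw ("physical") I/O directly between a device and the caller's buffers.
 * Each iovec is processed in chunks bounded by the driver's iosize limit
 * and the pbuf's KVA size; user buffers are validated with useracc() and
 * mapped with vmapbuf() before the transfer is started.
 */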
int
physio(dev_t dev, struct uio *uio, int ioflag)
{
	int i;
	int error;
	int spl;
	caddr_t sa;
	off_t blockno;
	u_int iolen;
	struct buf *bp;

	/* Keep the process UPAGES from being swapped. XXX: why ? */
	PHOLD(curproc);

	bp = getpbuf(NULL);
	sa = bp->b_data;
	error = bp->b_error = 0;

	/* XXX: sanity check */
	if(dev->si_iosize_max < PAGE_SIZE) {
		printf("WARNING: %s si_iosize_max=%d, using DFLTPHYS.\n",
		    devtoname(dev), dev->si_iosize_max);
		dev->si_iosize_max = DFLTPHYS;
	}

	for (i = 0; i < uio->uio_iovcnt; i++) {
		while (uio->uio_iov[i].iov_len) {
			if (uio->uio_rw == UIO_READ)
				bp->b_flags = B_PHYS | B_CALL | B_READ;
			else 
				bp->b_flags = B_PHYS | B_CALL | B_WRITE;
			bp->b_dev = dev;
			bp->b_iodone = physwakeup;
			bp->b_data = uio->uio_iov[i].iov_base;
			bp->b_bcount = uio->uio_iov[i].iov_len;
			bp->b_offset = uio->uio_offset;
			bp->b_saveaddr = sa;

			/* Don't exceed the driver's iosize limit */
			if (bp->b_bcount > dev->si_iosize_max)
				bp->b_bcount = dev->si_iosize_max;

			/* 
			 * Make sure the pbuf can map the request
			 * XXX: The pbuf has kvasize = MAXPHYS so a request
			 * XXX: larger than MAXPHYS - PAGE_SIZE must be
			 * XXX: page aligned or it will be fragmented.
			 */
			iolen = ((vm_offset_t) bp->b_data) & PAGE_MASK;
			if ((bp->b_bcount + iolen) > bp->b_kvasize) {
				bp->b_bcount = bp->b_kvasize;
				if (iolen != 0)
					bp->b_bcount -= PAGE_SIZE;
			}
			bp->b_bufsize = bp->b_bcount;

			blockno = bp->b_offset >> DEV_BSHIFT;
			if ((daddr_t)blockno != blockno) {
				error = EINVAL; /* blockno overflow */
				goto doerror;
			}
			bp->b_blkno = blockno;

			if (uio->uio_segflg == UIO_USERSPACE) {
				if (!useracc(bp->b_data, bp->b_bufsize,
				    bp->b_flags & B_READ ?
				    VM_PROT_WRITE : VM_PROT_READ)) {
					error = EFAULT;
					goto doerror;
				}
				vmapbuf(bp);
			}

			BUF_STRATEGY(bp, 0);
			spl = splbio();
			while ((bp->b_flags & B_DONE) == 0)
				tsleep((caddr_t)bp, PRIBIO, "physstr", 0);
			splx(spl);

			if (uio->uio_segflg == UIO_USERSPACE)
				vunmapbuf(bp);
			iolen = bp->b_bcount - bp->b_resid;
			if (iolen == 0 && !(bp->b_flags & B_ERROR))
				goto doerror;	/* EOF */
			uio->uio_iov[i].iov_len -= iolen;
			uio->uio_iov[i].iov_base += iolen;
			uio->uio_resid -= iolen;
			uio->uio_offset += iolen;
			if( bp->b_flags & B_ERROR) {
				error = bp->b_error;
				goto doerror;
			}
		}
	}
doerror:
	relpbuf(bp, NULL);
	PRELE(curproc);
	return (error);
}
Example 9
int
physio( void (*f_strategy)(buf_t), 
	buf_t bp,
	dev_t dev,
	int flags,
	u_int (*f_minphys)(buf_t),
	struct uio *uio,
	int blocksize)
{
	struct proc *p = current_proc();
	int error, i, buf_allocated, todo, iosize;
	int orig_bflags = 0;
	int64_t done;

	error = 0;
	flags &= B_READ | B_WRITE;
	buf_allocated = 0;

	/*
	 * [check user read/write access to the data buffer]
	 *
	 * Check each iov one by one.  Note that we know if we're reading or
	 * writing, so we ignore the uio's rw parameter.  Also note that if
	 * we're doing a read, that's a *write* to user-space.
	 */
	for (i = 0; i < uio->uio_iovcnt; i++) {
		if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) {
			user_addr_t base;
			user_size_t len;
			
			if (uio_getiov(uio, i, &base, &len) ||
			    !useracc(base, len,
			    (flags == B_READ) ? B_WRITE : B_READ))
				return (EFAULT);
		}
	}
	/*
	 * Make sure we have a buffer, creating one if necessary.
	 */
	if (bp == NULL) {
		bp = buf_alloc((vnode_t)0);
		buf_allocated = 1;
	} else
	        orig_bflags = buf_flags(bp);
	/*
	 * at this point we should have a buffer
	 * that is marked BL_BUSY... we either 
	 * acquired it via buf_alloc, or it was
	 * passed into us... if it was passed
	 * in, it needs to already be owned by
	 * the caller (i.e. BL_BUSY is set)
	 */
	assert(bp->b_lflags & BL_BUSY);

	/*
	 * [set up the fixed part of the buffer for a transfer]
	 */
	bp->b_dev = dev;
	bp->b_proc = p;

	/*
	 * [mark the buffer busy for physical I/O]
	 * (i.e. set B_PHYS (because it's an I/O to user
	 * memory, and B_RAW, because B_RAW is to be
	 * "Set by physio for raw transfers.", in addition
	 * to the read/write flag.)
	 */
        buf_setflags(bp, B_PHYS | B_RAW);

	/*
	 * [while there is data to transfer and no I/O error]
	 * Note that I/O errors are handled with a 'goto' at the bottom
	 * of the 'while' loop.
	 */
	while (uio_resid(uio) > 0) {
			
			if ( (iosize = uio_curriovlen(uio)) > MAXPHYSIO_WIRED)
			        iosize = MAXPHYSIO_WIRED;
			/*
			 * make sure we're set to issue a fresh I/O
			 * in the right direction
			 */
			buf_reset(bp, flags);

			/* [set up the buffer for a maximum-sized transfer] */
 			buf_setblkno(bp, uio_offset(uio) / blocksize);
			buf_setcount(bp, iosize);
			buf_setdataptr(bp, (uintptr_t)CAST_DOWN(caddr_t, uio_curriovbase(uio)));
			
			/*
			 * [call f_minphys to bound the transfer size]
			 * and remember the amount of data to transfer,
			 * for later comparison.
			 */
			(*f_minphys)(bp);
			todo = buf_count(bp);

			/*
			 * [lock the part of the user address space involved
			 *    in the transfer]
			 */

			if(UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) {
				error = vslock(CAST_USER_ADDR_T(buf_dataptr(bp)),
					       (user_size_t)todo);
				if (error)
					goto done;
			}
			
			/* [call f_strategy to start the transfer] */
			(*f_strategy)(bp);


			/* [wait for the transfer to complete] */
			error = (int)buf_biowait(bp);

			/*
			 * [unlock the part of the address space previously
			 *    locked]
			 */
			if(UIO_SEG_IS_USER_SPACE(uio->uio_segflg))
				vsunlock(CAST_USER_ADDR_T(buf_dataptr(bp)),
					 (user_size_t)todo,
					 (flags & B_READ));

			/*
			 * [deduct the transfer size from the total number
			 *    of data to transfer]
			 */
			done = buf_count(bp) - buf_resid(bp);
			uio_update(uio, done);

			/*
			 * Now, check for an error.
			 * Also, handle weird end-of-disk semantics.
			 */
			if (error || done < todo)
				goto done;
	}

done:
	if (buf_allocated)
	        buf_free(bp);
	else
		buf_setflags(bp, orig_bflags);

	return (error);
}
Example 10
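/*
 * Stat a file either relative to an open descriptor (AT_FDARG) or by path,
 * after validating the user-supplied stat buffer and path string, and copy
 * the result out to user space.
 */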
static int sys_filestat(void * user_args)
{
    struct _fs_stat_args * args = 0;
    vnode_t * vnode;
    struct stat stat_buf;
    int err, retval = -1;

    err = copyinstruct(user_args, (void **)(&args),
            sizeof(struct _fs_stat_args),
            GET_STRUCT_OFFSETS(struct _fs_stat_args,
                path, path_len));
    if (err) {
        set_errno(-err);
        goto out;
    }

    if (!useracc(args->buf, sizeof(struct stat), VM_PROT_WRITE)) {
        set_errno(EFAULT);
        goto out;
    }

    /* Validate path string */
    if (!strvalid(args->path, args->path_len)) {
        set_errno(ENAMETOOLONG);
        goto out;
    }

    if (args->flags & AT_FDARG) { /* by fildes */
        file_t * fildes;
        /* Note: AT_SYMLINK_NOFOLLOW == O_NOFOLLOW */
        const int oflags = (args->flags & AT_SYMLINK_NOFOLLOW);

        fildes = fs_fildes_ref(curproc->files, args->fd, 1);
        if (!fildes) {
            set_errno(EBADF);
            goto out;
        }

        err = fildes->vnode->vnode_ops->stat(fildes->vnode, &stat_buf);
        if (!err) {
            /* Check whether fildes was opened with O_SEARCH or, failing
             * that, whether the file permissions grant us search access. */
            if (fildes->oflags & O_SEARCH || chkperm_cproc(&stat_buf, O_EXEC))
                err = lookup_vnode(&vnode, fildes->vnode, args->path, oflags);
            else /* No permission to search */
                err = -EACCES;
        }

        fs_fildes_ref(curproc->files, args->fd, -1);
        if (err) { /* Handle previous error */
            set_errno(-err);
            goto out;
        }
    } else { /* search by path */
        if (fs_namei_proc(&vnode, -1, (char *)args->path, AT_FDCWD)) {
            set_errno(ENOENT);
            goto out;
        }
    }

    if ((args->flags & AT_FDARG) &&
        (args->flags & O_EXEC)) { /* Get stat of the given fildes, which we
                                   * already have in stat_buf. */
        goto ready;
    }

    err = vnode->vnode_ops->stat(vnode, &stat_buf);
    if (err) {
        set_errno(-err);
        goto out;
    }

ready:
    copyout(&stat_buf, args->buf, sizeof(struct stat));
    retval = 0;
out:
    freecpystruct(args);
    return retval;
}
Example 11
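/*
 * Read directory entries for the given descriptor into a kernel buffer and
 * copy as many complete entries as fit into the user buffer.  Returns the
 * number of entries copied, or -1 on error.
 */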
static int sys_getdents(void * user_args)
{
    struct _ds_getdents_args args;
    struct dirent * dents;
    size_t bytes_left;
    file_t * fildes;
    struct dirent d; /* Temp storage */
    int err, count = 0;

    err = copyin(user_args, &args, sizeof(args));
    if (err) {
        set_errno(EFAULT);
        return -1;
    }

    /* We must have write access to the given buffer. */
    if (!useracc(args.buf, args.nbytes, VM_PROT_WRITE)) {
        set_errno(EFAULT);
        return -1;
    }

    fildes = fs_fildes_ref(curproc->files, args.fd, 1);
    if (!fildes) {
        set_errno(EBADF);
        return -1;
    }

    if (!S_ISDIR(fildes->vnode->vn_mode)) {
        count = -1;
        set_errno(ENOTDIR);
        goto out;
    }

    dents = kmalloc(args.nbytes);
    if (!dents) {
        count = -1;
        set_errno(ENOMEM);
        goto out;
    }

    /*
     * This is a bit tricky: if we are here for the first time, seek_pos
     * should hold the magic value 0x00000000FFFFFFFF, but we can't do a
     * thing if fildes was initialized incorrectly, so let's cross our
     * fingers.
     */
    d.d_off = fildes->seek_pos;
    bytes_left = args.nbytes;
    while (bytes_left >= sizeof(struct dirent)) {
        vnode_t * vnode = fildes->vnode;
        if (vnode->vnode_ops->readdir(vnode, &d))
            break;
        dents[count++] = d;

        bytes_left -= (sizeof(struct dirent));
    }
    fildes->seek_pos = d.d_off;

    copyout(dents, args.buf, count * sizeof(struct dirent));
    kfree(dents);

out:
    fs_fildes_ref(curproc->files, args.fd, -1);
    return count;
}
Example 12
/*
 * Code that the child process
 * executes to implement the command
 * of the parent process in tracing.
 */
procxmt()
{
	register int i;
	register *p;
	register struct text *xp;

	if (ipc.ip_lock != u.u_procp->p_pid)
		return (0);
	u.u_procp->p_slptime = 0;
	i = ipc.ip_req;
	ipc.ip_req = 0;
	switch (i) {

	case PT_READ_I:			/* read the child's text space */
		if (!useracc((caddr_t)ipc.ip_addr, 4, B_READ))
			goto error;
		ipc.ip_data = fuiword((caddr_t)ipc.ip_addr);
		break;

	case PT_READ_D:			/* read the child's data space */
		if (!useracc((caddr_t)ipc.ip_addr, 4, B_READ))
			goto error;
		ipc.ip_data = fuword((caddr_t)ipc.ip_addr);
		break;

	case PT_READ_U:			/* read the child's u. */
		i = (int)ipc.ip_addr;
		if (i<0 || i >= ctob(UPAGES))
			goto error;
		ipc.ip_data = *(int *)PHYSOFF(&u, i);
		break;

	case PT_WRITE_I:		/* write the child's text space */
		/*
		 * If text, must assure exclusive use
		 */
		if (xp = u.u_procp->p_textp) {
			if (xp->x_count!=1 || xp->x_iptr->i_mode&ISVTX)
				goto error;
			xp->x_flag |= XTRC;
		}
		i = -1;
		if ((i = suiword((caddr_t)ipc.ip_addr, ipc.ip_data)) < 0) {
			if (chgprot((caddr_t)ipc.ip_addr, RW) &&
			    chgprot((caddr_t)ipc.ip_addr+(sizeof(int)-1), RW))
				i = suiword((caddr_t)ipc.ip_addr, ipc.ip_data);
			(void) chgprot((caddr_t)ipc.ip_addr, RO);
			(void) chgprot((caddr_t)ipc.ip_addr+(sizeof(int)-1), RO);
		}
		if (i < 0)
			goto error;
		if (xp)
			xp->x_flag |= XWRIT;
		break;

	case PT_WRITE_D:		/* write the child's data space */
		if (suword((caddr_t)ipc.ip_addr, 0) < 0)
			goto error;
		(void) suword((caddr_t)ipc.ip_addr, ipc.ip_data);
		break;

	case PT_WRITE_U:		/* write the child's u. */
		i = (int)ipc.ip_addr;
		p = (int *)PHYSOFF(&u, i);
		for (i=0; i<NIPCREG; i++)
			if (p == &u.u_ar0[ipcreg[i]])
				goto ok;
		if (p == &u.u_ar0[PS]) {
			ipc.ip_data |= PSL_USERSET;
			ipc.ip_data &=  ~PSL_USERCLR;
			goto ok;
		}
		goto error;

	ok:
		*p = ipc.ip_data;
		break;

	case PT_STEP:			/* single step the child */
	case PT_CONTINUE:		/* continue the child */
		if ((int)ipc.ip_addr != 1)
			u.u_ar0[PC] = (int)ipc.ip_addr;
		if ((unsigned)ipc.ip_data > NSIG)
			goto error;
		u.u_procp->p_cursig = ipc.ip_data;	/* see issig */
		if (i == PT_STEP) 
			u.u_ar0[PS] |= PSL_T;
		wakeup((caddr_t)&ipc);
		return (1);

	case PT_KILL:			/* kill the child process */
		wakeup((caddr_t)&ipc);
		exit(u.u_procp->p_cursig);

	default:
	error:
		ipc.ip_req = -1;
	}
	wakeup((caddr_t)&ipc);
	return (0);
}