Example #1
0
/*
 * sys_read: read up to buflen bytes from file descriptor fd into the
 * user buffer buf.  Returns the number of bytes read, or -1 with errno
 * set (EFAULT bad buffer, EBADF bad descriptor, EIO device error).
 *
 * Fixes vs. original: no longer dereferences an uninitialized fd entry
 * for stdin, no longer leaks an address space via a stray as_create(),
 * checks vfs_open(), closes the console vnode, and advances the file
 * offset after a successful read.
 */
int sys_read(int fd, void *buf, size_t buflen) {
    struct fd *tmp = NULL;

    /* Reject NULL or unmapped user buffers. */
    if (buf == NULL || !valid_address_check(curproc->p_addrspace, (vaddr_t)buf)) {
        errno = EFAULT;
        return -1;
    }
    /* fd must be inside the descriptor table. */
    if (fd < 0 || fd >= MAX_fd_table) {
        errno = EBADF;
        return -1;
    }
    /* stdout/stderr cannot be read. */
    if (fd == STDOUT_FILENO || fd == STDERR_FILENO) {
        errno = EIO;
        return -1;
    }
    /* Regular files: entry must exist and be readable. */
    if (fd >= 3) {
        tmp = curproc->fd_table[fd];
        if (tmp == NULL || (tmp->file_flag & O_WRONLY)) {
            errno = EBADF;
            return -1;
        }
    }

    struct uio u;
    struct iovec iov;

    iov.iov_ubase = buf;
    iov.iov_len = buflen;

    u.uio_iov = &iov;
    u.uio_iovcnt = 1;
    /* The console (stdin) has no stored offset; start at 0. */
    u.uio_offset = (tmp != NULL) ? tmp->offset : 0;
    u.uio_resid = buflen;
    u.uio_segflg = UIO_USERSPACE;
    u.uio_rw = UIO_READ;
    u.uio_space = curproc_getas();

    if (fd == STDIN_FILENO) {
        struct vnode *vn;
        char *console = kstrdup("con:"); /* console device path */
        if (console == NULL) {
            errno = EIO;
            return -1;
        }
        int result = vfs_open(console, O_RDONLY, 0, &vn);
        kfree(console);
        if (result) {               /* original ignored this failure */
            errno = EIO;
            return -1;
        }
        result = VOP_READ(vn, &u);
        vfs_close(vn);              /* original leaked the vnode */
        if (result) {
            errno = EIO;            /* hardware I/O error */
            return -1;
        }
    } else {
        int retval = VOP_READ(tmp->file, &u);
        if (retval) {
            errno = EIO;
            return -1;
        }
        /* Advance the descriptor's offset past the bytes read. */
        tmp->offset = u.uio_offset;
    }
    /* Bytes transferred = requested minus residue. */
    return buflen - u.uio_resid;
}
Example #2
0
/*
 * Package up an I/O request on a vnode into a uio and do it.
 * On return, *aresid (if non-NULL) holds the bytes NOT transferred;
 * otherwise a short transfer with no error is reported as EIO.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len, off_t offset,
    enum uio_seg segflg, int ioflg, struct ucred *cred, size_t *aresid,
    struct proc *p)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	/* Lock the vnode unless the caller says it is already locked. */
	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	/* Single-segment uio describing base/len at the given offset. */
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_procp = p;
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	/* Report the residue, or convert a silent short transfer to EIO. */
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0, p);
	return (error);
}
Example #3
0
/*
 * ToMem: pull one page-sized chunk from the swap disk into the kernel
 * virtual page at vaddr, then free its slot in the disk bitmap.
 * Caller must hold vmlock; both addresses must be page-aligned.
 */
static int ToMem(u_int32_t flatloc, vaddr_t vaddr)
{
    assert(lock_do_i_hold(&vmlock));
    assert((vaddr & 0xfffff000) == vaddr);
    assert((flatloc & 0xfffff000) == flatloc);

    /* The page's slot in the swap bitmap must be in use. */
    u_int32_t slot = flatloc / PAGE_SIZE;
    assert(bitmap_isset(diskmap, slot));

    /* Split the flat location into a disk index and an offset on it. */
    u_int32_t which_disk = flatloc / DISKSPACE;
    u_int32_t disk_offset = flatloc - which_disk * DISKSPACE;

    /* Kernel-space read of one page from the swap disk. */
    struct uio ku;
    mk_kuio(&ku, (void *)vaddr, PAGE_SIZE, disk_offset, UIO_READ);

    int rc = VOP_READ(disk[which_disk], &ku);
    if (rc) {
        panic(strerror(rc));
    }

    bitmap_unmark(diskmap, slot);
    return rc;
}
Example #4
0
/*
 * File pointers can no longer get ripped up by revoke so
 * we don't need to lock access to the vp.
 *
 * f_offset updates are not guaranteed against multiple readers
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int error, ioflag;

	/* Reads must be issued from the thread that owns the uio. */
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	/* Translate per-call and per-file non-blocking flags into ioflag. */
	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	/* Unless the caller supplied its own offset (O_FOFFSET) or the
	 * vnode cannot seek, start at the file pointer's stored offset. */
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_SHARED | LK_RETRY);
	/* Let the sequential-access heuristic request read-ahead. */
	ioflag |= sequential_heuristic(uio, fp);

	error = VOP_READ(vp, uio, ioflag, cred);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	/* Store the advanced offset back unless the caller manages it. */
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}
Example #5
0
int
vdsp_is_iso(struct vdsp_softc *sc)
{
	struct proc *p = curproc;
	struct iovec iov;
	struct uio uio;
	struct iso_volume_descriptor *vdp;
	int err;

	if (sc->sc_vp == NULL)
		return (0);

	vdp = malloc(sizeof(*vdp), M_DEVBUF, M_WAITOK);

	iov.iov_base = vdp;
	iov.iov_len = sizeof(*vdp);
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 16 * ISO_DEFAULT_BLOCK_SIZE;
	uio.uio_resid = sizeof(*vdp);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_procp = p;

	vn_lock(sc->sc_vp, LK_EXCLUSIVE | LK_RETRY, p);
	err = VOP_READ(sc->sc_vp, &uio, 0, p->p_ucred);
	VOP_UNLOCK(sc->sc_vp, 0, p);

	if (err == 0 && memcmp(vdp->id, ISO_STANDARD_ID, sizeof(vdp->id)))
		err = ENOENT;

	free(vdp, M_DEVBUF, 0);
	return (err == 0);
}
Example #6
0
/*
 * Return target name of a symbolic link.
 */
int
ufs_readlink(void *v)
{
	struct vop_readlink_args /* {
		struct vnode	*a_vp;
		struct uio	*a_uio;
		kauth_cred_t	a_cred;
	} */ *ap = v;
	struct vnode	*vp = ap->a_vp;
	struct inode	*ip = VTOI(vp);
	struct ufsmount	*ump = VFSTOUFS(vp->v_mount);
	int		linklen = ip->i_size;

	/*
	 * Short symlinks are stored directly in the inode; copy those out
	 * without touching the file data.  The test against
	 * um_maxsymlinklen is off by one; it should theoretically be <=,
	 * not <, but cannot be changed without breaking compatibility
	 * with existing fs images.
	 */
	if (linklen < ump->um_maxsymlinklen ||
	    (ump->um_maxsymlinklen == 0 && DIP(ip, blocks) == 0)) {
		uiomove((char *)SHORTLINK(ip), linklen, ap->a_uio);
		return (0);
	}

	/* Long symlinks live in the file's data blocks: read them. */
	return (VOP_READ(vp, ap->a_uio, 0, ap->a_cred));
}
Example #7
0
/*
 * old style vnode pager input routine
 *
 * Reads one page of the backing vnode into page m via VOP_READ through
 * a temporary kernel mapping (sf_buf).  Returns a VM_PAGER_* status.
 */
static int
vnode_pager_input_old(vm_object_t object, vm_page_t m)
{
	struct uio auio;
	struct iovec aiov;
	int error;
	int size;
	struct sf_buf *sf;
	struct vnode *vp;

	VM_OBJECT_ASSERT_WLOCKED(object);
	error = 0;

	/*
	 * Return failure if beyond current EOF
	 */
	if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
		return VM_PAGER_BAD;
	} else {
		size = PAGE_SIZE;
		/* Clip the final partial page to the file size. */
		if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
			size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
		vp = object->handle;
		/* Drop the object lock across the (sleepable) file I/O. */
		VM_OBJECT_WUNLOCK(object);

		/*
		 * Allocate a kernel virtual address and initialize so that
		 * we can use VOP_READ/WRITE routines.
		 */
		sf = sf_buf_alloc(m, 0);

		aiov.iov_base = (caddr_t)sf_buf_kva(sf);
		aiov.iov_len = size;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = IDX_TO_OFF(m->pindex);
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_READ;
		auio.uio_resid = size;
		auio.uio_td = curthread;

		error = VOP_READ(vp, &auio, 0, curthread->td_ucred);
		if (!error) {
			/* Bytes actually read = requested minus residue. */
			int count = size - auio.uio_resid;

			if (count == 0)
				error = EINVAL;
			else if (count != PAGE_SIZE)
				/* Zero the tail so the whole page is valid. */
				bzero((caddr_t)sf_buf_kva(sf) + count,
				    PAGE_SIZE - count);
		}
		sf_buf_free(sf);

		VM_OBJECT_WLOCK(object);
	}
	KASSERT(m->dirty == 0, ("vnode_pager_input_old: page %p is dirty", m));
	if (!error)
		m->valid = VM_PAGE_BITS_ALL;
	return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}
Example #8
0
/*
 * sys_read: read up to nbytes from the open file behind descriptor fid
 * into buffer.  On success stores the number of bytes actually read in
 * *retval and returns 0; otherwise returns an errno value.
 */
int sys_read(int fid, void* buffer, size_t nbytes, int* retval) {
	/* A NULL user buffer is an immediate fault. */
	if (buffer == NULL) {
		return EFAULT;
	}

	/* Look up the open file for this descriptor. */
	struct openfile* readfile = (struct openfile*)ftGet(curthread->ft, fid);
	if (readfile == NULL) {
		return EBADF;
	}

	/*
	 * Build the transfer at the file's current offset.
	 * NOTE(review): mk_kuio builds a kernel-space uio; if `buffer` is a
	 * user pointer a user-space uio would be safer -- confirm against
	 * the rest of this port.
	 */
	struct uio userio;
	mk_kuio(&userio, buffer, nbytes, readfile->offset, UIO_READ);

	int error = VOP_READ(readfile->data, &userio);
	if (error) {
		return error;
	}

	/* Advance the file offset past what was actually read. */
	readfile->offset = userio.uio_offset;

	/*
	 * Bytes transferred = requested - residue.  (The original stored the
	 * residue itself, which reports 0 after a complete read.)
	 */
	*retval = nbytes - userio.uio_resid;

	return 0;
}
Example #9
0
/*
 * read_seg_from_swap: locate the swap segment that covers vaddr and read
 * its SEG_SZ bytes from the swap file.  Returns a pointer to the data,
 * or NULL if no segment covers vaddr or the read fails.
 *
 * Fixes vs. original: no longer returns the address of a stack buffer
 * (undefined behavior), no longer indexes offset[-1] when the segment is
 * not found, actually reads from the segment's file offset (uio_kinit
 * used to reset the offset to 0 after it was set), and checks VOP_READ.
 */
char* read_seg_from_swap(vaddr_t vaddr){
	kprintf("About to read segment from swap. given vaddress: %lu \n", (unsigned long)vaddr);
	/*
	 * Static so the returned pointer stays valid after this function
	 * returns.  NOTE(review): this makes the function non-reentrant;
	 * the caller must consume the data before the next call.
	 */
	static char buf[SEG_SZ];

	struct iovec iov;
	struct uio u;

	int i, result, segNum = -1;

	/* Find which table entry's address range covers vaddr. */
	for (i = 0; i < TABLE_SZ; i++) {
		if (addrptr[i] != -1 && vaddr >= addrptr[i] && vaddr < addrptr[i] + SEG_SZ) {
			segNum = i;
		}
	}

	if (segNum == -1) {
		/* Segment not found: do not index offset[] with -1. */
		return NULL;
	}
	kprintf("found seg no %d \n",segNum);

	/* Kernel-space read of SEG_SZ bytes at the segment's file offset. */
	uio_kinit(&iov, &u, buf, SEG_SZ, offset[segNum], UIO_READ);
	result = VOP_READ(swap_file, &u);
	if (result) {
		return NULL;
	}
	return buf;
}
Example #10
0
/*
 * Swapin()
 * -----------------------
 * 1. Sanity checks: We can't swap the pages holding the page table itself. 
 *    So, check if the paddr lie outside of coremap or not.
 * 2. We use mk_kuio to initiate a read from disk to physical memory.
 * 3. Remove the mapping of the page in the swaparea and unmark the swapmap 
 *    bitmap.
 * 4. Read into the page from disk.
 */
void swapin(u_int32_t paddr, u_int32_t chunk)
{
    /*
     * sanity check: make sure that we are not touching kernel space or the page
     * table itself .That is the page should be within the coremap memory 
     * starting from coremap_base
     */    
    assert(paddr >= coremap_base);
    
    /* Disable interrupts while manipulating the swap bookkeeping. */
    int spl=splhigh();
    /*
     * Initialize the read I/O into kernel buffer of size PAGE_SIZE starting 
     * from paddr from the swaparea starting from offset indexed by chunk.
     */    
    struct uio swap_uio;
    mk_kuio(&swap_uio, /*kernel buffer*/(void*)PADDR_TO_KVADDR(paddr & PAGE_FRAME), 
                       /*Size of the buffer to read into*/PAGE_SIZE, 
                       /*Starting offset of the swap area for read out */chunk, UIO_READ);        
    
    /*
     * Remove the mapping of the chunk to page in the swaparea and unmark the 
     * swap_memmap bitmap to free the chunk.
     * NOTE(review): the chunk is freed BEFORE the data is actually read
     * below (after splx) -- confirm nothing can reuse the chunk in between.
     */
    remove_spage(chunk);
    splx(spl);
    
    //Now we read the page from memory into kernel buffer pointed with paddr
    int result=VOP_READ(swap_fp, &swap_uio);
    if(result) 
        panic("VM: SWAPIN Failed");    
}
/*
 * swaponepageout: transfer one page between the swap file and the
 * physical page at phyaddr, then mark the page resident and free its
 * swap slot.
 *
 * NOTE(review): despite the name ("pageout") and the "write them to
 * disk" comment below, the code performs a READ (UIO_READ / VOP_READ)
 * and then maps the page in (PT_STATE_MAPPED) -- i.e. swap-IN behavior.
 * Confirm which direction is intended; a true pageout would need
 * UIO_WRITE / VOP_WRITE.
 */
static void swaponepageout(struct page* pg, paddr_t phyaddr) {
	int swapPageindex = pg->pt_pagebase;
	struct iovec iov;
	struct uio kuio;
	/* Kernel-space single-segment transfer covering one page. */
	iov.iov_kbase = (void*) PADDR_TO_KVADDR(phyaddr);
	iov.iov_len = PAGE_SIZE; // length of the memory space
	kuio.uio_iov = &iov;
	kuio.uio_iovcnt = 1;
	kuio.uio_resid = PAGE_SIZE; // amount to transfer
	kuio.uio_space = NULL;
	/* Swap-file offset of this page's slot. */
	kuio.uio_offset = swap_map[swapPageindex].se_paddr * PAGE_SIZE;
	kuio.uio_segflg = UIO_SYSSPACE;
	kuio.uio_rw = UIO_READ;
	// 4. write them to disk  (NOTE(review): actually a read -- see above)
	int result = VOP_READ(swap_vnode, &kuio);
	if (result) {
		// release lock on the vnode
		panic("READ FAILED!\n");
		return;
	}

	/* Free the swap slot and mark the page resident at phyaddr. */
	swap_map[swapPageindex].se_used = SWAP_PAGE_FREE;
	kprintf("Swap out:\tswap= %x,\tpage=%x \n",swapPageindex,pg->pt_virtbase);
	pg->pt_state = PT_STATE_MAPPED;
	pg->pt_pagebase = phyaddr / PAGE_SIZE;


}
Example #12
0
void
vdsp_readlabel(struct vdsp_softc *sc)
{
	struct proc *p = curproc;
	struct iovec iov;
	struct uio uio;
	int err;

	if (sc->sc_vp == NULL)
		return;

	sc->sc_label = malloc(sizeof(*sc->sc_label), M_DEVBUF, M_WAITOK);

	iov.iov_base = sc->sc_label;
	iov.iov_len = sizeof(*sc->sc_label);
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = sizeof(*sc->sc_label);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_procp = p;

	vn_lock(sc->sc_vp, LK_EXCLUSIVE | LK_RETRY, p);
	err = VOP_READ(sc->sc_vp, &uio, 0, p->p_ucred);
	VOP_UNLOCK(sc->sc_vp, 0, p);
	if (err) {
		free(sc->sc_label, M_DEVBUF, 0);
		sc->sc_label = NULL;
	}
}
Example #13
0
/*
 * File table vnode read routine.
 *
 * Builds the ioflag word from the file's open flags, performs the read
 * under a shared vnode lock, and optionally advances *offset by the
 * number of bytes actually transferred.
 */
static int
vn_read(file_t *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    int flags)
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	int error, ioflag, fflag;
	size_t count;

	/* Seed ioflag with the file's access-pattern advice. */
	ioflag = IO_ADV_ENCODE(fp->f_advice);
	fflag = fp->f_flag;
	if (fflag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	/* Synchronous reads only when both FFSYNC and FRSYNC are set. */
	if ((fflag & (FFSYNC | FRSYNC)) == (FFSYNC | FRSYNC))
		ioflag |= IO_SYNC;
	if (fflag & FALTIO)
		ioflag |= IO_ALTSEMANTICS;
	if (fflag & FDIRECT)
		ioflag |= IO_DIRECT;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	uio->uio_offset = *offset;
	count = uio->uio_resid;		/* remember requested size */
	error = VOP_READ(vp, uio, ioflag, cred);
	/* Advance the caller's offset by the bytes actually read. */
	if (flags & FOF_UPDATE_OFFSET)
		*offset += count - uio->uio_resid;
	VOP_UNLOCK(vp);
	return (error);
}
Example #14
0
int
kobj_read_file_vnode(struct _buf *file, char *buf, unsigned size, unsigned off)
{
	struct vnode *vp = file->ptr;
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov;
	int error, vfslocked;

	bzero(&aiov, sizeof(aiov));
	bzero(&auio, sizeof(auio));

	aiov.iov_base = buf;
	aiov.iov_len = size;

	auio.uio_iov = &aiov;
	auio.uio_offset = (off_t)off;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_iovcnt = 1;
	auio.uio_resid = size;
	auio.uio_td = td;

	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_READ(vp, &auio, IO_UNIT | IO_SYNC, td->td_ucred);
	VOP_UNLOCK(vp, 0);
	VFS_UNLOCK_GIANT(vfslocked);
	return (error != 0 ? -1 : size - auio.uio_resid);
}
Example #15
0
/*
 * sys_read: read up to nbytes from the open file behind fd into the user
 * buffer buf.  On success stores the bytes read in *retval and returns 0;
 * otherwise returns an errno value.
 *
 * Fixes vs. original: the file-handle lock is released on the VOP_READ
 * error path (it used to leak), the writability check masks with
 * O_ACCMODE (plain `flags == O_WRONLY` missed O_WRONLY|O_APPEND etc.),
 * and the user pointer goes in iov_ubase, matching UIO_USERSPACE.
 */
int sys_read(int fd, userptr_t buf, size_t nbytes, int * retval){
	
	int err;	
	/* Step 1: fd must be in range and refer to an open file handle. */
	
	if (fd >= OPEN_MAX || fd <= -1) {	// out of bounds of the file table array
		return EBADF;
	}
	if (curthread->t_fdtable[fd] == NULL) {	// no open file behind this fd
		return EBADF;
	}

	struct fhandle * fdesc;
	fdesc = curthread->t_fdtable[fd];
	
	/* The handle must be readable: reject write-only opens.  Mask with
	 * O_ACCMODE so extra flags (O_APPEND, ...) don't defeat the check. */
	if ((fdesc->flags & O_ACCMODE) == O_WRONLY) {
		return EBADF;
	} 
	
	/* Step 2: the buffer pointer must at least be non-NULL; deeper
	 * validation happens inside the uio copy machinery. */
	if (buf == NULL) {
		return EFAULT;
	}
	
	/* Serialize offset read/update on this file handle. */
	lock_acquire(fdesc->lock);
	
	/* Step 3: build a user-space uio at the handle's current offset. */
	struct iovec iov;
	struct uio user;
	
	iov.iov_ubase = buf;		/* user pointer: ubase, not kbase */
	iov.iov_len = nbytes;
	user.uio_iov = &iov;
	user.uio_iovcnt = 1;
	user.uio_offset = fdesc->offset;
	user.uio_resid = nbytes;
	user.uio_rw = UIO_READ;
	user.uio_segflg = UIO_USERSPACE;
	user.uio_space = curthread->t_addrspace;
	
	/* Step 4: do the read. */
	err = VOP_READ(fdesc->vn, &user);
	if (err) {
		lock_release(fdesc->lock);	/* don't leak the lock on error */
		return err;
	}
	
	/* Bytes read = requested minus residue; advance the file offset. */
	*retval = nbytes - user.uio_resid;
	fdesc->offset = user.uio_offset;
	
	lock_release(fdesc->lock);

	return 0;	
}
Example #16
0
/* Return target name of a symbolic link */
static int
reiserfs_readlink(struct vop_readlink_args *ap)
{
    reiserfs_log(LOG_DEBUG, "redirect to VOP_READ()\n");
    /* Symlink contents live in the file body: hand off to VOP_READ. */
    return (VOP_READ(ap->a_vp, ap->a_uio, 0, ap->a_cred));
}
Example #17
0
/*
 * as_define_stack: choose the user stack top for address space `as` and
 * allocate STACKSIZE readable/writable stack pages below it.  When the
 * random device is available the stack top is randomized (stack ASLR)
 * within a range kept above every existing page of the address space.
 * Returns 0 on success or an errno value.
 */
int
as_define_stack(struct addrspace *as, vaddr_t *stackptr)
{
	struct page *p;
	size_t npages;
	size_t curpage;
	struct uio ku;
	vaddr_t maxvaddr;
	vaddr_t lowerbound;
	int i;
	int result;
	unsigned int rval;
	vaddr_t stacktop;

	/* Default: stack grows down from the top of user space. */
	*stackptr = USERTOP;

	/* Do Stack ASLR */
	if (randvnode!=NULL)
	{
		/* Read 4 random bytes from the random device into rval. */
		mk_kuio(&ku, &rval, 4, 0, UIO_READ);

		result = VOP_READ(randvnode, &ku);
		if (result)
			return result;

		/* Find the highest vaddr already used in this address space. */
		maxvaddr = (vaddr_t) 0;
		for(i=0;i<array_getnum(as->pages);i++)
		{
			p = (struct page *) array_getguy(as->pages, i);
			if (p->vaddr>maxvaddr)
				maxvaddr = p->vaddr;
		}
		
		/* Keep the whole stack plus a guard page above maxvaddr. */
		lowerbound = maxvaddr + ((STACKSIZE * PAGE_SIZE) + PAGE_SIZE);
		/* NOTE(review): this expression reduces to rval %= lowerbound;
		 * the intent was probably rval %= (USERTOP - lowerbound) --
		 * confirm the randomization range. */
		rval %= USERTOP - (USERTOP - lowerbound);
		*stackptr = (lowerbound + rval) & PAGE_FRAME;
	}

	npages = (size_t) STACKSIZE;
	stacktop = *stackptr - PAGE_SIZE * npages;

	/* Record one readable/writable page per stack page and map it. */
	for(curpage=0;curpage<npages;curpage++)
	{
		p = (struct page *) kmalloc(sizeof(struct page));
		if (p==NULL)
			return ENOMEM;

		p->vaddr = stacktop + curpage * PAGE_SIZE;
		p->perms = P_R_B | P_W_B;
		array_add(as->pages, p);


		addpage(p->vaddr, curthread->t_pid, p->perms & P_R_B, 
			p->perms & P_W_B, p->perms & P_X_B, NULL);
	}

	return 0;
}
Example #18
0
/*
 * load_each_segment: read one ELF segment from vnode v into memory at
 * vaddr, zero-filling any trailing (memsize - filesize) bytes.  The read
 * is only performed on the first pass (first_read == 0); later passes
 * return success immediately.
 * NOTE(review): `paddr` and `spl` are accepted/declared but never used
 * here -- confirm whether a physical-address path was intended.
 */
static
int
load_each_segment(struct vnode *v, off_t offset, vaddr_t vaddr, paddr_t paddr, 
                  size_t memsize, size_t filesize,
                  int is_executable, int first_read)
{
	struct uio u;
	int result;
	size_t fillamt;
    int spl;	/* NOTE(review): unused */
    
	/* A segment can never carry more file data than memory space. */
	if (filesize > memsize) {
		kprintf("ELF: warning: segment filesize > segment memsize\n");
		filesize = memsize;
	}
    
	DEBUG(DB_EXEC, "ELF: Loading %lu bytes to 0x%lx\n", 
	      (unsigned long) filesize, (unsigned long) vaddr);
    
    if(first_read == 0){
        
        /* User-space transfer described via the uio's embedded iovec. */
        u.uio_iovec.iov_ubase = (userptr_t)vaddr;
        u.uio_iovec.iov_len = memsize;   // length of the memory space
        u.uio_resid = filesize;          // amount to actually read
        u.uio_offset = offset;
        u.uio_segflg = is_executable ? UIO_USERISPACE : UIO_USERSPACE;
        u.uio_rw = UIO_READ;
        u.uio_space = curthread->t_vmspace;
        
    }else{
        
        /* Segment already loaded on a previous pass: nothing to do. */
        return 0;
    }
	
    result = VOP_READ(v, &u);
    
	if (result) {
        
		return result;
	}
	if (u.uio_resid != 0) {
		/* short read; problem with executable? */
		kprintf("ELF: short read on segment - file truncated?\n");
		return ENOEXEC;
	}
    
    /* Fill the rest of the memory space (if any) with zeros */
	fillamt = memsize - filesize;
	if (fillamt > 0) {
		DEBUG(DB_EXEC, "ELF: Zero-filling %lu more bytes\n", 
		      (unsigned long) fillamt);
		u.uio_resid += fillamt;
		result = uiomovezeros(fillamt, &u);
	}
	return result;
}
Example #19
0
/*
 * smb_vop_read: perform a VOP_READ on vp with credentials cr, bracketed
 * by the vnode's rwlock taken for reading.
 */
int
smb_vop_read(vnode_t *vp, uio_t *uiop, cred_t *cr)
{
	int rc;

	/* Shared (read) rwlock around the transfer. */
	(void) VOP_RWLOCK(vp, V_WRITELOCK_FALSE, &smb_ct);
	rc = VOP_READ(vp, uiop, 0, cr, &smb_ct);
	VOP_RWUNLOCK(vp, V_WRITELOCK_FALSE, &smb_ct);

	return (rc);
}
Example #20
0
/*
 * Load a segment at virtual address VADDR. The segment in memory
 * extends from VADDR up to (but not including) VADDR+MEMSIZE. The
 * segment on disk is located at file offset OFFSET and has length
 * FILESIZE.
 *
 * FILESIZE may be less than MEMSIZE; if so the remaining portion of
 * the in-memory segment should be zero-filled.
 *
 * Note that uiomove will catch it if someone tries to load an
 * executable whose load address is in kernel space. If you should
 * change this code to not use uiomove, be sure to check for this case
 * explicitly.
 */
int
load_segment(struct vnode *v, off_t offset, vaddr_t vaddr, 
	     size_t memsize, size_t filesize,
	     int is_executable)
{

	struct uio u; // Memory block
	int result;
	size_t fillamt;


	// The virtual memory has to be bigger then the file that we are loading into it
	if (filesize > memsize) {
		kprintf("ELF: warning: segment filesize > segment memsize\n");
		filesize = memsize;
	}

	DEBUG(DB_EXEC, "ELF: Loading %lu bytes to 0x%lx\n", 
	      (unsigned long) filesize, (unsigned long) vaddr);

	//u.uio_iovec.iov_ubase = (userptr_t)PADDR_TO_KVADDR(vaddr);
	u.uio_iovec.iov_ubase = (userptr_t)vaddr;
	u.uio_iovec.iov_len = memsize;   // length of the memory space
	u.uio_resid = filesize;          // amount to actually read
	u.uio_offset = offset;
	//u.uio_segflg = is_executable ? UIO_USERISPACE : UIO_USERSPACE;
	// NOTE(review): the transfer is forced to kernel space (SYSSPACE with
	// a NULL uio_space) even though iov_ubase holds the target vaddr;
	// this only works if vaddr is kernel-accessible here -- confirm.
	u.uio_segflg = UIO_SYSSPACE;
	u.uio_rw = UIO_READ;
	//u.uio_space = curthread->t_vmspace;
	u.uio_space = NULL;

	result = VOP_READ(v, &u);
	if (result) {
		return result;
	}

	DEBUG(1,"after read\n");

	// A nonzero residue means the on-disk segment was shorter than claimed.
	if (u.uio_resid != 0) {
		/* short read; problem with executable? */
		kprintf("ELF: short read on segment - file truncated?\n");
		return ENOEXEC;
	}

	/* Fill the rest of the memory space (if any) with zeros */
	fillamt = memsize - filesize;
	if (fillamt > 0) {
		DEBUG(DB_EXEC, "ELF: Zero-filling %lu more bytes\n", 
		      (unsigned long) fillamt);
		u.uio_resid += fillamt;
		result = uiomovezeros(fillamt, &u);
	}
	
	return result;
}
/*
 * sys_read: read up to nbytes from the open file behind fdesc into the
 * user buffer ubuf.  On success stores the bytes actually read in
 * *retval and returns 0; on failure returns an errno value.
 *
 * Fixes vs. original: returns 0 on success instead of nbytes (a nonzero
 * return is treated as an error by the syscall dispatcher), and advances
 * the file offset by the bytes actually read rather than by nbytes.
 */
int
sys_read(int fdesc, userptr_t ubuf, unsigned int nbytes, int *retval){

	/* The user buffer must be non-NULL. */
	if (!ubuf){
		return EFAULT;
	}

	/* fd must map to an open file. */
	struct filedescriptor* fDescriptor = getFileDescriptor(fdesc, curthread->t_fdManager);
	if (fDescriptor == NULL){
		return EBADF;
	}

	/* The file must be open for reading. */
	switch (O_ACCMODE & fDescriptor->fdmode){
		case O_RDONLY:
		case O_RDWR:
			break;
		default:
			return EBADF;
	}

	/* Build a user-space uio at the descriptor's current offset. */
	struct uio u;
	struct iovec iov;
	iov.iov_ubase = ubuf;
	iov.iov_len = nbytes;
	u.uio_iov = &iov;
	u.uio_iovcnt = 1;
	u.uio_offset = fDescriptor->fdoff;  /* not needed for the console */
	u.uio_resid = nbytes;
	u.uio_segflg = UIO_USERSPACE;
	u.uio_rw = UIO_READ;
	u.uio_space = curproc->p_addrspace;

	/* Serialize concurrent access to this descriptor. */
	lock_acquire(fDescriptor->fdlock);
	int err = VOP_READ(fDescriptor->fdvnode, &u);
	lock_release(fDescriptor->fdlock);

	if (err){
		return err;
	}

	/* Bytes actually read = requested minus residue. */
	int readSize = nbytes - u.uio_resid;
	*retval = readSize;

	/* Advance the offset by what was read, not by what was asked for. */
	fDescriptor->fdoff += readSize;

	/* Success: the byte count travels via *retval. */
	return 0;
}
Example #22
0
//   spinlock_acquire(&ft->filetable_lock);
//   // update file seeking position
//   ft->ft_entries[fd]->pos += *retval;
//   spinlock_release(&ft->filetable_lock);
//   return 0;
// }
/*
 * sys_read: read up to size bytes from the open file behind fd into the
 * user buffer buf.  On success stores the bytes read in *retval, advances
 * the file position, and returns 0; otherwise returns an errno value.
 *
 * Fixes vs. original: the filetable spinlock is released exactly once on
 * every path.  The original released it before VOP_READ (correct -- a
 * spinlock must not be held across a blocking operation) but then
 * released it AGAIN on both the error path and the success path without
 * re-acquiring, and updated ->pos without holding it.
 */
int
sys_read(int fd, userptr_t buf, size_t size, int *retval)
{
  struct uio user_uio;
  struct iovec user_iov;
  int result;

  // Check fd is a valid file handle
  if ((fd < 0) || (fd >= __OPEN_MAX)){
    return EBADF;
  }
  struct filetable *ft = curthread->t_filetable;
  spinlock_acquire(&ft->filetable_lock);

  // Check fd is a valid file handle in filetable and the vnode is not empty
  if ((ft->ft_entries[fd] == NULL) || (ft->ft_entries[fd]->file_vnode == NULL)) {
    spinlock_release(&ft->filetable_lock);
    return EBADF;
  }

  // Check the file handle has the permission to read
  int fl = ft->ft_entries[fd]->flags & O_ACCMODE;
  if ((fl != O_RDONLY) && (fl != O_RDWR)) {
    spinlock_release(&ft->filetable_lock);
    return EBADF;
  }

  // Init the offset by the position of the file handle
  int offset = ft->ft_entries[fd]->pos;

  /* set up a uio with the buffer, its size, and the current offset */
  mk_useruio(&user_iov, &user_uio, buf, size, offset, UIO_READ);
  /* Release before VOP_READ: it may block, and a spinlock must not be
   * held across a blocking operation. */
  spinlock_release(&ft->filetable_lock);

  /* does the read */
  result = VOP_READ(ft->ft_entries[fd]->file_vnode, &user_uio);
  if (result) {
    /* Lock already released above -- do not release it again here. */
    return result;
  }

  /*
   * The amount read is the size of the buffer originally, minus
   * how much is left in it.
   */
  *retval = size - user_uio.uio_resid;

  /* Re-acquire to update the seek position safely. */
  spinlock_acquire(&ft->filetable_lock);
  ft->ft_entries[fd]->pos += *retval;
  spinlock_release(&ft->filetable_lock);

  return 0;
}
Example #23
0
/*
 * Read or write a vnode.  Called from kernel code.
 * On return, *residp (if supplied) holds the untransferred byte count;
 * otherwise any residue is reported as EIO.
 */
int
vn_rdwr(
    enum uio_rw rw,
    struct vnode *vp,
    caddr_t base,
    ssize_t len,
    offset_t offset,
    enum uio_seg seg,
    int ioflag,
    rlim64_t ulimit,	/* meaningful only if rw is UIO_WRITE */
    cred_t *cr,
    ssize_t *residp)
{
    struct uio uio;
    struct iovec iov;
    int error;

    /* Refuse writes to read-only filesystems. */
    if (rw == UIO_WRITE && ISROFILE(vp))
        return (EROFS);

    if (len < 0)
        return (EIO);

    /* Single-segment uio describing base/len at the given offset. */
    iov.iov_base = base;
    iov.iov_len = len;
    uio.uio_iov = &iov;
    uio.uio_iovcnt = 1;
    uio.uio_loffset = offset;
    uio.uio_segflg = (short)seg;
    uio.uio_resid = len;
    uio.uio_llimit = ulimit;

    /* Writers take the rwlock exclusively, readers shared. */
    (void) VOP_RWLOCK(vp,
                      rw == UIO_WRITE ? V_WRITELOCK_TRUE : V_WRITELOCK_FALSE, NULL);
    if (rw == UIO_WRITE) {
        uio.uio_fmode = FWRITE;
        uio.uio_extflg = UIO_COPY_DEFAULT;
        error = VOP_WRITE(vp, &uio, ioflag, cr, NULL, NULL, NULL);
    } else {
        uio.uio_fmode = FREAD;
        uio.uio_extflg = UIO_COPY_CACHED;
        error = VOP_READ(vp, &uio, ioflag, cr, NULL);
    }
    VOP_RWUNLOCK(vp, rw == UIO_WRITE ? V_WRITELOCK_TRUE : V_WRITELOCK_FALSE,
                 NULL);
    /* Report residue to the caller, or turn a short transfer into EIO. */
    if (residp)
        *residp = uio.uio_resid;
    else if (uio.uio_resid)
        error = EIO;

    return (error);
}
/*ARGSUSED*/
static int
cttyread(dev_t dev, struct uio *uio, int flag)
{
	struct vnode *ttyvp = cttyvp(curproc);
	int rc;

	/* No controlling tty attached: report an I/O error. */
	if (ttyvp == NULL)
		return (EIO);

	/* Forward the read to the tty's vnode under an exclusive lock. */
	vn_lock(ttyvp, LK_EXCLUSIVE | LK_RETRY);
	rc = VOP_READ(ttyvp, uio, flag, NOCRED);
	VOP_UNLOCK(ttyvp);

	return (rc);
}
Example #25
0
/*
 * sys_read
 * calls VOP_READ.
 * 
 * A4: This is the "dumb" implementation of sys_write:
 * it only deals with file descriptors 1 and 2, and 
 * assumes they are permanently associated with the 
 * console vnode (which must have been previously initialized).
 *
 * In your implementation, you should use the file descriptor
 * to find a vnode from your file table, and then read from it.
 *
 * Note that any problems with the address supplied by the
 * user as "buf" will be handled by the VOP_READ / uio code
 * so you do not have to try to verify "buf" yourself.
 *
 * Most of this code should be replaced.
 */
int
sys_read(int fd, userptr_t buf, size_t size, int *retval)
{
	DEBUG(DB_VFS, "*** Reading fd %d\n", fd);
    
    struct uio user_uio;
	struct iovec user_iov;
	int result;
	int offset = 0;

    /* Check if fd is a valid file descriptor. */
    struct filetable *ft = curthread->t_filetable;
    spinlock_acquire(&ft->ft_spinlock);
    
    /* If fd is not a valid file descriptor, or was not opened for reading,
     * return error */
    if ((fd < 0) || (fd >= __OPEN_MAX) || (ft->ft_entries[fd] == NULL) ||
            (ft->ft_entries[fd]->ft_vnode == NULL)) {
        spinlock_release(&ft->ft_spinlock);
        return EBADF;
    }
    /* The access mode must permit reading. */
    int how = ft->ft_entries[fd]->ft_flags & O_ACCMODE;
    if ((how != O_RDONLY) && (how != O_RDWR)) {
        spinlock_release(&ft->ft_spinlock);
        return EBADF;
    }

	/* set up a uio with the buffer, its size, and the current offset */
    offset = ft->ft_entries[fd]->ft_pos;
	mk_useruio(&user_iov, &user_uio, buf, size, offset, UIO_READ);

	/* does the read -- the spinlock is dropped first because VOP_READ
	 * may block.  NOTE(review): the entry pointer is dereferenced after
	 * the lock is released; confirm a concurrent close cannot free it. */
    spinlock_release(&ft->ft_spinlock);
	result = VOP_READ(ft->ft_entries[fd]->ft_vnode, &user_uio);
	if (result) {
		return result;
	}

	/*
	 * The amount read is the size of the buffer originally, minus
	 * how much is left in it.
	 */
	*retval = size - user_uio.uio_resid;
    
    /* Advance file seek position. */
    spinlock_acquire(&ft->ft_spinlock);
    ft->ft_entries[fd]->ft_pos += *retval;

    spinlock_release(&ft->ft_spinlock);
	return 0;
}
Example #26
0
/*
 * sys_rw: shared implementation of read(2)/write(2).  `rw` selects the
 * direction (O_RDONLY => read, otherwise write).  Returns the number of
 * bytes transferred (offset delta), or -1 with *err set for the early
 * validation failures.
 * NOTE(review): when VOP_READ/VOP_WRITE itself fails, *err is set but
 * the function still returns the offset delta (normally 0) rather than
 * -1 -- callers must check *err, not only the return value.
 */
int
sys_rw(int fd, userptr_t buf, size_t buf_len, int *err, int rw) {
  /* fd must be in range and refer to an open file. */
  if (fd < 0 || fd >= MAX_FILE_DESCRIPTOR){
    *err = EBADF;
    return -1;
  }
  if (curthread->fd[fd] == NULL){
    *err = EBADF;
    return -1;
  }
  if (buf == NULL){
    *err = EFAULT;
    return -1;
  }

  /* Serialize offset access on this descriptor. */
  lock_acquire(curthread->fd[fd]->mutex);
  
  /* The open mode must match the requested direction (or be O_RDWR). */
  if ((curthread->fd[fd]->status != rw) && (curthread->fd[fd]->status != O_RDWR)) { 
    *err = EBADF;
    lock_release(curthread->fd[fd]->mutex);
    return -1;
  }
  struct iovec iov;
  struct uio uio;

  /* User-space uio at the descriptor's current offset. */
  iov.iov_ubase = buf;
  iov.iov_len = buf_len;
  uio.uio_iov = &iov;
  uio.uio_iovcnt = 1;
  uio.uio_offset = curthread->fd[fd]->offset;
  uio.uio_resid = buf_len;
  uio.uio_segflg = UIO_USERSPACE;
  uio.uio_space = curthread->t_addrspace;

  if (rw == O_RDONLY) {
    //uio_kinit(&iov, &uio, buf, buf_len, curthread->fd[fd]->offset, UIO_READ);
    uio.uio_rw = UIO_READ;
    *err = VOP_READ(curthread->fd[fd]->file,&uio);
  }
  else {
    uio.uio_rw = UIO_WRITE;
    *err = VOP_WRITE(curthread->fd[fd]->file,&uio);
  }
  /* Bytes transferred = how far the uio advanced the offset. */
  int diff = uio.uio_offset - curthread->fd[fd]->offset;

  /* Seekable files track the new position; others (console) do not. */
  if (curthread->fd[fd]->update_pos)
    curthread->fd[fd]->offset = uio.uio_offset;  

  lock_release(curthread->fd[fd]->mutex);
  return diff;
}
Example #27
0
/*
 * Return target name of a symbolic link
 */
static int
ext2_readlink(struct vop_readlink_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	int linklen = ip->i_size;

	/* Short links are stored inline in the inode: copy them directly. */
	if (linklen < vp->v_mount->mnt_maxsymlinklen) {
		uiomove((char *)ip->i_shortlink, linklen, ap->a_uio);
		return (0);
	}

	/* Long links live in the file data: read them through VOP_READ. */
	return (VOP_READ(vp, ap->a_uio, 0, ap->a_cred));
}
Example #28
0
/*
 * RUMP_VOP_READ: VOP_READ bracketed by rump scheduling, so that callers
 * outside the rump kernel context can issue the vnode operation.
 */
int
RUMP_VOP_READ(struct vnode *vp,
    struct uio *uio,
    int ioflag,
    struct kauth_cred *cred)
{
	int rv;

	/* Enter the rump kernel, do the read, and leave again. */
	rump_schedule();
	rv = VOP_READ(vp, uio, ioflag, cred);
	rump_unschedule();

	return rv;
}
Example #29
0
/*
 * Package up an I/O request on a vnode into a uio and do it.
 * On return, *aresid (if non-NULL) holds the bytes NOT transferred;
 * otherwise a short transfer with no error is reported as EIO.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset,
    enum uio_seg segflg, int ioflg, kauth_cred_t cred, size_t *aresid,
    struct lwp *l)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	/* Lock the vnode (shared for reads, exclusive for writes) unless
	 * the caller says it is already locked. */
	if ((ioflg & IO_NODELOCKED) == 0) {
		if (rw == UIO_READ) {
			vn_lock(vp, LK_SHARED | LK_RETRY);
		} else /* UIO_WRITE */ {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		}
	}
	/* Single-segment uio describing base/len at the given offset. */
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_rw = rw;
	if (segflg == UIO_SYSSPACE) {
		UIO_SETUP_SYSSPACE(&auio);
	} else {
		auio.uio_vmspace = l->l_proc->p_vmspace;
	}

	/* Honour the process's RLIMIT_FSIZE before writing. */
	if ((error = enforce_rlimit_fsize(vp, &auio, ioflg)) != 0)
		goto out;

	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}

	/* Report the residue, or convert a silent short transfer to EIO. */
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;

 out:
	if ((ioflg & IO_NODELOCKED) == 0) {
		VOP_UNLOCK(vp);
	}
	return (error);
}
Example #30
0
/*ARGSUSED*/
int
cttyread(dev_t dev, struct uio *uio, int flag)
{
    struct proc *p = uio->uio_procp;
    struct vnode *ttyvp = cttyvp(uio->uio_procp);
    int rc;

    /* No controlling terminal for this process: I/O error. */
    if (ttyvp == NULL)
        return (EIO);

    /* Forward the read to the tty's vnode under an exclusive lock. */
    vn_lock(ttyvp, LK_EXCLUSIVE | LK_RETRY, p);
    rc = VOP_READ(ttyvp, uio, flag, NOCRED);
    VOP_UNLOCK(ttyvp, 0, p);

    return (rc);
}