Example no. 1
int
nwfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
	struct nwmount *nmp = VFSTONWFS(vp->v_mount);
	struct nwnode *np = VTONW(vp);
	struct vattr vattr;
	int error;

	if (vp->v_type != VREG && vp->v_type != VDIR) {
		kprintf("%s: vn types other than VREG or VDIR are unsupported !\n",__func__);
		return EIO;
	}
	if (uiop->uio_resid == 0) return 0;
	if (uiop->uio_offset < 0) return EINVAL;
	if (vp->v_type == VDIR) {
		error = nwfs_readvdir(vp, uiop, cred);
		return error;
	}
	if (np->n_flag & NMODIFIED) {
		nwfs_attr_cacheremove(vp);
		error = VOP_GETATTR(vp, &vattr);
		if (error) return (error);
		np->n_mtime = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr);
		if (error) return (error);
		if (np->n_mtime != vattr.va_mtime.tv_sec) {
			error = nwfs_vinvalbuf(vp, V_SAVE, 1);
			if (error) return (error);
			np->n_mtime = vattr.va_mtime.tv_sec;
		}
	}
	error = ncp_read(NWFSTOCONN(nmp), &np->n_fh, uiop, cred);
	return (error);
}
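
The NMODIFIED branch above is the whole cache-consistency story: local modifications win, otherwise a changed server mtime means another client wrote the file and the cached buffers must be flushed. Below is a standalone sketch of just that decision, with a hypothetical fs_node type and a buffers_stale flag standing in for the nwfs types and the nwfs_vinvalbuf() side effect:

#include <stdio.h>
#include <time.h>

struct fs_node {
	time_t cached_mtime;     /* mtime recorded at the last validation */
	int    locally_modified; /* analogue of the NMODIFIED flag */
	int    buffers_stale;    /* set where nwfs calls nwfs_vinvalbuf() */
};

/*
 * With pending local writes, just adopt the server mtime; otherwise a
 * changed mtime means someone else wrote the file, so cached buffers
 * must be invalidated before reading.
 */
static void
fs_validate_cache(struct fs_node *n, time_t server_mtime)
{
	if (n->locally_modified) {
		n->cached_mtime = server_mtime;
	} else if (n->cached_mtime != server_mtime) {
		n->buffers_stale = 1;
		n->cached_mtime = server_mtime;
	}
}

int
main(void)
{
	struct fs_node n = { 100, 0, 0 };
	fs_validate_cache(&n, 200);		/* remote change detected */
	printf("stale=%d\n", n.buffers_stale);	/* prints: stale=1 */
	return 0;
}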
Example no. 2
static int
ncp_file_read(struct inode *inode, struct file *file, char *buf, int count)
{
    int bufsize, already_read;
    off_t pos;
    int errno;

    DPRINTK("ncp_file_read: enter %s\n", NCP_ISTRUCT(inode)->entryName);

    if (inode == NULL)
    {
        DPRINTK("ncp_file_read: inode = NULL\n");
        return -EINVAL;
    }
    if (!ncp_conn_valid(NCP_SERVER(inode)))
    {
        return -EIO;
    }

    if (!S_ISREG(inode->i_mode))
    {
        DPRINTK("ncp_file_read: read from non-file, mode %07o\n",
                inode->i_mode);
        return -EINVAL;
    }

    pos = file->f_pos;

    if (pos + count > inode->i_size)
    {
        count = inode->i_size - pos;
    }

    if (count <= 0)
    {
        return 0;
    }

    if ((errno = ncp_make_open(inode, O_RDONLY)) != 0)
    {
        return errno;
    }

    bufsize = NCP_SERVER(inode)->buffer_size;

    already_read = 0;

    /* First read in as much as possible for each bufsize. */
    while (already_read < count)
    {
        int read_this_time;
        int to_read = min(bufsize - (pos % bufsize),
                          count - already_read);

        if (ncp_read(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle,
                     pos, to_read, buf, &read_this_time) != 0)
        {
            return -EIO; /* This is not exact, I know... */
        }

        pos += read_this_time;
        buf += read_this_time;
        already_read += read_this_time;

        if (read_this_time < to_read)
        {
            break;
        }
    }

    file->f_pos = pos;

    if (!IS_RDONLY(inode))
    {
        inode->i_atime = CURRENT_TIME;
    }

    inode->i_dirt = 1;

    DPRINTK("ncp_file_read: exit %s\n", NCP_ISTRUCT(inode)->entryName);

    return already_read;
}
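
The to_read computation drives the whole loop: the first request is trimmed so the next one starts on a server-buffer boundary, after which full bufsize chunks follow. A compilable sketch of just that arithmetic, with illustrative values:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/*
 * Largest request that neither crosses a server-buffer boundary nor
 * overshoots the bytes still wanted; the kernel min() above does the
 * same with bufsize - (pos % bufsize) and count - already_read.
 */
static long
chunk_len(long bufsize, long pos, long remaining)
{
	return MIN(bufsize - (pos % bufsize), remaining);
}

int
main(void)
{
	printf("%ld\n", chunk_len(1024, 1000, 4096));	/* 24: top up to the boundary */
	printf("%ld\n", chunk_len(1024, 1024, 4072));	/* 1024: then full buffers */
	return 0;
}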
Example no. 3
/*
 * Vnode op for VM getpages.
 * Wish list: get rid of the multiple I/O routines.
 *
 * nwfs_getpages(struct vnode *a_vp, vm_page_t *a_m, int a_count,
 *		 int a_reqpage, vm_ooffset_t a_offset)
 */
int
nwfs_getpages(struct vop_getpages_args *ap)
{
#ifndef NWFS_RWCACHE
	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
					    ap->a_reqpage, ap->a_seqaccess);
#else
	int i, error, npages;
	size_t nextoff, toff;
	size_t count;
	size_t size;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td = curthread;	/* XXX */
	struct ucred *cred;
	struct nwmount *nmp;
	struct nwnode *np;
	vm_page_t *pages;

	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;

	vp = ap->a_vp;
	np = VTONW(vp);
	nmp = VFSTONWFS(vp->v_mount);
	pages = ap->a_m;
	count = (size_t)ap->a_count;

	if (vp->v_object == NULL) {
		kprintf("nwfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	bp = getpbuf_kva(&nwfs_pbuf_freecnt);
	npages = btoc(count);
	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = ncp_read(NWFSTOCONN(nmp), &np->n_fh, &uio, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &nwfs_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		kprintf("nwfs_getpages: error %d\n",error);
		for (i = 0; i < npages; i++) {
			if (ap->a_reqpage != i)
				vnode_pager_freepage(pages[i]);
		}
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		m->flags &= ~PG_ZERO;

		/*
		 * NOTE: pmap dirty bit should have already been cleared.
		 *	 We do not clear it here.
		 */
		if (nextoff <= size) {
			m->valid = VM_PAGE_BITS_ALL;
			m->dirty = 0;
		} else {
			int nvalid = ((size + DEV_BSIZE - 1) - toff) &
				      ~(DEV_BSIZE - 1);
			vm_page_set_validclean(m, 0, nvalid);
		}
		
		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Empirical
			 * results suggest that deactivating pages works best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->flags & PG_REFERENCED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vnode_pager_freepage(m);
			}
		}
	}
	return 0;
#endif /* NWFS_RWCACHE */
}
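
For the tail page, nvalid rounds the bytes actually read up to a DEV_BSIZE multiple before calling vm_page_set_validclean(). A small sketch of the rounding, assuming the conventional DEV_BSIZE of 512:

#include <stddef.h>
#include <stdio.h>

#define DEV_BSIZE 512	/* assumed; the kernel header defines the real value */

/*
 * Bytes of the page at offset toff covered by a transfer of size bytes,
 * rounded up to whole DEV_BSIZE blocks, as vm_page_set_validclean()
 * expects in the tail-page case above.
 */
static int
valid_bytes(size_t size, size_t toff)
{
	return (int)(((size + DEV_BSIZE - 1) - toff) & ~(size_t)(DEV_BSIZE - 1));
}

int
main(void)
{
	/* a short read ending 100 bytes into the page at offset 4096 */
	printf("%d\n", valid_bytes(4196, 4096));	/* 512: rounded up from 100 */
	return 0;
}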
Example no. 4
/*
 * Do an I/O operation to/from a cache block.
 */
int
nwfs_doio(struct vnode *vp, struct bio *bio, struct ucred *cr, struct thread *td)
{
	struct buf *bp = bio->bio_buf;
	struct uio *uiop;
	struct nwnode *np;
	struct nwmount *nmp;
	int error = 0;
	struct uio uio;
	struct iovec io;

	np = VTONW(vp);
	nmp = VFSTONWFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	if (bp->b_cmd == BUF_CMD_READ) {
	    io.iov_len = uiop->uio_resid = (size_t)bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;
	    switch (vp->v_type) {
	      case VREG:
		uiop->uio_offset = bio->bio_offset;
		error = ncp_read(NWFSTOCONN(nmp), &np->n_fh, uiop, cr);
		if (error)
			break;
		if (uiop->uio_resid) {
			size_t left = uiop->uio_resid;
			size_t nread = bp->b_bcount - left;
			if (left > 0)
				bzero((char *)bp->b_data + nread, left);
		}
		break;
/*	    case VDIR:
		nfsstats.readdir_bios++;
		uiop->uio_offset = bio->bio_offset;
		if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
			error = nfs_readdirplusrpc(vp, uiop, cr);
			if (error == NFSERR_NOTSUPP)
				nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
		}
		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
			error = nfs_readdirrpc(vp, uiop, cr);
		if (error == 0 && uiop->uio_resid == (size_t)bp->b_bcount)
			bp->b_flags |= B_INVAL;
		break;
*/
	    default:
		kprintf("nwfs_doio:  type %x unexpected\n",vp->v_type);
		break;
	    }
	    if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	    }
	} else { /* write */
	    KKASSERT(bp->b_cmd == BUF_CMD_WRITE);
	    if (bio->bio_offset + bp->b_dirtyend > np->n_size)
		bp->b_dirtyend = np->n_size - bio->bio_offset;

	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid =
			(size_t)(bp->b_dirtyend - bp->b_dirtyoff);
		uiop->uio_offset = bio->bio_offset + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		error = ncp_write(NWFSTOCONN(nmp), &np->n_fh, uiop, cr);

		/*
		 * For an interrupted write, the buffer is still valid
		 * and the write hasn't been pushed to the server yet,
		 * so we can't set B_ERROR and report the interruption
		 * by setting B_EINTR. For the async case, B_EINTR
		 * is not relevant, so the rpc attempt is essentially
		 * a noop.  For the case of a V3 write rpc not being
		 * committed to stable storage, the block is still
		 * dirty and requires either a commit rpc or another
		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
		 * the block is reused. This is indicated by setting
		 * the B_DELWRI and B_NEEDCOMMIT flags.
		 */
		if (error == EINTR
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {

			crit_enter();
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			if ((bp->b_flags & B_PAGING) == 0)
			    bdirty(bp);
			bp->b_flags |= B_EINTR;
			crit_exit();
		} else {
			if (error) {
				bp->b_flags |= B_ERROR;
				bp->b_error = error;
				/* np->n_error = error; np->n_flag |= NWRITEERR; */
			}
			bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	    } else {
		bp->b_resid = 0;
		biodone(bio);
		return (0);
	    }
	}
	bp->b_resid = (int)uiop->uio_resid;
	biodone(bio);
	return (error);
}
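
The write side only pushes the dirty window [b_dirtyoff, b_dirtyend) and first clamps its end so the write cannot run past the file size. A standalone sketch of that clamping, with made-up numbers:

#include <stdio.h>

/*
 * Mirror of the b_dirtyend adjustment above: the dirty window may not
 * extend past the file size, so its end is pulled back to EOF relative
 * to the buffer's offset.  A write is then issued only if the clamped
 * end still lies beyond b_dirtyoff.
 */
static long
clamp_dirty_end(long bio_offset, long dirtyend, long fsize)
{
	if (bio_offset + dirtyend > fsize)
		dirtyend = fsize - bio_offset;
	return dirtyend;
}

int
main(void)
{
	/* buffer at offset 8192, bytes 100..900 dirty, EOF at 8704 */
	printf("%ld\n", clamp_dirty_end(8192, 900, 8704));	/* 512: tail past EOF dropped */
	return 0;
}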
Example no. 5
static int
ncp_conn_handler(struct proc *p, struct sncp_request_args *uap,
	struct ncp_conn *conn, struct ncp_handle *hp)
{
	int error = 0, rqsize, subfn;
	struct ucred *cred;
	char *pdata;

	cred = p->p_ucred;
	error = copyin(&uap->ncpbuf->rqsize, &rqsize, sizeof(int));
	if (error) return(error);
	error = 0;
	pdata = uap->ncpbuf->packet;
	subfn = *(pdata++) & 0xff;
	rqsize--;
	switch (subfn) {
	    case NCP_CONN_READ: case NCP_CONN_WRITE: {
		struct ncp_rw rwrq;
		struct uio auio;
		struct iovec iov;
	
		if (rqsize != sizeof(rwrq)) return (EBADRPC);
		error = copyin(pdata, &rwrq, rqsize);
		if (error) return (error);
		iov.iov_base = rwrq.nrw_base;
		iov.iov_len = rwrq.nrw_cnt;
		auio.uio_iov = &iov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = rwrq.nrw_offset;
		auio.uio_resid = rwrq.nrw_cnt;
		auio.uio_segflg = UIO_USERSPACE;
		auio.uio_rw = (subfn == NCP_CONN_READ) ? UIO_READ : UIO_WRITE;
		auio.uio_procp = p;
		error = ncp_conn_lock(conn, p, cred, NCPM_EXECUTE);
		if (error) return (error);
		if (subfn == NCP_CONN_READ)
			error = ncp_read(conn, &rwrq.nrw_fh, &auio, cred);
		else
			error = ncp_write(conn, &rwrq.nrw_fh, &auio, cred);
		rwrq.nrw_cnt -= auio.uio_resid;
		ncp_conn_unlock(conn, p);
		p->p_retval[0] = rwrq.nrw_cnt;
		break;
	    } /* case int_read/write */
	    case NCP_CONN_SETFLAGS: {
		u_int16_t mask, flags;

		error = copyin(pdata,&mask, sizeof(mask));
		if (error) return error;
		pdata += sizeof(mask);
		error = copyin(pdata,&flags,sizeof(flags));
		if (error) return error;
		error = ncp_conn_lock(conn,p,cred,NCPM_WRITE);
		if (error) return error;
		if (mask & NCPFL_PERMANENT) {
			conn->flags &= ~NCPFL_PERMANENT;
			conn->flags |= (flags & NCPFL_PERMANENT);
		}
		if (mask & NCPFL_PRIMARY) {
			error = ncp_conn_setprimary(conn, flags & NCPFL_PRIMARY);
			if (error) {
				ncp_conn_unlock(conn,p);
				break;
			}
		}
		ncp_conn_unlock(conn,p);
		break;
	    }
	    case NCP_CONN_LOGIN: {
		struct ncp_conn_login la;

		if (rqsize != sizeof(la)) return (EBADRPC);
		if ((error = copyin(pdata, &la, rqsize)) != 0) break;
		error = ncp_conn_lock(conn, p, cred, NCPM_EXECUTE | NCPM_WRITE);
		if (error) return error;
		error = ncp_login(conn, la.username, la.objtype, la.password, p, p->p_ucred);
		ncp_conn_unlock(conn, p);
		p->p_retval[0] = error;
		break;
	    }
	    case NCP_CONN_GETINFO: {
		struct ncp_conn_stat ncs;
		int len = sizeof(ncs);

		error = ncp_conn_lock(conn, p, p->p_ucred, NCPM_READ);
		if (error) return error;
		ncp_conn_getinfo(conn, &ncs);
		copyout(&len, &uap->ncpbuf->rpsize, sizeof(int));
		error = copyout(&ncs, &uap->ncpbuf->packet, len);
		ncp_conn_unlock(conn, p);
		break;
	    }
	    case NCP_CONN_GETUSER: {
		int len;

		error = ncp_conn_lock(conn, p, p->p_ucred, NCPM_READ);
		if (error) return error;
		len = (conn->li.user) ? strlen(conn->li.user) + 1 : 0;
		copyout(&len, &uap->ncpbuf->rpsize, sizeof(int));
		if (len) {
			error = copyout(conn->li.user, &uap->ncpbuf->packet, len);
		}
		ncp_conn_unlock(conn, p);
		break;
	    }
	    case NCP_CONN_CONN2REF: {
		int len = sizeof(int);

		error = ncp_conn_lock(conn, p, p->p_ucred, NCPM_READ);
		if (error) return error;
		copyout(&len, &uap->ncpbuf->rpsize, sizeof(int));
		if (len) {
			error = copyout(&conn->nc_id, &uap->ncpbuf->packet, len);
		}
		ncp_conn_unlock(conn, p);
		break;
	    }
	    case NCP_CONN_FRAG: {
		struct ncp_conn_frag nf;

		if (rqsize != sizeof(nf)) return (EBADRPC);	
		if ((error = copyin(pdata, &nf, rqsize)) != 0) break;
		error = ncp_conn_lock(conn, p, cred, NCPM_EXECUTE);
		if (error) return error;
		error = ncp_conn_frag_rq(conn, p, &nf);
		ncp_conn_unlock(conn, p);
		copyout(&nf, &pdata, sizeof(nf));
		p->p_retval[0] = error;
		break;
	    }
	    case NCP_CONN_DUP: {
		struct ncp_handle *newhp;
		int len = sizeof(NWCONN_HANDLE);

		error = ncp_conn_lock(conn, p, cred, NCPM_READ);
		if (error) break;
		copyout(&len, &uap->ncpbuf->rpsize, len);
		error = ncp_conn_gethandle(conn, p, &newhp);
		if (!error)
			error = copyout(&newhp->nh_id, uap->ncpbuf->packet, len);
		ncp_conn_unlock(conn,p);
		break;
	    }
	    case NCP_CONN_CONNCLOSE: {
		error = ncp_conn_lock(conn, p, cred, NCPM_EXECUTE);
		if (error) break;
		ncp_conn_puthandle(hp, p, 0);
		error = ncp_disconnect(conn);
		if (error)
			ncp_conn_unlock(conn, p);
		break;
	    }
	    default:
		error = EOPNOTSUPP;
	}
	return error;
}
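
Every case above assumes the same request framing: one subfunction byte at the start of the packet, then a payload whose size must match rqsize - 1 exactly, otherwise EBADRPC. A hypothetical user-space mirror of that framing (rw_request is a stand-in, not the real struct ncp_rw layout):

#include <stdio.h>
#include <string.h>

struct rw_request {	/* illustrative stand-in for struct ncp_rw */
	unsigned int handle;
	long         offset;
	long         count;
};

/*
 * Split a packet into subfunction byte and fixed-size payload; a size
 * mismatch is rejected, as the kernel handler does with EBADRPC.
 */
static int
parse_request(const unsigned char *packet, int rqsize,
	      int *subfn, struct rw_request *out)
{
	*subfn = packet[0];
	if ((size_t)(rqsize - 1) != sizeof(*out))
		return -1;			/* EBADRPC in the kernel */
	memcpy(out, packet + 1, sizeof(*out));	/* copyin() in the kernel */
	return 0;
}

int
main(void)
{
	unsigned char buf[1 + sizeof(struct rw_request)] = { 1 /* subfn */ };
	struct rw_request rq;
	int subfn;

	printf("%d\n", parse_request(buf, sizeof(buf), &subfn, &rq)); /* 0 */
	return 0;
}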
Example no. 6
/*
 * Fill in the supplied page for mmap
 */
static unsigned long ncp_file_mmap_nopage(struct vm_area_struct *area,
				     unsigned long address, int no_share)
{
	struct file *file = area->vm_file;
	struct dentry *dentry = file->f_dentry;
	struct inode *inode = dentry->d_inode;
	unsigned long page;
	unsigned int clear;
	unsigned long tmp;
	int bufsize;
	int pos;
	mm_segment_t fs;

	page = __get_free_page(GFP_KERNEL);
	if (!page)
		return page;
	address &= PAGE_MASK;
	pos = address - area->vm_start + area->vm_offset;

	clear = 0;
	if (address + PAGE_SIZE > area->vm_end) {
		clear = address + PAGE_SIZE - area->vm_end;
	}
	/* what we can read in one go */
	bufsize = NCP_SERVER(inode)->buffer_size;

	fs = get_fs();
	set_fs(get_ds());

	if (ncp_make_open(inode, O_RDONLY) < 0) {
		clear = PAGE_SIZE;
	} else {
		int already_read = 0;
		int count = PAGE_SIZE - clear;
		int to_read;

		while (already_read < count) {
			int read_this_time;

			if ((pos % bufsize) != 0) {
				to_read = bufsize - (pos % bufsize);
			} else {
				to_read = bufsize;
			}

			to_read = min(to_read, count - already_read);

			if (ncp_read(NCP_SERVER(inode),
				     NCP_FINFO(inode)->file_handle,
				     pos, to_read,
				     (char *) (page + already_read),
				     &read_this_time) != 0) {
				read_this_time = 0;
			}
			pos += read_this_time;
			already_read += read_this_time;

			if (read_this_time < to_read) {
				break;
			}
		}

	}

	set_fs(fs);

	tmp = page + PAGE_SIZE;
	while (clear--) {
		*(char *) --tmp = 0;
	}
	return page;
}
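
The closing loop zeroes the uncovered tail of the page one byte at a time, walking backwards from the end; it is equivalent to a single memset() over the last clear bytes, which this sketch verifies:

#include <assert.h>
#include <string.h>

#define PAGE_SIZE 4096		/* assumed page size for the sketch */

int
main(void)
{
	static char a[PAGE_SIZE], b[PAGE_SIZE];
	unsigned int clear = 100;	/* bytes past vm_end to zero */
	char *tmp = a + PAGE_SIZE;

	memset(a, 0xff, sizeof(a));
	memset(b, 0xff, sizeof(b));

	/* the backwards loop from ncp_file_mmap_nopage */
	while (clear--)
		*--tmp = 0;

	/* the single-call equivalent */
	memset(b + PAGE_SIZE - 100, 0, 100);

	assert(memcmp(a, b, PAGE_SIZE) == 0);
	return 0;
}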