Example #1
/*
 * Similar to fbread() but we call segmap_pagecreate instead of using
 * segmap_fault for SOFTLOCK to create the pages without using VOP_GETPAGE
 * and then we zero up to the length rounded to a page boundary.
 * XXX - this won't work right when bsize < PAGESIZE!!!
 */
void
fbzero(vnode_t *vp, offset_t off, uint_t len, struct fbuf **fbpp)
{
	caddr_t addr;
	ulong_t o, zlen;
	struct fbuf *fbp;

	o = (ulong_t)(off & MAXBOFFSET);
	if (o + len > MAXBSIZE)
		cmn_err(CE_PANIC, "fbzero: Bad offset/length");

	if (segmap_kpm) {
		addr = segmap_getmapflt(segkmap, vp, off & (offset_t)MAXBMASK,
				MAXBSIZE, SM_PAGECREATE, S_WRITE) + o;
	} else {
		addr = segmap_getmap(segkmap, vp, off & (offset_t)MAXBMASK) + o;
	}

	*fbpp = fbp = kmem_alloc(sizeof (struct fbuf), KM_SLEEP);
	fbp->fb_addr = addr;
	fbp->fb_count = len;

	(void) segmap_pagecreate(segkmap, addr, len, 1);

	/*
	 * Now we zero all the memory in the mapping we are interested in.
	 */
	zlen = (caddr_t)ptob(btopr((uintptr_t)(len + addr))) - addr;
	if (zlen < len || (o + zlen > MAXBSIZE))
		cmn_err(CE_PANIC, "fbzero: Bad zlen");
	bzero(addr, zlen);
}
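As a usage note (not part of the example above): fbzero() is normally paired with the other fbuf routines from <sys/fbuf.h>. The sketch below is illustrative only; the helper name and the vp/boff/bsize parameters are assumed, and it relies on fbwrite() writing the mapped block out and releasing the fbuf.

/*
 * Hypothetical caller, for illustration only: zero a freshly allocated
 * block of a file and write it back through the fbuf interface.
 */
static int
example_clear_block(vnode_t *vp, offset_t boff, uint_t bsize)
{
	struct fbuf *fbp;

	fbzero(vp, boff, bsize, &fbp);	/* map the block, create and zero pages */
	return (fbwrite(fbp));		/* write it out and release the mapping */
}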
Example #2
/*
 * Return a pointer to a locked kernel virtual address for
 * the given <vp, off> for len bytes.  It is not allowed to
 * have the offset cross a MAXBSIZE boundary over len bytes.
 */
int
fbread(vnode_t *vp, offset_t off, uint_t len, enum seg_rw rw,
	struct fbuf **fbpp)
{
	caddr_t addr;
	ulong_t o;
	struct fbuf *fbp;
	faultcode_t err;
	caddr_t	raddr;
	uint_t	rsize;
	uintptr_t pgoff = PAGEOFFSET;

	o = (ulong_t)(off & (offset_t)MAXBOFFSET);
	if (o + len > MAXBSIZE)
		cmn_err(CE_PANIC, "fbread");

	if (segmap_kpm) {
		addr = segmap_getmapflt(segkmap, vp, off & (offset_t)MAXBMASK,
					MAXBSIZE, SM_LOCKPROTO, rw);
	} else {
		addr = segmap_getmapflt(segkmap, vp,
				off & (offset_t)MAXBMASK, MAXBSIZE, 0, rw);
	}

	raddr = (caddr_t)((uintptr_t)(addr + o) & ~pgoff);
	rsize = (((uintptr_t)(addr + o) + len + pgoff) & ~pgoff) -
	    (uintptr_t)raddr;

	err = segmap_fault(kas.a_hat, segkmap, raddr, rsize, F_SOFTLOCK, rw);
	if (err) {
		(void) segmap_release(segkmap, addr, 0);
		if (FC_CODE(err) == FC_OBJERR)
			return (FC_ERRNO(err));
		else
			return (EIO);
	}

	*fbpp = fbp = kmem_alloc(sizeof (struct fbuf), KM_SLEEP);
	fbp->fb_addr = addr + o;
	fbp->fb_count = len;
	return (0);
}
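As a usage note (not part of the example above): callers receive a struct fbuf, use fb_addr/fb_count, and drop the mapping with fbrelse(). The helper below is an illustrative sketch only; its name and parameters are assumed.

/*
 * Hypothetical caller, for illustration only: map one block of metadata,
 * examine it in place, and release the mapping unchanged.
 */
static int
example_inspect_block(vnode_t *vp, offset_t boff, uint_t bsize)
{
	struct fbuf *fbp;
	int error;

	error = fbread(vp, boff, bsize, S_READ, &fbp);
	if (error != 0)
		return (error);
	/* ... look at fbp->fb_count bytes starting at fbp->fb_addr ... */
	fbrelse(fbp, S_READ);
	return (0);
}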
Example #3
/*
 * Zero out zbytes worth of data. Caller should be aware that this
 * routine may enter back into the fs layer (xxx_getpage). Locks
 * that the xxx_getpage routine may need should not be held while
 * calling this.
 */
void
pvn_vpzero(struct vnode *vp, u_offset_t vplen, size_t zbytes)
{
	caddr_t addr;

	ASSERT(vp->v_type != VCHR);

	if (vp->v_pages == NULL)
		return;

	/*
	 * zbytes may be zero but there still may be some portion of
	 * a page which needs clearing (since zbytes is a function
	 * of filesystem block size, not pagesize.)
	 */
	if (zbytes == 0 && (PAGESIZE - (vplen & PAGEOFFSET)) == 0)
		return;

	/*
	 * We get the last page and handle the partial
	 * zeroing via kernel mappings.  This will make the page
	 * dirty so that we know that when this page is written
	 * back, the zeroed information will go out with it.  If
	 * the page is not currently in memory, then the kzero
	 * operation will cause it to be brought in.  We use kzero
	 * instead of bzero so that if the page cannot be read in
	 * for any reason, the system will not panic.  We need to
	 * zero out at least the zbytes given by the filesystem, but
	 * we might also have to do more to get the entire last page.
	 */

	if ((zbytes + (vplen & MAXBOFFSET)) > MAXBSIZE)
		panic("pvn_vptrunc zbytes");
	addr = segmap_getmapflt(segkmap, vp, vplen,
	    MAX(zbytes, PAGESIZE - (vplen & PAGEOFFSET)), 1, S_WRITE);
	(void) kzero(addr + (vplen & MAXBOFFSET),
	    MAX(zbytes, PAGESIZE - (vplen & PAGEOFFSET)));
	(void) segmap_release(segkmap, addr, SM_WRITE | SM_ASYNC);
}
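A typical caller is a truncation path that must clear the tail of the last filesystem block so stale data never reaches disk. The sketch below is illustrative only: the helper name and parameters are assumed, and it presumes the block size bsize is a power of two so P2ROUNDUP() from <sys/sysmacros.h> applies.

/*
 * Hypothetical caller, for illustration only: after truncating a file to
 * newsize, zero from the new EOF to the end of its filesystem block.
 */
static void
example_zero_tail(struct vnode *vp, u_offset_t newsize, size_t bsize)
{
	size_t zbytes;

	/* bytes between the new EOF and the next fs block boundary */
	zbytes = (size_t)(P2ROUNDUP(newsize, (u_offset_t)bsize) - newsize);
	pvn_vpzero(vp, newsize, zbytes);
}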
Example #4
/*
 * fscache_info_sync
 * Writes out the fs_info data if necessary.
 */
static int
fscache_info_sync(fscache_t *fscp)
{
	caddr_t addr;
	int error = 0;

	mutex_enter(&fscp->fs_fslock);

	if (fscp->fs_cache->c_flags & CACHE_NOFILL) {
		error = EROFS;
		goto out;
	}

	/* if the data is dirty and we have the file vnode */
	if ((fscp->fs_flags & CFS_FS_DIRTYINFO) && fscp->fs_infovp) {
		addr = segmap_getmapflt(segkmap, fscp->fs_infovp, 0,
					MAXBSIZE, 1, S_WRITE);

		/*LINTED alignment okay*/
		*(cachefs_fsinfo_t *)addr = fscp->fs_info;
		error = segmap_release(segkmap, addr, SM_WRITE);

		if (error) {
			cmn_err(CE_WARN,
			    "cachefs: Cannot write to info file.");
		} else {
			fscp->fs_flags &= ~CFS_FS_DIRTYINFO;
		}
	}

out:

	mutex_exit(&fscp->fs_fslock);

	return (error);
}
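The dirty-info branch above is an instance of a common segmap write pattern: map the MAXBSIZE window, copy the structure into it, and let segmap_release() with SM_WRITE push it to the backing file. A minimal sketch of that pattern follows; the helper name is illustrative and it assumes the data fits within one MAXBSIZE window at offset 0.

/*
 * Minimal sketch of the write path used above (illustrative helper):
 * copy len bytes from buf to the start of vp and write them out now.
 */
static int
example_write_header(vnode_t *vp, const void *buf, size_t len)
{
	caddr_t addr;

	ASSERT(len <= MAXBSIZE);
	addr = segmap_getmapflt(segkmap, vp, (offset_t)0, MAXBSIZE, 1, S_WRITE);
	bcopy(buf, addr, len);
	return (segmap_release(segkmap, addr, SM_WRITE));
}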
Example #5
/*ARGSUSED*/
static int
bootfs_read(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
    caller_context_t *ct)
{
	int err;
	ssize_t sres = uiop->uio_resid;
	bootfs_node_t *bnp = vp->v_data;

	if (vp->v_type == VDIR)
		return (EISDIR);

	if (vp->v_type != VREG)
		return (EINVAL);

	if (uiop->uio_loffset < 0)
		return (EINVAL);

	if (uiop->uio_loffset >= bnp->bvn_size)
		return (0);

	err = 0;
	while (uiop->uio_resid != 0) {
		caddr_t base;
		long offset, frem;
		ulong_t poff, segoff;
		size_t bytes;
		int relerr;

		offset = uiop->uio_loffset;
		poff = offset & PAGEOFFSET;
		bytes = MIN(PAGESIZE - poff, uiop->uio_resid);

		frem = bnp->bvn_size - offset;
		if (frem <= 0) {
			err = 0;
			break;
		}

		/* Don't read past EOF */
		bytes = MIN(bytes, frem);

		/*
		 * Segmaps are likely larger than our page size, so make sure we
		 * have the proper offset into the resulting segmap data.
		 */
		segoff = (offset & PAGEMASK) & MAXBOFFSET;

		base = segmap_getmapflt(segkmap, vp, offset & MAXBMASK, bytes,
		    1, S_READ);

		err = uiomove(base + segoff + poff, bytes, UIO_READ, uiop);
		relerr = segmap_release(segkmap, base, 0);

		if (err == 0)
			err = relerr;

		if (err != 0)
			break;
	}

	/* Even if we had an error in a partial read, return success */
	if (uiop->uio_resid < sres)
		err = 0;

	gethrestime(&bnp->bvn_attr.va_atime);

	return (err);
}
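In the loop above, segoff and poff together recover the byte offset of uio_loffset within its MAXBSIZE segmap window (segoff + poff == offset & MAXBOFFSET). The sketch below is illustrative only and collapses that arithmetic into one assumed helper; the caller must keep the window base for segmap_release() and must not let the range cross a MAXBSIZE boundary.

/*
 * Illustrative helper: map the MAXBSIZE window containing off for reading
 * and return the kernel address corresponding to off itself.  The window
 * base is returned through basep for the later segmap_release() call.
 */
static caddr_t
example_map_read(vnode_t *vp, u_offset_t off, size_t len, caddr_t *basep)
{
	ulong_t within = (ulong_t)(off & MAXBOFFSET);

	*basep = segmap_getmapflt(segkmap, vp, off & (offset_t)MAXBMASK,
	    len, 1, S_READ);
	return (*basep + within);
}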
Example #6
/*
 * Tries to find the fscache directory indicated by fsid.
 */
int
fscdir_find(cachefscache_t *cachep, ino64_t fsid, fscache_t *fscp)
{
	int error;
	vnode_t *infovp = NULL;
	vnode_t *fscdirvp = NULL;
	vnode_t *attrvp = NULL;
	char dirname[CFS_FRONTFILE_NAME_SIZE];
	cfs_cid_t cid;
	cachefs_fsinfo_t fsinfo;
	caddr_t addr;

	ASSERT(MUTEX_HELD(&cachep->c_fslistlock));
	ASSERT(fscp->fs_infovp == NULL);
	ASSERT(fscp->fs_fscdirvp == NULL);
	ASSERT(fscp->fs_fsattrdir == NULL);

	/* convert the fsid value to the name of the directory */
	cid.cid_flags = 0;
	cid.cid_fileno = fsid;
	make_ascii_name(&cid, dirname);

	/* try to find the directory */
	error = VOP_LOOKUP(cachep->c_dirvp, dirname, &fscdirvp, NULL,
			0, NULL, kcred, NULL, NULL, NULL);
	if (error)
		goto out;

	/* this better be a directory or we are hosed */
	if (fscdirvp->v_type != VDIR) {
		cmn_err(CE_WARN, "cachefs: fscdir_find_a: cache corruption"
			" run fsck, %s", dirname);
		error = ENOTDIR;
		goto out;
	}

	/* try to find the info file */
	error = VOP_LOOKUP(fscdirvp, CACHEFS_FSINFO, &infovp,
	    NULL, 0, NULL, kcred, NULL, NULL, NULL);
	if (error) {
		cmn_err(CE_WARN, "cachefs: fscdir_find_b: cache corruption"
			" run fsck, %s", dirname);
		goto out;
	}

	/* read in info struct */
	addr = segmap_getmapflt(segkmap, infovp, (offset_t)0,
				MAXBSIZE, 1, S_READ);

	/*LINTED alignment okay*/
	fsinfo = *(cachefs_fsinfo_t *)addr;
	error = segmap_release(segkmap, addr, 0);
	if (error) {
		cmn_err(CE_WARN, "cachefs: fscdir_find_c: cache corruption"
			" run fsck, %s", dirname);
		goto out;
	}

	/* try to find the attrcache directory */
	error = VOP_LOOKUP(fscdirvp, ATTRCACHE_NAME,
	    &attrvp, NULL, 0, NULL, kcred, NULL, NULL, NULL);
	if (error) {
		cmn_err(CE_WARN, "cachefs: fscdir_find_d: cache corruption"
			" run fsck, %s", dirname);
		goto out;
	}

	mutex_enter(&fscp->fs_fslock);
	fscp->fs_info = fsinfo;
	fscp->fs_cfsid = fsid;
	fscp->fs_fscdirvp = fscdirvp;
	fscp->fs_fsattrdir = attrvp;
	fscp->fs_infovp = infovp;
	mutex_exit(&fscp->fs_fslock);

out:
	if (error) {
		if (infovp)
			VN_RELE(infovp);
		if (fscdirvp)
			VN_RELE(fscdirvp);
	}
	return (error);
}