Example #1
0
/*ARGSUSED*/
int
spec_sync(struct vfs *vfsp,
	short	flag,
	struct cred *cr)
{
	struct snode *dirty_list;
	struct snode **bucket, *snp, *next_snp;
	struct vnode *vnp;

	/* Only one spec_sync() runs at a time; skip this round if busy. */
	if (mutex_tryenter(&spec_syncbusy) == 0)
		return (0);

	/* Attribute-only syncs are a no-op for special files. */
	if (flag & SYNC_ATTR) {
		mutex_exit(&spec_syncbusy);
		return (0);
	}

	/*
	 * Walk every hash chain of the snode table and gather, on a
	 * private list threaded through s_list, each block-special
	 * snode with cached pages.  A hold is taken on each vnode so
	 * it cannot go away before the VOP_PUTPAGE pass below.
	 */
	mutex_enter(&stable_lock);
	dirty_list = NULL;
	for (bucket = stable; bucket < &stable[STABLESIZE]; bucket++) {
		for (snp = *bucket; snp != NULL; snp = snp->s_next) {
			vnp = STOV(snp);
			/* Never sync part of a virtual swap device. */
			if (IS_SWAPVP(vnp))
				continue;
			if (vnp->v_type != VBLK || !vn_has_cached_data(vnp))
				continue;
			VN_HOLD(vnp);
			snp->s_list = dirty_list;
			dirty_list = snp;
		}
	}
	mutex_exit(&stable_lock);

	/* Start asynchronous writeback for everything collected above. */
	for (snp = dirty_list; snp != NULL; snp = next_snp) {
		next_snp = snp->s_list;
		vnp = STOV(snp);
		(void) VOP_PUTPAGE(vnp, (offset_t)0, (uint_t)0, B_ASYNC, cr);
		VN_RELE(vnp);		/* drop the hold taken above */
	}
	mutex_exit(&spec_syncbusy);
	return (0);
}
Example #2
0
/*
 * Handle pages for this vnode on either side of the page "pp"
 * which has been locked by the caller.  This routine will also
 * do klustering in the range [vp_off, vp_off + vp_len] up
 * until a page which is not found.  The offset and length
 * of pages included is returned in "*offp" and "*lenp".
 *
 * Returns a list of dirty locked pages all ready to be
 * written back.
 */
page_t *
pvn_write_kluster(
    struct vnode *vp,
    page_t *pp,
    u_offset_t *offp,		/* return values */
    size_t *lenp,			/* return values */
    u_offset_t vp_off,
    size_t vp_len,
    int flags)
{
    u_offset_t off;
    page_t *dirty;
    size_t deltab, deltaf;
    se_t se;
    u_offset_t vp_end;

    off = pp->p_offset;

    /*
     * Klustering should not be done if we are invalidating
     * pages since we could destroy pages that belong to
     * some other process if this is a swap vnode.
     * In that case return just the caller's page.
     */
    if (pvn_write_noklust || ((flags & B_INVAL) && IS_SWAPVP(vp))) {
        *offp = off;
        *lenp = PAGESIZE;
        return (pp);
    }

    /* Free/invalidate needs exclusive page locks; otherwise shared. */
    if (flags & (B_FREE | B_INVAL))
        se = SE_EXCL;
    else
        se = SE_SHARED;

    dirty = pp;
    /*
     * Scan backwards looking for pages to kluster by incrementing
     * "deltab" and comparing "off" with "vp_off + deltab" to
     * avoid "signed" versus "unsigned" conversion problems.
     * Each scan stops at the first page that is missing or not
     * dirty (pvn_getdirty() returning 0).
     */
    for (deltab = PAGESIZE; off >= vp_off + deltab; deltab += PAGESIZE) {
        pp = page_lookup_nowait(vp, off - deltab, se);
        if (pp == NULL)
            break;		/* page not found */
        if (pvn_getdirty(pp, flags | B_DELWRI) == 0)
            break;
        page_add(&dirty, pp);
    }
    deltab -= PAGESIZE;	/* back off to the last page actually klustered */

    vp_end = vp_off + vp_len;
    /*
     * now scan forwards looking for pages to kluster
     *
     * NOTE(review): the forward loop advances "dirty" after each
     * page_add() while the backward loop does not; this appears to
     * keep the circular list ordered by ascending offset — confirm
     * against the page_add() insertion semantics before changing.
     */
    for (deltaf = PAGESIZE; off + deltaf < vp_end; deltaf += PAGESIZE) {
        pp = page_lookup_nowait(vp, off + deltaf, se);
        if (pp == NULL)
            break;		/* page not found */
        if (pvn_getdirty(pp, flags | B_DELWRI) == 0)
            break;
        page_add(&dirty, pp);
        dirty = dirty->p_next;
    }

    *offp = off - deltab;	/* start of the klustered range */
    *lenp = deltab + deltaf;	/* total length in bytes */
    return (dirty);
}
Example #3
0
/*
 * Verify that the information in the configuration file regarding the
 * location for the statefile is still valid, depending on cf_type.
 * for CFT_UFS, cf_fs must still be a mounted filesystem, it must be
 *	mounted on the same device as when pmconfig was last run,
 *	and the translation of that device to a node in the prom's
 *	device tree must be the same as when pmconfig was last run.
 * for CFT_SPEC and CFT_ZVOL, cf_path must be the path to a block
 *      special file, it must have no file system mounted on it,
 *	and the translation of that device to a node in the prom's
 *	device tree must be the same as when pmconfig was last run.
 *
 * Returns 0 when the configuration is still valid, otherwise an
 * errno value (ENAMETOOLONG, EINVAL, ENOTSUP, ENXIO, or the error
 * from a failed lookup/translation).
 */
static int
cpr_verify_statefile_path(void)
{
	struct cprconfig *cf = &cprconfig;
	static const char long_name[] = "Statefile pathname is too long.\n";
	static const char lookup_fmt[] = "Lookup failed for "
	    "cpr statefile device %s.\n";
	static const char path_chg_fmt[] = "Device path for statefile "
	    "has changed from %s to %s.\t%s\n";
	static const char rerun[] = "Please rerun pmconfig(1m).";
	struct vfs *vfsp = NULL, *vfsp_save = rootvfs;
	ufsvfs_t *ufsvfsp = (ufsvfs_t *)rootvfs->vfs_data;
	ufsvfs_t *ufsvfsp_save = ufsvfsp;
	int error;
	struct vnode *vp;
	char *slash, *tail, *longest;
	char *errstr;
	int found = 0;
	union {
		char un_devpath[OBP_MAXPATHLEN];
		char un_sfpath[MAXNAMELEN];
	} un;
#define	devpath	un.un_devpath
#define	sfpath	un.un_sfpath

	ASSERT(cprconfig_loaded);
	/*
	 * We need not worry about locking or the timing of releasing
	 * the vnode, since we are single-threaded now.
	 */

	switch (cf->cf_type) {
	case CFT_SPEC:
		/* The prom translation of the device must be unchanged. */
		error = i_devname_to_promname(cf->cf_devfs, devpath,
		    OBP_MAXPATHLEN);
		if (error || strcmp(devpath, cf->cf_dev_prom)) {
			cpr_err(CE_CONT, path_chg_fmt,
			    cf->cf_dev_prom, devpath, rerun);
			return (error);
		}
		/*FALLTHROUGH*/
	case CFT_ZVOL:
		if (strlen(cf->cf_path) > sizeof (sfpath)) {
			cpr_err(CE_CONT, long_name);
			return (ENAMETOOLONG);
		}
		if ((error = lookupname(cf->cf_devfs,
		    UIO_SYSSPACE, FOLLOW, NULLVPP, &vp)) != 0) {
			cpr_err(CE_CONT, lookup_fmt, cf->cf_devfs);
			return (error);
		}
		/*
		 * The statefile device must be a block device with no
		 * file system mounted on it and not in use as swap.
		 */
		if (vp->v_type != VBLK)
			errstr = "statefile must be a block device";
		else if (vfs_devismounted(vp->v_rdev))
			errstr = "statefile device must not "
			    "have a file system mounted on it";
		else if (IS_SWAPVP(vp))
			errstr = "statefile device must not "
			    "be configured as swap file";
		else
			errstr = NULL;

		VN_RELE(vp);
		if (errstr) {
			cpr_err(CE_CONT, "%s.\n", errstr);
			return (ENOTSUP);
		}

		return (error);
	case CFT_UFS:
		break;		/* don't indent all the original code */
	default:
		cpr_err(CE_PANIC, "invalid cf_type");
	}

	/*
	 * The original code for UFS statefile
	 */
	if (strlen(cf->cf_fs) + strlen(cf->cf_path) + 2 > sizeof (sfpath)) {
		cpr_err(CE_CONT, long_name);
		return (ENAMETOOLONG);
	}

	bzero(sfpath, sizeof (sfpath));
	(void) strcpy(sfpath, cpr_cprconfig_to_path());

	if (*sfpath != '/') {
		cpr_err(CE_CONT, "Statefile pathname %s "
		    "must begin with a /\n", sfpath);
		return (EINVAL);
	}

	/*
	 * Find the longest prefix of the statefile pathname which
	 * is the mountpoint of a filesystem.  This string must
	 * match the cf_fs field we read from the config file.  Other-
	 * wise the user has changed things without running pmconfig.
	 */
	tail = longest = sfpath + 1;	/* pt beyond the leading "/" */
	while ((slash = strchr(tail, '/')) != NULL) {
		*slash = '\0';	  /* temporarily terminate the string */
		if ((error = lookupname(sfpath,
		    UIO_SYSSPACE, FOLLOW, NULLVPP, &vp)) != 0) {
			/*
			 * A failed lookupname() takes no hold on vp,
			 * so there is nothing to release here; just
			 * restore the path separator and bail out.
			 */
			*slash = '/';
			cpr_err(CE_CONT, "A directory in the "
			    "statefile path %s was not found.\n", sfpath);

			return (error);
		}

		/* Is the current prefix the root of some mounted ufs? */
		vfs_list_read_lock();
		vfsp = rootvfs;
		do {
			ufsvfsp = (struct ufsvfs *)vfsp->vfs_data;
			if (ufsvfsp != NULL && ufsvfsp->vfs_root == vp) {
				found = 1;
				break;
			}
			vfsp = vfsp->vfs_next;
		} while (vfsp != rootvfs);
		vfs_list_unlock();

		/*
		 * If we have found a filesystem mounted on the current
		 * path prefix, remember the end of the string in
		 * "longest".  If it happens to be the exact fs
		 * saved in the configuration file, save the current
		 * ufsvfsp so we can make additional checks further down.
		 */
		if (found) {
			longest = slash;
			if (strcmp(cf->cf_fs, sfpath) == 0) {
				ufsvfsp_save = ufsvfsp;
				vfsp_save = vfsp;
			}
			found = 0;
		}

		VN_RELE(vp);
		*slash = '/';
		tail = slash + 1;
	}
	*longest = '\0';
	if (cpr_is_ufs(vfsp_save) == 0 || strcmp(cf->cf_fs, sfpath)) {
		cpr_err(CE_CONT, "Filesystem containing "
		    "the statefile when pmconfig was run (%s) has "
		    "changed to %s. %s\n", cf->cf_fs, sfpath, rerun);
		return (EINVAL);
	}

	if ((error = lookupname(cf->cf_devfs,
	    UIO_SYSSPACE, FOLLOW, NULLVPP, &vp)) != 0) {
		cpr_err(CE_CONT, lookup_fmt, cf->cf_devfs);
		return (error);
	}

	/* The fs must still be mounted from the recorded device. */
	if (ufsvfsp_save->vfs_devvp->v_rdev != vp->v_rdev) {
		cpr_err(CE_CONT, "Filesystem containing "
		    "statefile no longer mounted on device %s. "
		    "See power.conf(4).", cf->cf_devfs);
		VN_RELE(vp);
		return (ENXIO);
	}
	VN_RELE(vp);

	/* Finally, the prom translation must also be unchanged. */
	error = i_devname_to_promname(cf->cf_devfs, devpath, OBP_MAXPATHLEN);
	if (error || strcmp(devpath, cf->cf_dev_prom)) {
		cpr_err(CE_CONT, path_chg_fmt,
		    cf->cf_dev_prom, devpath, rerun);
		return (error);
	}

	return (0);
}
/*
 * Flush dirty pages of all vnodes in the given vfs, or in every
 * mounted vfs when vfsp is NULL.  Used by nfs_sync and nfs_unmount.
 */
void
r4flush(struct vfs *vfsp, cred_t *cr)
{
	int hashidx;
	rnode4_t *rp;
	vnode_t *vp, **held;
	long slots, nheld;

	/* Nothing to do if no rnodes have been created yet. */
	slots = rnode4_new;
	if (slots == 0)
		return;

	/*
	 * Reserve one slot per currently active rnode, on the
	 * supposition that all of them may need flushing.
	 */
	held = kmem_alloc(slots * sizeof (*held), KM_SLEEP);
	nheld = 0;

	/*
	 * Scan every hash bucket for rnodes with cached pages,
	 * holding each candidate vnode and recording it for the
	 * flush pass below.
	 */
	for (hashidx = 0; hashidx < rtable4size; hashidx++) {
		rw_enter(&rtable4[hashidx].r_lock, RW_READER);
		for (rp = rtable4[hashidx].r_hashf;
		    rp != (rnode4_t *)(&rtable4[hashidx]);
		    rp = rp->r_hashf) {
			vp = RTOV4(rp);
			/*
			 * Swap-backed vnodes and read-only mounts
			 * never need flushing.
			 */
			if (IS_SWAPVP(vp) || vn_is_readonly(vp))
				continue;
			/*
			 * Skip vnodes outside the target vfs (when one
			 * was given), those without pages, and those
			 * that are neither dirty nor mmap'd.
			 */
			if ((vfsp != NULL && vp->v_vfsp != vfsp) ||
			    !nfs4_has_pages(vp))
				continue;
			if (!(rp->r_flags & R4DIRTY) && rp->r_mapcnt <= 0)
				continue;
			VN_HOLD(vp);
			held[nheld++] = vp;
			/* Bail out once every reserved slot is used. */
			if (nheld == slots) {
				rw_exit(&rtable4[hashidx].r_lock);
				goto toomany;
			}
		}
		rw_exit(&rtable4[hashidx].r_lock);
	}
toomany:

	/*
	 * Start asynchronous writeback on each collected vnode and
	 * drop the hold taken during the scan.
	 */
	while (nheld-- > 0) {
		vp = held[nheld];
		(void) VOP_PUTPAGE(vp, (u_offset_t)0, 0, B_ASYNC, cr, NULL);
		VN_RELE(vp);
	}

	/* Release the slot array. */
	kmem_free(held, slots * sizeof (*held));
}