Example #1
ACPI_STATUS
#if ACPI_DEBUG_CACHE
AcpiOsReleaseObject(ACPI_CACHE_T *Cache, void *Object, const char *func, int line)
#else
AcpiOsReleaseObject(ACPI_CACHE_T *Cache, void *Object)
#endif
{
	struct acpiobjhead *head = (void *)((char *)Object - OBJHEADSIZE);

#if ACPI_DEBUG_CACHE
	if (head->cache != Cache) {
		kprintf("%s: object %p belongs to %p, not %p\n",
			__func__, Object, head->cache, Cache);
	}
#endif
	if (head->state != TRACK_ALLOCATED) {
		if (head->state == TRACK_FREED) {
#if ACPI_DEBUG_CACHE
			kprintf("%s: Double Free %p, %s:%d, first %s:%d\n",
				__func__, Object, func, line, head->func,
				head->line);
#else
			kprintf("%s: Double Free %p\n", __func__, Object);
#endif
		} else
			kprintf("AcpiOsReleaseObject: Bad object %p (%08x)\n",
				Object, head->state);
		return AE_OK;
	}
	head->state = TRACK_FREED;
#if ACPI_DEBUG_CACHE
	head->func = func;
	head->line = line;
#endif
	objcache_put(Cache->cache, head);
	return AE_OK;
}
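The release path above relies on a small tracking header placed immediately in front of every object handed out by the cache; the excerpt does not show its declaration. Below is a minimal sketch reconstructed only from the fields the code touches (state, cache, func, line) and from OBJHEADSIZE; the layout and the magic values are assumptions, not the actual DragonFly definition.

/*
 * Assumed layout of the per-object tracking header.  The magic values
 * are placeholders; only the field names and types follow from the
 * usage in AcpiOsReleaseObject() above.
 */
#define TRACK_ALLOCATED	0x44414c4c	/* placeholder magic */
#define TRACK_FREED	0x46524545	/* placeholder magic */

struct acpiobjhead {
#if ACPI_DEBUG_CACHE
	ACPI_CACHE_T	*cache;		/* owning cache, for sanity checks */
	const char	*func;		/* call site of last alloc/free */
	int		line;
#endif
	int		state;		/* TRACK_ALLOCATED or TRACK_FREED */
};

#define OBJHEADSIZE	sizeof(struct acpiobjhead)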
Example #2
/*
 * Allocates a new directory entry for the node 'node' with the name 'name'.
 * The new directory entry is returned in *de.
 *
 * The link count of node is increased by one to reflect the new object
 * referencing it.
 *
 * Returns zero on success or an appropriate error code on failure.
 */
int
tmpfs_alloc_dirent(struct tmpfs_mount *tmp, struct tmpfs_node *node,
		   const char *name, uint16_t len, struct tmpfs_dirent **de)
{
	struct tmpfs_dirent *nde;

	nde = objcache_get(tmp->tm_dirent_pool, M_WAITOK);
	nde->td_name = kmalloc(len + 1, tmp->tm_name_zone, M_WAITOK | M_NULLOK);
	if (nde->td_name == NULL) {
		objcache_put(tmp->tm_dirent_pool, nde);
		*de = NULL;
		return (ENOSPC);
	}
	nde->td_namelen = len;
	bcopy(name, nde->td_name, len);
	nde->td_name[len] = '\0';

	nde->td_node = node;

	TMPFS_NODE_LOCK(node);
	++node->tn_links;
	TMPFS_NODE_UNLOCK(node);

	*de = nde;

	return 0;
}
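This allocator pairs with tmpfs_free_dirent(), shown in Example #8 below, which drops the link count again and returns the entry to the pool. A hypothetical caller sketch (the name 'file' and the wrapper function are made up for illustration):

/*
 * Hypothetical caller: allocate a directory entry for 'node' and release
 * it again on a later error path.  Normally the entry would be linked
 * into the parent directory between these two calls.
 */
static int
example_alloc_and_drop(struct tmpfs_mount *tmp, struct tmpfs_node *node)
{
	struct tmpfs_dirent *de;
	int error;

	error = tmpfs_alloc_dirent(tmp, node, "file", 4, &de);
	if (error)
		return (error);
	/* ... */
	tmpfs_free_dirent(tmp, de);	/* drops node->tn_links again */
	return (0);
}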
Example #3
/*
 * Release reference to park structure.
 */
static void
puffs_msgpark_release1(struct puffs_msgpark *park, int howmany)
{
	struct puffs_req *preq = park->park_preq;
	struct puffs_req *creq = park->park_creq;
	int refcnt;

	KKASSERT(lockstatus(&park->park_mtx, curthread) == LK_EXCLUSIVE);
	refcnt = park->park_refcount -= howmany;
	lockmgr(&park->park_mtx, LK_RELEASE);

	KKASSERT(refcnt >= 0);

	if (refcnt == 0) {
		if (preq)
			kfree(preq, M_PUFFS);
#if 1
		if (creq)
			kfree(creq, M_PUFFS);
#endif
		objcache_put(parkpc, park);

#ifdef PUFFSDEBUG
		totalpark--;
#endif
	}
}
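The '1' suffix marks this as the variant that can drop several references at once; the common single-reference release is presumably just a thin wrapper around it. A minimal sketch, assuming such a wrapper exists (it may equally well be a macro in the real source):

/* Hypothetical convenience wrapper: drop exactly one reference. */
static void
puffs_msgpark_release(struct puffs_msgpark *park)
{
	puffs_msgpark_release1(park, 1);
}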
Example #4
int main(int argc, char *argv[])
{
	static char k1[] = { 'a' };
	static char k2[] = { 'a', 'a' };
	static char k3[] = { 'a', '\0', 'a' };
	struct objcache cache;
	struct objcache_entry *ep1, *ep2, *ep3;
	int rc;

	g_thread_init(NULL);
	rc = objcache_init(&cache);
	OK(rc==0);

	ep1 = objcache_get(&cache, k1, sizeof(k1));
	OK(ep1 != NULL);

	ep2 = objcache_get(&cache, k2, sizeof(k2));
	OK(ep2 != NULL);

	ep3 = objcache_get(&cache, k3, sizeof(k3));
	OK(ep3 != NULL);

	rc = objcache_count(&cache);
	OK(rc == 3);

	OK(ep1->ref == 1);	/* no collisions, else improve hash */

	objcache_put(&cache, ep1);
	objcache_put(&cache, ep2);
	objcache_put(&cache, ep3);

	ep2 = objcache_get(&cache, k2, sizeof(k2));
	OK(ep2 != NULL);
	OK(ep2->ref == 1);	/* new */
	objcache_put(&cache, ep2);

	rc = objcache_count(&cache);
	OK(rc == 0);

	objcache_fini(&cache);
	return 0;
}
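The test relies on an OK() assertion helper that is not part of this excerpt. A minimal sketch, assuming it simply reports the failed expression and aborts:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical assertion helper assumed by the test above. */
#define OK(expr)							\
	do {								\
		if (!(expr)) {						\
			fprintf(stderr, "FAILED: %s at %s:%d\n",	\
				#expr, __FILE__, __LINE__);		\
			exit(1);					\
		}							\
	} while (0)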
Example #5
/*
 * Read the contents of a symlink, allocate a path buffer out of the
 * namei_oc and initialize the supplied nlcomponent with the result.
 *
 * If an error occurs no buffer will be allocated or returned in the nlc.
 */
int
nreadsymlink(struct nlookupdata *nd, struct nchandle *nch, 
		struct nlcomponent *nlc)
{
    struct vnode *vp;
    struct iovec aiov;
    struct uio auio;
    int linklen;
    int error;
    char *cp;

    nlc->nlc_nameptr = NULL;
    nlc->nlc_namelen = 0;
    if (nch->ncp->nc_vp == NULL)
	return(ENOENT);
    if ((error = cache_vget(nch, nd->nl_cred, LK_SHARED, &vp)) != 0)
	return(error);
    cp = objcache_get(namei_oc, M_WAITOK);
    aiov.iov_base = cp;
    aiov.iov_len = MAXPATHLEN;
    auio.uio_iov = &aiov;
    auio.uio_iovcnt = 1;
    auio.uio_offset = 0;
    auio.uio_rw = UIO_READ;
    auio.uio_segflg = UIO_SYSSPACE;
    auio.uio_td = nd->nl_td;
    auio.uio_resid = MAXPATHLEN - 1;
    error = VOP_READLINK(vp, &auio, nd->nl_cred);
    if (error)
	goto fail;
    linklen = MAXPATHLEN - 1 - auio.uio_resid;
    if (varsym_enable) {
	linklen = varsymreplace(cp, linklen, MAXPATHLEN - 1);
	if (linklen < 0) {
	    error = ENAMETOOLONG;
	    goto fail;
	}
    }
    cp[linklen] = 0;
    nlc->nlc_nameptr = cp;
    nlc->nlc_namelen = linklen;
    vput(vp);
    return(0);
fail:
    objcache_put(namei_oc, cp);
    vput(vp);
    return(error);
}
Example #6
void cli_out_end(struct client *cli)
{
	if (!cli)
		return;

	if (cli->out_bo) {
		fs_obj_free(cli->out_bo);
		cli->out_bo = NULL;
	}
	if (cli->out_ce) {
		objcache_put(&chunkd_srv.actives, cli->out_ce);
		cli->out_ce = NULL;
	}

	free(cli->out_user);
	cli->out_user = NULL;
}
Example #7
/*
 * Cleanup a nlookupdata structure after we are through with it.  This may
 * be called on any nlookupdata structure initialized with nlookup_init().
 * Calling nlookup_done() is mandatory in all cases except where nlookup_init()
 * returns an error, even if as a consumer you believe you have taken all
 * dynamic elements out of the nlookupdata structure.
 */
void
nlookup_done(struct nlookupdata *nd)
{
    if (nd->nl_nch.ncp) {
	if (nd->nl_flags & NLC_NCPISLOCKED) {
	    nd->nl_flags &= ~NLC_NCPISLOCKED;
	    cache_unlock(&nd->nl_nch);
	}
	if (nd->nl_flags & NLC_NCDIR) {
		cache_drop_ncdir(&nd->nl_nch);
		nd->nl_flags &= ~NLC_NCDIR;
	} else {
		cache_drop(&nd->nl_nch);	/* NULL's out the nch */
	}
    }
    if (nd->nl_rootnch.ncp)
	cache_drop_and_cache(&nd->nl_rootnch);
    if (nd->nl_jailnch.ncp)
	cache_drop_and_cache(&nd->nl_jailnch);
    if ((nd->nl_flags & NLC_HASBUF) && nd->nl_path) {
	objcache_put(namei_oc, nd->nl_path);
	nd->nl_path = NULL;
    }
    if (nd->nl_cred) {
	if ((nd->nl_flags & NLC_BORROWCRED) == 0)
	    crfree(nd->nl_cred);
	nd->nl_cred = NULL;
	nd->nl_flags &= ~NLC_BORROWCRED;
    }
    if (nd->nl_open_vp) {
	if (nd->nl_flags & NLC_LOCKVP) {
		vn_unlock(nd->nl_open_vp);
		nd->nl_flags &= ~NLC_LOCKVP;
	}
	vn_close(nd->nl_open_vp, nd->nl_vp_fmode, NULL);
	nd->nl_open_vp = NULL;
    }
    if (nd->nl_dvp) {
	vrele(nd->nl_dvp);
	nd->nl_dvp = NULL;
    }
    nd->nl_flags = 0;	/* clear remaining flags (just clear everything) */
}
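As the comment spells out, nlookup_done() must be called whenever nlookup_init() succeeded, regardless of whether the lookup itself succeeded. A minimal sketch of the usual calling pattern, assuming the standard nlookup_init()/nlookup() entry points (the wrapper function itself is hypothetical):

/*
 * Sketch of a typical caller.  nlookup_done() releases nl_nch, the path
 * buffer, credentials, etc. whether or not nlookup() returned an error.
 */
static int
example_lookup(const char *path)
{
	struct nlookupdata nd;
	int error;

	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0) {
		error = nlookup(&nd);
		/* ... on success nd.nl_nch is locked and referenced ... */
		nlookup_done(&nd);
	}
	return (error);
}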
Example #8
/*
 * Frees a directory entry.  It is the caller's responsibility to destroy
 * the node referenced by it if needed.
 *
 * The link count of the referenced node is decreased by one to reflect the
 * removal of an object that referenced it.
 */
void
tmpfs_free_dirent(struct tmpfs_mount *tmp, struct tmpfs_dirent *de)
{
	struct tmpfs_node *node;

	node = de->td_node;

	TMPFS_NODE_LOCK(node);
	TMPFS_ASSERT_ELOCKED(node);
	KKASSERT(node->tn_links > 0);
	node->tn_links--;
	TMPFS_NODE_UNLOCK(node);

	kfree(de->td_name, tmp->tm_name_zone);
	de->td_namelen = 0;
	de->td_name = NULL;
	de->td_node = NULL;
	objcache_put(tmp->tm_dirent_pool, de);
}
Example #9
static int
hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
		 struct ucred *cred)
{
	struct hammer_mount_info info;
	hammer_mount_t hmp;
	hammer_volume_t rootvol;
	struct vnode *rootvp;
	struct vnode *devvp = NULL;
	const char *upath;	/* volume name in userspace */
	char *path;		/* volume name in system space */
	int error;
	int i;
	int master_id;
	char *next_volume_ptr = NULL;

	/*
	 * Accept hammer_mount_info.  mntpt is NULL for root mounts at boot.
	 */
	if (mntpt == NULL) {
		bzero(&info, sizeof(info));
		info.asof = 0;
		info.hflags = 0;
		info.nvolumes = 1;

		next_volume_ptr = mp->mnt_stat.f_mntfromname;

		/* Count number of volumes separated by ':' */
		for (char *p = next_volume_ptr; *p != '\0'; ++p) {
			if (*p == ':') {
				++info.nvolumes;
			}
		}

		mp->mnt_flag &= ~MNT_RDONLY; /* mount R/W */
	} else {
		if ((error = copyin(data, &info, sizeof(info))) != 0)
			return (error);
	}

	/*
	 * updating or new mount
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		hmp = (void *)mp->mnt_data;
		KKASSERT(hmp != NULL);
	} else {
		if (info.nvolumes <= 0 || info.nvolumes > HAMMER_MAX_VOLUMES)
			return (EINVAL);
		hmp = NULL;
	}

	/*
	 * master-id validation.  The master id may not be changed by a
	 * mount update.
	 */
	if (info.hflags & HMNT_MASTERID) {
		if (hmp && hmp->master_id != info.master_id) {
			kprintf("hammer: cannot change master id "
				"with mount update\n");
			return(EINVAL);
		}
		master_id = info.master_id;
		if (master_id < -1 || master_id >= HAMMER_MAX_MASTERS)
			return (EINVAL);
	} else {
		if (hmp)
			master_id = hmp->master_id;
		else
			master_id = 0;
	}

	/*
	 * Internal mount data structure
	 */
	if (hmp == NULL) {
		hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
		mp->mnt_data = (qaddr_t)hmp;
		hmp->mp = mp;
		/*TAILQ_INIT(&hmp->recycle_list);*/

		/*
		 * Make sure kmalloc type limits are set appropriately.
		 *
		 * Our inode kmalloc group is sized based on maxvnodes
		 * (controlled by the system, not us).
		 */
		kmalloc_create(&hmp->m_misc, "HAMMER-others");
		kmalloc_create(&hmp->m_inodes, "HAMMER-inodes");

		kmalloc_raise_limit(hmp->m_inodes, 0);	/* unlimited */

		hmp->root_btree_beg.localization = 0x00000000U;
		hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
		hmp->root_btree_beg.key = -0x8000000000000000LL;
		hmp->root_btree_beg.create_tid = 1;
		hmp->root_btree_beg.delete_tid = 1;
		hmp->root_btree_beg.rec_type = 0;
		hmp->root_btree_beg.obj_type = 0;

		hmp->root_btree_end.localization = 0xFFFFFFFFU;
		hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
		hmp->root_btree_end.delete_tid = 0;   /* special case */
		hmp->root_btree_end.rec_type = 0xFFFFU;
		hmp->root_btree_end.obj_type = 0;

		hmp->krate.freq = 1;	/* maximum reporting rate (hz) */
		hmp->krate.count = -16;	/* initial burst */

		hmp->sync_lock.refs = 1;
		hmp->free_lock.refs = 1;
		hmp->undo_lock.refs = 1;
		hmp->blkmap_lock.refs = 1;
		hmp->snapshot_lock.refs = 1;
		hmp->volume_lock.refs = 1;

		TAILQ_INIT(&hmp->delay_list);
		TAILQ_INIT(&hmp->flush_group_list);
		TAILQ_INIT(&hmp->objid_cache_list);
		TAILQ_INIT(&hmp->undo_lru_list);
		TAILQ_INIT(&hmp->reclaim_list);

		RB_INIT(&hmp->rb_dedup_crc_root);
		RB_INIT(&hmp->rb_dedup_off_root);	
		TAILQ_INIT(&hmp->dedup_lru_list);
	}
	hmp->hflags &= ~HMNT_USERFLAGS;
	hmp->hflags |= info.hflags & HMNT_USERFLAGS;

	hmp->master_id = master_id;

	if (info.asof) {
		mp->mnt_flag |= MNT_RDONLY;
		hmp->asof = info.asof;
	} else {
		hmp->asof = HAMMER_MAX_TID;
	}

	hmp->volume_to_remove = -1;

	/*
	 * Re-open read-write if originally read-only, or vice versa.
	 *
	 * When going from read-only to read-write execute the stage2
	 * recovery if it has not already been run.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		lwkt_gettoken(&hmp->fs_token);
		error = 0;
		if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
			kprintf("HAMMER read-only -> read-write\n");
			hmp->ronly = 0;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
			rootvol = hammer_get_root_volume(hmp, &error);
			if (rootvol) {
				hammer_recover_flush_buffers(hmp, rootvol, 1);
				error = hammer_recover_stage2(hmp, rootvol);
				bcopy(rootvol->ondisk->vol0_blockmap,
				      hmp->blockmap,
				      sizeof(hmp->blockmap));
				hammer_rel_volume(rootvol, 0);
			}
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			/* kernel clears MNT_RDONLY */
		} else if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			kprintf("HAMMER read-write -> read-only\n");
			hmp->ronly = 1;	/* messy */
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			hmp->ronly = 0;
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hmp->ronly = 1;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
		}
		lwkt_reltoken(&hmp->fs_token);
		return(error);
	}

	RB_INIT(&hmp->rb_vols_root);
	RB_INIT(&hmp->rb_inos_root);
	RB_INIT(&hmp->rb_redo_root);
	RB_INIT(&hmp->rb_nods_root);
	RB_INIT(&hmp->rb_undo_root);
	RB_INIT(&hmp->rb_resv_root);
	RB_INIT(&hmp->rb_bufs_root);
	RB_INIT(&hmp->rb_pfsm_root);

	hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);

	RB_INIT(&hmp->volu_root);
	RB_INIT(&hmp->undo_root);
	RB_INIT(&hmp->data_root);
	RB_INIT(&hmp->meta_root);
	RB_INIT(&hmp->lose_root);
	TAILQ_INIT(&hmp->iorun_list);

	lwkt_token_init(&hmp->fs_token, "hammerfs");
	lwkt_token_init(&hmp->io_token, "hammerio");

	lwkt_gettoken(&hmp->fs_token);

	/*
	 * Load volumes
	 */
	path = objcache_get(namei_oc, M_WAITOK);
	hmp->nvolumes = -1;
	for (i = 0; i < info.nvolumes; ++i) {
		if (mntpt == NULL) {
			/*
			 * Root mount.
			 */
			KKASSERT(next_volume_ptr != NULL);
			strcpy(path, "");
			if (*next_volume_ptr != '/') {
				/* relative path */
				strcpy(path, "/dev/");
			}
			int k;
			for (k = strlen(path); k < MAXPATHLEN-1; ++k) {
				if (*next_volume_ptr == '\0') {
					break;
				} else if (*next_volume_ptr == ':') {
					++next_volume_ptr;
					break;
				} else {
					path[k] = *next_volume_ptr;
					++next_volume_ptr;
				}
			}
			path[k] = '\0';

			error = 0;
			cdev_t dev = kgetdiskbyname(path);
			error = bdevvp(dev, &devvp);
			if (error) {
				kprintf("hammer_mountroot: can't find devvp\n");
			}
		} else {
			error = copyin(&info.volumes[i], &upath,
				       sizeof(char *));
			if (error == 0)
				error = copyinstr(upath, path,
						  MAXPATHLEN, NULL);
		}
		if (error == 0)
			error = hammer_install_volume(hmp, path, devvp);
		if (error)
			break;
	}
	objcache_put(namei_oc, path);

	/*
	 * Make sure we found a root volume
	 */
	if (error == 0 && hmp->rootvol == NULL) {
		kprintf("hammer_mount: No root volume found!\n");
		error = EINVAL;
	}

	/*
	 * Check that all required volumes are available
	 */
	if (error == 0 && hammer_mountcheck_volumes(hmp)) {
		kprintf("hammer_mount: Missing volumes, cannot mount!\n");
		error = EINVAL;
	}

	if (error) {
		/* called with fs_token held */
		hammer_free_hmp(mp);
		return (error);
	}

	/*
	 * No errors, setup enough of the mount point so we can lookup the
	 * root vnode.
	 */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_kern_flag |= MNTK_FSMID;
	mp->mnt_kern_flag |= MNTK_THR_SYNC;	/* new vsyncscan semantics */

	/*
	 * MPSAFE code.  Note that VOPs and VFSops which are not MPSAFE
	 * will acquire a per-mount token prior to entry and release it
	 * on return, so even if we do not specify it we no longer get
	 * the BGL regardless of how we are flagged.
	 */
	mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;
	/*MNTK_RD_MPSAFE | MNTK_GA_MPSAFE | MNTK_IN_MPSAFE;*/

	/* 
	 * note: f_iosize is used by vnode_pager_haspage() when constructing
	 * its VOP_BMAP call.
	 */
	mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_maxsymlinklen = 255;
	mp->mnt_flag |= MNT_LOCAL;

	vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);

	/*
	 * The root volume's ondisk pointer is only valid if we hold a
	 * reference to it.
	 */
	rootvol = hammer_get_root_volume(hmp, &error);
	if (error)
		goto failed;

	/*
	 * Perform any necessary UNDO operations.  The recovery code does
	 * call hammer_undo_lookup() so we have to pre-cache the blockmap,
	 * and then re-copy it again after recovery is complete.
	 *
	 * If this is a read-only mount the UNDO information is retained
	 * in memory in the form of dirty buffer cache buffers, and not
	 * written back to the media.
	 */
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));

	/*
	 * Check filesystem version
	 */
	hmp->version = rootvol->ondisk->vol_version;
	if (hmp->version < HAMMER_VOL_VERSION_MIN ||
	    hmp->version > HAMMER_VOL_VERSION_MAX) {
		kprintf("HAMMER: mount unsupported fs version %d\n",
			hmp->version);
		error = ERANGE;
		goto done;
	}

	/*
	 * The undo_rec_limit limits the size of flush groups to avoid
	 * blowing out the UNDO FIFO.  This calculation is typically in
	 * the tens of thousands and is designed primarily when small
	 * HAMMER filesystems are created.
	 */
	hmp->undo_rec_limit = hammer_undo_max(hmp) / 8192 + 100;
	if (hammer_debug_general & 0x0001)
		kprintf("HAMMER: undo_rec_limit %d\n", hmp->undo_rec_limit);

	/*
	 * NOTE: Recover stage1 not only handles meta-data recovery, it
	 * 	 also sets hmp->undo_seqno for HAMMER VERSION 4+ filesystems.
	 */
	error = hammer_recover_stage1(hmp, rootvol);
	if (error) {
		kprintf("Failed to recover HAMMER filesystem on mount\n");
		goto done;
	}

	/*
	 * Finish setup now that we have a good root volume.
	 *
	 * The top 16 bits of fsid.val[1] is a pfs id.
	 */
	ksnprintf(mp->mnt_stat.f_mntfromname,
		  sizeof(mp->mnt_stat.f_mntfromname), "%s",
		  rootvol->ondisk->vol_name);
	mp->mnt_stat.f_fsid.val[0] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
	mp->mnt_stat.f_fsid.val[1] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);
	mp->mnt_stat.f_fsid.val[1] &= 0x0000FFFF;

	mp->mnt_vstat.f_fsid_uuid = rootvol->ondisk->vol_fsid;
	mp->mnt_vstat.f_fsid = crc32(&mp->mnt_vstat.f_fsid_uuid,
				     sizeof(mp->mnt_vstat.f_fsid_uuid));

	/*
	 * Certain often-modified fields in the root volume are cached in
	 * the hammer_mount structure so we do not have to generate lots
	 * of little UNDO structures for them.
	 *
	 * Recopy after recovery.  This also has the side effect of
	 * setting our cached undo FIFO's first_offset, which serves to
	 * placemark the FIFO start for the NEXT flush cycle while the
	 * on-disk first_offset represents the LAST flush cycle.
	 */
	hmp->next_tid = rootvol->ondisk->vol0_next_tid;
	hmp->flush_tid1 = hmp->next_tid;
	hmp->flush_tid2 = hmp->next_tid;
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));
	hmp->copy_stat_freebigblocks = rootvol->ondisk->vol0_stat_freebigblocks;

	hammer_flusher_create(hmp);

	/*
	 * Locate the root directory using the root cluster's B-Tree as a
	 * starting point.  The root directory uses an obj_id of 1.
	 *
	 * FUTURE: Leave the root directory cached referenced but unlocked
	 * in hmp->rootvp (need to flush it on unmount).
	 */
	error = hammer_vfs_vget(mp, NULL, 1, &rootvp);
	if (error)
		goto done;
	vput(rootvp);
	/*vn_unlock(hmp->rootvp);*/
	if (hmp->ronly == 0)
		error = hammer_recover_stage2(hmp, rootvol);

	/*
	 * If the stage2 recovery fails be sure to clean out all cached
	 * vnodes before throwing away the mount structure or bad things
	 * will happen.
	 */
	if (error)
		vflush(mp, 0, 0);

done:
	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		/* New mount */

		/* Populate info for mount point (NULL pad)*/
		bzero(mp->mnt_stat.f_mntonname, MNAMELEN);
		size_t size;
		if (mntpt) {
			copyinstr(mntpt, mp->mnt_stat.f_mntonname,
							MNAMELEN -1, &size);
		} else { /* Root mount */
			mp->mnt_stat.f_mntonname[0] = '/';
		}
	}
	(void)VFS_STATFS(mp, &mp->mnt_stat, cred);
	hammer_rel_volume(rootvol, 0);
failed:
	/*
	 * Cleanup and return.
	 */
	if (error) {
		/* called with fs_token held */
		hammer_free_hmp(mp);
	} else {
		lwkt_reltoken(&hmp->fs_token);
	}
	return (error);
}
Example #10
/*
 * Retire an XOP.  Used by both the VOP frontend and by the XOP backend.
 */
void
hammer2_xop_retire(hammer2_xop_head_t *xop, uint32_t mask)
{
	hammer2_xop_group_t *xgrp;
	hammer2_chain_t *chain;
	int i;

	xgrp = xop->xgrp;

	/*
	 * Remove the frontend or remove a backend feeder.  When removing
	 * the frontend we must wakeup any backend feeders who are waiting
	 * for FIFO space.
	 *
	 * XXX optimize wakeup.
	 */
	KKASSERT(xop->run_mask & mask);
	if (atomic_fetchadd_int(&xop->run_mask, -mask) != mask) {
		if (mask == HAMMER2_XOPMASK_VOP)
			wakeup(xop);
		return;
	}

	/*
	 * Cleanup the collection cluster.
	 */
	for (i = 0; i < xop->cluster.nchains; ++i) {
		xop->cluster.array[i].flags = 0;
		chain = xop->cluster.array[i].chain;
		if (chain) {
			xop->cluster.array[i].chain = NULL;
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
		}
	}

	/*
	 * Cleanup the fifos, using chk_mask to optimize the loop.
	 */
	mask = xop->chk_mask;
	for (i = 0; mask && i < HAMMER2_MAXCLUSTER; ++i) {
		hammer2_xop_fifo_t *fifo = &xop->collect[i];
		while (fifo->ri != fifo->wi) {
			chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
			if (chain) {
				hammer2_chain_unlock(chain);
				hammer2_chain_drop(chain);
			}
			++fifo->ri;
			if (fifo->wi - fifo->ri < HAMMER2_XOPFIFO / 2)
				wakeup(xop);	/* XXX optimize */
		}
		mask &= ~(1U << i);
	}

	/*
	 * The inode is only held at this point, simply drop it.
	 */
	if (xop->ip) {
		hammer2_inode_drop(xop->ip);
		xop->ip = NULL;
	}
	if (xop->ip2) {
		hammer2_inode_drop(xop->ip2);
		xop->ip2 = NULL;
	}
	if (xop->ip3) {
		hammer2_inode_drop(xop->ip3);
		xop->ip3 = NULL;
	}
	if (xop->name) {
		kfree(xop->name, M_HAMMER2);
		xop->name = NULL;
		xop->name_len = 0;
	}
	if (xop->name2) {
		kfree(xop->name2, M_HAMMER2);
		xop->name2 = NULL;
		xop->name2_len = 0;
	}

	objcache_put(cache_xops, xop);
}
Example #11
int
nfs_nget(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp)
{
	struct nfsnode *np, *np2;
	struct nfsnodehashhead *nhpp;
	struct vnode *vp;
	int error;
	int lkflags;
	struct nfsmount *nmp;

	/*
	 * Calculate nfs mount point and figure out whether the rslock should
	 * be interruptible or not.
	 */
	nmp = VFSTONFS(mntp);
	if (nmp->nm_flag & NFSMNT_INT)
		lkflags = LK_PCATCH;
	else
		lkflags = 0;

	lwkt_gettoken(&nfsnhash_token);

retry:
	nhpp = NFSNOHASH(fnv_32_buf(fhp->fh_bytes, fhsize, FNV1_32_INIT));
loop:
	for (np = nhpp->lh_first; np; np = np->n_hash.le_next) {
		if (mntp != NFSTOV(np)->v_mount || np->n_fhsize != fhsize ||
		    bcmp((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize)) {
			continue;
		}
		vp = NFSTOV(np);
		if (vget(vp, LK_EXCLUSIVE))
			goto loop;
		for (np = nhpp->lh_first; np; np = np->n_hash.le_next) {
			if (mntp == NFSTOV(np)->v_mount &&
			    np->n_fhsize == fhsize &&
			    bcmp((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize) == 0
			) {
				break;
			}
		}
		if (np == NULL || NFSTOV(np) != vp) {
			vput(vp);
			goto loop;
		}
		*npp = np;
		lwkt_reltoken(&nfsnhash_token);
		return(0);
	}

	/*
	 * Obtain a lock to prevent a race condition if the getnewvnode()
	 * or MALLOC() below happens to block.
	 */
	if (lockmgr(&nfsnhash_lock, LK_EXCLUSIVE | LK_SLEEPFAIL))
		goto loop;

	/*
	 * Allocate before getnewvnode since doing so afterward
	 * might cause a bogus v_data pointer to get dereferenced
	 * elsewhere if objcache should block.
	 */
	np = objcache_get(nfsnode_objcache, M_WAITOK);
		
	error = getnewvnode(VT_NFS, mntp, &vp, 0, 0);
	if (error) {
		lockmgr(&nfsnhash_lock, LK_RELEASE);
		*npp = NULL;
		objcache_put(nfsnode_objcache, np);
		lwkt_reltoken(&nfsnhash_token);
		return (error);
	}

	/*
	 * Initialize most of (np).
	 */
	bzero(np, sizeof (*np));
	if (fhsize > NFS_SMALLFH) {
		MALLOC(np->n_fhp, nfsfh_t *, fhsize, M_NFSBIGFH, M_WAITOK);
	} else {
Example #12
/*
 * Do a generic nlookup.  Note that the passed nd is not nlookup_done()'d
 * on return, even if an error occurs.  If no error occurs or NLC_CREATE
 * is flagged and ENOENT is returned, then the returned nl_nch is always
 * referenced and locked exclusively.
 *
 * WARNING: For any general error other than ENOENT w/NLC_CREATE,
 *	    the resulting nl_nch may or may not be locked and if locked
 *	    might be locked either shared or exclusive.
 *
 * Intermediate directory elements, including the current directory, require
 * execute (search) permission.  nlookup does not examine the access 
 * permissions on the returned element.
 *
 * If NLC_CREATE is set the last directory must allow node creation,
 * and an error code of 0 will be returned for a non-existent
 * target (not ENOENT).
 *
 * If NLC_RENAME_DST is set the last directory must allow node deletion,
 * plus the sticky check is made, and an error code of 0 will be returned
 * for a non-existent target (not ENOENT).
 *
 * If NLC_DELETE is set the last directory must allow node deletion,
 * plus the sticky check is made.
 *
 * If NLC_REFDVP is set nd->nl_dvp will be set to the directory vnode
 * of the returned entry.  The vnode will be referenced, but not locked,
 * and will be released by nlookup_done() along with everything else.
 *
 * NOTE: As an optimization we attempt to obtain a shared namecache lock
 *	 on any intermediate elements.  On success, the returned element
 *	 is ALWAYS locked exclusively.
 */
int
nlookup(struct nlookupdata *nd)
{
    globaldata_t gd = mycpu;
    struct nlcomponent nlc;
    struct nchandle nch;
    struct nchandle par;
    struct nchandle nctmp;
    struct mount *mp;
    struct vnode *hvp;		/* hold to prevent recyclement */
    int wasdotordotdot;
    char *ptr;
    char *nptr;
    int error;
    int len;
    int dflags;
    int hit = 1;
    int saveflag = nd->nl_flags & ~NLC_NCDIR;
    boolean_t doretry = FALSE;
    boolean_t inretry = FALSE;

nlookup_start:
#ifdef KTRACE
    if (KTRPOINT(nd->nl_td, KTR_NAMEI))
	ktrnamei(nd->nl_td->td_lwp, nd->nl_path);
#endif
    bzero(&nlc, sizeof(nlc));

    /*
     * Setup for the loop.  The current working namecache element is
     * always at least referenced.  We lock it as required, but always
     * return a locked, resolved namecache entry.
     */
    nd->nl_loopcnt = 0;
    if (nd->nl_dvp) {
	vrele(nd->nl_dvp);
	nd->nl_dvp = NULL;
    }
    ptr = nd->nl_path;

    /*
     * Loop on the path components.  At the top of the loop nd->nl_nch
     * is ref'd and unlocked and represents our current position.
     */
    for (;;) {
	/*
	 * Make sure nl_nch is locked so we can access the vnode, resolution
	 * state, etc.
	 */
	if ((nd->nl_flags & NLC_NCPISLOCKED) == 0) {
		nd->nl_flags |= NLC_NCPISLOCKED;
		cache_lock_maybe_shared(&nd->nl_nch, wantsexcllock(nd, ptr));
	}

	/*
	 * Check if the root directory should replace the current
	 * directory.  This is done at the start of a translation
	 * or after a symbolic link has been found.  In other cases
	 * ptr will never be pointing at a '/'.
	 */
	if (*ptr == '/') {
	    do {
		++ptr;
	    } while (*ptr == '/');
	    cache_unlock(&nd->nl_nch);
	    cache_get_maybe_shared(&nd->nl_rootnch, &nch,
				   wantsexcllock(nd, ptr));
	    if (nd->nl_flags & NLC_NCDIR) {
		    cache_drop_ncdir(&nd->nl_nch);
		    nd->nl_flags &= ~NLC_NCDIR;
	    } else {
		    cache_drop(&nd->nl_nch);
	    }
	    nd->nl_nch = nch;		/* remains locked */

	    /*
	     * Fast-track termination.  There is no parent directory of
	     * the root in the same mount from the point of view of
	     * the caller so return EACCES if NLC_REFDVP is specified,
	     * and EEXIST if NLC_CREATE is also specified.
	     * e.g. 'rmdir /' or 'mkdir /' are not allowed.
	     */
	    if (*ptr == 0) {
		if (nd->nl_flags & NLC_REFDVP)
			error = (nd->nl_flags & NLC_CREATE) ? EEXIST : EACCES;
		else
			error = 0;
		break;
	    }
	    continue;
	}

	/*
	 * Pre-calculate next path component so we can check whether the
	 * current component directory is the last directory in the path
	 * or not.
	 */
	for (nptr = ptr; *nptr && *nptr != '/'; ++nptr)
		;

	/*
	 * Check directory search permissions (nd->nl_nch is locked & refd).
	 * This will load dflags to obtain directory-special permissions to
	 * be checked along with the last component.
	 *
	 * We only need to pass-in &dflags for the second-to-last component.
	 * Optimize by passing-in NULL for any prior components, which may
	 * allow the code to bypass the naccess() call.
	 */
	dflags = 0;
	if (*nptr == '/')
	    error = naccess(&nd->nl_nch, NLC_EXEC, nd->nl_cred, NULL);
	else
	    error = naccess(&nd->nl_nch, NLC_EXEC, nd->nl_cred, &dflags);
	if (error) {
	    if (keeperror(nd, error))
		    break;
	    error = 0;
	}

	/*
	 * Extract the next (or last) path component.  Path components are
	 * limited to 255 characters.
	 */
	nlc.nlc_nameptr = ptr;
	nlc.nlc_namelen = nptr - ptr;
	ptr = nptr;
	if (nlc.nlc_namelen >= 256) {
	    error = ENAMETOOLONG;
	    break;
	}

	/*
	 * Lookup the path component in the cache, creating an unresolved
	 * entry if necessary.  We have to handle "." and ".." as special
	 * cases.
	 *
	 * When handling ".." we have to detect a traversal back through a
	 * mount point.   If we are at the root, ".." just returns the root.
	 *
	 * When handling "." or ".." we also have to recalculate dflags
	 * since our dflags will be for some sub-directory instead of the
	 * parent dir.
	 *
	 * This subsection returns a locked, refd 'nch' unless it errors out,
	 * and an unlocked but still ref'd nd->nl_nch.
	 *
	 * The namecache topology is not allowed to be disconnected, so 
	 * encountering a NULL parent will generate EINVAL.  This typically
	 * occurs when a directory is removed out from under a process.
	 *
	 * WARNING! The unlocking of nd->nl_nch is sensitive code.
	 */
	KKASSERT(nd->nl_flags & NLC_NCPISLOCKED);

	if (nlc.nlc_namelen == 1 && nlc.nlc_nameptr[0] == '.') {
	    cache_unlock(&nd->nl_nch);
	    nd->nl_flags &= ~NLC_NCPISLOCKED;
	    cache_get_maybe_shared(&nd->nl_nch, &nch, wantsexcllock(nd, ptr));
	    wasdotordotdot = 1;
	} else if (nlc.nlc_namelen == 2 && 
		   nlc.nlc_nameptr[0] == '.' && nlc.nlc_nameptr[1] == '.') {
	    if (nd->nl_nch.mount == nd->nl_rootnch.mount &&
		nd->nl_nch.ncp == nd->nl_rootnch.ncp
	    ) {
		/*
		 * ".." at the root returns the root
		 */
		cache_unlock(&nd->nl_nch);
		nd->nl_flags &= ~NLC_NCPISLOCKED;
		cache_get_maybe_shared(&nd->nl_nch, &nch,
				       wantsexcllock(nd, ptr));
	    } else {
		/*
		 * Locate the parent ncp.  If we are at the root of a
		 * filesystem mount we have to skip to the mounted-on
		 * point in the underlying filesystem.
		 *
		 * Expect the parent to always be good since the
		 * mountpoint doesn't go away.  XXX hack.  cache_get()
		 * requires the ncp to already have a ref as a safety.
		 *
		 * However, a process which has been broken out of a chroot
		 * will wind up with a NULL parent if it tries to '..' above
		 * the real root, deal with the case.  Note that this does
		 * not protect us from a jail breakout, it just stops a panic
		 * if the jail-broken process tries to '..' past the real
		 * root.
		 */
		nctmp = nd->nl_nch;
		while (nctmp.ncp == nctmp.mount->mnt_ncmountpt.ncp) {
			nctmp = nctmp.mount->mnt_ncmounton;
			if (nctmp.ncp == NULL)
				break;
		}
		if (nctmp.ncp == NULL) {
			if (curthread->td_proc) {
				kprintf("vfs_nlookup: '..' traverse broke "
					"jail: pid %d (%s)\n",
					curthread->td_proc->p_pid,
					curthread->td_comm);
			}
			nctmp = nd->nl_rootnch;
		} else {
			nctmp.ncp = nctmp.ncp->nc_parent;
		}
		cache_hold(&nctmp);
		cache_unlock(&nd->nl_nch);
		nd->nl_flags &= ~NLC_NCPISLOCKED;
		cache_get_maybe_shared(&nctmp, &nch, wantsexcllock(nd, ptr));
		cache_drop(&nctmp);		/* NOTE: zero's nctmp */
	    }
	    wasdotordotdot = 2;
	} else {
	    /*
	     * Must unlock nl_nch when traversing down the path.  However,
	     * the child ncp has not yet been found/created and the parent's
	     * child list might be empty.  Thus releasing the lock can
	     * allow a race whereby the parent ncp's vnode is recycled.
	     * This case can occur especially when maxvnodes is set very low.
	     *
	     * We need the parent's ncp to remain resolved for all normal
	     * filesystem activities, so we vhold() the vp during the lookup
	     * to prevent recyclement due to vnlru / maxvnodes.
	     *
	     * If we race an unlink or rename the ncp might be marked
	     * DESTROYED after resolution, requiring a retry.
	     */
	    if ((hvp = nd->nl_nch.ncp->nc_vp) != NULL)
		vhold(hvp);
	    cache_unlock(&nd->nl_nch);
	    nd->nl_flags &= ~NLC_NCPISLOCKED;
	    error = cache_nlookup_maybe_shared(&nd->nl_nch, &nlc,
					       wantsexcllock(nd, ptr), &nch);
	    if (error == EWOULDBLOCK) {
		    nch = cache_nlookup(&nd->nl_nch, &nlc);
		    if (nch.ncp->nc_flag & NCF_UNRESOLVED)
			hit = 0;
		    for (;;) {
			error = cache_resolve(&nch, nd->nl_cred);
			if (error != EAGAIN &&
			    (nch.ncp->nc_flag & NCF_DESTROYED) == 0) {
				if (error == ESTALE) {
				    if (!inretry)
					error = ENOENT;
				    doretry = TRUE;
				}
				break;
			}
			kprintf("[diagnostic] nlookup: relookup %*.*s\n",
				nch.ncp->nc_nlen, nch.ncp->nc_nlen,
				nch.ncp->nc_name);
			cache_put(&nch);
			nch = cache_nlookup(&nd->nl_nch, &nlc);
		    }
	    }
	    if (hvp)
		vdrop(hvp);
	    wasdotordotdot = 0;
	}

	/*
	 * If the last component was "." or ".." our dflags no longer
	 * represents the parent directory and we have to explicitly
	 * look it up.
	 *
	 * Expect the parent to be good since nch is locked.
	 */
	if (wasdotordotdot && error == 0) {
	    dflags = 0;
	    if ((par.ncp = nch.ncp->nc_parent) != NULL) {
		par.mount = nch.mount;
		cache_hold(&par);
		cache_lock_maybe_shared(&par, wantsexcllock(nd, ptr));
		error = naccess(&par, 0, nd->nl_cred, &dflags);
		cache_put(&par);
		if (error) {
		    if (!keeperror(nd, error))
			    error = 0;
		}
	    }
	}

	/*
	 * [end of subsection]
	 *
	 * nch is locked and referenced.
	 * nd->nl_nch is unlocked and referenced.
	 *
	 * nl_nch must be unlocked or we could chain lock to the root
	 * if a resolve gets stuck (e.g. in NFS).
	 */
	KKASSERT((nd->nl_flags & NLC_NCPISLOCKED) == 0);

	/*
	 * Resolve the namespace if necessary.  The ncp returned by
	 * cache_nlookup() is referenced and locked.
	 *
	 * XXX neither '.' nor '..' should return EAGAIN since they were
	 * previously resolved and thus cannot be newly created ncp's.
	 */
	if (nch.ncp->nc_flag & NCF_UNRESOLVED) {
	    hit = 0;
	    error = cache_resolve(&nch, nd->nl_cred);
	    if (error == ESTALE) {
		if (!inretry)
		    error = ENOENT;
		doretry = TRUE;
	    }
	    KKASSERT(error != EAGAIN);
	} else {
	    error = nch.ncp->nc_error;
	}

	/*
	 * Early completion.  ENOENT is not an error if this is the last
	 * component and NLC_CREATE or NLC_RENAME (rename target) was
	 * requested.  Note that ncp->nc_error is left as ENOENT in that
	 * case, which we check later on.
	 *
	 * Also handle invalid '.' or '..' components terminating a path
	 * for a create/rename/delete.  The standard requires this and pax
	 * pretty stupidly depends on it.
	 */
	if (islastelement(ptr)) {
	    if (error == ENOENT &&
		(nd->nl_flags & (NLC_CREATE | NLC_RENAME_DST))
	    ) {
		if (nd->nl_flags & NLC_NFS_RDONLY) {
			error = EROFS;
		} else {
			error = naccess(&nch, nd->nl_flags | dflags,
					nd->nl_cred, NULL);
		}
	    }
	    if (error == 0 && wasdotordotdot &&
		(nd->nl_flags & (NLC_CREATE | NLC_DELETE |
				 NLC_RENAME_SRC | NLC_RENAME_DST))) {
		/*
		 * POSIX junk
		 */
		if (nd->nl_flags & NLC_CREATE)
			error = EEXIST;
		else if (nd->nl_flags & NLC_DELETE)
			error = (wasdotordotdot == 1) ? EINVAL : ENOTEMPTY;
		else
			error = EINVAL;
	    }
	}

	/*
	 * Early completion on error.
	 */
	if (error) {
	    cache_put(&nch);
	    break;
	}

	/*
	 * If the element is a symlink and it is either not the last
	 * element or it is the last element and we are allowed to
	 * follow symlinks, resolve the symlink.
	 */
	if ((nch.ncp->nc_flag & NCF_ISSYMLINK) &&
	    (*ptr || (nd->nl_flags & NLC_FOLLOW))
	) {
	    if (nd->nl_loopcnt++ >= MAXSYMLINKS) {
		error = ELOOP;
		cache_put(&nch);
		break;
	    }
	    error = nreadsymlink(nd, &nch, &nlc);
	    cache_put(&nch);
	    if (error)
		break;

	    /*
	     * Concatenate trailing path elements onto the returned symlink.
	     * Note that if the path component (ptr) is not exhausted, it
	     * will begin with a '/', so we do not have to add another one.
	     *
	     * The symlink may not be empty.
	     */
	    len = strlen(ptr);
	    if (nlc.nlc_namelen == 0 || nlc.nlc_namelen + len >= MAXPATHLEN) {
		error = nlc.nlc_namelen ? ENAMETOOLONG : ENOENT;
		objcache_put(namei_oc, nlc.nlc_nameptr);
		break;
	    }
	    bcopy(ptr, nlc.nlc_nameptr + nlc.nlc_namelen, len + 1);
	    if (nd->nl_flags & NLC_HASBUF)
		objcache_put(namei_oc, nd->nl_path);
	    nd->nl_path = nlc.nlc_nameptr;
	    nd->nl_flags |= NLC_HASBUF;
	    ptr = nd->nl_path;

	    /*
	     * Go back up to the top to resolve any initial '/'s in the
	     * symlink.
	     */
	    continue;
	}
	
	/*
	 * If the element is a directory and we are crossing a mount point,
	 * Locate the mount.
	 */
	while ((nch.ncp->nc_flag & NCF_ISMOUNTPT) && 
	    (nd->nl_flags & NLC_NOCROSSMOUNT) == 0 &&
	    (mp = cache_findmount(&nch)) != NULL
	) {
	    struct vnode *tdp;
	    int vfs_do_busy = 0;

	    /*
	     * VFS must be busied before the namecache entry is locked,
	     * but we don't want to waste time calling vfs_busy() if the
	     * mount point is already resolved.
	     */
again:
	    cache_put(&nch);
	    if (vfs_do_busy) {
		while (vfs_busy(mp, 0)) {
		    if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
			kprintf("nlookup: warning umount race avoided\n");
			cache_dropmount(mp);
			error = EBUSY;
			vfs_do_busy = 0;
			goto double_break;
		    }
		}
	    }
	    cache_get_maybe_shared(&mp->mnt_ncmountpt, &nch,
				   wantsexcllock(nd, ptr));

	    if (nch.ncp->nc_flag & NCF_UNRESOLVED) {
		if (vfs_do_busy == 0) {
		    vfs_do_busy = 1;
		    goto again;
		}
		error = VFS_ROOT(mp, &tdp);
		vfs_unbusy(mp);
		vfs_do_busy = 0;
		if (keeperror(nd, error)) {
		    cache_dropmount(mp);
		    break;
		}
		if (error == 0) {
		    cache_setvp(&nch, tdp);
		    vput(tdp);
		}
	    }
	    if (vfs_do_busy)
		vfs_unbusy(mp);
	    cache_dropmount(mp);
	}

	if (keeperror(nd, error)) {
	    cache_put(&nch);
double_break:
	    break;
	}
	    
	/*
	 * Skip any slashes to get to the next element.  If there 
	 * are any slashes at all the current element must be a
	 * directory or, in the create case, intended to become a directory.
	 * If it isn't we break without incrementing ptr and fall through
	 * to the failure case below.
	 */
	while (*ptr == '/') {
	    if ((nch.ncp->nc_flag & NCF_ISDIR) == 0 && 
		!(nd->nl_flags & NLC_WILLBEDIR)
	    ) {
		break;
	    }
	    ++ptr;
	}

	/*
	 * Continuation case: additional elements and the current
	 * element is a directory.
	 */
	if (*ptr && (nch.ncp->nc_flag & NCF_ISDIR)) {
	    if (nd->nl_flags & NLC_NCDIR) {
		    cache_drop_ncdir(&nd->nl_nch);
		    nd->nl_flags &= ~NLC_NCDIR;
	    } else {
		    cache_drop(&nd->nl_nch);
	    }
	    cache_unlock(&nch);
	    KKASSERT((nd->nl_flags & NLC_NCPISLOCKED) == 0);
	    nd->nl_nch = nch;
	    continue;
	}

	/*
	 * Failure case: additional elements and the current element
	 * is not a directory
	 */
	if (*ptr) {
	    cache_put(&nch);
	    error = ENOTDIR;
	    break;
	}

	/*
	 * Successful lookup of last element.
	 *
	 * Check permissions if the target exists.  If the target does not
	 * exist directory permissions were already tested in the early
	 * completion code above.
	 *
	 * nd->nl_flags will be adjusted on return with NLC_APPENDONLY
	 * if the file is marked append-only, and NLC_STICKY if the directory
	 * containing the file is sticky.
	 */
	if (nch.ncp->nc_vp && (nd->nl_flags & NLC_ALLCHKS)) {
	    error = naccess(&nch, nd->nl_flags | dflags,
			    nd->nl_cred, NULL);
	    if (keeperror(nd, error)) {
		cache_put(&nch);
		break;
	    }
	}

	/*
	 * Termination: no more elements.
	 *
	 * If NLC_REFDVP is set acquire a referenced parent dvp.
	 */
	if (nd->nl_flags & NLC_REFDVP) {
		cache_lock(&nd->nl_nch);
		error = cache_vref(&nd->nl_nch, nd->nl_cred, &nd->nl_dvp);
		cache_unlock(&nd->nl_nch);
		if (keeperror(nd, error)) {
			kprintf("NLC_REFDVP: Cannot ref dvp of %p\n", nch.ncp);
			cache_put(&nch);
			break;
		}
	}
	if (nd->nl_flags & NLC_NCDIR) {
		cache_drop_ncdir(&nd->nl_nch);
		nd->nl_flags &= ~NLC_NCDIR;
	} else {
		cache_drop(&nd->nl_nch);
	}
	nd->nl_nch = nch;
	nd->nl_flags |= NLC_NCPISLOCKED;
	error = 0;
	break;
    }

    if (hit)
	++gd->gd_nchstats->ncs_longhits;
    else
	++gd->gd_nchstats->ncs_longmiss;

    if (nd->nl_flags & NLC_NCPISLOCKED)
	KKASSERT(cache_lockstatus(&nd->nl_nch) > 0);

    /*
     * Retry the whole thing if doretry flag is set, but only once.
     * autofs(5) may mount another filesystem under its root directory
     * while resolving a path.
     */
    if (doretry && !inretry) {
	inretry = TRUE;
	nd->nl_flags &= NLC_NCDIR;
	nd->nl_flags |= saveflag;
	goto nlookup_start;
    }

    /*
     * NOTE: If NLC_CREATE was set the ncp may represent a negative hit
     * (ncp->nc_error will be ENOENT), but we will still return an error
     * code of 0.
     */
    return(error);
}
Example #13
/*
 * Allocates a new node of type 'type' inside the 'tmp' mount point, with
 * its owner set to 'uid', its group to 'gid' and its mode set to 'mode',
 * using the credentials of the process 'p'.
 *
 * If the node type is set to 'VDIR', the caller must know the parent
 * directory of the node being created; only the root node is allocated
 * without a parent.
 *
 * If the node type is set to 'VBLK' or 'VCHR', then the rdev parameter
 * specifies the device the node represents.
 *
 * If the node type is set to 'VLNK', then the parameter target specifies
 * the file name of the target file for the symbolic link that is being
 * created.
 *
 * Note that new nodes are retrieved from the available list if it has
 * items or, if it is empty, from the node pool as long as there is enough
 * space to create them.
 *
 * Returns zero on success or an appropriate error code on failure.
 */
int
tmpfs_alloc_node(struct tmpfs_mount *tmp, enum vtype type,
		 uid_t uid, gid_t gid, mode_t mode,
		 char *target, int rmajor, int rminor,
		 struct tmpfs_node **node)
{
	struct tmpfs_node *nnode;
	struct timespec ts;
	udev_t rdev;

	KKASSERT(IFF(type == VLNK, target != NULL));
	KKASSERT(IFF(type == VBLK || type == VCHR, rmajor != VNOVAL));

	if (tmp->tm_nodes_inuse >= tmp->tm_nodes_max)
		return (ENOSPC);

	nnode = objcache_get(tmp->tm_node_pool, M_WAITOK | M_NULLOK);
	if (nnode == NULL)
		return (ENOSPC);

	/* Generic initialization. */
	nnode->tn_type = type;
	vfs_timestamp(&ts);
	nnode->tn_ctime = nnode->tn_mtime = nnode->tn_atime
		= ts.tv_sec;
	nnode->tn_ctimensec = nnode->tn_mtimensec = nnode->tn_atimensec
		= ts.tv_nsec;
	nnode->tn_uid = uid;
	nnode->tn_gid = gid;
	nnode->tn_mode = mode;
	nnode->tn_id = tmpfs_fetch_ino(tmp);
	nnode->tn_advlock.init_done = 0;
	KKASSERT(nnode->tn_links == 0);

	/* Type-specific initialization. */
	switch (nnode->tn_type) {
	case VBLK:
	case VCHR:
		rdev = makeudev(rmajor, rminor);
		if (rdev == NOUDEV) {
			objcache_put(tmp->tm_node_pool, nnode);
			return(EINVAL);
		}
		nnode->tn_rdev = rdev;
		break;

	case VDIR:
		RB_INIT(&nnode->tn_dir.tn_dirtree);
		RB_INIT(&nnode->tn_dir.tn_cookietree);
		nnode->tn_size = 0;
		break;

	case VFIFO:
		/* FALLTHROUGH */
	case VSOCK:
		break;

	case VLNK:
		nnode->tn_size = strlen(target);
		nnode->tn_link = kmalloc(nnode->tn_size + 1, tmp->tm_name_zone,
					 M_WAITOK | M_NULLOK);
		if (nnode->tn_link == NULL) {
			objcache_put(tmp->tm_node_pool, nnode);
			return (ENOSPC);
		}
		bcopy(target, nnode->tn_link, nnode->tn_size);
		nnode->tn_link[nnode->tn_size] = '\0';
		break;

	case VREG:
		nnode->tn_reg.tn_aobj = swap_pager_alloc(NULL, 0,
							 VM_PROT_DEFAULT, 0);
		nnode->tn_reg.tn_aobj_pages = 0;
		nnode->tn_size = 0;
		vm_object_set_flag(nnode->tn_reg.tn_aobj, OBJ_NOPAGEIN);
		break;

	default:
		panic("tmpfs_alloc_node: type %p %d", nnode, (int)nnode->tn_type);
	}

	TMPFS_NODE_LOCK(nnode);
	TMPFS_LOCK(tmp);
	LIST_INSERT_HEAD(&tmp->tm_nodes_used, nnode, tn_entries);
	tmp->tm_nodes_inuse++;
	TMPFS_UNLOCK(tmp);
	TMPFS_NODE_UNLOCK(nnode);

	*node = nnode;
	return 0;
}
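The two KKASSERT()s at the top of tmpfs_alloc_node() use an IFF() (if-and-only-if) helper whose definition is not shown in this excerpt. A one-line sketch, assuming it only normalizes both operands to booleans and compares them:

/* Hypothetical if-and-only-if helper assumed by the assertions above. */
#define IFF(a, b)	(!(a) == !(b))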
Example #14
/*
 * Destroys the node pointed to by node from the file system 'tmp'.
 * If the node does not belong to the given mount point, the results are
 * unpredictable.
 *
 * If the node references a directory, it must contain no entries, because
 * removing them could require a recursive algorithm, something forbidden
 * in kernel space.  Furthermore, there is no need to provide such
 * functionality (recursive removal) because the only primitives offered
 * to the user are the removal of empty directories and the deletion of
 * individual files.
 *
 * Note that nodes are not really deleted; in fact, when a node has been
 * allocated, it cannot be deleted during the whole life of the file
 * system.  Instead, they are moved to the available list and remain there
 * until reused.
 *
 * A caller must have TMPFS_NODE_LOCK(node) and this function unlocks it.
 */
void
tmpfs_free_node(struct tmpfs_mount *tmp, struct tmpfs_node *node)
{
	vm_pindex_t pages = 0;

#ifdef INVARIANTS
	TMPFS_ASSERT_ELOCKED(node);
	KKASSERT(node->tn_vnode == NULL);
	KKASSERT((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0);
#endif

	TMPFS_LOCK(tmp);
	LIST_REMOVE(node, tn_entries);
	tmp->tm_nodes_inuse--;
	TMPFS_UNLOCK(tmp);
	TMPFS_NODE_UNLOCK(node);  /* Caller has this lock */

	switch (node->tn_type) {
	case VNON:
		/* Do not do anything.  VNON is provided to let the
		 * allocation routine clean itself easily by avoiding
		 * duplicating code in it. */
		/* FALLTHROUGH */
	case VBLK:
		/* FALLTHROUGH */
	case VCHR:
		/* FALLTHROUGH */
		break;
	case VDIR:
		/*
		 * The parent link can be NULL if this is the root
		 * node or if it is a directory node that was rmdir'd.
		 *
		 * XXX what if node is a directory which still contains
		 * directory entries (e.g. due to a forced umount) ?
		 */
		node->tn_size = 0;
		KKASSERT(node->tn_dir.tn_parent == NULL);

		/*
		 * If the root node is being destroyed don't leave a
		 * dangling pointer in tmpfs_mount.
		 */
		if (node == tmp->tm_root)
			tmp->tm_root = NULL;
		break;
	case VFIFO:
		/* FALLTHROUGH */
	case VSOCK:
		break;

	case VLNK:
		kfree(node->tn_link, tmp->tm_name_zone);
		node->tn_link = NULL;
		node->tn_size = 0;
		break;

	case VREG:
		if (node->tn_reg.tn_aobj != NULL)
			vm_object_deallocate(node->tn_reg.tn_aobj);
		node->tn_reg.tn_aobj = NULL;
		pages = node->tn_reg.tn_aobj_pages;
		break;

	default:
		panic("tmpfs_free_node: type %p %d", node, (int)node->tn_type);
	}

	/*
	 * Clean up fields for the next allocation.  The objcache only ctors
	 * new allocations.
	 */
	tmpfs_node_ctor(node, NULL, 0);
	objcache_put(tmp->tm_node_pool, node);
	/* node is now invalid */

	if (pages)
		atomic_add_long(&tmp->tm_pages_used, -(long)pages);
}