Example No. 1
static void fifo_reinit_vp(vnode_t *vp)
{
	vn_reinit(vp);
	vp->v_type = VFIFO;
	vp->v_flag &= VROOT;			/* preserve only the VROOT flag */
	vp->v_flag |= VNOMAP | VNOSWAP;		/* a FIFO is neither mappable nor swappable */
}
Example No. 2
vnode_t *
vn_alloc(int kmflag)
{
	vnode_t *vp;

	vp = kmem_cache_alloc(vn_cache, kmflag);

	if (vp != NULL) {
		vn_reinit(vp);
	}

	return (vp);
}
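To show where vn_alloc() typically fits, here is a minimal, hypothetical sketch of a pseudo-filesystem node constructor and destructor. mypfs_makenode()/mypfs_freenode() and their arguments are invented for illustration; vn_alloc(), vn_free() and the vnode fields are the standard illumos interfaces used in the examples on this page.

/* Hypothetical sketch, not taken from any of the sources above. */
static vnode_t *
mypfs_makenode(struct vfs *vfsp, void *data)
{
	vnode_t *vp;

	/* vn_alloc() returns a vnode already reset by vn_reinit(). */
	vp = vn_alloc(KM_SLEEP);
	vp->v_type = VREG;
	vp->v_flag |= VNOMAP | VNOSWAP;	/* no mmap, no swap on this node */
	vp->v_vfsp = vfsp;
	vp->v_data = data;		/* back-pointer to the fs-private node */
	return (vp);
}

static void
mypfs_freenode(vnode_t *vp)
{
	vp->v_data = NULL;
	vn_free(vp);			/* return the vnode to vn_cache */
}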
Example No. 3
int
zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
{
#ifdef HAVE_SMB_SHARE
	zfs_acl_ids_t acl_ids;
	vattr_t vattr;
	znode_t *sharezp;
	vnode_t *vp;
	znode_t *zp;
	int error;

	vattr.va_mask = AT_MODE|AT_UID|AT_GID|AT_TYPE;
	vattr.va_mode = S_IFDIR | 0555;
	vattr.va_uid = crgetuid(kcred);
	vattr.va_gid = crgetgid(kcred);

	sharezp = kmem_cache_alloc(znode_cache, KM_SLEEP);
	sharezp->z_moved = 0;
	sharezp->z_unlinked = 0;
	sharezp->z_atime_dirty = 0;
	sharezp->z_zfsvfs = zfsvfs;
	sharezp->z_is_sa = zfsvfs->z_use_sa;

	vp = ZTOV(sharezp);
	vn_reinit(vp);
	vp->v_type = VDIR;

	VERIFY(0 == zfs_acl_ids_create(sharezp, IS_ROOT_NODE, &vattr,
	    kcred, NULL, &acl_ids));
	zfs_mknode(sharezp, &vattr, tx, kcred, IS_ROOT_NODE, &zp, &acl_ids);
	ASSERT3P(zp, ==, sharezp);
	ASSERT(!vn_in_dnlc(ZTOV(sharezp))); /* not valid to move */
	POINTER_INVALIDATE(&sharezp->z_zfsvfs);
	error = zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
	    ZFS_SHARES_DIR, 8, 1, &sharezp->z_id, tx);
	zfsvfs->z_shares_dir = sharezp->z_id;

	zfs_acl_ids_free(&acl_ids);
	// ZTOV(sharezp)->v_count = 0;
	sa_handle_destroy(sharezp->z_sa_hdl);
	kmem_cache_free(znode_cache, sharezp);

	return (error);
#else
	return (0);
#endif /* HAVE_SMB_SHARE */
}
Example No. 4
vnode_t *vn_alloc(int kmflag)
{
    ASSERT(kmflag == 0 || kmflag == UMEM_NOFAIL);

    vnode_t *vp;

    vp = kmem_cache_alloc(vnode_cache, kmflag);

    if (vp != NULL) {
        /*
         * Taken from vn_cache_constructor; only touch vp once the
         * allocation is known to have succeeded.
         */
        mutex_init(&vp->v_lock, NULL, MUTEX_DEFAULT, NULL);
        rwst_init(&vp->v_vfsmhlock.ve_lock, NULL, RW_DEFAULT, NULL);

        vp->v_path = NULL;
        vp->v_data = NULL;
        vn_reinit(vp);
    }

    /*fprintf(stderr, "VNode %p alloc'ed\n", vp);*/
    return vp;
}
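For symmetry, a sketch of what the matching vn_free() might look like in this userland port, tearing down the locks set up above. This is an assumption based on the allocation path shown here, not code taken from the port itself; rwst_destroy()/mutex_destroy() are assumed to be available as the usual counterparts of the init calls.

/* Hypothetical counterpart to the vn_alloc() above (assumed, not verbatim). */
void vn_free(vnode_t *vp)
{
    /* undo what vn_alloc() set up before returning the vnode to the cache */
    rwst_destroy(&vp->v_vfsmhlock.ve_lock);
    mutex_destroy(&vp->v_lock);
    kmem_cache_free(vnode_cache, vp);
}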
Example No. 5
/*
 * Allocate a new lxproc node
 *
 * This also allocates the vnode associated with it
 */
lxpr_node_t *
lxpr_getnode(vnode_t *dp, lxpr_nodetype_t type, proc_t *p, int fd)
{
	lxpr_node_t *lxpnp;
	vnode_t *vp;
	user_t *up;
	timestruc_t now;

	/*
	 * Allocate a new node. It is deallocated in vop_inactive.
	 */
	lxpnp = kmem_cache_alloc(lxpr_node_cache, KM_SLEEP);

	/*
	 * Set defaults (may be overridden below)
	 */
	gethrestime(&now);
	lxpnp->lxpr_type = type;
	lxpnp->lxpr_realvp = NULL;
	lxpnp->lxpr_parent = dp;
	VN_HOLD(dp);
	if (p != NULL) {
		lxpnp->lxpr_pid = ((p->p_pid ==
		    curproc->p_zone->zone_proc_initpid) ? 1 : p->p_pid);

		lxpnp->lxpr_time = PTOU(p)->u_start;
		lxpnp->lxpr_uid = crgetruid(p->p_cred);
		lxpnp->lxpr_gid = crgetrgid(p->p_cred);
		lxpnp->lxpr_ino = lxpr_inode(type, p->p_pid, fd);
	} else {
		/* Pretend files without a proc belong to sched */
		lxpnp->lxpr_pid = 0;
		lxpnp->lxpr_time = now;
		lxpnp->lxpr_uid = lxpnp->lxpr_gid = 0;
		lxpnp->lxpr_ino = lxpr_inode(type, 0, 0);
	}

	/* initialize the vnode data */
	vp = lxpnp->lxpr_vnode;
	vn_reinit(vp);
	vp->v_flag = VNOCACHE|VNOMAP|VNOSWAP|VNOMOUNT;
	vp->v_vfsp = dp->v_vfsp;

	/*
	 * Do node specific stuff
	 */
	switch (type) {
	case LXPR_PROCDIR:
		vp->v_flag |= VROOT;
		vp->v_type = VDIR;
		lxpnp->lxpr_mode = 0555;	/* read-search by everyone */
		break;

	case LXPR_PID_CURDIR:
		ASSERT(p != NULL);

		/*
		 * Zombie check.  p_stat is officially protected by pidlock,
		 * but we can't grab pidlock here because we already hold
		 * p_lock.  Luckily if we look at the process exit code
		 * we see that p_stat only transitions from SRUN to SZOMB
		 * while p_lock is held.  Aside from this, the only other
		 * p_stat transition that we need to be aware about is
		 * SIDL to SRUN, but that's not a problem since lxpr_lock()
		 * ignores nodes in the SIDL state so we'll never get a node
		 * that isn't already in the SRUN state.
		 */
		if (p->p_stat == SZOMB) {
			lxpnp->lxpr_realvp = NULL;
		} else {
			up = PTOU(p);
			lxpnp->lxpr_realvp = up->u_cdir;
			ASSERT(lxpnp->lxpr_realvp != NULL);
			VN_HOLD(lxpnp->lxpr_realvp);
		}
		vp->v_type = VLNK;
		lxpnp->lxpr_mode = 0777;	/* anyone does anything ! */
		break;

	case LXPR_PID_ROOTDIR:
		ASSERT(p != NULL);
		/* Zombie check.  see locking comment above */
		if (p->p_stat == SZOMB) {
			lxpnp->lxpr_realvp = NULL;
		} else {
			up = PTOU(p);
			lxpnp->lxpr_realvp =
			    up->u_rdir != NULL ? up->u_rdir : rootdir;
			ASSERT(lxpnp->lxpr_realvp != NULL);
			VN_HOLD(lxpnp->lxpr_realvp);
		}
		vp->v_type = VLNK;
		lxpnp->lxpr_mode = 0777;	/* anyone does anything ! */
		break;

	case LXPR_PID_EXE:
		ASSERT(p != NULL);
		lxpnp->lxpr_realvp = p->p_exec;
		if (lxpnp->lxpr_realvp != NULL) {
			VN_HOLD(lxpnp->lxpr_realvp);
		}
		vp->v_type = VLNK;
		lxpnp->lxpr_mode = 0777;
		break;

	case LXPR_SELF:
		vp->v_type = VLNK;
		lxpnp->lxpr_mode = 0777;	/* anyone does anything ! */
		break;

	case LXPR_PID_FD_FD:
		ASSERT(p != NULL);
		/* lxpr_realvp is set after we return */
		vp->v_type = VLNK;
		lxpnp->lxpr_mode = 0700;	/* read-write-exe owner only */
		break;

	case LXPR_PID_FDDIR:
		ASSERT(p != NULL);
		vp->v_type = VDIR;
		lxpnp->lxpr_mode = 0500;	/* read-search by owner only */
		break;

	case LXPR_PIDDIR:
		ASSERT(p != NULL);
		vp->v_type = VDIR;
		lxpnp->lxpr_mode = 0511;
		break;

	case LXPR_SYSDIR:
	case LXPR_SYS_FSDIR:
	case LXPR_SYS_FS_INOTIFYDIR:
	case LXPR_SYS_KERNELDIR:
	case LXPR_NETDIR:
		vp->v_type = VDIR;
		lxpnp->lxpr_mode = 0555;	/* read-search by all */
		break;

	case LXPR_PID_ENV:
	case LXPR_PID_MEM:
		ASSERT(p != NULL);
		/*FALLTHRU*/
	case LXPR_KCORE:
		vp->v_type = VREG;
		lxpnp->lxpr_mode = 0400;	/* read-only by owner only */
		break;

	default:
		vp->v_type = VREG;
		lxpnp->lxpr_mode = 0444;	/* read-only by all */
		break;
	}

	return (lxpnp);
}
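As a usage illustration, a caller of lxpr_getnode() on the lookup path might look like the sketch below. example_lookup_piddir() is invented for illustration; lxpr_getnode(), the LXPR_PIDDIR type and the lxpr_vnode field are taken from the example above.

/*
 * Hypothetical sketch of a lookup-path caller: build the node for a
 * /proc/<pid> directory and hand its vnode (already counted once by
 * vn_reinit()) back to the lookup code.
 */
static vnode_t *
example_lookup_piddir(vnode_t *dp, proc_t *p)
{
	lxpr_node_t *lxpnp;

	lxpnp = lxpr_getnode(dp, LXPR_PIDDIR, p, 0);
	return (lxpnp->lxpr_vnode);
}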
Example No. 6
/*
 * Construct a new znode/vnode and initialize it.
 *
 * This does not call dmu_set_user(); that is left to the caller,
 * in case you don't want to return the znode.
 */
static znode_t *
zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz)
{
	znode_t	*zp;
	vnode_t *vp;

	zp = kmem_cache_alloc(znode_cache, KM_SLEEP);
	zfs_znode_cache_constructor(zp, zfsvfs->z_parent->z_vfs, 0);

	ASSERT(zp->z_dirlocks == NULL);
	ASSERT(zp->z_dbuf == NULL);
	ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));

	/*
	 * Defer setting z_zfsvfs until the znode is ready to be a candidate for
	 * the zfs_znode_move() callback.
	 */
	zp->z_phys = NULL;
	zp->z_unlinked = 0;
	zp->z_atime_dirty = 0;
	zp->z_mapcnt = 0;
	zp->z_last_itx = 0;
	zp->z_id = db->db_object;
	zp->z_blksz = blksz;
	zp->z_seq = 0x7A4653;
	zp->z_sync_cnt = 0;

	vp = ZTOV(zp);
#ifdef TODO
	vn_reinit(vp);
#endif

	zfs_znode_dmu_init(zfsvfs, zp, db);

	zp->z_gen = zp->z_phys->zp_gen;

#if 0
	if (vp == NULL)
		return (zp);
#endif

	vp->v_type = IFTOVT((mode_t)zp->z_phys->zp_mode);
	switch (vp->v_type) {
	case VDIR:
		zp->z_zn_prefetch = B_TRUE; /* z_prefetch default is enabled */
		break;
	case VFIFO:
		vp->v_op = &zfs_fifoops;
		break;
	}
	if (vp->v_type != VFIFO)
		VN_LOCK_ASHARE(vp);

	mutex_enter(&zfsvfs->z_znodes_lock);
	list_insert_tail(&zfsvfs->z_all_znodes, zp);
	membar_producer();
	/*
	 * Everything else must be valid before z_zfsvfs is assigned, since
	 * that assignment makes the znode eligible for zfs_znode_move().
	 */
	zp->z_zfsvfs = zfsvfs;
	mutex_exit(&zfsvfs->z_znodes_lock);

	VFS_HOLD(zfsvfs->z_vfs);
	return (zp);
}
Example No. 7
static vnode_t *
make_rnode4(nfs4_sharedfh_t *fh, r4hashq_t *rhtp, struct vfs *vfsp,
    struct vnodeops *vops,
    int (*putapage)(vnode_t *, page_t *, u_offset_t *, size_t *, int, cred_t *),
    int *newnode, cred_t *cr)
{
	rnode4_t *rp;
	rnode4_t *trp;
	vnode_t *vp;
	mntinfo4_t *mi;

	ASSERT(RW_READ_HELD(&rhtp->r_lock));

	mi = VFTOMI4(vfsp);

start:
	if ((rp = r4find(rhtp, fh, vfsp)) != NULL) {
		vp = RTOV4(rp);
		*newnode = 0;
		return (vp);
	}
	rw_exit(&rhtp->r_lock);

	mutex_enter(&rp4freelist_lock);

	if (rp4freelist != NULL && rnode4_new >= nrnode) {
		rp = rp4freelist;
		rp4_rmfree(rp);
		mutex_exit(&rp4freelist_lock);

		vp = RTOV4(rp);

		if (rp->r_flags & R4HASHED) {
			rw_enter(&rp->r_hashq->r_lock, RW_WRITER);
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				vp->v_count--;
				mutex_exit(&vp->v_lock);
				rw_exit(&rp->r_hashq->r_lock);
				rw_enter(&rhtp->r_lock, RW_READER);
				goto start;
			}
			mutex_exit(&vp->v_lock);
			rp4_rmhash_locked(rp);
			rw_exit(&rp->r_hashq->r_lock);
		}

		r4inactive(rp, cr);

		mutex_enter(&vp->v_lock);
		if (vp->v_count > 1) {
			vp->v_count--;
			mutex_exit(&vp->v_lock);
			rw_enter(&rhtp->r_lock, RW_READER);
			goto start;
		}
		mutex_exit(&vp->v_lock);
		vn_invalid(vp);

		/*
		 * destroy old locks before bzero'ing and
		 * recreating the locks below.
		 */
		uninit_rnode4(rp);

		/*
		 * Make sure that if rnode is recycled then
		 * VFS count is decremented properly before
		 * reuse.
		 */
		VFS_RELE(vp->v_vfsp);
		vn_reinit(vp);
	} else {
		vnode_t *new_vp;

		mutex_exit(&rp4freelist_lock);

		rp = kmem_cache_alloc(rnode4_cache, KM_SLEEP);
		new_vp = vn_alloc(KM_SLEEP);

		atomic_add_long((ulong_t *)&rnode4_new, 1);
#ifdef DEBUG
		clstat4_debug.nrnode.value.ui64++;
#endif
		vp = new_vp;
	}

	bzero(rp, sizeof (*rp));
	rp->r_vnode = vp;
	nfs_rw_init(&rp->r_rwlock, NULL, RW_DEFAULT, NULL);
	nfs_rw_init(&rp->r_lkserlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&rp->r_svlock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&rp->r_statelock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&rp->r_statev4_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&rp->r_os_lock, NULL, MUTEX_DEFAULT, NULL);
	rp->created_v4 = 0;
	list_create(&rp->r_open_streams, sizeof (nfs4_open_stream_t),
	    offsetof(nfs4_open_stream_t, os_node));
	rp->r_lo_head.lo_prev_rnode = &rp->r_lo_head;
	rp->r_lo_head.lo_next_rnode = &rp->r_lo_head;
	cv_init(&rp->r_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&rp->r_commit.c_cv, NULL, CV_DEFAULT, NULL);
	rp->r_flags = R4READDIRWATTR;
	rp->r_fh = fh;
	rp->r_hashq = rhtp;
	sfh4_hold(rp->r_fh);
	rp->r_server = mi->mi_curr_serv;
	rp->r_deleg_type = OPEN_DELEGATE_NONE;
	rp->r_deleg_needs_recovery = OPEN_DELEGATE_NONE;
	nfs_rw_init(&rp->r_deleg_recall_lock, NULL, RW_DEFAULT, NULL);

	rddir4_cache_create(rp);
	rp->r_putapage = putapage;
	vn_setops(vp, vops);
	vp->v_data = (caddr_t)rp;
	vp->v_vfsp = vfsp;
	VFS_HOLD(vfsp);
	vp->v_type = VNON;
	if (isrootfh(fh, rp))
		vp->v_flag = VROOT;
	vn_exists(vp);

	/*
	 * There is a race condition if someone else
	 * alloc's the rnode while no locks are held, so we
	 * check again and recover if found.
	 */
	rw_enter(&rhtp->r_lock, RW_WRITER);
	if ((trp = r4find(rhtp, fh, vfsp)) != NULL) {
		vp = RTOV4(trp);
		*newnode = 0;
		rw_exit(&rhtp->r_lock);
		rp4_addfree(rp, cr);
		rw_enter(&rhtp->r_lock, RW_READER);
		return (vp);
	}
	rp4_addhash(rp);
	*newnode = 1;
	return (vp);
}