Example #1
struct vnode *
vn_initialize(
	struct inode	*inode)
{
	struct vnode	*vp = LINVFS_GET_VP(inode);

	XFS_STATS_INC(vn_active);
	XFS_STATS_INC(vn_alloc);

	vp->v_flag = VMODIFIED;
	spinlock_init(&vp->v_lock, "v_lock");

	spin_lock(&vnumber_lock);
	if (!++vn_generation)	/* v_number shouldn't be zero */
		vn_generation++;
	vp->v_number = vn_generation;
	spin_unlock(&vnumber_lock);

	ASSERT(VN_CACHED(vp) == 0);

	/* Initialize the first behavior and the behavior chain head. */
	vn_bhv_head_init(VN_BHV_HEAD(vp), "vnode");

#ifdef	XFS_VNODE_TRACE
	vp->v_trace = ktrace_alloc(VNODE_TRACE_SIZE, KM_SLEEP);
#endif	/* XFS_VNODE_TRACE */

	vn_trace_exit(vp, "vn_initialize", (inst_t *)__return_address);
	return vp;
}
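
A note on the generation counter above: vn_generation is bumped under
vnumber_lock and skips zero on wraparound, so a v_number of zero can never
name a live vnode. A minimal stand-alone sketch of that skip-zero pattern
(illustrative names, locking elided):

/* Sketch only: a generation counter that never yields zero, mirroring
 * the vn_generation update in vn_initialize(). The real code holds
 * vnumber_lock around this; synchronization is elided here. */
static unsigned int generation_sketch;

static unsigned int
next_generation_sketch(void)
{
	if (!++generation_sketch)	/* wrapped: zero is reserved */
		generation_sketch++;
	return generation_sketch;
}
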
Example #2
/*
 * Allocate and initialize a dquot. We don't always allocate fresh memory;
 * we try to reclaim a free dquot if the number of incore dquots is above
 * a threshold.
 * The only field inside the core that gets initialized at this point
 * is the d_id field. The idea is to fill in the entire q_core
 * when we read in the on disk dquot.
 */
STATIC xfs_dquot_t *
xfs_qm_dqinit(
	xfs_mount_t  *mp,
	xfs_dqid_t   id,
	uint	     type)
{
	xfs_dquot_t	*dqp;
	boolean_t	brandnewdquot;

	brandnewdquot = xfs_qm_dqalloc_incore(&dqp);
	dqp->dq_flags = type;
	dqp->q_core.d_id = cpu_to_be32(id);
	dqp->q_mount = mp;

	/*
	 * No need to re-initialize these if this is a reclaimed dquot.
	 */
	if (brandnewdquot) {
		dqp->dq_flnext = dqp->dq_flprev = dqp;
		mutex_init(&dqp->q_qlock);
		initnsema(&dqp->q_flock, 1, "fdq");
		sv_init(&dqp->q_pinwait, SV_DEFAULT, "pdq");

#ifdef XFS_DQUOT_TRACE
		dqp->q_trace = ktrace_alloc(DQUOT_TRACE_SIZE, KM_SLEEP);
		xfs_dqtrace_entry(dqp, "DQINIT");
#endif
	} else {
		/*
		 * Only the q_core portion was zeroed in dqreclaim_one().
		 * So, we need to reset others.
		 */
		dqp->q_nrefs = 0;
		dqp->q_blkno = 0;
		dqp->MPL_NEXT = dqp->HL_NEXT = NULL;
		dqp->HL_PREVP = dqp->MPL_PREVP = NULL;
		dqp->q_bufoffset = 0;
		dqp->q_fileoffset = 0;
		dqp->q_transp = NULL;
		dqp->q_gdquot = NULL;
		dqp->q_res_bcount = 0;
		dqp->q_res_icount = 0;
		dqp->q_res_rtbcount = 0;
		dqp->q_pincount = 0;
		dqp->q_hash = NULL;
		ASSERT(dqp->dq_flnext == dqp->dq_flprev);

#ifdef XFS_DQUOT_TRACE
		ASSERT(dqp->q_trace);
		xfs_dqtrace_entry(dqp, "DQRECLAIMED_INIT");
#endif
	}

	/*
	 * log item gets initialized later
	 */
	return (dqp);
}
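
The header comment says only d_id is initialized here; the rest of q_core is
filled when the on-disk dquot is read in. A hedged sketch of that read-side
step (ddqp, the assumed pointer to the on-disk copy, and the surrounding
buffer handling are not shown):

/* Sketch only: once the dquot buffer read completes, the whole core
 * is copied over in one step, which is why xfs_qm_dqinit() sets
 * nothing in q_core beyond d_id. ddqp is an assumed name. */
memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
ASSERT(dqp->q_core.d_id == cpu_to_be32(id));
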
Example #3
/*
 * xfs_filestream_init() is called at xfs initialisation time to set up the
 * memory zone that will be used for filestream data structure allocation.
 */
int
xfs_filestream_init(void)
{
	item_zone = kmem_zone_init(sizeof(fstrm_item_t), "fstrm_item");
#ifdef XFS_FILESTREAMS_TRACE
	xfs_filestreams_trace_buf = ktrace_alloc(XFS_FSTRM_KTRACE_SIZE, KM_SLEEP);
#endif
	return item_zone ? 0 : -ENOMEM;
}
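
A matching teardown would release both allocations; a minimal sketch,
assuming kmem_zone_destroy() and ktrace_free() are the counterparts of the
init calls above:

/* Sketch of the matching teardown for xfs_filestream_init(); the
 * destroy/free names are assumptions based on the init calls. */
void
xfs_filestream_uninit(void)
{
#ifdef XFS_FILESTREAMS_TRACE
	ktrace_free(xfs_filestreams_trace_buf);
#endif
	kmem_zone_destroy(item_zone);
}
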
Example #4
void
osi_PostPopulateVCache(struct vcache *avc)
{
    memset(&(avc->vc_bhv_desc), 0, sizeof(avc->vc_bhv_desc));
    bhv_desc_init(&(avc->vc_bhv_desc), avc, avc, &Afs_vnodeops);

#if defined(AFS_SGI65_ENV)
    vn_bhv_head_init(&(avc->v.v_bh), "afsvp");
    vn_bhv_insert_initial(&(avc->v.v_bh), &(avc->vc_bhv_desc));
    avc->v.v_mreg = avc->v.v_mregb = (struct pregion *)avc;
# if defined(VNODE_TRACING)
    avc->v.v_trace = ktrace_alloc(VNODE_TRACE_SIZE, 0);
# endif
    init_bitlock(&avc->v.v_pcacheflag, VNODE_PCACHE_LOCKBIT, "afs_pcache",
		 avc->v.v_number);
    init_mutex(&avc->v.v_filocksem, MUTEX_DEFAULT, "afsvfl", (long)avc);
    init_mutex(&avc->v.v_buf_lock, MUTEX_DEFAULT, "afsvnbuf", (long)avc);
#else
    bhv_head_init(&(avc->v.v_bh));
    bhv_insert_initial(&(avc->v.v_bh), &(avc->vc_bhv_desc));
#endif

    vnode_pcache_init(&avc->v);

#if defined(DEBUG) && defined(VNODE_INIT_BITLOCK)
    /* Above define is never true except in SGI test kernels. */
    init_bitlock(&avc->v.v_flag, VLOCK, "vnode", avc->v.v_number);
#endif

#ifdef INTR_KTHREADS
    AFS_VN_INIT_BUF_LOCK(&(avc->v));
#endif

    vSetVfsp(avc, afs_globalVFS);
    vSetType(avc, VREG);

    VN_SET_DPAGES(&(avc->v), NULL);
    osi_Assert((avc->v.v_flag & VINACT) == 0);
    avc->v.v_flag = 0;
    osi_Assert(VN_GET_PGCNT(&(avc->v)) == 0);
    osi_Assert(avc->mapcnt == 0 && avc->vc_locktrips == 0);
    osi_Assert(avc->vc_rwlockid == OSI_NO_LOCKID);
    osi_Assert(avc->v.v_filocks == NULL);
# if !defined(AFS_SGI65_ENV)
    osi_Assert(avc->v.v_filocksem == NULL);
# endif
    osi_Assert(avc->cred == NULL);
# if defined(AFS_SGI64_ENV)
    vnode_pcache_reinit(&avc->v);
    avc->v.v_rdev = NODEV;
# endif
    vn_initlist((struct vnlist *)&avc->v);
    avc->lastr = 0;
}
Example #5
static int
xfs_vn_allocate(xfs_mount_t *mp, xfs_inode_t *ip, struct xfs_vnode **vpp)
{
	struct vnode *vp;
	struct xfs_vnode *vdata;
	int error;

	/* Use zone allocator here? */
	vdata = kmem_zalloc(sizeof(*vdata), KM_SLEEP);

	error = getnewvnode("xfs", XVFSTOMNT(XFS_MTOVFS(mp)),
			    &xfs_vnops, &vp);
	if (error) {
		kmem_free(vdata, sizeof(*vdata));
		return (error);
	}

	vp->v_vnlock->lk_flags |= LK_CANRECURSE;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
	error = insmntque(vp, XVFSTOMNT(XFS_MTOVFS(mp)));
	if (error != 0) {
		kmem_free(vdata, sizeof(*vdata));
		return (error);
	}

	vp->v_data = (void *)vdata;
	vdata->v_number = 0;
	vdata->v_inode = ip;
	vdata->v_vfsp  = XFS_MTOVFS(mp);
	vdata->v_vnode = vp;

	vn_bhv_head_init(VN_BHV_HEAD(vdata), "vnode");

#ifdef	CONFIG_XFS_VNODE_TRACING
	vp->v_trace = ktrace_alloc(VNODE_TRACE_SIZE, KM_SLEEP);
#endif	/* CONFIG_XFS_VNODE_TRACING */

	vn_trace_exit(vp, "vn_initialize", (inst_t *)__return_address);

	*vpp = vdata;
	return (0);
}
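
The ordering above matters: the vnode is created and locked before
insmntque() publishes it on the mount list, and v_data is wired up only
after that succeeds. A hedged caller sketch (the call site and its cleanup
policy are assumptions):

/* Hypothetical caller sketch: allocate the wrapper for a freshly
 * read xfs_inode during lookup. On success the underlying vnode is
 * still locked exclusive from the vn_lock() call above. */
struct xfs_vnode *xvp;
int error;

error = xfs_vn_allocate(mp, ip, &xvp);
if (error)
	return (error);		/* ip teardown is the caller's job */
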
Example #6
/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;

	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_update_core = 0;
	ip->i_update_size = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
	ip->i_size = 0;
	ip->i_new_size = 0;

	/*
	 * Initialize inode's trace buffers.
	 */
#ifdef	XFS_INODE_TRACE
	ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_BMAP_TRACE
	ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_BTREE_TRACE
	ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_RW_TRACE
	ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_ILOCK_TRACE
	ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_DIR2_TRACE
	ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
#endif
	/*
	 * Now initialise the VFS inode. We do this after the xfs_inode
	 * initialisation as internal failures will result in ->destroy_inode
	 * being called and that will pass down through the reclaim path and
	 * free the XFS inode. This path requires the XFS inode to already be
	 * initialised. Hence if this call fails, the xfs_inode has already
	 * been freed and we should not reference it at all in the error
	 * handling.
	 */
	if (!inode_init_always(mp->m_super, VFS_I(ip)))
		return NULL;

	/* prevent anyone from using this yet */
	VFS_I(ip)->i_state = I_NEW|I_LOCK;

	return ip;
}
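
Both failure paths above surface as NULL, but they are not symmetric: on
the inode_init_always() path the xfs_inode has already been freed through
->destroy_inode and reclaim, so the caller must not reference it at all. A
hedged caller sketch:

/* Hedged caller sketch: treat NULL uniformly as out of memory and
 * never touch ip on failure; in the inode_init_always() case it has
 * already been freed via the reclaim path described above. */
ip = xfs_inode_alloc(mp, ino);
if (!ip)
	return ENOMEM;	/* XFS of this vintage used positive errnos */
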
Example #7
int
xfs_init(void)
{
	extern kmem_zone_t	*xfs_bmap_free_item_zone;
	extern kmem_zone_t	*xfs_btree_cur_zone;
	extern kmem_zone_t	*xfs_trans_zone;
	extern kmem_zone_t	*xfs_buf_item_zone;
	extern kmem_zone_t	*xfs_dabuf_zone;
#ifdef XFS_DABUF_DEBUG
	extern lock_t	        xfs_dabuf_global_lock;
	spinlock_init(&xfs_dabuf_global_lock, "xfsda");
#endif

	/*
	 * Initialize all of the zone allocators we use.
	 */
	xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t),
						 "xfs_bmap_free_item");
	xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
					    "xfs_btree_cur");
	xfs_inode_zone = kmem_zone_init(sizeof(xfs_inode_t), "xfs_inode");
	xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
	xfs_da_state_zone =
		kmem_zone_init(sizeof(xfs_da_state_t), "xfs_da_state");
	xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf");

	/*
	 * The size of the zone allocated buf log item is the maximum
	 * size possible under XFS.  This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_zone =
		kmem_zone_init((sizeof(xfs_buf_log_item_t) +
				(((XFS_MAX_BLOCKSIZE / XFS_BLI_CHUNK) /
				  NBWORD) * sizeof(int))),
			       "xfs_buf_item");
	xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
				       ((XFS_EFD_MAX_FAST_EXTENTS - 1) * sizeof(xfs_extent_t))),
				      "xfs_efd_item");
	xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
				       ((XFS_EFI_MAX_FAST_EXTENTS - 1) * sizeof(xfs_extent_t))),
				      "xfs_efi_item");
	xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
	xfs_ili_zone = kmem_zone_init(sizeof(xfs_inode_log_item_t), "xfs_ili");
	xfs_chashlist_zone = kmem_zone_init(sizeof(xfs_chashlist_t),
					    "xfs_chashlist");
	_ACL_ZONE_INIT(xfs_acl_zone, "xfs_acl");

	/*
	 * Allocate global trace buffers.
	 */
#ifdef XFS_ALLOC_TRACE
	xfs_alloc_trace_buf = ktrace_alloc(XFS_ALLOC_TRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_BMAP_TRACE
	xfs_bmap_trace_buf = ktrace_alloc(XFS_BMAP_TRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_BMBT_TRACE
	xfs_bmbt_trace_buf = ktrace_alloc(XFS_BMBT_TRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_DIR_TRACE
	xfs_dir_trace_buf = ktrace_alloc(XFS_DIR_TRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_ATTR_TRACE
	xfs_attr_trace_buf = ktrace_alloc(XFS_ATTR_TRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_DIR2_TRACE
	xfs_dir2_trace_buf = ktrace_alloc(XFS_DIR2_GTRACE_SIZE, KM_SLEEP);
#endif

	xfs_dir_startup();

#if (defined(DEBUG) || defined(INDUCE_IO_ERROR))
	xfs_error_test_init();
#endif /* DEBUG || INDUCE_IO_ERROR */

	xfs_init_procfs();
	xfs_sysctl_register();
	return 0;
}
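
Each zone created here implies a mirror-image destroy at teardown. A sketch
of that cleanup, assuming kmem_zone_destroy() is the counterpart of
kmem_zone_init() (trace-buffer freeing and the procfs/sysctl unregistration
are elided):

/* Sketch only: tear down the zones from xfs_init() in reverse order
 * of creation; kmem_zone_destroy() is an assumed counterpart. */
STATIC void
xfs_cleanup_sketch(void)
{
	kmem_zone_destroy(xfs_chashlist_zone);
	kmem_zone_destroy(xfs_ili_zone);
	kmem_zone_destroy(xfs_ifork_zone);
	kmem_zone_destroy(xfs_efi_zone);
	kmem_zone_destroy(xfs_efd_zone);
	kmem_zone_destroy(xfs_buf_item_zone);
	kmem_zone_destroy(xfs_dabuf_zone);
	kmem_zone_destroy(xfs_da_state_zone);
	kmem_zone_destroy(xfs_trans_zone);
	kmem_zone_destroy(xfs_inode_zone);
	kmem_zone_destroy(xfs_btree_cur_zone);
	kmem_zone_destroy(xfs_bmap_free_item_zone);
}
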
Example #8
/*
 * Allocate and initialize a dquot. We don't always allocate fresh memory;
 * we try to reclaim a free dquot if the number of incore dquots is above
 * a threshold.
 * The only field inside the core that gets initialized at this point
 * is the d_id field. The idea is to fill in the entire q_core
 * when we read in the on disk dquot.
 */
STATIC xfs_dquot_t *
xfs_qm_dqinit(
	xfs_mount_t  *mp,
	xfs_dqid_t   id,
	uint	     type)
{
	xfs_dquot_t	*dqp;
	boolean_t	brandnewdquot;

	brandnewdquot = xfs_qm_dqalloc_incore(&dqp);
	dqp->dq_flags = type;
	dqp->q_core.d_id = cpu_to_be32(id);
	dqp->q_mount = mp;

	/*
	 * No need to re-initialize these if this is a reclaimed dquot.
	 */
	if (brandnewdquot) {
		dqp->dq_flnext = dqp->dq_flprev = dqp;
		mutex_init(&dqp->q_qlock);
		init_waitqueue_head(&dqp->q_pinwait);

		/*
		 * Because we want to use a counting completion, complete
		 * the flush completion once to allow a single access to
		 * the flush completion without blocking.
		 */
		init_completion(&dqp->q_flush);
		complete(&dqp->q_flush);

#ifdef XFS_DQUOT_TRACE
		dqp->q_trace = ktrace_alloc(DQUOT_TRACE_SIZE, KM_NOFS);
		xfs_dqtrace_entry(dqp, "DQINIT");
#endif
	} else {
		/*
		 * Only the q_core portion was zeroed in dqreclaim_one().
		 * So, we need to reset others.
		 */
		dqp->q_nrefs = 0;
		dqp->q_blkno = 0;
		dqp->MPL_NEXT = dqp->HL_NEXT = NULL;
		dqp->HL_PREVP = dqp->MPL_PREVP = NULL;
		dqp->q_bufoffset = 0;
		dqp->q_fileoffset = 0;
		dqp->q_transp = NULL;
		dqp->q_gdquot = NULL;
		dqp->q_res_bcount = 0;
		dqp->q_res_icount = 0;
		dqp->q_res_rtbcount = 0;
		atomic_set(&dqp->q_pincount, 0);
		dqp->q_hash = NULL;
		ASSERT(dqp->dq_flnext == dqp->dq_flprev);

#ifdef XFS_DQUOT_TRACE
		ASSERT(dqp->q_trace);
		xfs_dqtrace_entry(dqp, "DQRECLAIMED_INIT");
#endif
	}

	/*
	 * In either case we need to make sure group quotas have a different
	 * lock class than user quotas, to make sure lockdep knows we can
	 * lock one of each at the same time.
	 */
	if (!(type & XFS_DQ_USER))
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class);

	/*
	 * log item gets initialized later
	 */
	return (dqp);
}
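
The counting-completion trick above is what lets q_flush act as the dquot
flush lock: init_completion() plus the single complete() leaves one token,
so the first taker passes straight through and later takers block until the
holder completes again. A hedged sketch of the lock/unlock pair built on it
(helper names are illustrative):

/* Sketch only: the flush "lock" expressed in terms of the counting
 * completion set up in xfs_qm_dqinit(). Names are illustrative. */
static inline void
xfs_dqflock_sketch(xfs_dquot_t *dqp)
{
	wait_for_completion(&dqp->q_flush);	/* consume the token */
}

static inline void
xfs_dqfunlock_sketch(xfs_dquot_t *dqp)
{
	complete(&dqp->q_flush);		/* return the token */
}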