struct vnode * vn_initialize( struct inode *inode) { struct vnode *vp = LINVFS_GET_VP(inode); XFS_STATS_INC(vn_active); XFS_STATS_INC(vn_alloc); vp->v_flag = VMODIFIED; spinlock_init(&vp->v_lock, "v_lock"); spin_lock(&vnumber_lock); if (!++vn_generation) /* v_number shouldn't be zero */ vn_generation++; vp->v_number = vn_generation; spin_unlock(&vnumber_lock); ASSERT(VN_CACHED(vp) == 0); /* Initialize the first behavior and the behavior chain head. */ vn_bhv_head_init(VN_BHV_HEAD(vp), "vnode"); #ifdef XFS_VNODE_TRACE vp->v_trace = ktrace_alloc(VNODE_TRACE_SIZE, KM_SLEEP); #endif /* XFS_VNODE_TRACE */ vn_trace_exit(vp, "vn_initialize", (inst_t *)__return_address); return vp; }
/*
 * Finish initializing the SGI vnode embedded in a freshly populated
 * vcache entry: wire the AFS behavior descriptor (Afs_vnodeops) into
 * the vnode's behavior chain, initialize per-vnode locks and page-cache
 * state, and assert that the vnode carries no stale pages, file locks,
 * or credentials from a previous incarnation.
 *
 * The preprocessor branches select between SGI 6.5 (AFS_SGI65_ENV) and
 * older SGI kernels, which differ in behavior-chain API names and in
 * whether v_filocksem exists as a pointer or an embedded mutex.
 */
void
osi_PostPopulateVCache(struct vcache *avc)
{
    /* Attach Afs_vnodeops to this vnode via a behavior descriptor. */
    memset(&(avc->vc_bhv_desc), 0, sizeof(avc->vc_bhv_desc));
    bhv_desc_init(&(avc->vc_bhv_desc), avc, avc, &Afs_vnodeops);
#if defined(AFS_SGI65_ENV)
    vn_bhv_head_init(&(avc->v.v_bh), "afsvp");
    vn_bhv_insert_initial(&(avc->v.v_bh), &(avc->vc_bhv_desc));
    avc->v.v_mreg = avc->v.v_mregb = (struct pregion *)avc;
# if defined(VNODE_TRACING)
    avc->v.v_trace = ktrace_alloc(VNODE_TRACE_SIZE, 0);
# endif
    /* Per-vnode page-cache bitlock and file/buffer mutexes. */
    init_bitlock(&avc->v.v_pcacheflag, VNODE_PCACHE_LOCKBIT, "afs_pcache",
                 avc->v.v_number);
    init_mutex(&avc->v.v_filocksem, MUTEX_DEFAULT, "afsvfl", (long)avc);
    init_mutex(&avc->v.v_buf_lock, MUTEX_DEFAULT, "afsvnbuf", (long)avc);
#else
    /* Pre-6.5 behavior-chain API. */
    bhv_head_init(&(avc->v.v_bh));
    bhv_insert_initial(&(avc->v.v_bh), &(avc->vc_bhv_desc));
#endif
    vnode_pcache_init(&avc->v);
#if defined(DEBUG) && defined(VNODE_INIT_BITLOCK)
    /* Above define is never true except in SGI test kernels. */
    init_bitlock(&avc->v.v_flag, VLOCK, "vnode", avc->v.v_number);
#endif
#ifdef INTR_KTHREADS
    AFS_VN_INIT_BUF_LOCK(&(avc->v));
#endif
    vSetVfsp(avc, afs_globalVFS);
    vSetType(avc, VREG);
    VN_SET_DPAGES(&(avc->v), NULL);
    /*
     * Sanity checks: the vnode must be fully quiescent -- no pending
     * inactivation, no cached pages, no mappings, no file locks, and
     * no leftover credential reference.
     */
    osi_Assert((avc->v.v_flag & VINACT) == 0);
    avc->v.v_flag = 0;
    osi_Assert(VN_GET_PGCNT(&(avc->v)) == 0);
    osi_Assert(avc->mapcnt == 0 && avc->vc_locktrips == 0);
    osi_Assert(avc->vc_rwlockid == OSI_NO_LOCKID);
    osi_Assert(avc->v.v_filocks == NULL);
# if !defined(AFS_SGI65_ENV)
    osi_Assert(avc->v.v_filocksem == NULL);
# endif
    osi_Assert(avc->cred == NULL);
# if defined(AFS_SGI64_ENV)
    vnode_pcache_reinit(&avc->v);
    avc->v.v_rdev = NODEV;
# endif
    vn_initlist((struct vnlist *)&avc->v);
    avc->lastr = 0;
}
/*
 * Allocate and initialize the vnode pair (system vnode + XFS shadow
 * vnode) for an XFS inode.
 *
 * On success, returns 0 with *vpp pointing at the new xfs_vnode; the
 * underlying system vnode is exclusively locked, marked LK_CANRECURSE,
 * and inserted onto the mount's vnode list.  On failure a non-zero
 * error is returned and the shadow structure is freed.
 *
 * NOTE(review): on insmntque() failure only the shadow xfs_vnode is
 * freed here -- this assumes insmntque() itself unlocks and destroys
 * the system vnode on failure (FreeBSD semantics); confirm against the
 * kernel version in use.
 */
static int
xfs_vn_allocate(xfs_mount_t *mp, xfs_inode_t *ip, struct xfs_vnode **vpp)
{
	struct vnode *vp;
	struct xfs_vnode *vdata;
	int error;

	/* Use zone allocator here? */
	vdata = kmem_zalloc(sizeof(*vdata), KM_SLEEP);

	error = getnewvnode("xfs", XVFSTOMNT(XFS_MTOVFS(mp)),
	    &xfs_vnops, &vp);
	if (error) {
		kmem_free(vdata, sizeof(*vdata));
		return (error);
	}

	/* Allow recursive locking and hold the vnode across insmntque. */
	vp->v_vnlock->lk_flags |= LK_CANRECURSE;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
	error = insmntque(vp, XVFSTOMNT(XFS_MTOVFS(mp)));
	if (error != 0) {
		kmem_free(vdata, sizeof(*vdata));
		return (error);
	}

	/* Cross-link the system vnode and the XFS shadow vnode. */
	vp->v_data = (void *)vdata;
	vdata->v_number = 0;
	vdata->v_inode = ip;
	vdata->v_vfsp = XFS_MTOVFS(mp);
	vdata->v_vnode = vp;

	vn_bhv_head_init(VN_BHV_HEAD(vdata), "vnode");

#ifdef CONFIG_XFS_VNODE_TRACING
	vp->v_trace = ktrace_alloc(VNODE_TRACE_SIZE, KM_SLEEP);
#endif	/* CONFIG_XFS_VNODE_TRACING */

	/* NOTE(review): trace tag inherited from vn_initialize(); kept
	 * byte-identical in case trace tooling greps for it. */
	vn_trace_exit(vp, "vn_initialize", (inst_t *)__return_address);

	/*
	 * All failure paths returned above, so error is necessarily 0
	 * here; the original `if (error == 0)` guard was dead code.
	 */
	*vpp = vdata;
	return (0);
}