struct dentry *
vnlayer_decode_fh(
    SUPER_T *sb,
    __u32 *fh,
    int len,                            /* counted in units of 4-bytes */
    int fhtype,
    int (*acceptable)(void *context, struct dentry *de),
    void *context
)
{
    MDKI_FID_T *lfidp, *plfidp;
    DENT_T *dp;
    int error, fidlen;
    struct svc_export *exp = context;   /* XXX cheating! */
    SUPER_T *realsb = exp->ex_dentry->d_inode->i_sb;

    fidlen = fhtype >> 1;
    if (fidlen == 0)
        return ERR_PTR(-EINVAL);

    lfidp = KMEM_ALLOC(MDKI_FID_ALLOC_LEN(fidlen), KM_SLEEP);
    if (lfidp == NULL)
        return ERR_PTR(-ENOMEM);

    plfidp = KMEM_ALLOC(MDKI_FID_ALLOC_LEN(fidlen), KM_SLEEP);
    if (plfidp == NULL) {
        KMEM_FREE(lfidp, MDKI_FID_ALLOC_LEN(fidlen));
        return ERR_PTR(-ENOMEM);
    }

    error = vnlayer_unpack_fh(fh, len, fhtype, fidlen, lfidp, plfidp);
    if (error == 0) {
        /*
         * We've extracted the identifying details from the
         * client-provided fid.  Now use the system routines to handle
         * the dentry tree work; they will call back to
         * sb->s_export_op->get_dentry to interpret either the parent
         * or the object.
         */
        dp = (*realsb->s_export_op->find_exported_dentry)(realsb, lfidp,
                                                          plfidp, acceptable,
                                                          context);
        if (IS_ERR(dp)) {
            MDKI_VFS_LOG(VFS_LOG_ESTALE,
                         "%s: pid %d call to find_exported_dentry"
                         " returned error %ld\n",
                         __FUNCTION__, current->pid, PTR_ERR(dp));
        }
    } else {
        dp = ERR_PTR(error);
    }

    KMEM_FREE(lfidp, MDKI_FID_ALLOC_LEN(fidlen));
    KMEM_FREE(plfidp, MDKI_FID_ALLOC_LEN(fidlen));
    return dp;
}
void
vnode_iop_put_link(
    struct dentry *dentry,
    struct nameidata *nd,
    void *cookie
)
{
    KMEM_FREE(cookie, PATH_MAX);
    return;
}
void
hxge_destroy_kstats(p_hxge_t hxgep)
{
    int channel;
    p_hxge_dma_pt_cfg_t p_dma_cfgp;
    p_hxge_hw_pt_cfg_t p_cfgp;

    HXGE_DEBUG_MSG((hxgep, KST_CTL, "==> hxge_destroy_kstats"));

    if (hxgep->statsp == NULL)
        return;

    if (hxgep->statsp->ksp)
        kstat_delete(hxgep->statsp->ksp);

    p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
    p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;

    for (channel = 0; channel < p_cfgp->max_rdcs; channel++) {
        if (hxgep->statsp->rdc_ksp[channel]) {
            kstat_delete(hxgep->statsp->rdc_ksp[channel]);
        }
    }

    for (channel = 0; channel < p_cfgp->max_tdcs; channel++) {
        if (hxgep->statsp->tdc_ksp[channel]) {
            kstat_delete(hxgep->statsp->tdc_ksp[channel]);
        }
    }

    if (hxgep->statsp->rdc_sys_ksp)
        kstat_delete(hxgep->statsp->rdc_sys_ksp);
    if (hxgep->statsp->tdc_sys_ksp)
        kstat_delete(hxgep->statsp->tdc_sys_ksp);
    if (hxgep->statsp->peu_sys_ksp)
        kstat_delete(hxgep->statsp->peu_sys_ksp);
    if (hxgep->statsp->mmac_ksp)
        kstat_delete(hxgep->statsp->mmac_ksp);
    if (hxgep->statsp->pfc_ksp)
        kstat_delete(hxgep->statsp->pfc_ksp);
    if (hxgep->statsp->vmac_ksp)
        kstat_delete(hxgep->statsp->vmac_ksp);
    if (hxgep->statsp->port_ksp)
        kstat_delete(hxgep->statsp->port_ksp);

    if (hxgep->statsp)
        KMEM_FREE(hxgep->statsp, hxgep->statsp->stats_size);

    HXGE_DEBUG_MSG((hxgep, KST_CTL, "<== hxge_destroy_kstats"));
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
void *
#else
int
#endif
vnode_iop_follow_link(
    DENT_T *dentry,                     /* link */
    struct nameidata *nd                /* link resolution */
)
{
    INODE_T *ip;
    struct uio uio;
    iovec_t iov;
    int err = 0;
    char *buf = KMEM_ALLOC(PATH_MAX, KM_SLEEP);
    CALL_DATA_T cd;

    if (buf == NULL)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
        return ERR_PTR(-ENOMEM);
#else
        return -ENOMEM;
#endif

    uio.uio_iov = &iov;
    mfs_uioset(&uio, buf, PATH_MAX-1, 0, UIO_SYSSPACE);
    mdki_linux_init_call_data(&cd);
    ip = dentry->d_inode;

    ASSERT_KERNEL_UNLOCKED();
    ASSERT_I_SEM_NOT_MINE(ip);

    err = VOP_READLINK(ITOV(ip), &uio, &cd);
    err = mdki_errno_unix_to_linux(err);
    mdki_linux_destroy_call_data(&cd);

    if (err == 0) {
        if (uio.uio_resid == 0)
            err = -ENAMETOOLONG;
        else {
            /* readlink doesn't copy a NUL at the end, we must do it */
            buf[uio.uio_offset] = '\0';
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
            /* follow the link */
            err = vfs_follow_link(nd, buf);
#else
            nd_set_link(nd, buf);
            return(buf);                /* vnode_iop_put_link() will free this buf. */
#endif
        }
    }
    KMEM_FREE(buf, PATH_MAX);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
    return ERR_PTR(err);
#else
    return(err);
#endif
}
void
mvfs_rddir_cache_destroy(struct mfs_mnode *mnp)
{
    ASSERT(MFS_ISVOB(mnp));
    /*
     * The mnode lock is not necessary (nor is it held).  This routine is
     * only called from mnode destroy code, so the mnode can't be found
     * by other processes.
     */
    /* ASSERT(MISLOCKED(mnp)); */
    MDB_XLOG((MDB_MNOPS, "rddir cache destroy mnp %lx\n", mnp));
    if (mnp->mn_vob.rddir_cache) {
        mvfs_rddir_cache_empty(mnp);
        KMEM_FREE(mnp->mn_vob.rddir_cache,
                  RDDIR_CACHE_SIZE(mnp->mn_vob.rddir_cache));
        mnp->mn_vob.rddir_cache = NULL;
    }
}
STATIC void
mvfs_rddir_cache_empty(struct mfs_mnode *mnp)
{
    register int i;
    struct mvfs_rce *ep;

    for (i = 0, ep = &mnp->mn_vob.rddir_cache->entries[0];
         i < mnp->mn_vob.rddir_cache->nentries;
         i++, ep++)
    {
        if (ep->valid) {
            if (ep->block != NULL)
                KMEM_FREE(ep->block, ep->bsize);
            ep->valid = FALSE;
            ep->block = NULL;
        }
    }
}
void
mvfs_rddir_cache_enter_mnlocked(
    struct mfs_mnode *mnp,
    struct mvfs_rce *entryp
)
{
    register int i;
    register struct mvfs_rce *ep;
    register mvfs_common_data_t *mcdp = MDKI_COMMON_GET_DATAP();

    ASSERT(MFS_ISVOB(mnp));
    ASSERT(MISLOCKED(mnp));

    if (!mcdp->mvfs_rdcenabled) {
        return;
    }
    if (mnp->mn_vob.rddir_cache == NULL) {
        mnp->mn_vob.rddir_cache = (struct mvfs_rddir_cache *)KMEM_ALLOC(
                                    RDDIR_CACHE_SIZE_N(mcdp->mvfs_rddir_blocks),
                                    KM_SLEEP|KM_PAGED);
        if (mnp->mn_vob.rddir_cache == NULL) {
            MDB_XLOG((MDB_MNOPS,
                      "mvfs_rddir_cache_enter: Failed to allocate memory for "
                      "rddir cache, not caching dirents for mnp = "
                      "%"KS_FMT_PTR_T"\n", mnp));
            return;
        }
        mnp->mn_vob.rddir_cache->nentries = mcdp->mvfs_rddir_blocks;
        for (i = 0, ep = &mnp->mn_vob.rddir_cache->entries[0];
             i < mnp->mn_vob.rddir_cache->nentries;
             i++, ep++)
        {
            ep->valid = FALSE;
            ep->block = NULL;
        }
    }
    if (entryp->offset == (MOFFSET_T)0) {
        /*
         * always use first entry for offset 0 (try to keep it around
         * since it probably has `.' and `..')
         */
        ep = &mnp->mn_vob.rddir_cache->entries[0];
    } else {
        /*
         * If cache has an unused slot, use it.
         *
         * If it's full, replace the last entry.  readdir() is almost
         * always used sequentially and traverses the entire
         * directory, so if it won't all fit, leave at least the first
         * portion in cache with the hope that it will find `..' in a
         * cached block (for pwd)
         */
        for (i = 1, ep = &mnp->mn_vob.rddir_cache->entries[0];
             i < mnp->mn_vob.rddir_cache->nentries;
             i++, ep++)
        {
            if (!ep[1].valid) {
                ep++;
                break;
            }
        }
    }
    if (ep->valid && ep->block != NULL) {
        KMEM_FREE(ep->block, ep->bsize);
    }
    *ep = *entryp;
    MDB_XLOG((MDB_MNOPS, "rddir cache enter mnp %lx off %lx size %lx\n",
              mnp, ep->offset, ep->size));
}
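The slot-selection policy described in the comments above is slightly obscured by the pointer walk, so here is a minimal stand-alone sketch of the same rule, using illustrative types rather than the real MVFS structures: offset 0 always maps to slot 0, otherwise the first invalid slot is reused, and if every slot is valid the last one is replaced.

/* Illustrative sketch only -- types and names are assumptions, not MVFS code. */
#include <stdbool.h>
#include <stddef.h>

struct rce_slot { bool valid; };

size_t
pick_slot(const struct rce_slot *slots, size_t nslots, long offset)
{
    size_t i;

    if (offset == 0)
        return 0;                       /* keep `.' and `..' cached in slot 0 */
    for (i = 1; i < nslots; i++) {
        if (!slots[i].valid)
            return i;                   /* first unused slot */
    }
    return nslots - 1;                  /* cache full: replace the last slot */
}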
/*
 * NFS access to vnode file systems.
 *
 * We provide dentry/inode_to_fh() and fh_to_dentry() methods so that the
 * vnode-based file system can hook up its VOP_FID() and VFS_VGET()
 * methods.  The Linux NFS server calls these methods when encoding an
 * object into a file handle to be passed to the client for future
 * use, and when decoding a file handle and looking for the file
 * system object it describes.
 *
 * VOP_FID() takes a vnode and provides a file ID (fid) that can later
 * be presented (in a pair with a VFS pointer) to VFS_VGET() to
 * reconstitute that vnode.  In a Sun ONC-NFS style kernel, VOP_FID()
 * is used twice per file handle, once for the exported directory and
 * once for the object itself.  In Linux, the NFS layer itself handles
 * the export tree checking (depending on the status of
 * NFSEXP_NOSUBTREECHECK), so the file system only needs to fill in
 * the file handle with details for the object itself.  We always
 * provide both object and parent in the file handle to be sure that
 * we don't end up short on file handle space in a future call that
 * requires both.
 *
 * On a call from the NFS client, the Linux NFS layer finds a
 * superblock pointer from the file handle passed by the NFS client,
 * then calls the fh_to_dentry() method to get a dentry.  Sun ONC-NFS
 * kernels call VFS_VGET() on a vfsp, passing the FID portion of the
 * file handle.  In this layer, we unpack the file handle, determine
 * whether the parent or the object is needed, and pass the info along
 * to a VFS_VGET() call.  Once that returns, we look for an attached
 * dentry and use it, or fabricate a new one which NFS will attempt to
 * reconnect to the namespace.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
int
vnlayer_inode_to_fh(
    struct inode *inode,
    __u32 *fh,
    int *lenp,
    struct inode *parent
)
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) */
int
vnlayer_dentry_to_fh(
    struct dentry *dent,
    __u32 *fh,
    int *lenp,
    int need_parent
)
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) */
{
    int error;
    int type;
    int mylen;
    MDKI_FID_T *lfidp = NULL;
    MDKI_FID_T *parent_fidp = NULL;
    mdki_boolean_t bailout_needed = TRUE;   /* Assume we'll fail. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
    SUPER_T *sbp;
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
    struct inode *inode = dent->d_inode;
    struct inode *parent = dent->d_parent->d_inode;
#endif

    /*
     * We use the type byte (return value) to encode the FH length.  Since we
     * always include two FIDs of the same size, the type must be even, so
     * that's how we "encode" the length of each FID (i.e. it is half the
     * total length).
     *
     * Always include the parent entry; this makes sure that we only work
     * with NFS protocols that have enough room for our file handles.
     * (Without this, we may return a directory file handle OK yet be unable
     * to return a plain file handle.)  Currently, we can just barely squeeze
     * two standard 10-byte vnode FIDs into the NFS v2 file handle.  The NFS
     * v3 handle has plenty of room.
     */
    ASSERT(ITOV(inode));
    error = VOP_FID(ITOV(inode), &lfidp);
    if (error != 0) {
        ASSERT(lfidp == NULL);
        goto bailout;
    }
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
    /* we may be called with a NULL parent */
    if (parent == NULL) {
        /* in this case, fabricate a fake parent */
        parent_fidp = (MDKI_FID_T *) KMEM_ALLOC(MDKI_FID_LEN(lfidp), KM_SLEEP);
        if (parent_fidp == NULL) {
            MDKI_VFS_LOG(VFS_LOG_ERR, "%s: can't allocate %d bytes\n",
                         __func__, (int) MDKI_FID_LEN(lfidp));
            goto bailout;
        }
        memset(parent_fidp, 0xff, MDKI_FID_LEN(lfidp));
        parent_fidp->fid_len = lfidp->fid_len;
    } else
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) */
    {
        error = VOP_FID(ITOV(parent), &parent_fidp);
        if (error != 0) {
            ASSERT(parent_fidp == NULL);
            goto bailout;
        }
    }
    /*
     * Our encoding scheme can't tolerate different length FIDs
     * (because otherwise the type wouldn't be guaranteed to be even).
     */
    if (parent_fidp->fid_len != lfidp->fid_len) {
        MDKI_VFS_LOG(VFS_LOG_ERR,
                     "%s: unbalanced parent/child fid lengths: %d, %d\n",
                     __func__, parent_fidp->fid_len, lfidp->fid_len);
        goto bailout;
    }
    /*
     * The vnode layer needs to release the storage for a fid on
     * Linux.  The VOP_FID() function allocates its own fid in
     * non-error cases.  Other UNIX systems release this storage
     * in the caller of VOP_FID(), so we have to do it here.  We
     * copy the vnode-style fid into the caller-allocated space,
     * then free our allocated version here.
     *
     * Remember: vnode lengths count bytes, Linux lengths count __u32
     * units.
     */
    type = parent_fidp->fid_len + lfidp->fid_len;   /* Guaranteed even. */
    mylen = roundup(type + MDKI_FID_EXTRA_SIZE, sizeof(*fh));
    if (mylen == VNODE_NFS_FH_TYPE_RESERVED ||
        mylen >= VNODE_NFS_FH_TYPE_ERROR)
    {
        MDKI_VFS_LOG(VFS_LOG_ESTALE,
                     "%s: required length %d out of range (%d,%d)\n",
                     __func__, mylen,
                     VNODE_NFS_FH_TYPE_RESERVED, VNODE_NFS_FH_TYPE_ERROR);
        goto bailout;
    }
    if (((*lenp) * sizeof(*fh)) < mylen) {
        MDKI_VFS_LOG(VFS_LOG_ESTALE, "%s: need %d bytes for FH, have %d\n",
                     __func__, mylen, (int) (sizeof(*fh) * (*lenp)));
        goto bailout;
    }

    /* Copy FIDs into the file handle. */
    *lenp = mylen / sizeof(*fh);    /* No remainder because of roundup above. */
    BZERO(fh, mylen);               /* Zero whole fh to round up to __u32 boundary */
    BCOPY(lfidp->fid_data, fh, lfidp->fid_len);
    BCOPY(parent_fidp->fid_data, ((caddr_t)fh) + (type / 2),
          parent_fidp->fid_len);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
    /*
     * For a 64-bit OS, use a 32-bit hash of the SB pointer.
     * For a 32-bit OS, use the pointer itself.
     */
    if (ITOV(inode) == NULL || ITOV(inode)->v_vfsmnt == NULL) {
        MDKI_VFS_LOG(VFS_LOG_ESTALE, "%s: %p is this a MVFS inode?\n",
                     __func__, inode);
        goto bailout;
    } else {
        sbp = ((struct vfsmount *)ITOV(inode)->v_vfsmnt)->mnt_sb;
    }
    MDKI_FID_SET_SB_HASH(fh, type / 2, MDKI_FID_CALC_HASH(sbp));
#endif

    bailout_needed = FALSE;         /* We're home free now. */

    if (bailout_needed) {
  bailout:
        type = VNODE_NFS_FH_TYPE_ERROR;
        *lenp = 0;
    }

#ifdef KMEMDEBUG
    if (lfidp != NULL)
        REAL_KMEM_FREE(lfidp, MDKI_FID_LEN(lfidp));
    if (parent_fidp != NULL)
        REAL_KMEM_FREE(parent_fidp, MDKI_FID_LEN(parent_fidp));
#else
    if (lfidp != NULL)
        KMEM_FREE(lfidp, MDKI_FID_LEN(lfidp));
    if (parent_fidp != NULL)
        KMEM_FREE(parent_fidp, MDKI_FID_LEN(parent_fidp));
#endif
    return type;
}
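The length-encoding arithmetic above is easy to lose in the #ifdef noise, so the following small user-space sketch shows the same packing scheme in isolation: two equal-length FIDs are laid out back to back, the returned "type" is their combined byte length (guaranteed even, so the decode side recovers each FID's length as type >> 1), and the total is rounded up to whole __u32 words. The fixed 10-byte FID size, buffer sizes, and helper names are illustrative assumptions, and the sketch omits the extra superblock-hash slot (MDKI_FID_EXTRA_SIZE) that the real code reserves.

/* Illustrative user-space sketch of the FID packing scheme -- not MVFS code. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define FID_LEN   10        /* assumed per-FID byte length (not an MVFS constant) */
#define FH_WORDS  16        /* assumed number of __u32 slots the caller offers */

/* Pack object and parent FIDs into fh; returns the "type" (total FID bytes). */
int
pack_fh(const unsigned char *obj_fid, const unsigned char *par_fid,
        uint32_t *fh, int *lenp /* in/out, counted in __u32 units */)
{
    int type = 2 * FID_LEN;                       /* even by construction */
    size_t bytes = (type + sizeof(uint32_t) - 1) & ~(sizeof(uint32_t) - 1);

    if (bytes > (size_t)(*lenp) * sizeof(uint32_t))
        return -1;                                /* caller's handle too small */

    memset(fh, 0, bytes);                         /* zero out the rounding slack */
    memcpy(fh, obj_fid, FID_LEN);                 /* object FID at byte offset 0 */
    memcpy((unsigned char *)fh + type / 2, par_fid, FID_LEN); /* parent at type/2 */
    *lenp = (int)(bytes / sizeof(uint32_t));
    return type;                                  /* decoder recovers FID_LEN as type >> 1 */
}

int
main(void)
{
    unsigned char obj[FID_LEN] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
    unsigned char par[FID_LEN] = { 11, 12, 13, 14, 15, 16, 17, 18, 19, 20 };
    uint32_t fh[FH_WORDS];
    int len = FH_WORDS;
    int type = pack_fh(obj, par, fh, &len);

    printf("type=%d, fh length=%d words, per-FID length=%d bytes\n",
           type, len, type >> 1);
    return 0;
}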
int
vnlayer_fill_super(
    SUPER_T *super_p,
    void *data_p,
    int silent
)
{
    INODE_T *ino_p;
    VNODE_T *rootvp;
    VATTR_T va;
    VFS_T *vfsp;
    int err = 0;
    CALL_DATA_T cd;

    ASSERT_KERNEL_LOCKED();             /* sys_mount() */
    ASSERT_SB_MOUNT_LOCKED_W(super_p);

    /* can't assert on mount_sem, we don't have access to it. */

    if (vnlayer_vfs_opvec == NULL) {
        MDKI_VFS_LOG(VFS_LOG_ERR,
                     "%s: VFS operation not set yet "
                     "(no file system module loaded?)\n", __func__);
        err = -ENODATA;
        goto return_NULL;
    }
    if (MDKI_INOISOURS(vnlayer_get_urdir_inode())) {
        /* can't handle this case */
        MDKI_VFS_LOG(VFS_LOG_ERR,
                     "%s: can't handle mounts inside setview.\n", __func__);
        err = -EINVAL;
        goto return_NULL;
    }

    /*
     * The only fields we have coming in are s_type and s_flags.
     */
    /* Verify this */
    super_p->s_blocksize = MVFS_DEF_BLKSIZE;
    super_p->s_blocksize_bits = MVFS_DEF_BLKSIZE_BITS;
    super_p->s_maxbytes = MVFS_DEF_MAX_FILESIZE;
    super_p->s_op = &mvfs_super_ops;
    super_p->s_export_op = &vnlayer_export_ops;
    super_p->dq_op = NULL;
    super_p->s_magic = MVFS_SUPER_MAGIC;

    /*
     * XXX This module is currently restricted to one client file system
     * type at a time, as registered via the vnlayer_vfs_opvec.
     */
    vfsp = KMEM_ALLOC(sizeof(*vfsp), KM_SLEEP);
    if (vfsp == NULL) {
        MDKI_VFS_LOG(VFS_LOG_ERR, "%s failed: no memory\n", __func__);
        SET_SBTOVFS(super_p, NULL);
        err = -ENOMEM;
        goto return_NULL;
    }
    BZERO(vfsp, sizeof(*vfsp));
    SET_VFSTOSB(vfsp, super_p);
    SET_SBTOVFS(super_p, vfsp);
    vfsp->vfs_op = vnlayer_vfs_opvec;
    /* XXX fill in more of vfsp (flag?) */
    if (super_p->s_flags & MS_RDONLY)
        vfsp->vfs_flag |= VFS_RDONLY;
    if (super_p->s_flags & MS_NOSUID)
        vfsp->vfs_flag |= VFS_NOSUID;

    err = vnlayer_linux_mount(vfsp, data_p);
    if (err) {
        goto bailout;
    }

    /*
     * Now create our dentry and set that up in the superblock.  Get
     * the inode from the vnode at the root of the file system, and
     * attach it to a new dentry.
     */
    mdki_linux_init_call_data(&cd);
    err = VFS_ROOT(SBTOVFS(super_p), &rootvp);
    if (err) {
        err = mdki_errno_unix_to_linux(err);
        (void) VFS_UNMOUNT(vfsp, &cd);
        mdki_linux_destroy_call_data(&cd);
        goto bailout;
    }
    ino_p = VTOI(rootvp);

#ifdef CONFIG_FS_POSIX_ACL
    /*
     * If the system supports ACLs, we set the flag in the superblock
     * depending on the ability of the underlying filesystem.
     */
    if (vfsp->vfs_flag & VFS_POSIXACL) {
        super_p->s_flags |= MS_POSIXACL;
    }
#endif

    /*
     * Call getattr() to prime this inode with real attributes via the
     * callback to mdki_linux_vattr_pullup().
     */
    VATTR_NULL(&va);
    /* ignore error code, we're committed */
    (void) VOP_GETATTR(rootvp, &va, 0, &cd);

    /*
     * This will allocate a dentry with a name of /, which is
     * what Linux uses in all filesystem roots.  The dentry is
     * also not put on the hash chains because Linux does not
     * hash file system roots.  It finds them through the super
     * blocks.
     */
    super_p->s_root = VNODE_D_ALLOC_ROOT(ino_p);
    if (super_p->s_root) {
        if (VFSTOSB(vnlayer_looproot_vp->v_vfsp) == super_p) {
            /* loopback names are done with regular dentry ops */
            MDKI_SET_DOPS(super_p->s_root, &vnode_dentry_ops);
        } else {
            /*
             * setview names come in via VOB mounts, they're marked
             * with setview dentry ops
             */
            MDKI_SET_DOPS(super_p->s_root, &vnode_setview_dentry_ops);
        }
        super_p->s_root->d_fsdata = NULL;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
        atomic_set(&super_p->s_root->d_count, 1);
#endif
        /*
         * d_alloc_root assumes that the caller will take care of
         * bumping the inode count for the dentry.  So we will oblige.
         */
        igrab(ino_p);
    } else {
        VN_RELE(rootvp);
        (void) VFS_UNMOUNT(vfsp, &cd);
        mdki_linux_destroy_call_data(&cd);
        err = -ENOMEM;
        goto bailout;
    }
    mdki_linux_destroy_call_data(&cd);
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0)
    super_p->s_dirt = 1;        /* we want to be called on write_super/sync() */
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,38)
    /* writeback is delegated to the underlying fs */
    super_p->s_bdi = &noop_backing_dev_info;
#endif
    /*
     * Release the reference on rootvp--the super block holds appropriate
     * references now.
     */
    VN_RELE(rootvp);
    return(0);

  bailout:
    MDKI_VFS_LOG(VFS_LOG_ERR,
                 "%s failed: error %d\n", __func__,
                 vnlayer_errno_linux_to_unix(err));
    SET_SBTOVFS(super_p, NULL);
    KMEM_FREE(vfsp, sizeof(*vfsp));
  return_NULL:
    return(err);
}
/* Common file handle decoding for both parent and dentry */
static struct dentry *
vnlayer_decode_fh(
    SUPER_T *sb,
    struct fid *fh,
    int len,                            /* counted in units of 4-bytes */
    int fhtype,
    int is_parent
)
{
    MDKI_FID_T *lfidp;
    DENT_T *dp;
    int error, fidlen;
    SUPER_T *realsb;
    unsigned realsb_hash;

    fidlen = fhtype >> 1;
    if (fidlen == 0) {
        return ERR_PTR(-EINVAL);
    }
    if (len * 4 < MDKI_FID_LEN_WITH_HASH(fidlen)) {
        MDKI_VFS_LOG(VFS_LOG_ESTALE, "%s: FH too small to be a MVFS FH\n",
                     __FUNCTION__);
        return ERR_PTR(-EINVAL);
    }
    lfidp = KMEM_ALLOC(MDKI_FID_ALLOC_LEN(fidlen), KM_SLEEP);
    if (lfidp == NULL) {
        return ERR_PTR(-ENOMEM);
    }
    if (is_parent) {
        error = vnlayer_unpack_fh((__u32 *)fh, len, fhtype, fidlen,
                                  NULL, lfidp);
    } else {
        error = vnlayer_unpack_fh((__u32 *)fh, len, fhtype, fidlen,
                                  lfidp, NULL);
    }
    if (error == 0) {
        realsb_hash = MDKI_FID_SB_HASH(fh, fidlen);
        /*
         * Search the VOB mount list for the super_block we encoded.
         * If the result is not NULL, the superblock was locked with
         * MDKI_LOCK_SB and must be unlocked with MDKI_UNLOCK_SB.
         */
        realsb = (SUPER_T *) mvfs_find_mount(vnlayer_eval_mount,
                                             &realsb_hash);
        if (realsb != NULL) {
            /*
             * We found a VOB mount matching this hash.  Leave it to
             * vnlayer_get_dentry to decide whether we can trust this
             * FID; it should be able to detect any staleness.
             */
            dp = vnlayer_get_dentry(realsb, lfidp);
            MDKI_UNLOCK_SB(realsb);
            if (IS_ERR(dp)) {
                MDKI_VFS_LOG(VFS_LOG_ESTALE,
                             "%s: pid %d vnlayer_get_dentry returned error %ld\n",
                             __FUNCTION__, current->pid, PTR_ERR(dp));
            }
        } else {
            dp = ERR_PTR(-EINVAL);
            MDKI_VFS_LOG(VFS_LOG_ESTALE, "%s SB not found, hash=%08x\n",
                         __FUNCTION__, realsb_hash);
        }
    } else {
        dp = ERR_PTR(error);
    }
    KMEM_FREE(lfidp, MDKI_FID_ALLOC_LEN(fidlen));
    return dp;
}
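For symmetry with the packing sketch above, here is a hedged guess at what the unpack side does, inferred only from the call sites in this file: each FID is fhtype >> 1 bytes, the object FID sits at byte offset 0 with the parent immediately after it, and either output pointer may be NULL when only one half is wanted. The real vnlayer_unpack_fh() is not shown in this section and may differ in detail.

/* Hypothetical stand-in for vnlayer_unpack_fh(), inferred from its callers. */
#include <stdint.h>
#include <string.h>
#include <errno.h>

int
unpack_fh_sketch(const uint32_t *fh, int len /* __u32 units */, int fhtype,
                 unsigned char *obj_fid, unsigned char *par_fid)
{
    int fidlen = fhtype >> 1;                   /* each FID is half the total */
    const unsigned char *raw = (const unsigned char *)fh;

    if (fidlen == 0 || (size_t)len * sizeof(uint32_t) < (size_t)(2 * fidlen))
        return -EINVAL;                         /* handle too short for both FIDs */
    if (obj_fid != NULL)
        memcpy(obj_fid, raw, fidlen);           /* object FID at offset 0 */
    if (par_fid != NULL)
        memcpy(par_fid, raw + fidlen, fidlen);  /* parent FID right after it */
    return 0;
}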
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
void *
#else
int
#endif
vnode_shadow_iop_follow_link(
    DENT_T *dentry,                     /* entry we are trying to resolve */
    struct nameidata *nd                /* Contains parent dentry */
)
{
    int err = 0;
    int len = PATH_MAX;
    char *buff;
    mm_segment_t old_fs;                /* Because we provide a kernel buffer. */
    INODE_T *real_inode;
    DENT_T *real_dentry;
    VNODE_T *cvp;

    /* this function must consume a reference on base */
    /* We only path_release on error. */
    err = 0;

    real_dentry = REALDENTRY_LOCKED(dentry, &cvp);
    if (real_dentry == NULL) {
        err = -ENOENT;
        MDKI_PATH_RELEASE(nd);
        goto out_nolock;
    }
    VNODE_DGET(real_dentry);            /* protect inode */
    if (real_dentry->d_inode == NULL) {
        /* delete race */
        err = -ENOENT;
        MDKI_PATH_RELEASE(nd);
        goto out;
    }
    real_inode = real_dentry->d_inode;
    /* If there are no underlying symlink functions, we are done */
    if (real_inode->i_op && real_inode->i_op->readlink &&
        real_inode->i_op->follow_link)
    {
        buff = KMEM_ALLOC(len, KM_SLEEP);
        if (!buff) {
            MDKI_PATH_RELEASE(nd);
            err = -ENOMEM;
            goto out;
        }
        /* We're providing a kernel buffer to copy into, so let everyone know. */
        old_fs = get_fs();
        set_fs(KERNEL_DS);
        err = vnode_shadow_iop_readlink(dentry, buff, len);
        set_fs(old_fs);
        if (err < 0) {
            KMEM_FREE(buff, len);
            MDKI_PATH_RELEASE(nd);
            goto out;
        }
        /* done with dentry */
        /* Make sure the string is NUL terminated */
        buff[err] = 0;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
        /* follow the link */
        err = vfs_follow_link(nd, buff);
        KMEM_FREE(buff, len);
#else
        VNODE_DPUT(real_dentry);
        REALDENTRY_UNLOCK(dentry, cvp);
        nd_set_link(nd, buff);
        return(buff);                   /* the corresponding put_link() will free this buf. */
#endif
    }
  out:
    VNODE_DPUT(real_dentry);
    REALDENTRY_UNLOCK(dentry, cvp);
  out_nolock:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
    return ERR_PTR(err);
#else
    return(err);
#endif
}