/* FUSE getattr callback: zero the caller's stat buffer, then delegate to
 * libnfs (global context `nfs`).  Returns nfs_stat()'s result directly
 * (0 on success, negative errno on failure). */
static int fuse_nfs_getattr(const char *path, struct stat *stbuf)
{
	memset(stbuf, 0, sizeof(*stbuf));

	return nfs_stat(nfs, path, stbuf);
}
/**
 * Stat a file on an NFS share via a throw-away libnfs context.
 *
 * Resolves the host, splits the URL into export + relative path, mounts the
 * export on a temporary context, stats the relative path into @statbuff and
 * tears the context down again.  Using a temporary context means this works
 * even when the URL points at a different export than the member context
 * m_pNfsContext is mounted on.
 *
 * @param url      parsed VFS url (hostname + filename)
 * @param statbuff receives the stat result on success
 * @return 0 on success, negative libnfs error code on failure
 */
int CNFSConnection::stat(VFSURL* url, struct stat *statbuff)
{
  PLATFORM::CLockObject lock(*this);
  int nfsRet = 0;
  std::string exportPath;
  std::string relativePath;
  struct nfs_context *pTmpContext = NULL;

  resolveHost(url->hostname);

  if(splitUrlIntoExportAndPath(url->hostname, url->filename, exportPath, relativePath))
  {
    pTmpContext = nfs_init_context();

    if(pTmpContext)
    {
      //we connect to the directory of the path. This will be the "root" path of this connection then.
      //So all fileoperations are relative to this mountpoint...
      nfsRet = nfs_mount(pTmpContext, m_resolvedHostName.c_str(), exportPath.c_str());

      if(nfsRet == 0)
      {
        nfsRet = nfs_stat(pTmpContext, relativePath.c_str(), statbuff);
        //only claim success in the log when the mount actually succeeded
        XBMC->Log(ADDON::LOG_DEBUG,"NFS: Connected to server %s and export %s in tmpContext\n", url->hostname, exportPath.c_str());
      }
      else
      {
        //BUG FIX: query the error from the temporary context we mounted with
        //(pTmpContext), not from the unrelated member context m_pNfsContext
        XBMC->Log(ADDON::LOG_ERROR,"NFS: Failed to mount nfs share: %s (%s)\n", exportPath.c_str(), nfs_get_error(pTmpContext));
      }

      nfs_destroy_context(pTmpContext);
    }
  }
  return nfsRet;
}
/* FUSE lowlevel getattr handler backed by libnfs.
 *
 * Stats the remote path, caches the returned attributes on the local
 * inode_struct, and replies with the stat buffer rewritten to use the
 * local inode number and a fixed blocksize.
 *
 * req       - fuse request to reply to (always answered exactly once)
 * entry     - local entry whose inode caches the remote attributes
 * call_info - carries the resolved path and is released on every exit path
 */
static void workspace_nfs_getattr(fuse_req_t req, struct entry_struct *entry, struct call_info_struct *call_info)
{
    struct resource_struct *resource=call_info->object->resource;
    struct net_nfs_export_struct *nfs_export=(struct net_nfs_export_struct *) resource->data;
    struct nfs_context *nfs_ctx=(struct nfs_context *) nfs_export->data;
    char *path=call_info->pathinfo.path + call_info->relpath;
    struct stat st;
    int result=0;

    /* an empty relative path refers to the export root */
    if (strlen(path)==0) path=(char *) rootpath;

    memset(&st, 0, sizeof(struct stat));

    logoutput("workspace_nfs_getattr, path %s", path);

    /* serialize access to the shared nfs context */
    pthread_mutex_lock(&nfs_export->mutex);
    result=nfs_stat(nfs_ctx, path, &st);
    pthread_mutex_unlock(&nfs_export->mutex);

    if (result<0) {

	/* nfs_stat returns a negative errno value; fuse wants it positive */
	fuse_reply_err(req, abs(result));

    } else {
	struct inode_struct *inode=entry->inode;

	/* cache the remote attributes on the local inode */
	inode->mode=st.st_mode;
	inode->nlink=st.st_nlink;
	inode->uid=st.st_uid;
	inode->gid=st.st_gid;
	inode->rdev=st.st_rdev;
	inode->size=st.st_size;

	/* advertise a fixed blocksize and derive st_blocks from it,
	   rounding up to whole blocks */
	st.st_blksize=_DEFAULT_BLOCKSIZE;

	if (inode->size % st.st_blksize == 0) {

	    st.st_blocks=inode->size / st.st_blksize;

	} else {

	    st.st_blocks=1 + inode->size / st.st_blksize;

	}

	inode->mtim.tv_sec=st.st_mtim.tv_sec;
	inode->mtim.tv_nsec=st.st_mtim.tv_nsec;
	inode->ctim.tv_sec=st.st_ctim.tv_sec;
	inode->ctim.tv_nsec=st.st_ctim.tv_nsec;

	/* report the local inode number, not the server's */
	st.st_ino=inode->ino;
	st.st_dev=0;

	fuse_reply_attr(req, &st, fs_options.attr_timeout);

    }

    free_path_pathinfo(&call_info->pathinfo);

}
/* FUSE lowlevel mknod handler backed by libnfs.
 *
 * Creates the node remotely via nfs_mknod, then creates the matching local
 * entry/inode pair, refreshes the attributes from the server (best effort)
 * and replies with a fuse_entry_param.  On any failure the partially built
 * entry/inode pair is torn down and an error is replied instead.
 *
 * req       - fuse request, answered exactly once on every path
 * pinode    - parent inode the new entry is created under
 * xname     - name of the new entry
 * call_info - carries the resolved path; released on every exit path
 * mode/rdev - forwarded to nfs_mknod (and mode re-applied via nfs_chmod)
 */
static void workspace_nfs_mknod(fuse_req_t req, struct inode_struct *pinode, struct name_struct *xname, struct call_info_struct *call_info, mode_t mode, dev_t rdev)
{
    struct resource_struct *resource=call_info->object->resource;
    struct net_nfs_export_struct *nfs_export=(struct net_nfs_export_struct *) resource->data;
    struct nfs_context *nfs_ctx=(struct nfs_context *) nfs_export->data;
    char *path=call_info->pathinfo.path + call_info->relpath;
    struct entry_struct *entry=NULL, *parent=pinode->alias;
    struct inode_struct *inode;

    logoutput("workspace_nfs_mknod, path %s", path);

    entry=create_entry(parent, xname);
    inode=create_inode();

    if (entry && inode) {
	int result=0;

	inode->alias=entry;
	entry->inode=inode;

	/* serialize access to the shared nfs context */
	pthread_mutex_lock(&nfs_export->mutex);
	result=nfs_mknod(nfs_ctx, path, mode, rdev);
	pthread_mutex_unlock(&nfs_export->mutex);

	if (result==0) {
	    struct fuse_entry_param e;
	    unsigned int error=0;
	    struct stat st;

	    memset(&st, 0, sizeof(struct stat));
	    /* BUG FIX: e was previously uninitialized, so fields that were
	       never assigned below (st_size, st_dev, ...) were sent to the
	       kernel as stack garbage */
	    memset(&e, 0, sizeof(struct fuse_entry_param));

	    add_inode_hashtable(inode, increase_inodes_workspace, (void *) call_info->object->workspace_mount);
	    insert_entry(entry, &error, 0);

	    adjust_pathmax(call_info->pathinfo.len);

	    /* best effort: re-apply the mode and fetch the real attributes;
	       errors here are deliberately ignored, st stays zeroed */
	    pthread_mutex_lock(&nfs_export->mutex);
	    nfs_chmod(nfs_ctx, path, mode);
	    nfs_stat(nfs_ctx, path, &st);
	    pthread_mutex_unlock(&nfs_export->mutex);

	    /* cache the remote attributes on the local inode */
	    inode->nlookup=1;
	    inode->mode=st.st_mode;
	    inode->nlink=st.st_nlink;
	    inode->uid=st.st_uid;
	    inode->gid=st.st_gid;
	    inode->rdev=st.st_rdev;
	    inode->size=st.st_size;

	    inode->mtim.tv_sec=st.st_mtim.tv_sec;
	    inode->mtim.tv_nsec=st.st_mtim.tv_nsec;
	    inode->ctim.tv_sec=st.st_ctim.tv_sec;
	    inode->ctim.tv_nsec=st.st_ctim.tv_nsec;

	    e.ino = inode->ino;
	    e.generation = 1;
	    e.attr_timeout = fs_options.attr_timeout;
	    e.entry_timeout = fs_options.entry_timeout;

	    e.attr.st_ino = e.ino;
	    e.attr.st_mode = st.st_mode;
	    e.attr.st_nlink = st.st_nlink;
	    e.attr.st_uid = st.st_uid;
	    e.attr.st_gid = st.st_gid;
	    e.attr.st_rdev = st.st_rdev;
	    /* BUG FIX: st_size was never copied (the lookup handler does
	       copy it); without it the kernel saw a random size */
	    e.attr.st_size = st.st_size;

	    e.attr.st_atim.tv_sec = st.st_atim.tv_sec;
	    e.attr.st_atim.tv_nsec = st.st_atim.tv_nsec;
	    e.attr.st_mtim.tv_sec = st.st_mtim.tv_sec;
	    e.attr.st_mtim.tv_nsec = st.st_mtim.tv_nsec;
	    e.attr.st_ctim.tv_sec = st.st_ctim.tv_sec;
	    e.attr.st_ctim.tv_nsec = st.st_ctim.tv_nsec;

	    /* advertise a fixed blocksize and round st_blocks up */
	    e.attr.st_blksize=_DEFAULT_BLOCKSIZE;

	    if (inode->size % e.attr.st_blksize == 0) {

		e.attr.st_blocks=inode->size / e.attr.st_blksize;

	    } else {

		e.attr.st_blocks=1 + inode->size / e.attr.st_blksize;

	    }

	    fuse_reply_entry(req, &e);

	} else {

	    /* error nfs create: undo the local entry/inode pair */
	    destroy_entry(entry);
	    free(inode);

	    fuse_reply_err(req, abs(result));

	}

    } else {

	/* not enough memory to allocate entry and/or inode */
	if (entry) {

	    destroy_entry(entry);
	    entry=NULL;

	}

	if (inode) {

	    free(inode);
	    inode=NULL;

	}

	fuse_reply_err(req, ENOMEM);

    }

    free_path_pathinfo(&call_info->pathinfo);

}
/* FUSE lowlevel lookup handler for an entry that already exists locally.
 *
 * Revalidates the cached entry against the server: on success the cached
 * inode attributes are refreshed and a fuse_entry_param is replied; if the
 * server reports ENOENT the stale local entry is removed and queued for
 * teardown before the error is forwarded.
 *
 * req       - fuse request, answered exactly once on every path
 * entry     - cached entry to revalidate (may be detached on ENOENT)
 * call_info - carries the resolved path; released on every exit path
 */
static void workspace_nfs_lookup_cached(fuse_req_t req, struct entry_struct *entry, struct call_info_struct *call_info)
{
    struct resource_struct *resource=call_info->object->resource;
    struct net_nfs_export_struct *nfs_export=(struct net_nfs_export_struct *) resource->data;
    struct nfs_context *nfs_ctx=(struct nfs_context *) nfs_export->data;
    char *path=call_info->pathinfo.path + call_info->relpath;
    struct stat st;
    int result=0;

    memset(&st, 0, sizeof(struct stat));

    /* an empty relative path refers to the export root */
    if (strlen(path)==0) path=(char *) rootpath;

    logoutput("workspace_nfs_lookup_cached, path %s", path);

    /* serialize access to the shared nfs context */
    pthread_mutex_lock(&nfs_export->mutex);
    result=nfs_stat(nfs_ctx, path, &st);
    pthread_mutex_unlock(&nfs_export->mutex);

    if (result<0) {

	result=abs(result);

	if (result==ENOENT) {
	    /* entry vanished on the server: detach and remove the stale
	       local entry (redundant duplicate assignment of inode removed) */
	    struct inode_struct *inode=entry->inode;
	    unsigned int error=0;

	    inode->alias=NULL;

	    remove_entry(entry, &error);
	    queue_remove(call_info->object, entry, &error);
	    entry=NULL;

	}

	fuse_reply_err(req, result);

    } else {
	struct fuse_entry_param e;
	struct inode_struct *inode=entry->inode;

	/* BUG FIX: zero e so no unassigned field reaches the kernel as
	   stack garbage */
	memset(&e, 0, sizeof(struct fuse_entry_param));

	/* refresh the cached attributes on the local inode */
	inode->mode=st.st_mode;
	inode->nlink=st.st_nlink;
	inode->uid=st.st_uid;
	inode->gid=st.st_gid;
	inode->rdev=st.st_rdev;

	inode->mtim.tv_sec=st.st_mtim.tv_sec;
	inode->mtim.tv_nsec=st.st_mtim.tv_nsec;
	inode->ctim.tv_sec=st.st_ctim.tv_sec;
	inode->ctim.tv_nsec=st.st_ctim.tv_nsec;

	inode->size=st.st_size;

	e.ino = inode->ino;
	e.generation = 1;
	e.attr_timeout = fs_options.attr_timeout;
	e.entry_timeout = fs_options.entry_timeout;

	get_current_time(&entry->synctime);

	e.attr.st_dev = 0;
	e.attr.st_ino = e.ino;
	e.attr.st_mode = st.st_mode;
	e.attr.st_nlink = st.st_nlink;
	e.attr.st_uid = st.st_uid;
	e.attr.st_gid = st.st_gid;
	e.attr.st_rdev = st.st_rdev;
	e.attr.st_size = st.st_size;

	e.attr.st_atim.tv_sec = st.st_atim.tv_sec;
	e.attr.st_atim.tv_nsec = st.st_atim.tv_nsec;
	e.attr.st_mtim.tv_sec = st.st_mtim.tv_sec;
	e.attr.st_mtim.tv_nsec = st.st_mtim.tv_nsec;
	e.attr.st_ctim.tv_sec = st.st_ctim.tv_sec;
	e.attr.st_ctim.tv_nsec = st.st_ctim.tv_nsec;

	/* advertise a fixed blocksize and round st_blocks up */
	e.attr.st_blksize=_DEFAULT_BLOCKSIZE;

	if (inode->size % e.attr.st_blksize == 0) {

	    e.attr.st_blocks = inode->size / e.attr.st_blksize;

	} else {

	    e.attr.st_blocks = 1 + inode->size / e.attr.st_blksize;

	}

	fuse_reply_entry(req, &e);

    }

    free_path_pathinfo(&call_info->pathinfo);

}