/**
 * nfs_Create: The NFS PROC2 and PROC3 CREATE handler.
 *
 * Creates (or, for NFSv3 UNCHECKED mode, reuses) a regular file in the
 * directory designated by the request's file handle, then builds the
 * version-appropriate reply (diropres2 for V2, CREATE3res for V3).
 *
 * @param parg     [IN]  decoded request arguments (union over V2/V3)
 * @param pexport  [IN]  export entry for the target filesystem
 * @param pcontext [IN]  FSAL credentials of the caller
 * @param pworker  [IN]  worker thread data (unused here)
 * @param preq     [IN]  svc request; preq->rq_vers selects V2 vs V3 paths
 * @param pres     [OUT] reply to be filled in
 *
 * @return NFS_REQ_OK on a sendable reply (success or protocol error),
 *         NFS_REQ_DROP when the error is retryable and the request
 *         should be dropped so the client retransmits.
 *
 * All exits funnel through the 'out' label, which releases the cache
 * entry references taken on parent_pentry and file_pentry.
 */
int nfs_Create(nfs_arg_t *parg,
               exportlist_t *pexport,
               fsal_op_context_t *pcontext,
               nfs_worker_data_t *pworker,
               struct svc_req *preq,
               nfs_res_t *pres)
{
  char *str_file_name = NULL;
  fsal_name_t file_name;
  fsal_accessmode_t mode = 0;
  cache_entry_t *file_pentry = NULL;    /* ref released at 'out' */
  cache_entry_t *parent_pentry = NULL;  /* ref released at 'out' */
  fsal_attrib_list_t parent_attr;
  fsal_attrib_list_t attr;
  fsal_attrib_list_t attr_parent_after;
  fsal_attrib_list_t attr_newfile;
  fsal_attrib_list_t attributes_create;
  fsal_attrib_list_t *ppre_attr;
  cache_inode_status_t cache_status = CACHE_INODE_SUCCESS;
  cache_inode_status_t cache_status_lookup;
  cache_inode_file_type_t parent_filetype;
  int rc = NFS_REQ_OK;
#ifdef _USE_QUOTA
  fsal_status_t fsal_status ;
#endif

  if(isDebug(COMPONENT_NFSPROTO))
    {
      char str[LEN_FH_STR];

      /* Pick the name out of the version-appropriate arm of the arg union,
       * purely for the debug trace below. */
      switch (preq->rq_vers)
        {
        case NFS_V2:
          str_file_name = parg->arg_create2.where.name;
          break;
        case NFS_V3:
          str_file_name = parg->arg_create3.where.name;
          break;
        }

      /* nfs_FhandleToStr selects between the V2 and V3 handle internally
       * based on rq_vers; both are passed unconditionally. */
      nfs_FhandleToStr(preq->rq_vers,
                       &(parg->arg_create2.where.dir),
                       &(parg->arg_create3.where.dir),
                       NULL,
                       str);
      LogDebug(COMPONENT_NFSPROTO,
               "REQUEST PROCESSING: Calling nfs_Create handle: %s name: %s",
               str, str_file_name);
    }

  /* Extended-attribute pseudo-handles get their own create path (V3 only). */
  if((preq->rq_vers == NFS_V3) && (nfs3_Is_Fh_Xattr(&(parg->arg_create3.where.dir))))
    {
      rc = nfs3_Create_Xattr(parg, pexport, pcontext, preq, pres);
      goto out;
    }

  if(preq->rq_vers == NFS_V3)
    {
      /* to avoid setting it on each error case */
      pres->res_create3.CREATE3res_u.resfail.dir_wcc.before.attributes_follow = FALSE;
      pres->res_create3.CREATE3res_u.resfail.dir_wcc.after.attributes_follow = FALSE;
      ppre_attr = NULL;
    }

  /* Resolve the directory file handle to a cache entry; on failure the
   * helper has already set the per-version status fields and rc. */
  if((parent_pentry = nfs_FhandleToCache(preq->rq_vers,
                                         &(parg->arg_create2.where.dir),
                                         &(parg->arg_create3.where.dir),
                                         NULL,
                                         &(pres->res_dirop2.status),
                                         &(pres->res_create3.status),
                                         NULL,
                                         &parent_attr, pcontext, &rc)) == NULL)
    {
      /* Stale NFS FH ? */
      goto out;
    }

  /* get directory attributes before action (for V3 reply) */
  ppre_attr = &parent_attr;

  /* Extract the filetype */
  parent_filetype = cache_inode_fsal_type_convert(parent_attr.type);

  /*
   * Sanity checks: new file name must be non-null; parent must be a
   * directory.
   */
  if(parent_filetype != DIRECTORY)
    {
      switch (preq->rq_vers)
        {
        case NFS_V2:
          pres->res_dirop2.status = NFSERR_NOTDIR;
          break;
        case NFS_V3:
          pres->res_create3.status = NFS3ERR_NOTDIR;
          break;
        }
      rc = NFS_REQ_OK;
      goto out;
    }

  /* Extract the requested file name and creation mode per protocol version. */
  switch (preq->rq_vers)
    {
    case NFS_V2:
      str_file_name = parg->arg_create2.where.name;

      /* V2 encodes "mode not supplied" as all-ones. */
      if(parg->arg_create2.attributes.mode != (unsigned int)-1)
        {
          mode = unix2fsal_mode(parg->arg_create2.attributes.mode);
        }
      else
        {
          mode = 0;
        }
      break;

    case NFS_V3:
      str_file_name = parg->arg_create3.where.name;

      if(parg->arg_create3.how.mode == EXCLUSIVE)
        {
          /*
           * Client has not provided mode information.
           * If the create works, the client will issue
           * a separate setattr request to fix up the
           * file's mode, so pick arbitrary value for now.
           */
          mode = 0;
        }
      else if(parg->arg_create3.how.createhow3_u.obj_attributes.mode.set_it == TRUE)
        mode = unix2fsal_mode(parg->arg_create3.how.createhow3_u.obj_attributes.mode.
                              set_mode3_u.mode);
      else
        mode = 0;
      break;
    }

#ifdef _USE_QUOTA
  /* if quota support is active, then we should check is the FSAL allows
   * inode creation or not */
  fsal_status = FSAL_check_quota( pexport->fullpath,
                                  FSAL_QUOTA_INODES,
                                  FSAL_OP_CONTEXT_TO_UID( pcontext ) ) ;
  if( FSAL_IS_ERROR( fsal_status ) )
    {
      switch (preq->rq_vers)
        {
        case NFS_V2:
          pres->res_dirop2.status = NFSERR_DQUOT ;
          break;
        case NFS_V3:
          pres->res_create3.status = NFS3ERR_DQUOT;
          break;
        }
      rc = NFS_REQ_OK ;
      goto out;
    }
#endif /* _USE_QUOTA */

  // if(str_file_name == NULL || strlen(str_file_name) == 0)
  if(str_file_name == NULL || *str_file_name == '\0' )
    {
      /* Empty/missing name: fall through to the common failed-status path
       * below with the per-version status already set. */
      if(preq->rq_vers == NFS_V2)
        pres->res_dirop2.status = NFSERR_IO;
      if(preq->rq_vers == NFS_V3)
        pres->res_create3.status = NFS3ERR_INVAL;
    }
  else
    {
      if((cache_status = cache_inode_error_convert(FSAL_str2name(str_file_name,
                                                                 FSAL_MAX_NAME_LEN,
                                                                 &file_name))) ==
         CACHE_INODE_SUCCESS)
        {
          /*
           * Lookup file to see if it exists.  If so, use it.  Otherwise
           * create a new one.
           */
          file_pentry = cache_inode_lookup(parent_pentry,
                                           &file_name,
                                           &attr, pcontext, &cache_status_lookup);

          /* NOTE(review): arg_create3.how.mode is read here even on NFS_V2
           * requests (the V2/V3 args overlay in a union).  V2 CREATE has
           * UNCHECKED-like semantics, so this appears intentional, but the
           * union aliasing should be confirmed. */
          if((cache_status_lookup == CACHE_INODE_NOT_FOUND) ||
             ((cache_status_lookup == CACHE_INODE_SUCCESS) &&
              (parg->arg_create3.how.mode == UNCHECKED)))
            {
              /* Create the file */
              if((parg->arg_create3.how.mode == UNCHECKED) &&
                 (cache_status_lookup == CACHE_INODE_SUCCESS))
                {
                  /* UNCHECKED create of an existing file: succeed and reuse
                   * the attributes from the lookup. */
                  cache_status = CACHE_INODE_SUCCESS;
                  attr_newfile = attr;
                }
              else
                file_pentry = cache_inode_create(parent_pentry,
                                                 &file_name,
                                                 REGULAR_FILE,
                                                 mode,
                                                 NULL,
                                                 &attr_newfile, pcontext, &cache_status);

              if(file_pentry != NULL)
                {
                  /*
                   * Look at sattr to see if some attributes are to be set
                   * at creation time
                   */
                  attributes_create.asked_attributes = 0ULL;
                  switch (preq->rq_vers)
                    {
                    case NFS_V2:
                      if(nfs2_Sattr_To_FSALattr(&attributes_create,
                                                &parg->arg_create2.attributes) == 0)
                        {
                          pres->res_dirop2.status = NFSERR_IO;
                          rc = NFS_REQ_OK;
                          goto out;
                          break;  /* unreachable after goto; kept for symmetry */
                        }
                      break;

                    case NFS_V3:
                      if(nfs3_Sattr_To_FSALattr(&attributes_create,
                                                &parg->arg_create3.how.createhow3_u.
                                                obj_attributes) == 0)
                        {
                          pres->res_create3.status = NFS3ERR_INVAL;
                          rc = NFS_REQ_OK;
                          goto out;
                        }
                      break;
                    }

                  /* Mode is managed above (in cache_inode_create), there is
                   * no need to manage it */
                  if(attributes_create.asked_attributes & FSAL_ATTR_MODE)
                    attributes_create.asked_attributes &= ~FSAL_ATTR_MODE;

                  /* Some clients (like Solaris 10) try to set the size of
                   * the file to 0 at creation time.  The FSAL create empty
                   * file, so we ignore this */
                  if(attributes_create.asked_attributes & FSAL_ATTR_SIZE)
                    attributes_create.asked_attributes &= ~FSAL_ATTR_SIZE;

                  if(attributes_create.asked_attributes & FSAL_ATTR_SPACEUSED)
                    attributes_create.asked_attributes &= ~FSAL_ATTR_SPACEUSED;

                  /* Are there attributes to be set (additional to the mode) ? */
                  if(attributes_create.asked_attributes != 0ULL &&
                     attributes_create.asked_attributes != FSAL_ATTR_MODE)
                    {
                      /* A call to cache_inode_setattr is required */
                      if(cache_inode_setattr(file_pentry,
                                             &attributes_create,
                                             pcontext, &cache_status) != CACHE_INODE_SUCCESS)
                        {
                          /* If we are here, there was an error */
                          nfs_SetFailedStatus(pcontext, pexport,
                                              preq->rq_vers,
                                              cache_status,
                                              &pres->res_dirop2.status,
                                              &pres->res_create3.status,
                                              NULL, NULL,
                                              parent_pentry,
                                              ppre_attr,
                                              &(pres->res_create3.CREATE3res_u.resfail.
                                                dir_wcc), NULL, NULL, NULL);

                          if(nfs_RetryableError(cache_status))
                            {
                              rc = NFS_REQ_DROP;
                              goto out;
                            }

                          rc = NFS_REQ_OK;
                          goto out;
                        }

                      /* Get the resulting attributes from the Cache Inode */
                      if(cache_inode_getattr(file_pentry,
                                             &attr_newfile,
                                             pcontext, &cache_status) != CACHE_INODE_SUCCESS)
                        {
                          /* If we are here, there was an error */
                          nfs_SetFailedStatus(pcontext, pexport,
                                              preq->rq_vers,
                                              cache_status,
                                              &pres->res_dirop2.status,
                                              &pres->res_create3.status,
                                              NULL, NULL,
                                              parent_pentry,
                                              ppre_attr,
                                              &(pres->res_create3.CREATE3res_u.resfail.
                                                dir_wcc), NULL, NULL, NULL);

                          if(nfs_RetryableError(cache_status))
                            {
                              rc = NFS_REQ_DROP;
                              goto out;
                            }

                          rc = NFS_REQ_OK;
                          goto out;
                        }
                    }

                  /* Build the success reply for the appropriate version. */
                  switch (preq->rq_vers)
                    {
                    case NFS_V2:
                      /* Build file handle */
                      if(nfs2_FSALToFhandle(
                              &(pres->res_dirop2.DIROP2res_u.diropok.file),
                              &file_pentry->handle, pexport) == 0)
                        pres->res_dirop2.status = NFSERR_IO;
                      else
                        {
                          if(!nfs2_FSALattr_To_Fattr(
                                  pexport, &attr_newfile,
                                  &(pres->res_dirop2.DIROP2res_u.
                                    diropok.attributes)))
                            pres->res_dirop2.status = NFSERR_IO;
                          else
                            pres->res_dirop2.status = NFS_OK;
                        }
                      break;

                    case NFS_V3:
                      /* Build file handle */
                      pres->res_create3.status =
                          nfs3_AllocateFH(&pres->res_create3.CREATE3res_u
                                          .resok.obj.post_op_fh3_u.handle);
                      if (pres->res_create3.status != NFS3_OK)
                        {
                          rc = NFS_REQ_OK;
                          goto out;
                        }

                      /* Set Post Op Fh3 structure */
                      if(nfs3_FSALToFhandle(
                              &(pres->res_create3.CREATE3res_u.resok
                                .obj.post_op_fh3_u.handle),
                              &file_pentry->handle, pexport) == 0)
                        {
                          /* On digest failure the handle buffer allocated by
                           * nfs3_AllocateFH must be freed here. */
                          gsh_free(pres->res_create3.CREATE3res_u.resok.obj.
                                   post_op_fh3_u.handle.data.data_val);
                          pres->res_create3.status = NFS3ERR_BADHANDLE;
                          rc = NFS_REQ_OK;
                          goto out;
                        }

                      /* Set Post Op Fh3 structure */
                      pres->res_create3.CREATE3res_u.resok.obj.handle_follows = TRUE;

                      /* Get the attributes of the parent after the operation */
                      attr_parent_after = parent_pentry->attributes;

                      /* Build entry attributes */
                      nfs_SetPostOpAttr(pexport, &attr_newfile,
                                        &(pres->res_create3.CREATE3res_u.resok.
                                          obj_attributes));

                      /*
                       * Build Weak Cache Coherency data
                       */
                      nfs_SetWccData(pexport, ppre_attr, &attr_parent_after,
                                     &(pres->res_create3.CREATE3res_u
                                       .resok.dir_wcc));

                      pres->res_create3.status = NFS3_OK;
                      break;
                    }           /* switch */
                  rc = NFS_REQ_OK;
                  goto out;
                }
            }
          else
            {
              if(cache_status_lookup == CACHE_INODE_SUCCESS)
                {
                  /* Trying to create a file that already exists */
                  cache_status = CACHE_INODE_ENTRY_EXISTS;
                  switch (preq->rq_vers)
                    {
                    case NFS_V2:
                      pres->res_dirop2.status = NFSERR_EXIST;
                      break;
                    case NFS_V3:
                      pres->res_create3.status = NFS3ERR_EXIST;
                      break;
                    }
                }
              else
                {
                  /* Server fault */
                  cache_status = cache_status_lookup;
                  switch (preq->rq_vers)
                    {
                    case NFS_V2:
                      pres->res_dirop2.status = NFSERR_IO;
                      break;
                    case NFS_V3:
                      pres->res_create3.status = NFS3ERR_INVAL;
                      break;
                    }
                }

              nfs_SetFailedStatus(pcontext, pexport,
                                  preq->rq_vers,
                                  cache_status,
                                  &pres->res_dirop2.status,
                                  &pres->res_create3.status,
                                  NULL, NULL,
                                  parent_pentry,
                                  ppre_attr,
                                  &(pres->res_create3.CREATE3res_u.resfail.dir_wcc),
                                  NULL, NULL, NULL);

              rc = NFS_REQ_OK;
              goto out;
            }                   /* if( cache_status_lookup == CACHE_INODE_NOT_FOUND ) */
        }
    }

  /* Set the exit status */
  nfs_SetFailedStatus(pcontext, pexport,
                      preq->rq_vers,
                      cache_status,
                      &pres->res_dirop2.status,
                      &pres->res_create3.status,
                      NULL, NULL,
                      parent_pentry,
                      ppre_attr,
                      &(pres->res_create3.CREATE3res_u.resfail.dir_wcc),
                      NULL, NULL, NULL);

  /* If we are here, there was an error */
  if(nfs_RetryableError(cache_status))
    {
      rc = NFS_REQ_DROP;
      goto out;
    }

  rc = NFS_REQ_OK;

out:
  /* return references */
  if (file_pentry)
    cache_inode_put(file_pentry);

  if (parent_pentry)
    cache_inode_put(parent_pentry);

  return (rc);
}                               /* nfs_Create */
/** * * cache_inode_readdir_populate: fully reads a directory in FSAL and caches * the related entries. * * fully reads a directory in FSAL and caches the related entries. No MT * safety managed here !! * * @param pentry [IN] entry for the parent directory to be read. This must be * a DIRECTORY * @param ht [IN] hash table used for the cache, unused in this call. * @param pclient [INOUT] ressource allocated by the client for the nfs * management. * @param pcontext [IN] FSAL credentials * @param pstatus [OUT] returned status. * */ cache_inode_status_t cache_inode_readdir_populate( cache_entry_t * pentry_dir, cache_inode_policy_t policy, hash_table_t * ht, cache_inode_client_t * pclient, fsal_op_context_t * pcontext, cache_inode_status_t * pstatus) { fsal_dir_t fsal_dirhandle; fsal_status_t fsal_status; fsal_attrib_list_t dir_attributes; fsal_cookie_t begin_cookie; fsal_cookie_t end_cookie; fsal_count_t nbfound; fsal_count_t iter; fsal_boolean_t fsal_eod; cache_entry_t *pentry = NULL; cache_entry_t *pentry_parent = pentry_dir; fsal_attrib_list_t object_attributes; cache_inode_create_arg_t create_arg; cache_inode_file_type_t type; cache_inode_status_t cache_status; fsal_dirent_t array_dirent[FSAL_READDIR_SIZE + 20]; cache_inode_fsal_data_t new_entry_fsdata; cache_inode_dir_entry_t *new_dir_entry = NULL; uint64_t i = 0; /* Set the return default to CACHE_INODE_SUCCESS */ *pstatus = CACHE_INODE_SUCCESS; /* Only DIRECTORY entries are concerned */ if(pentry_dir->internal_md.type != DIRECTORY) { *pstatus = CACHE_INODE_BAD_TYPE; return *pstatus; } #ifdef _USE_MFSL_ASYNC /* If entry is asynchronous (via MFSL), it should not be repopulated until it is synced */ if(MFSL_ASYNC_is_synced(&pentry_dir->mobject) == FALSE) { /* Directory is asynchronous, do not repopulate it and let it * in the state 'has_been_readdir == FALSE' */ *pstatus = CACHE_INODE_SUCCESS; return *pstatus; } #endif /* If directory is already populated , there is no job to do */ 
if(pentry_dir->object.dir.has_been_readdir == CACHE_INODE_YES) { *pstatus = CACHE_INODE_SUCCESS; return *pstatus; } /* Invalidate all the dirents */ if(cache_inode_invalidate_all_cached_dirent(pentry_dir, ht, pclient, pstatus) != CACHE_INODE_SUCCESS) return *pstatus; /* Open the directory */ dir_attributes.asked_attributes = pclient->attrmask; #ifdef _USE_MFSL fsal_status = MFSL_opendir(&pentry_dir->mobject, pcontext, &pclient->mfsl_context, &fsal_dirhandle, &dir_attributes, NULL); #else fsal_status = FSAL_opendir(&pentry_dir->object.dir.handle, pcontext, &fsal_dirhandle, &dir_attributes); #endif if(FSAL_IS_ERROR(fsal_status)) { *pstatus = cache_inode_error_convert(fsal_status); if(fsal_status.major == ERR_FSAL_STALE) { cache_inode_status_t kill_status; LogEvent(COMPONENT_CACHE_INODE, "cache_inode_readdir: Stale FSAL File Handle detected for pentry = %p, fsal_status=(%u,%u)", pentry_dir, fsal_status.major, fsal_status.minor); if(cache_inode_kill_entry(pentry_dir, WT_LOCK, ht, pclient, &kill_status) != CACHE_INODE_SUCCESS) LogCrit(COMPONENT_CACHE_INODE, "cache_inode_readdir: Could not kill entry %p, status = %u", pentry_dir, kill_status); *pstatus = CACHE_INODE_FSAL_ESTALE; } return *pstatus; } /* Loop for readding the directory */ FSAL_SET_COOKIE_BEGINNING(begin_cookie); FSAL_SET_COOKIE_BEGINNING(end_cookie); fsal_eod = FALSE; do { #ifdef _USE_MFSL fsal_status = MFSL_readdir(&fsal_dirhandle, begin_cookie, pclient->attrmask, FSAL_READDIR_SIZE * sizeof(fsal_dirent_t), array_dirent, &end_cookie, &nbfound, &fsal_eod, &pclient->mfsl_context, NULL); #else fsal_status = FSAL_readdir(&fsal_dirhandle, begin_cookie, pclient->attrmask, FSAL_READDIR_SIZE * sizeof(fsal_dirent_t), array_dirent, &end_cookie, &nbfound, &fsal_eod); #endif if(FSAL_IS_ERROR(fsal_status)) { *pstatus = cache_inode_error_convert(fsal_status); return *pstatus; } for(iter = 0; iter < nbfound; iter++) { LogFullDebug(COMPONENT_NFS_READDIR, "cache readdir populate found entry %s", 
array_dirent[iter].name.name); /* It is not needed to cache '.' and '..' */ if(!FSAL_namecmp(&(array_dirent[iter].name), (fsal_name_t *) & FSAL_DOT) || !FSAL_namecmp(&(array_dirent[iter].name), (fsal_name_t *) & FSAL_DOT_DOT)) { LogFullDebug(COMPONENT_NFS_READDIR, "cache readdir populate : do not cache . and .."); continue; } /* If dir entry is a symbolic link, its content has to be read */ if((type = cache_inode_fsal_type_convert(array_dirent[iter].attributes.type)) == SYMBOLIC_LINK) { #ifdef _USE_MFSL mfsl_object_t tmp_mfsl; #endif /* Let's read the link for caching its value */ object_attributes.asked_attributes = pclient->attrmask; if( CACHE_INODE_KEEP_CONTENT( pentry_dir->policy ) ) { #ifdef _USE_MFSL tmp_mfsl.handle = array_dirent[iter].handle; fsal_status = MFSL_readlink(&tmp_mfsl, pcontext, &pclient->mfsl_context, &create_arg.link_content, &object_attributes, NULL); #else fsal_status = FSAL_readlink(&array_dirent[iter].handle, pcontext, &create_arg.link_content, &object_attributes); #endif } else { fsal_status.major = ERR_FSAL_NO_ERROR ; fsal_status.minor = 0 ; } if(FSAL_IS_ERROR(fsal_status)) { *pstatus = cache_inode_error_convert(fsal_status); if(fsal_status.major == ERR_FSAL_STALE) { cache_inode_status_t kill_status; LogEvent(COMPONENT_CACHE_INODE, "cache_inode_readdir: Stale FSAL File Handle detected for pentry = %p, fsal_status=(%u,%u)", pentry_dir, fsal_status.major, fsal_status.minor ); if(cache_inode_kill_entry(pentry_dir, WT_LOCK, ht, pclient, &kill_status) != CACHE_INODE_SUCCESS) LogCrit(COMPONENT_CACHE_INODE, "cache_inode_readdir: Could not kill entry %p, status = %u", pentry_dir, kill_status); *pstatus = CACHE_INODE_FSAL_ESTALE; } return *pstatus; } } /* Try adding the entry, if it exists then this existing entry is returned */ new_entry_fsdata.handle = array_dirent[iter].handle; new_entry_fsdata.cookie = 0; /* XXX needed? 
*/ if((pentry = cache_inode_new_entry( &new_entry_fsdata, &array_dirent[iter].attributes, type, policy, &create_arg, NULL, ht, pclient, pcontext, FALSE, /* This is population and no creation */ pstatus)) == NULL) return *pstatus; cache_status = cache_inode_add_cached_dirent( pentry_parent, &(array_dirent[iter].name), pentry, ht, &new_dir_entry, pclient, pcontext, pstatus); if(cache_status != CACHE_INODE_SUCCESS && cache_status != CACHE_INODE_ENTRY_EXISTS) return *pstatus; /* * Remember the FSAL readdir cookie associated with this dirent. This * is needed for partial directory reads. * * to_uint64 should be a lightweight operation--it is in the current * default implementation. We think the right thing -should- happen * therefore with if _USE_MFSL. * * I'm ignoring the status because the default operation is a memcmp-- * we lready -have- the cookie. */ if (cache_status != CACHE_INODE_ENTRY_EXISTS) { (void) FSAL_cookie_to_uint64(&array_dirent[iter].handle, pcontext, &array_dirent[iter].cookie, &new_dir_entry->fsal_cookie); /* we are filling in all entries, and the cookie avl was * cleared before adding dirents */ new_dir_entry->cookie = i; /* still an offset */ (void) avltree_insert(&new_dir_entry->node_c, &pentry_parent->object.dir.cookies); } /* !exist */ } /* iter */ /* Get prepared for next step */ begin_cookie = end_cookie; /* next offset */ i++; } while(fsal_eod != TRUE); /* Close the directory */ #ifdef _USE_MFSL fsal_status = MFSL_closedir(&fsal_dirhandle, &pclient->mfsl_context, NULL); #else fsal_status = FSAL_closedir(&fsal_dirhandle); #endif if(FSAL_IS_ERROR(fsal_status)) { *pstatus = cache_inode_error_convert(fsal_status); return *pstatus; } /* End of work */ pentry_dir->object.dir.has_been_readdir = CACHE_INODE_YES; *pstatus = CACHE_INODE_SUCCESS; return *pstatus; } /* cache_inode_readdir_populate */
/**
 * nfs3_Mknod: The NFS PROC3 MKNOD handler.
 *
 * Creates a special file (character/block device, FIFO, or socket) in the
 * directory designated by the request's file handle and builds the
 * MKNOD3res reply.
 *
 * Fix over the previous version: the quota-failure path and the
 * nfs3_AllocateFH-failure path previously returned directly, bypassing
 * the 'out' label and leaking the cache entry references held on
 * parent_pentry (and potentially node_pentry).  Both now 'goto out',
 * matching the cleanup discipline used by nfs_Create.
 *
 * @param parg     [IN]  decoded MKNOD3 arguments
 * @param pexport  [IN]  export entry for the target filesystem
 * @param pcontext [IN]  FSAL credentials of the caller
 * @param pworker  [IN]  worker thread data (unused here)
 * @param preq     [IN]  svc request
 * @param pres     [OUT] reply to be filled in
 *
 * @return NFS_REQ_OK on a sendable reply; NFS_REQ_DROP on retryable
 *         errors (via nfs_SetFailedStatus).
 */
int nfs3_Mknod(nfs_arg_t *parg,
               exportlist_t *pexport,
               fsal_op_context_t *pcontext,
               nfs_worker_data_t *pworker,
               struct svc_req *preq,
               nfs_res_t * pres)
{
  cache_entry_t *parent_pentry = NULL;  /* ref released at 'out' */
  fsal_attrib_list_t parent_attr;
  fsal_attrib_list_t *ppre_attr;
  fsal_attrib_list_t attr_parent_after;
  cache_inode_file_type_t parent_filetype;
  cache_inode_file_type_t nodetype;
  char *str_file_name = NULL;
  fsal_name_t file_name;
  cache_inode_status_t cache_status;
  cache_inode_status_t cache_status_lookup;
  fsal_accessmode_t mode = 0;
  cache_entry_t *node_pentry = NULL;    /* ref released at 'out' */
  fsal_attrib_list_t attr;
  cache_inode_create_arg_t create_arg;
  fsal_handle_t *pfsal_handle;
  int rc = NFS_REQ_OK;
#ifdef _USE_QUOTA
  fsal_status_t fsal_status ;
#endif

  memset(&create_arg, 0, sizeof(create_arg));

  if(isDebug(COMPONENT_NFSPROTO))
    {
      char str[LEN_FH_STR];
      sprint_fhandle3(str, &(parg->arg_mknod3.where.dir));
      LogDebug(COMPONENT_NFSPROTO,
               "REQUEST PROCESSING: Calling nfs3_Mknod handle: %s name: %s",
               str, parg->arg_mknod3.where.name);
    }

  /* to avoid setting them on each error case */
  pres->res_mknod3.MKNOD3res_u.resfail.dir_wcc.before.attributes_follow = FALSE;
  pres->res_mknod3.MKNOD3res_u.resfail.dir_wcc.after.attributes_follow = FALSE;
  ppre_attr = NULL;

  /* retrieve parent entry */
  if((parent_pentry = nfs_FhandleToCache(preq->rq_vers,
                                         NULL,
                                         &(parg->arg_mknod3.where.dir),
                                         NULL,
                                         NULL,
                                         &(pres->res_mknod3.status),
                                         NULL,
                                         &parent_attr, pcontext, &rc)) == NULL)
    {
      /* Stale NFS FH ?  No reference was taken, direct return is safe. */
      return rc;
    }

  /* get directory attributes before action (for V3 reply) */
  ppre_attr = &parent_attr;

  /* Extract the filetype */
  parent_filetype = cache_inode_fsal_type_convert(parent_attr.type);

  /*
   * Sanity checks: new node name must be non-null; parent must be a
   * directory.
   */
  if(parent_filetype != DIRECTORY)
    {
      pres->res_mknod3.status = NFS3ERR_NOTDIR;
      rc = NFS_REQ_OK;
      goto out;
    }

  str_file_name = parg->arg_mknod3.where.name;

  /* Pull mode and device spec from the type-specific arm of the union. */
  switch (parg->arg_mknod3.what.type)
    {
    case NF3CHR:
    case NF3BLK:
      if(parg->arg_mknod3.what.mknoddata3_u.device.dev_attributes.mode.set_it)
        mode = (fsal_accessmode_t)
            parg->arg_mknod3.what.mknoddata3_u.device.dev_attributes.
            mode.set_mode3_u.mode;
      else
        mode = (fsal_accessmode_t) 0;

      create_arg.dev_spec.major =
          parg->arg_mknod3.what.mknoddata3_u.device.spec.specdata1;
      create_arg.dev_spec.minor =
          parg->arg_mknod3.what.mknoddata3_u.device.spec.specdata2;
      break;

    case NF3FIFO:
    case NF3SOCK:
      if(parg->arg_mknod3.what.mknoddata3_u.pipe_attributes.mode.set_it)
        mode = (fsal_accessmode_t)
            parg->arg_mknod3.what.mknoddata3_u.pipe_attributes.mode.
            set_mode3_u.mode;
      else
        mode = (fsal_accessmode_t) 0;

      create_arg.dev_spec.major = 0;
      create_arg.dev_spec.minor = 0;
      break;

    default:
      pres->res_mknod3.status = NFS3ERR_BADTYPE;
      rc = NFS_REQ_OK;
      goto out;
    }

  /* Map the protocol node type to the cache_inode node type. */
  switch (parg->arg_mknod3.what.type)
    {
    case NF3CHR:
      nodetype = CHARACTER_FILE;
      break;
    case NF3BLK:
      nodetype = BLOCK_FILE;
      break;
    case NF3FIFO:
      nodetype = FIFO_FILE;
      break;
    case NF3SOCK:
      nodetype = SOCKET_FILE;
      break;
    default:
      pres->res_mknod3.status = NFS3ERR_BADTYPE;
      rc = NFS_REQ_OK;
      goto out;
    }

  //if(str_file_name == NULL || strlen(str_file_name) == 0)
  if(str_file_name == NULL || *str_file_name == '\0' )
    {
      pres->res_mknod3.status = NFS3ERR_INVAL;
      rc = NFS_REQ_OK;
      goto out;
    }

#ifdef _USE_QUOTA
  /* if quota support is active, then we should check is the FSAL allows
   * inode creation or not */
  fsal_status = FSAL_check_quota( pexport->fullpath,
                                  FSAL_QUOTA_INODES,
                                  FSAL_OP_CONTEXT_TO_UID( pcontext ) ) ;
  if( FSAL_IS_ERROR( fsal_status ) )
    {
      pres->res_mknod3.status = NFS3ERR_DQUOT;
      /* FIX: was 'return NFS_REQ_OK;' which leaked the parent_pentry
       * reference taken by nfs_FhandleToCache. */
      rc = NFS_REQ_OK;
      goto out;
    }
#endif /* _USE_QUOTA */

  /* convert node name
   * NOTE(review): a max length of 0 is passed here while nfs_Create uses
   * FSAL_MAX_NAME_LEN -- presumably 0 means "derive from strlen"; confirm
   * against FSAL_str2name's contract. */
  if((cache_status = cache_inode_error_convert(FSAL_str2name(str_file_name,
                                                             0,
                                                             &file_name))) ==
     CACHE_INODE_SUCCESS)
    {
      /*
       * Lookup node to see if it exists.  If so, use it.  Otherwise
       * create a new one.
       */
      node_pentry = cache_inode_lookup(parent_pentry,
                                       &file_name,
                                       &attr, pcontext, &cache_status_lookup);

      if(cache_status_lookup == CACHE_INODE_NOT_FOUND)
        {
          /* Create the node */
          if((node_pentry = cache_inode_create(parent_pentry,
                                               &file_name,
                                               nodetype,
                                               mode,
                                               &create_arg,
                                               &attr,
                                               pcontext, &cache_status)) != NULL)
            {
              MKNOD3resok *rok = &pres->res_mknod3.MKNOD3res_u.resok;

              /*
               * Get the FSAL handle for this entry
               */
              pfsal_handle = &node_pentry->handle;

              /* Build file handle */
              pres->res_mknod3.status =
                  nfs3_AllocateFH(&rok->obj.post_op_fh3_u.handle);
              if(pres->res_mknod3.status != NFS3_OK)
                {
                  /* FIX: was 'return NFS_REQ_OK;' which leaked the
                   * parent_pentry and node_pentry references. */
                  rc = NFS_REQ_OK;
                  goto out;
                }

              if(nfs3_FSALToFhandle(&rok->obj.post_op_fh3_u.handle,
                                    pfsal_handle, pexport) == 0)
                {
                  /* Digest failed: release the buffer allocated by
                   * nfs3_AllocateFH before reporting the error. */
                  gsh_free(rok->obj.post_op_fh3_u.handle.data.data_val);
                  pres->res_mknod3.status = NFS3ERR_INVAL;
                  rc = NFS_REQ_OK;
                  goto out;
                }

              /* Set Post Op Fh3 structure */
              rok->obj.handle_follows = TRUE;

              /* Build entry attributes */
              nfs_SetPostOpAttr(pexport, &attr, &rok->obj_attributes);

              /* Get the attributes of the parent after the operation */
              if(cache_inode_getattr(parent_pentry,
                                     &attr_parent_after,
                                     pcontext, &cache_status) != CACHE_INODE_SUCCESS)
                {
                  pres->res_mknod3.status = nfs3_Errno(cache_status);
                  rc = NFS_REQ_OK;
                  goto out;
                }

              /* Build Weak Cache Coherency data */
              nfs_SetWccData(pexport, ppre_attr, &attr_parent_after,
                             &rok->dir_wcc);

              pres->res_mknod3.status = NFS3_OK;
              rc = NFS_REQ_OK;
              goto out;
            } /* mknod sucess */
        } /* not found */
      else
        {
          /* object already exists or failure during lookup */
          if(cache_status_lookup == CACHE_INODE_SUCCESS)
            {
              /* Trying to create an entry that already exists */
              pres->res_mknod3.status = NFS3ERR_EXIST;
            }
          else
            {
              /* Server fault */
              pres->res_mknod3.status = NFS3ERR_INVAL;
            }

          nfs_SetWccData(pexport, NULL, NULL,
                         &(pres->res_mknod3.MKNOD3res_u.resfail.dir_wcc));

          rc = NFS_REQ_OK;
          goto out;
        }
    }

  /* If we are here, there was an error */
  rc = nfs_SetFailedStatus(pexport, preq->rq_vers, cache_status,
                           NULL, &pres->res_mknod3.status,
                           NULL, ppre_attr,
                           &(pres->res_mknod3.MKNOD3res_u.resfail.dir_wcc),
                           NULL, NULL);

out:
  /* return references */
  if (parent_pentry)
    cache_inode_put(parent_pentry);

  if (node_pentry)
    cache_inode_put(node_pentry);

  return (rc);
}                               /* nfs3_Mknod */
int nfs3_Readdirplus(nfs_arg_t * parg, exportlist_t * pexport, fsal_op_context_t * pcontext, cache_inode_client_t * pclient, hash_table_t * ht, struct svc_req *preq, nfs_res_t * pres) { static char __attribute__ ((__unused__)) funcName[] = "nfs3_Readdirplus"; typedef char entry_name_array_item_t[FSAL_MAX_NAME_LEN]; typedef char fh3_buffer_item_t[NFS3_FHSIZE]; unsigned int delta = 0; cache_entry_t *dir_pentry = NULL; cache_entry_t *pentry_dot_dot = NULL; unsigned long dircount; unsigned long maxcount; fsal_attrib_list_t dir_attr; fsal_attrib_list_t entry_attr; uint64_t begin_cookie; uint64_t end_cookie; uint64_t cache_inode_cookie; cache_inode_dir_entry_t **dirent_array = NULL; cookieverf3 cookie_verifier; int rc; unsigned int i = 0; unsigned int num_entries; unsigned long space_used; unsigned long estimated_num_entries; unsigned long asked_num_entries; cache_inode_file_type_t dir_filetype; cache_inode_endofdir_t eod_met = UNASSIGNED_EOD; cache_inode_status_t cache_status; cache_inode_status_t cache_status_gethandle; fsal_handle_t *pfsal_handle = NULL; entry_name_array_item_t *entry_name_array = NULL; fh3_buffer_item_t *fh3_array = NULL; entryplus3 reference_entry; READDIRPLUS3resok reference_reply; int dir_pentry_unlock = FALSE; if(isDebug(COMPONENT_NFSPROTO) || isDebug(COMPONENT_NFS_READDIR)) { char str[LEN_FH_STR]; log_components_t component; sprint_fhandle3(str, &(parg->arg_readdirplus3.dir)); if(isDebug(COMPONENT_NFSPROTO)) component = COMPONENT_NFSPROTO; else component = COMPONENT_NFS_READDIR; LogDebug(component, "REQUEST PROCESSING: Calling nfs3_Readdirplus handle: %s", str); } /* to avoid setting it on each error case */ pres->res_readdir3.READDIR3res_u.resfail.dir_attributes.attributes_follow = FALSE; dircount = parg->arg_readdirplus3.dircount; maxcount = parg->arg_readdirplus3.maxcount; begin_cookie = (unsigned int)parg->arg_readdirplus3.cookie; /* FIXME: This calculation over estimates the number of bytes that * READDIRPLUS3resok will use on the wire by 4 
bytes on x86_64. */ space_used = sizeof(reference_reply.dir_attributes.attributes_follow) + sizeof(reference_reply.dir_attributes.post_op_attr_u.attributes) + sizeof(reference_reply.cookieverf) + sizeof(reference_reply.reply.eof); estimated_num_entries = (dircount - space_used + sizeof(entry3 *)) / (sizeof(entry3) - sizeof(char *)*2); // estimated_num_entries *= 4; LogFullDebug(COMPONENT_NFS_READDIR, "nfs3_Readdirplus: dircount=%lu maxcount=%lu begin_cookie=%" PRIu64" space_used=%lu estimated_num_entries=%lu", dircount, maxcount, begin_cookie, space_used, estimated_num_entries); /* Is this a xattr FH ? */ if(nfs3_Is_Fh_Xattr(&(parg->arg_readdirplus3.dir))) return nfs3_Readdirplus_Xattr(parg, pexport, pcontext, pclient, ht, preq, pres); /* Convert file handle into a vnode */ if((dir_pentry = nfs_FhandleToCache(preq->rq_vers, NULL, &(parg->arg_readdirplus3.dir), NULL, NULL, &(pres->res_readdirplus3.status), NULL, &dir_attr, pcontext, pclient, ht, &rc)) == NULL) { /* return NFS_REQ_DROP ; */ return rc; } /* Extract the filetype */ dir_filetype = cache_inode_fsal_type_convert(dir_attr.type); /* Sanity checks -- must be a directory */ if(dir_filetype != DIRECTORY) { pres->res_readdirplus3.status = NFS3ERR_NOTDIR; return NFS_REQ_OK; } /* switch */ memset(cookie_verifier, 0, sizeof(cookieverf3)); /* * If cookie verifier is used, then an non-trivial value is * returned to the client This value is the mtime of * the directory. 
If verifier is unused (as in many NFS * Servers) then only a set of zeros is returned (trivial * value) */ if(pexport->UseCookieVerifier) memcpy(cookie_verifier, &(dir_attr.mtime), sizeof(dir_attr.mtime)); /* * nothing to do if != 0 because the area is already full of * zero */ if(pexport->UseCookieVerifier && (begin_cookie != 0)) { /* * Not the first call, so we have to check the cookie * verifier */ if(memcmp(cookie_verifier, parg->arg_readdirplus3.cookieverf, NFS3_COOKIEVERFSIZE) != 0) { pres->res_readdirplus3.status = NFS3ERR_BAD_COOKIE; return NFS_REQ_OK; } } if((dirent_array = (cache_inode_dir_entry_t **) Mem_Alloc_Label( estimated_num_entries * sizeof(cache_inode_dir_entry_t*), "cache_inode_dir_entry_t in nfs3_Readdirplus")) == NULL) { pres->res_readdirplus3.status = NFS3ERR_IO; return NFS_REQ_DROP; } pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries = NULL; pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.eof = FALSE; /** @todo XXXX fix this--compare nfs4_op_readdir */ /* How many entries will we retry from cache_inode ? */ if(begin_cookie > 1) { asked_num_entries = estimated_num_entries; cache_inode_cookie = begin_cookie; } else { asked_num_entries = ((estimated_num_entries > 2) ? estimated_num_entries - 2 : 0); /* Keep space for '.' and '..' 
*/ cache_inode_cookie = 0; } /* A definition that will be very useful to avoid very long names for variables */ #define RES_READDIRPLUS_REPLY pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply /* Call readdir */ if(cache_inode_readdir(dir_pentry, pexport->cache_inode_policy, cache_inode_cookie, asked_num_entries, &num_entries, &end_cookie, &eod_met, dirent_array, ht, &dir_pentry_unlock, pclient, pcontext, &cache_status) == CACHE_INODE_SUCCESS) { LogFullDebug(COMPONENT_NFS_READDIR, "Readdirplus3 -> Call to cache_inode_readdir( cookie=%" PRIu64", asked=%lu ) -> num_entries = %u", cache_inode_cookie, asked_num_entries, num_entries); if(eod_met == END_OF_DIR) { LogFullDebug(COMPONENT_NFS_READDIR, "+++++++++++++++++++++++++++++++++++++++++> EOD MET "); } /* If nothing was found, return nothing, but if cookie=0, we should return . and .. */ if((num_entries == 0) && (asked_num_entries != 0) && (begin_cookie > 1)) { pres->res_readdirplus3.status = NFS3_OK; pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries = NULL; pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.eof = TRUE; nfs_SetPostOpAttr(pcontext, pexport, dir_pentry, NULL, &(pres->res_readdirplus3.READDIRPLUS3res_u.resok. 
dir_attributes)); memcpy(pres->res_readdirplus3.READDIRPLUS3res_u.resok.cookieverf, cookie_verifier, sizeof(cookieverf3)); } else { /* Allocation of the structure for reply */ entry_name_array = (entry_name_array_item_t *) Mem_Alloc_Label(estimated_num_entries * (FSAL_MAX_NAME_LEN + 1), "entry_name_array in nfs3_Readdirplus"); if(entry_name_array == NULL) { /* after successful cache_inode_readdir, dir_pentry may be * read locked */ if (dir_pentry_unlock) V_r(&dir_pentry->lock); if( !CACHE_INODE_KEEP_CONTENT( dir_pentry->policy ) ) cache_inode_release_dirent( dirent_array, num_entries, pclient ) ; Mem_Free((char *)dirent_array); return NFS_REQ_DROP; } pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries = (entryplus3 *) Mem_Alloc_Label(estimated_num_entries * sizeof(entryplus3), "READDIRPLUS3res_u.resok.reply.entries"); if(pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries == NULL) { /* after successful cache_inode_readdir, dir_pentry may be * read locked */ if (dir_pentry_unlock) V_r(&dir_pentry->lock); if( !CACHE_INODE_KEEP_CONTENT( dir_pentry->policy ) ) cache_inode_release_dirent( dirent_array, num_entries, pclient ) ; Mem_Free((char *)dirent_array); Mem_Free((char *)entry_name_array); return NFS_REQ_DROP; } /* Allocation of the file handles */ fh3_array = (fh3_buffer_item_t *) Mem_Alloc_Label(estimated_num_entries * NFS3_FHSIZE, "Filehandle V3 in nfs3_Readdirplus"); if(fh3_array == NULL) { /* after successful cache_inode_readdir, dir_pentry may be * read locked */ if (dir_pentry_unlock) V_r(&dir_pentry->lock); if( !CACHE_INODE_KEEP_CONTENT( dir_pentry->policy ) ) cache_inode_release_dirent( dirent_array, num_entries, pclient ) ; Mem_Free((char *)dirent_array); Mem_Free((char *)entry_name_array); return NFS_REQ_DROP; } delta = 0; /* manage . and .. */ if(begin_cookie == 0) { /* Fill in '.' 
*/ if(estimated_num_entries > 0) { if((pfsal_handle = cache_inode_get_fsal_handle(dir_pentry, &cache_status_gethandle)) == NULL) { /* after successful cache_inode_readdir, dir_pentry * may be read locked */ if (dir_pentry_unlock) V_r(&dir_pentry->lock); if( !CACHE_INODE_KEEP_CONTENT( dir_pentry->policy ) ) cache_inode_release_dirent( dirent_array, num_entries, pclient ) ; Mem_Free((char *)dirent_array); Mem_Free((char *)entry_name_array); Mem_Free((char *)fh3_array); pres->res_readdirplus3.status = nfs3_Errno(cache_status_gethandle); return NFS_REQ_OK; } FSAL_DigestHandle(FSAL_GET_EXP_CTX(pcontext), FSAL_DIGEST_FILEID3, pfsal_handle, (caddr_t) & (RES_READDIRPLUS_REPLY.entries[0]. fileid)); RES_READDIRPLUS_REPLY.entries[0].name = entry_name_array[0]; strcpy(RES_READDIRPLUS_REPLY.entries[0].name, "."); RES_READDIRPLUS_REPLY.entries[0].cookie = 1; pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries[0]. name_handle.post_op_fh3_u.handle.data.data_val = (char *)fh3_array[0]; if(nfs3_FSALToFhandle (&pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries[0]. name_handle.post_op_fh3_u.handle, pfsal_handle, pexport) == 0) { /* after successful cache_inode_readdir, dir_pentry may * be read locked */ if (dir_pentry_unlock) V_r(&dir_pentry->lock); if( !CACHE_INODE_KEEP_CONTENT( dir_pentry->policy ) ) cache_inode_release_dirent( dirent_array, num_entries, pclient ) ; Mem_Free((char *)dirent_array); Mem_Free((char *)entry_name_array); Mem_Free((char *)fh3_array); pres->res_readdirplus3.status = NFS3ERR_BADHANDLE; return NFS_REQ_OK; } RES_READDIRPLUS_REPLY.entries[0].name_attributes.attributes_follow = FALSE; RES_READDIRPLUS_REPLY.entries[0].name_handle.handle_follows = FALSE; entry_attr = dir_pentry->attributes; /* Set PostPoFh3 structure */ pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries[0]. name_handle.handle_follows = TRUE; pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries[0]. 
name_handle.post_op_fh3_u.handle.data.data_len = sizeof(file_handle_v3_t); nfs_SetPostOpAttr(pcontext, pexport, dir_pentry, &entry_attr, &(pres->res_readdirplus3.READDIRPLUS3res_u.resok. reply.entries[0].name_attributes)); LogFullDebug(COMPONENT_NFS_READDIR, "Readdirplus3 -> i=0 num_entries=%d space_used=%lu maxcount=%lu Name=. FileId=%016llx Cookie=%llu", num_entries, space_used, maxcount, RES_READDIRPLUS_REPLY.entries[0].fileid, RES_READDIRPLUS_REPLY.entries[0].cookie); delta += 1; } } /* Fill in '..' */ if(begin_cookie <= 1) { if(estimated_num_entries > delta) { if((pentry_dot_dot = cache_inode_lookupp_sw(dir_pentry, ht, pclient, pcontext, &cache_status_gethandle, !dir_pentry_unlock)) == NULL) { /* after successful cache_inode_readdir, dir_pentry may * be read locked */ if (dir_pentry_unlock) V_r(&dir_pentry->lock); if( !CACHE_INODE_KEEP_CONTENT( dir_pentry->policy ) ) cache_inode_release_dirent( dirent_array, num_entries, pclient ) ; Mem_Free((char *)dirent_array); Mem_Free((char *)entry_name_array); Mem_Free((char *)fh3_array); pres->res_readdirplus3.status = nfs3_Errno(cache_status_gethandle); return NFS_REQ_OK; } if((pfsal_handle = cache_inode_get_fsal_handle(pentry_dot_dot, &cache_status_gethandle)) == NULL) { /* after successful cache_inode_readdir, dir_pentry may * be read locked */ if (dir_pentry_unlock) V_r(&dir_pentry->lock); if( !CACHE_INODE_KEEP_CONTENT( dir_pentry->policy ) ) cache_inode_release_dirent( dirent_array, num_entries, pclient ) ; Mem_Free((char *)dirent_array); Mem_Free((char *)entry_name_array); Mem_Free((char *)fh3_array); pres->res_readdirplus3.status = nfs3_Errno(cache_status_gethandle); return NFS_REQ_OK; } FSAL_DigestHandle(FSAL_GET_EXP_CTX(pcontext), FSAL_DIGEST_FILEID3, pfsal_handle, (caddr_t) & (RES_READDIRPLUS_REPLY.entries[delta]. 
fileid)); RES_READDIRPLUS_REPLY.entries[delta].name = entry_name_array[delta]; strcpy(RES_READDIRPLUS_REPLY.entries[delta].name, ".."); /* Getting a file handle */ pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries[delta]. name_handle.post_op_fh3_u.handle.data.data_val = (char *)fh3_array[delta]; if(nfs3_FSALToFhandle (&pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries[delta]. name_handle.post_op_fh3_u.handle, pfsal_handle, pexport) == 0) { /* after successful cache_inode_readdir, dir_pentry may * be read locked */ if (dir_pentry_unlock) V_r(&dir_pentry->lock); if( !CACHE_INODE_KEEP_CONTENT( dir_pentry->policy ) ) cache_inode_release_dirent( dirent_array, num_entries, pclient ) ; Mem_Free((char *)dirent_array); Mem_Free((char *)entry_name_array); Mem_Free((char *)fh3_array); pres->res_readdirplus3.status = NFS3ERR_BADHANDLE; return NFS_REQ_OK; } RES_READDIRPLUS_REPLY.entries[delta].cookie = 2; RES_READDIRPLUS_REPLY.entries[delta].name_attributes.attributes_follow = FALSE; RES_READDIRPLUS_REPLY.entries[delta].name_handle.handle_follows = FALSE; entry_attr = pentry_dot_dot->attributes; /* Set PostPoFh3 structure */ pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries[delta]. name_handle.handle_follows = TRUE; pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries[delta]. name_handle.post_op_fh3_u.handle.data.data_len = sizeof(file_handle_v3_t); nfs_SetPostOpAttr(pcontext, pexport, pentry_dot_dot, &entry_attr, &(pres->res_readdirplus3.READDIRPLUS3res_u.resok. reply.entries[delta].name_attributes)); LogFullDebug(COMPONENT_NFS_READDIR, "Readdirplus3 -> i=%d num_entries=%d space_used=%lu maxcount=%lu Name=.. FileId=%016llx Cookie=%llu", delta, num_entries, space_used, maxcount, RES_READDIRPLUS_REPLY.entries[delta].fileid, RES_READDIRPLUS_REPLY.entries[delta].cookie); } RES_READDIRPLUS_REPLY.entries[0].nextentry = &(RES_READDIRPLUS_REPLY.entries[delta]); if(num_entries > delta + 1) /* not 0 ??? 
*/ RES_READDIRPLUS_REPLY.entries[delta].nextentry = &(RES_READDIRPLUS_REPLY.entries[delta + 1]); else RES_READDIRPLUS_REPLY.entries[delta].nextentry = NULL; delta += 1; } /* if( begin_cookie == 0 ) */ for(i = delta; i < num_entries + delta; i++) { unsigned long needed; /* maxcount is the size with the FH and attributes overhead, * so entryplus3 is used instead of entry3. The data structures * in nfs23.h have funny padding depending on the arch (32 or 64). * We can't get an accurate estimate by simply using * sizeof(entryplus3). */ /* FIXME: There is still a 4 byte over estimate here on x86_64. */ /** @todo Remove cookie offset calculation in readdir and readdirplus (obsoleted) */ needed = sizeof(reference_entry) + NFS3_FHSIZE + ((strlen(dirent_array[i - delta]->name.name) + 3) & ~3); /* if delta == 1 or 2, then "." and ".." have already been added * to the readdirplus reply. */ if (i == delta) { needed += needed*delta /* size of a dir entry in reply */ - ((strlen(dirent_array[i - delta]->name.name) + 3) & ~3)*delta /* size of filename for current entry */ + 4*delta; /* size of "." and ".." filenames in reply */ } if((space_used += needed) > maxcount) { /* If delta != 0, then we already added "." or ".." to the reply. 
*/ if(i == delta && delta == 0) { /* Not enough room to make even a single reply */ /* after successful cache_inode_readdir, dir_pentry may * be read locked */ if (dir_pentry_unlock) V_r(&dir_pentry->lock); if( !CACHE_INODE_KEEP_CONTENT( dir_pentry->policy ) ) cache_inode_release_dirent( dirent_array, num_entries, pclient ) ; Mem_Free((char *)dirent_array); Mem_Free((char *)entry_name_array); Mem_Free((char *)fh3_array); pres->res_readdirplus3.status = NFS3ERR_TOOSMALL; return NFS_REQ_OK; } break; /* Make post traitement */ } /* * Get information specific to this entry */ if((pfsal_handle = cache_inode_get_fsal_handle(dirent_array[i - delta]->pentry, &cache_status_gethandle)) == NULL) { /* after successful cache_inode_readdir, dir_pentry may be * read locked */ if (dir_pentry_unlock) V_r(&dir_pentry->lock); if( !CACHE_INODE_KEEP_CONTENT( dir_pentry->policy ) ) cache_inode_release_dirent( dirent_array, num_entries, pclient ) ; Mem_Free((char *)dirent_array); Mem_Free((char *)entry_name_array); Mem_Free((char *)fh3_array); pres->res_readdirplus3.status = nfs3_Errno(cache_status_gethandle); return NFS_REQ_OK; } /* Now fill in the replyed entryplus3 list */ FSAL_DigestHandle(FSAL_GET_EXP_CTX(pcontext), FSAL_DIGEST_FILEID3, pfsal_handle, (caddr_t) & (RES_READDIRPLUS_REPLY.entries[i].fileid)); FSAL_name2str(&dirent_array[i - delta]->name, entry_name_array[i], FSAL_MAX_NAME_LEN); RES_READDIRPLUS_REPLY.entries[i].name = entry_name_array[i]; LogFullDebug(COMPONENT_NFS_READDIR, "Readdirplus3 -> i=%u num_entries=%u delta=%u " "num_entries + delta - 1=%u end_cookie=%"PRIu64, i, num_entries, delta, num_entries + delta - 1, end_cookie); if(i != num_entries + delta - 1) RES_READDIRPLUS_REPLY.entries[i].cookie = dirent_array[i - delta]->cookie; else RES_READDIRPLUS_REPLY.entries[i].cookie = end_cookie; RES_READDIRPLUS_REPLY.entries[i].name_attributes.attributes_follow = FALSE; RES_READDIRPLUS_REPLY.entries[i].name_handle.handle_follows = FALSE; entry_attr = dirent_array[i - 
delta]->pentry->attributes; pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries[i].name_handle.post_op_fh3_u.handle.data.data_val = (char *)fh3_array[i]; /* Compute the NFSv3 file handle */ if(nfs3_FSALToFhandle (&pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries[i]. name_handle.post_op_fh3_u.handle, pfsal_handle, pexport) == 0) { /* after successful cache_inode_readdir, dir_pentry may be * read locked */ if (dir_pentry_unlock) V_r(&dir_pentry->lock); if( !CACHE_INODE_KEEP_CONTENT( dir_pentry->policy ) ) cache_inode_release_dirent( dirent_array, num_entries, pclient ) ; Mem_Free((char *)dirent_array); Mem_Free((char *)entry_name_array); Mem_Free((char *)fh3_array); pres->res_readdirplus3.status = NFS3ERR_BADHANDLE; return NFS_REQ_OK; } /* Set PostPoFh3 structure */ pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries[i].name_handle.handle_follows = TRUE; pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries[i].name_handle.post_op_fh3_u.handle.data.data_len = sizeof(file_handle_v3_t); nfs_SetPostOpAttr(pcontext, pexport, dirent_array[i - delta]->pentry, &entry_attr, &(pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries[i].name_attributes)); LogFullDebug(COMPONENT_NFS_READDIR, "Readdirplus3 -> i=%d, num_entries=%d needed=%lu space_used=%lu maxcount=%lu Name=%s FileId=%016llx Cookie=%llu", i, num_entries, needed, space_used, maxcount, dirent_array[i - delta]->name.name, RES_READDIRPLUS_REPLY.entries[i].fileid, RES_READDIRPLUS_REPLY.entries[i].cookie); RES_READDIRPLUS_REPLY.entries[i].nextentry = NULL; if(i != 0) RES_READDIRPLUS_REPLY.entries[i - 1].nextentry = &(RES_READDIRPLUS_REPLY.entries[i]); } pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.eof = FALSE; } nfs_SetPostOpAttr(pcontext, pexport, dir_pentry, &dir_attr, &(pres->res_readdirplus3.READDIRPLUS3res_u.resok.dir_attributes)); memcpy(pres->res_readdirplus3.READDIRPLUS3res_u.resok.cookieverf, cookie_verifier, sizeof(cookieverf3)); pres->res_readdirplus3.status = 
NFS3_OK; if((eod_met == END_OF_DIR) && (i == num_entries + delta)) { /* End of directory */ LogFullDebug(COMPONENT_NFS_READDIR, "============================================================> EOD MET !!!!!!"); pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.eof = TRUE; } else pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.eof = FALSE; nfs_SetPostOpAttr(pcontext, pexport, dir_pentry, &dir_attr, &(pres->res_readdirplus3.READDIRPLUS3res_u.resok.dir_attributes)); memcpy(pres->res_readdirplus3.READDIRPLUS3res_u.resok.cookieverf, cookie_verifier, sizeof(cookieverf3)); LogFullDebug(COMPONENT_NFS_READDIR, "============================================================"); /* after successful cache_inode_readdir, dir_pentry may be * read locked */ if (dir_pentry_unlock) V_r(&dir_pentry->lock); /* Free the memory */ if( !CACHE_INODE_KEEP_CONTENT( dir_pentry->policy ) ) cache_inode_release_dirent( dirent_array, num_entries, pclient ) ; Mem_Free((char *)dirent_array); return NFS_REQ_OK; } /* If we are here, there was an error */ /* after successful cache_inode_readdir, dir_pentry may be * read locked */ if (dir_pentry_unlock) V_r(&dir_pentry->lock); /* Free the memory */ if( !CACHE_INODE_KEEP_CONTENT( dir_pentry->policy ) ) cache_inode_release_dirent( dirent_array, num_entries, pclient ) ; Mem_Free((char *)dirent_array); Mem_Free((char *)entry_name_array); Mem_Free((char *)fh3_array); /* Is this a retryable error */ if(nfs_RetryableError(cache_status)) return NFS_REQ_DROP; /* Set failed status */ nfs_SetFailedStatus(pcontext, pexport, NFS_V3, cache_status, NULL, &pres->res_readdirplus3.status, dir_pentry, &(pres->res_readdirplus3.READDIRPLUS3res_u.resfail.dir_attributes), NULL, NULL, NULL, NULL, NULL, NULL); return NFS_REQ_OK; } /* nfs3_Readdirplus */
/**
 * nfs4_op_putfh: NFS4_OP_PUTFH operation.
 *
 * Sets the compound's current filehandle (and mounted_on_FH) to the
 * filehandle supplied in the PUTFH argument, then resolves the matching
 * cache entry unless the handle belongs to the pseudo filesystem.
 *
 * @param op    [IN]    the PUTFH argument (via the arg_PUTFH4 macro)
 * @param data  [INOUT] compound request context; currentFH, mounted_on_FH,
 *                      current_entry, current_filetype and pexport are updated
 * @param resp  [OUT]   the PUTFH result (via the res_PUTFH4 macro)
 *
 * @return NFS4_OK on success, an NFS4ERR_* status otherwise.
 */
int nfs4_op_putfh(struct nfs_argop4 *op, compound_data_t * data, struct nfs_resop4 *resp)
{
  int rc;
  int error;
  fsal_attrib_list_t attr;
  char outstr[1024];
  char __attribute__ ((__unused__)) funcname[] = "nfs4_op_putfh";

  resp->resop = NFS4_OP_PUTFH;
  res_PUTFH4.status = NFS4_OK;

  /* If there is no FH at all, the client forgot to supply one */
  if(nfs4_Is_Fh_Empty(&(arg_PUTFH4.object)))
    {
      res_PUTFH4.status = NFS4ERR_NOFILEHANDLE;
      return res_PUTFH4.status;
    }

  /* If the filehandle is structurally invalid */
  if(nfs4_Is_Fh_Invalid(&(arg_PUTFH4.object)))
    {
      res_PUTFH4.status = NFS4ERR_BADHANDLE;
      return res_PUTFH4.status;
    }

  /* Tests if the filehandle is expired (for volatile filehandles) */
  if(nfs4_Is_Fh_Expired(&(arg_PUTFH4.object)))
    {
      res_PUTFH4.status = NFS4ERR_FHEXPIRED;
      return res_PUTFH4.status;
    }

  /* If no currentFH was set yet, allocate storage for one */
  if(data->currentFH.nfs_fh4_len == 0)
    {
      if((error = nfs4_AllocateFH(&(data->currentFH))) != NFS4_OK)
        {
          res_PUTFH4.status = error;
          return res_PUTFH4.status;
        }
    }

  /* The same is to be done with mounted_on_FH */
  if(data->mounted_on_FH.nfs_fh4_len == 0)
    {
      if((error = nfs4_AllocateFH(&(data->mounted_on_FH))) != NFS4_OK)
        {
          res_PUTFH4.status = error;
          return res_PUTFH4.status;
        }
    }

  /* Copy the filehandle length from the argument into both slots */
  data->currentFH.nfs_fh4_len = arg_PUTFH4.object.nfs_fh4_len;
  data->mounted_on_FH.nfs_fh4_len = arg_PUTFH4.object.nfs_fh4_len;

  /* Put the handle bytes in place */
  memcpy(data->currentFH.nfs_fh4_val, arg_PUTFH4.object.nfs_fh4_val,
         arg_PUTFH4.object.nfs_fh4_len);
  memcpy(data->mounted_on_FH.nfs_fh4_val, arg_PUTFH4.object.nfs_fh4_val,
         arg_PUTFH4.object.nfs_fh4_len);

  nfs4_sprint_fhandle(&arg_PUTFH4.object, outstr);
  LogDebug(COMPONENT_NFS_V4, "NFS4_OP_PUTFH CURRENTFH BEFORE: File handle = %s", outstr);

  /* If the filehandle is not a pseudo-fs file handle, get the entry related
   * to it; otherwise use fake values (no cache entry, no export) */
  if(nfs4_Is_Fh_Pseudo(&(data->currentFH)))
    {
      data->current_entry = NULL;
      data->current_filetype = DIR_BEGINNING;
      data->pexport = NULL;      /* No exportlist is related to pseudo fs */
    }
  else
    {
      /* If data->pexport is null, a junction from the pseudo fs was
       * traversed; credentials and export have to be updated */
      if(data->pexport == NULL)
        {
          if((error = nfs4_SetCompoundExport(data)) != NFS4_OK)
            {
              res_PUTFH4.status = error;
              return res_PUTFH4.status;
            }
        }

      /* Build the pentry: look the handle up in (or populate) the inode cache */
      if((data->current_entry = nfs_FhandleToCache(NFS_V4,
                                                   NULL,
                                                   NULL,
                                                   &(data->currentFH),
                                                   NULL,
                                                   NULL,
                                                   &(res_PUTFH4.status),
                                                   &attr,
                                                   data->pcontext,
                                                   data->pclient,
                                                   data->ht, &rc)) == NULL)
        {
          res_PUTFH4.status = NFS4ERR_BADHANDLE;
          return res_PUTFH4.status;
        }

      /* Extract the filetype so later ops can sanity-check it */
      data->current_filetype = cache_inode_fsal_type_convert(attr.type);
    }

  return NFS4_OK;
}                               /* nfs4_op_putfh */
int nfs_Rename(nfs_arg_t * parg /* IN */ , exportlist_t * pexport /* IN */ , fsal_op_context_t * pcontext /* IN */ , cache_inode_client_t * pclient /* IN */ , hash_table_t * ht /* INOUT */ , struct svc_req *preq /* IN */ , nfs_res_t * pres /* OUT */ ) { static char __attribute__ ((__unused__)) funcName[] = "nfs_Rename"; char *str_entry_name = NULL; fsal_name_t entry_name; char *str_new_entry_name = NULL; fsal_name_t new_entry_name; cache_entry_t *pentry = NULL; cache_entry_t *new_pentry = NULL; cache_entry_t *parent_pentry = NULL; cache_entry_t *new_parent_pentry = NULL; cache_entry_t *should_not_exists = NULL; cache_entry_t *should_exists = NULL; cache_inode_status_t cache_status; int rc; fsal_attrib_list_t *ppre_attr; fsal_attrib_list_t pre_attr; fsal_attrib_list_t *pnew_pre_attr; fsal_attrib_list_t new_attr; fsal_attrib_list_t new_parent_attr; fsal_attrib_list_t attr; fsal_attrib_list_t tst_attr; cache_inode_file_type_t parent_filetype; cache_inode_file_type_t new_parent_filetype; if(preq->rq_vers == NFS_V3) { /* to avoid setting it on each error case */ pres->res_rename3.RENAME3res_u.resfail.fromdir_wcc.before.attributes_follow = FALSE; pres->res_rename3.RENAME3res_u.resfail.fromdir_wcc.after.attributes_follow = FALSE; pres->res_rename3.RENAME3res_u.resfail.todir_wcc.before.attributes_follow = FALSE; pres->res_rename3.RENAME3res_u.resfail.todir_wcc.after.attributes_follow = FALSE; ppre_attr = NULL; pnew_pre_attr = NULL; } /* Convert fromdir file handle into a cache_entry */ if((parent_pentry = nfs_FhandleToCache(preq->rq_vers, &(parg->arg_rename2.from.dir), &(parg->arg_rename3.from.dir), NULL, &(pres->res_dirop2.status), &(pres->res_create3.status), NULL, &pre_attr, pcontext, pclient, ht, &rc)) == NULL) { /* Stale NFS FH ? 
*/ return rc; } /* Convert todir file handle into a cache_entry */ if((new_parent_pentry = nfs_FhandleToCache(preq->rq_vers, &(parg->arg_rename2.to.dir), &(parg->arg_rename3.to.dir), NULL, &(pres->res_dirop2.status), &(pres->res_create3.status), NULL, &new_parent_attr, pcontext, pclient, ht, &rc)) == NULL) { /* Stale NFS FH ? */ return rc; } /* get the attr pointers */ ppre_attr = &pre_attr; pnew_pre_attr = &new_parent_attr; /* Get the filetypes */ parent_filetype = cache_inode_fsal_type_convert(pre_attr.type); new_parent_filetype = cache_inode_fsal_type_convert(new_parent_attr.type); /* * Sanity checks: we must manage directories */ if((parent_filetype != DIR_BEGINNING && parent_filetype != DIR_CONTINUE) || (new_parent_filetype != DIR_BEGINNING && new_parent_filetype != DIR_CONTINUE)) { switch (preq->rq_vers) { case NFS_V2: pres->res_stat2 = NFSERR_NOTDIR; break; case NFS_V3: pres->res_rename3.status = NFS3ERR_NOTDIR; break; } return NFS_REQ_OK; } switch (preq->rq_vers) { case NFS_V2: str_entry_name = parg->arg_rename2.from.name; str_new_entry_name = parg->arg_rename2.to.name; break; case NFS_V3: str_entry_name = parg->arg_rename3.from.name; str_new_entry_name = parg->arg_rename3.to.name; break; } pentry = new_pentry = NULL; if(str_entry_name == NULL || strlen(str_entry_name) == 0 || str_new_entry_name == NULL || strlen(str_new_entry_name) == 0 || FSAL_IS_ERROR(FSAL_str2name(str_entry_name, FSAL_MAX_NAME_LEN, &entry_name)) || FSAL_IS_ERROR(FSAL_str2name(str_new_entry_name, FSAL_MAX_NAME_LEN, &new_entry_name))) { cache_status = CACHE_INODE_INVALID_ARGUMENT; } else { /* * Lookup file to see if new entry exists * */ should_not_exists = cache_inode_lookup(new_parent_pentry, &new_entry_name, &tst_attr, ht, pclient, pcontext, &cache_status); if(cache_status == CACHE_INODE_NOT_FOUND) { /* We need to lookup over the old entry also */ should_exists = cache_inode_lookup(parent_pentry, &entry_name, &tst_attr, ht, pclient, pcontext, &cache_status); /* Rename entry */ 
if(cache_status == CACHE_INODE_SUCCESS) cache_inode_rename(parent_pentry, &entry_name, new_parent_pentry, &new_entry_name, &attr, &new_attr, ht, pclient, pcontext, &cache_status); if(cache_status == CACHE_INODE_SUCCESS) { switch (preq->rq_vers) { case NFS_V2: pres->res_stat2 = NFS_OK; break; case NFS_V3: /* * Build Weak Cache Coherency * data */ nfs_SetWccData(pcontext, pexport, parent_pentry, ppre_attr, &attr, &(pres->res_rename3.RENAME3res_u.resok.fromdir_wcc)); nfs_SetWccData(pcontext, pexport, new_parent_pentry, pnew_pre_attr, &new_attr, &(pres->res_rename3.RENAME3res_u.resok.todir_wcc)); pres->res_rename3.status = NFS3_OK; break; } return NFS_REQ_OK; } } else { /* If name are the same (basically renaming a/file1 to a/file1, this is a non-erroneous situation to be managed */ if(new_parent_pentry == parent_pentry) { if(!FSAL_namecmp(&new_entry_name, &entry_name)) { /* trying to rename a file to himself, this is allowed */ cache_status = CACHE_INODE_SUCCESS; switch (preq->rq_vers) { case NFS_V2: pres->res_stat2 = NFS_OK; break; case NFS_V3: /* * Build Weak Cache Coherency * data */ nfs_SetWccData(pcontext, pexport, parent_pentry, ppre_attr, &attr, &(pres->res_rename3.RENAME3res_u.resok.fromdir_wcc)); nfs_SetWccData(pcontext, pexport, parent_pentry, ppre_attr, &attr, &(pres->res_rename3.RENAME3res_u.resok.todir_wcc)); pres->res_rename3.status = NFS3_OK; break; } return NFS_REQ_OK; } } /* New entry already exists. In this case (see RFC), entry should be compatible: Both are non-directories or * both are directories and 'todir' is empty. 
If compatible, old 'todir' entry is scratched, if not returns EEXISTS */ if(should_not_exists != NULL) { /* We need to lookup over the old entry also */ if((should_exists = cache_inode_lookup(parent_pentry, &entry_name, &tst_attr, ht, pclient, pcontext, &cache_status)) != NULL) { if(cache_inode_type_are_rename_compatible (should_exists, should_not_exists)) { /* Remove the old entry before renaming it */ if(cache_inode_remove(new_parent_pentry, &new_entry_name, &tst_attr, ht, pclient, pcontext, &cache_status) == CACHE_INODE_SUCCESS) { if(cache_inode_rename(parent_pentry, &entry_name, new_parent_pentry, &new_entry_name, &attr, &new_attr, ht, pclient, pcontext, &cache_status) == CACHE_INODE_SUCCESS) { /* trying to rename a file to himself, this is allowed */ switch (preq->rq_vers) { case NFS_V2: pres->res_stat2 = NFS_OK; break; case NFS_V3: /* * Build Weak Cache Coherency * data */ nfs_SetWccData(pcontext, pexport, parent_pentry, ppre_attr, &attr, &(pres->res_rename3.RENAME3res_u.resok. fromdir_wcc)); nfs_SetWccData(pcontext, pexport, parent_pentry, ppre_attr, &attr, &(pres->res_rename3.RENAME3res_u.resok. todir_wcc)); pres->res_rename3.status = NFS3_OK; break; } return NFS_REQ_OK; } } } } /* if( cache_inode_type_are_rename_compatible( should_exists, should_not_exists ) ) */ } /* if( ( should_exists = cache_inode_lookup( parent_pentry, .... 
*/ /* If this point is reached, then destination object already exists with that name in the directory and types are not compatible, we should return that the file exists */ cache_status = CACHE_INODE_ENTRY_EXISTS; } /* if( should_not_exists != NULL ) */ } /* If we are here, there was an error */ if(nfs_RetryableError(cache_status)) { return NFS_REQ_DROP; } nfs_SetFailedStatus(pcontext, pexport, preq->rq_vers, cache_status, &pres->res_stat2, &pres->res_rename3.status, NULL, NULL, parent_pentry, ppre_attr, &(pres->res_rename3.RENAME3res_u.resfail.fromdir_wcc), new_parent_pentry, pnew_pre_attr, &(pres->res_rename3.RENAME3res_u.resfail.todir_wcc)); return NFS_REQ_OK; } /* nfs_Rename */
/**
 * nfs_Mkdir: NFSPROC_MKDIR / NFSPROC3_MKDIR implementation.
 *
 * Creates a directory under the parent named by the request handle, unless
 * an entry with that name already exists. On V3 success a post-op handle,
 * post-op attributes and WCC data are returned.
 *
 * @param parg     [IN]  request arguments (mkdir2 / mkdir3 variants)
 * @param pexport  [IN]  export entry for the request
 * @param pcontext [IN]  FSAL credentials
 * @param pworker  [IN]  worker thread data (unused here)
 * @param preq     [IN]  RPC request (selects NFS_V2 / NFS_V3)
 * @param pres     [OUT] reply structure
 *
 * @return NFS_REQ_OK (references on cache entries are released at "out").
 */
int nfs_Mkdir(nfs_arg_t *parg,
              exportlist_t *pexport,
              fsal_op_context_t *pcontext,
              nfs_worker_data_t *pworker,
              struct svc_req *preq,
              nfs_res_t *pres)
{
  char *str_dir_name = NULL;
  fsal_accessmode_t mode = 0;
  cache_entry_t *dir_pentry = NULL;
  cache_entry_t *parent_pentry = NULL;
  fsal_attrib_list_t parent_attr;
  fsal_attrib_list_t attr;
  fsal_attrib_list_t *ppre_attr;
  fsal_attrib_list_t attr_parent_after;
  cache_inode_file_type_t parent_filetype;
  fsal_handle_t *pfsal_handle;
  fsal_name_t dir_name;
  cache_inode_status_t cache_status = CACHE_INODE_SUCCESS;
  cache_inode_status_t cache_status_lookup;
  cache_inode_create_arg_t create_arg;
  int rc = NFS_REQ_OK;
#ifdef _USE_QUOTA
  fsal_status_t fsal_status ;
#endif

  memset(&create_arg, 0, sizeof(create_arg));

  /* Trace the request (handle + name) when protocol debugging is on */
  if(isDebug(COMPONENT_NFSPROTO))
    {
      char str[LEN_FH_STR];

      switch (preq->rq_vers)
        {
        case NFS_V2:
          str_dir_name = parg->arg_mkdir2.where.name;
          break;
        case NFS_V3:
          str_dir_name = parg->arg_mkdir3.where.name;
          break;
        }

      nfs_FhandleToStr(preq->rq_vers,
                       &(parg->arg_mkdir2.where.dir),
                       &(parg->arg_mkdir3.where.dir),
                       NULL,
                       str);
      LogDebug(COMPONENT_NFSPROTO,
               "REQUEST PROCESSING: Calling nfs_Mkdir handle: %s name: %s",
               str, str_dir_name);
    }

  if(preq->rq_vers == NFS_V3)
    {
      /* to avoid setting it on each error case */
      pres->res_mkdir3.MKDIR3res_u.resfail.dir_wcc.before.attributes_follow = FALSE;
      pres->res_mkdir3.MKDIR3res_u.resfail.dir_wcc.after.attributes_follow = FALSE;
      ppre_attr = NULL;
    }

  /* Resolve the parent directory handle to a cache entry */
  if((parent_pentry = nfs_FhandleToCache(preq->rq_vers,
                                         &(parg->arg_mkdir2.where.dir),
                                         &(parg->arg_mkdir3.where.dir),
                                         NULL,
                                         &(pres->res_dirop2.status),
                                         &(pres->res_mkdir3.status),
                                         NULL,
                                         &parent_attr,
                                         pcontext, &rc)) == NULL)
    {
      /* Stale NFS FH ? */
      goto out;
    }

  /* get directory attributes before action (for V3 reply) */
  ppre_attr = &parent_attr;

  /* Extract the filetype */
  parent_filetype = cache_inode_fsal_type_convert(parent_attr.type);

  /*
   * Sanity checks: parent must be a directory
   */
  if(parent_filetype != DIRECTORY)
    {
      switch (preq->rq_vers)
        {
        case NFS_V2:
          pres->res_dirop2.status = NFSERR_NOTDIR;
          break;

        case NFS_V3:
          pres->res_mkdir3.status = NFS3ERR_NOTDIR;
          break;
        }
      rc = NFS_REQ_OK;
      goto out;
    }

#ifdef _USE_QUOTA
  /* if quota support is active, then we should check is the FSAL allows
   * inode creation or not */
  fsal_status = FSAL_check_quota( pexport->fullpath,
                                  FSAL_QUOTA_INODES,
                                  FSAL_OP_CONTEXT_TO_UID( pcontext ) ) ;
  if( FSAL_IS_ERROR( fsal_status ) )
    {
      switch (preq->rq_vers)
        {
        case NFS_V2:
          pres->res_dirop2.status = NFSERR_DQUOT;
          break;

        case NFS_V3:
          pres->res_mkdir3.status = NFS3ERR_DQUOT;
          break;
        }
      rc = NFS_REQ_OK ;
      goto out;
    }
#endif /* _USE_QUOTA */

  /* Extract name and requested mode per protocol version; a V2 mode of
   * (unsigned int)-1 and an unset V3 mode both mean "no mode given" (0) */
  switch (preq->rq_vers)
    {
    case NFS_V2:
      str_dir_name = parg->arg_mkdir2.where.name;

      if(parg->arg_mkdir2.attributes.mode != (unsigned int)-1)
        {
          mode = (fsal_accessmode_t) parg->arg_mkdir2.attributes.mode;
        }
      else
        {
          mode = (fsal_accessmode_t) 0;
        }
      break;

    case NFS_V3:
      str_dir_name = parg->arg_mkdir3.where.name;

      if(parg->arg_mkdir3.attributes.mode.set_it == TRUE)
        mode = (fsal_accessmode_t) parg->arg_mkdir3.attributes.mode.set_mode3_u.mode;
      else
        mode = (fsal_accessmode_t) 0;
      break;
    }

  //if(str_dir_name == NULL || strlen(str_dir_name) == 0)
  if(str_dir_name == NULL || *str_dir_name == '\0' )
    {
      if(preq->rq_vers == NFS_V2)
        pres->res_dirop2.status = NFSERR_IO;
      if(preq->rq_vers == NFS_V3)
        pres->res_mkdir3.status = NFS3ERR_INVAL;
    }
  else
    {
      /* Make the directory */
      /* NOTE(review): the second argument to FSAL_str2name is 0 here while
       * sibling procedures pass FSAL_MAX_NAME_LEN — presumably 0 means
       * "use strlen"; confirm against the FSAL_str2name contract. */
      if((cache_status = cache_inode_error_convert(FSAL_str2name(str_dir_name,
                                                                 0,
                                                                 &dir_name))) ==
         CACHE_INODE_SUCCESS)
        {
          /*
           * Lookup file to see if it exists. If so, it is an error (EXIST);
           * otherwise create a new one.
           */
          dir_pentry = cache_inode_lookup(parent_pentry,
                                          &dir_name,
                                          &attr, pcontext, &cache_status_lookup);

          if(cache_status_lookup == CACHE_INODE_NOT_FOUND)
            {
              /* The create_arg structure carries the "newly created directory"
               * flag down to cache_inode_new_entry via cache_inode_create */
              create_arg.newly_created_dir = TRUE;

              /* Create the directory */
              if((dir_pentry = cache_inode_create(parent_pentry,
                                                  &dir_name,
                                                  DIRECTORY,
                                                  mode,
                                                  &create_arg,
                                                  &attr,
                                                  pcontext, &cache_status)) != NULL)
                {
                  /*
                   * Get the FSAL handle for this entry
                   */
                  pfsal_handle = &dir_pentry->handle;

                  if(preq->rq_vers == NFS_V2)
                    {
                      DIROP2resok *d2ok = &pres->res_dirop2.DIROP2res_u.diropok;

                      /* Build file handle */
                      if(!nfs2_FSALToFhandle(&d2ok->file, pfsal_handle, pexport))
                        pres->res_dirop2.status = NFSERR_IO;
                      else
                        {
                          /*
                           * Build entry attributes
                           */
                          if(nfs2_FSALattr_To_Fattr(pexport, &attr,
                                                    &d2ok->attributes) == 0)
                            pres->res_dirop2.status = NFSERR_IO;
                          else
                            pres->res_dirop2.status = NFS_OK;
                        }
                    }
                  else
                    {
                      MKDIR3resok *d3ok = &pres->res_mkdir3.MKDIR3res_u.resok;

                      /* Build file handle */
                      pres->res_mkdir3.status =
                          nfs3_AllocateFH(&d3ok->obj.post_op_fh3_u.handle);
                      if(pres->res_mkdir3.status != NFS3_OK)
                        {
                          rc = NFS_REQ_OK;
                          goto out;
                        }

                      if(nfs3_FSALToFhandle(&d3ok->obj.post_op_fh3_u.handle,
                                            pfsal_handle, pexport) == 0)
                        {
                          /* handle encoding failed: free the buffer we just
                           * allocated before reporting the error */
                          gsh_free(d3ok->obj.post_op_fh3_u.handle.data.data_val);
                          pres->res_mkdir3.status = NFS3ERR_INVAL;
                          rc = NFS_REQ_OK;
                          goto out;
                        }

                      /* Set Post Op Fh3 structure */
                      d3ok->obj.handle_follows = TRUE;

                      /*
                       * Build entry attributes
                       */
                      nfs_SetPostOpAttr(pexport, &attr, &d3ok->obj_attributes);

                      /* Get the attributes of the parent after the operation */
                      if(cache_inode_getattr(parent_pentry,
                                             &attr_parent_after,
                                             pcontext,
                                             &cache_status) != CACHE_INODE_SUCCESS)
                        {
                          pres->res_mkdir3.status = nfs3_Errno(cache_status);
                          rc = NFS_REQ_OK;
                          goto out;
                        }

                      /*
                       * Build Weak Cache Coherency data
                       */
                      nfs_SetWccData(pexport, ppre_attr, &attr_parent_after,
                                     &d3ok->dir_wcc);

                      pres->res_mkdir3.status = NFS3_OK;
                    }
                  rc = NFS_REQ_OK;
                  goto out;
                }
            }                   /* If( cache_status_lookup == CACHE_INODE_NOT_FOUND ) */
          else
            {
              /* Lookup found something (or failed): EXIST on success, generic
               * error otherwise */
              switch (preq->rq_vers)
                {
                case NFS_V2:
                  if(cache_status_lookup == CACHE_INODE_SUCCESS)
                    pres->res_dirop2.status = NFSERR_EXIST;
                  else
                    pres->res_dirop2.status = NFSERR_IO;
                  break;

                case NFS_V3:
                  if(cache_status_lookup == CACHE_INODE_SUCCESS)
                    pres->res_mkdir3.status = NFS3ERR_EXIST;
                  else
                    pres->res_mkdir3.status = NFS3ERR_INVAL;
                  nfs_SetWccData(pexport, ppre_attr, NULL,
                                 &(pres->res_mkdir3.MKDIR3res_u.resfail.dir_wcc));
                  break;
                }
              rc = NFS_REQ_OK;
              goto out;
            }
        }
    }

  /* If we are here, there was an error */
  rc = nfs_SetFailedStatus(pexport, preq->rq_vers, cache_status,
                           &pres->res_dirop2.status, &pres->res_mkdir3.status,
                           NULL, ppre_attr,
                           &(pres->res_mkdir3.MKDIR3res_u.resfail.dir_wcc),
                           NULL, NULL);

out:
  /* return references */
  if (dir_pentry)
    cache_inode_put(dir_pentry);

  if (parent_pentry)
    cache_inode_put(parent_pentry);

  return (rc);
}
int nfs_Readlink(nfs_arg_t * parg, exportlist_t * pexport, fsal_op_context_t * pcontext, cache_inode_client_t * pclient, hash_table_t * ht, struct svc_req *preq, nfs_res_t * pres) { static char __attribute__ ((__unused__)) funcName[] = "nfs_Readlink"; cache_entry_t *pentry = NULL; fsal_attrib_list_t attr; cache_inode_file_type_t filetype; cache_inode_status_t cache_status; int rc; fsal_path_t symlink_data; char *ptr = NULL; if(preq->rq_vers == NFS_V3) { /* to avoid setting it on each error case */ pres->res_readlink3.READLINK3res_u.resfail.symlink_attributes.attributes_follow = FALSE; } /* Convert file handle into a vnode */ if((pentry = nfs_FhandleToCache(preq->rq_vers, &(parg->arg_readlink2), &(parg->arg_readlink3.symlink), NULL, &(pres->res_readlink2.status), &(pres->res_readlink3.status), NULL, &attr, pcontext, pclient, ht, &rc)) == NULL) { /* Stale NFS FH ? */ return rc; } /* Extract the filetype */ filetype = cache_inode_fsal_type_convert(attr.type); /* Sanity Check: the pentry must be a link */ if(filetype != SYMBOLIC_LINK) { switch (preq->rq_vers) { case NFS_V2: pres->res_readlink2.status = NFSERR_IO; break; case NFS_V3: pres->res_readlink3.status = NFS3ERR_INVAL; } /* switch */ return NFS_REQ_OK; } /* if */ /* Perform readlink on the pentry */ if(cache_inode_readlink(pentry, &symlink_data, ht, pclient, pcontext, &cache_status) == CACHE_INODE_SUCCESS) { if((ptr = Mem_Alloc(FSAL_MAX_NAME_LEN)) == NULL) { switch (preq->rq_vers) { case NFS_V2: pres->res_readlink2.status = NFSERR_NXIO; break; case NFS_V3: pres->res_readlink3.status = NFS3ERR_IO; } /* switch */ return NFS_REQ_OK; } strcpy(ptr, symlink_data.path); /* Reply to the client (think about Mem_Free data after use ) */ switch (preq->rq_vers) { case NFS_V2: pres->res_readlink2.READLINK2res_u.data = ptr; pres->res_readlink2.status = NFS_OK; break; case NFS_V3: pres->res_readlink3.READLINK3res_u.resok.data = ptr; nfs_SetPostOpAttr(pcontext, pexport, pentry, &attr, &(pres->res_readlink3.READLINK3res_u.resok. 
symlink_attributes)); pres->res_readlink3.status = NFS3_OK; break; } return NFS_REQ_OK; } /* If we are here, there was an error */ if(nfs_RetryableError(cache_status)) { return NFS_REQ_DROP; } nfs_SetFailedStatus(pcontext, pexport, preq->rq_vers, cache_status, &pres->res_readlink2.status, &pres->res_readlink3.status, pentry, &(pres->res_readlink3.READLINK3res_u.resfail.symlink_attributes), NULL, NULL, NULL, NULL, NULL, NULL); return NFS_REQ_OK; } /* nfs_Readlink */
/** * * cache_inode_get: Gets an entry by using its fsdata as a key and caches it if needed. * * Gets an entry by using its fsdata as a key and caches it if needed. * ASSUMPTION: DIR_CONT entries are always garbabbaged before their related DIR_BEGINNG * * @param fsdata [IN] file system data * @param pattr [OUT] pointer to the attributes for the result. * @param ht [IN] hash table used for the cache, unused in this call. * @param pclient [INOUT] ressource allocated by the client for the nfs management. * @param pcontext [IN] FSAL credentials * @param pstatus [OUT] returned status. * * @return the pointer to the entry is successfull, NULL otherwise. * */ cache_entry_t *cache_inode_get(cache_inode_fsal_data_t * pfsdata, fsal_attrib_list_t * pattr, hash_table_t * ht, cache_inode_client_t * pclient, fsal_op_context_t * pcontext, cache_inode_status_t * pstatus) { hash_buffer_t key, value; cache_entry_t *pentry = NULL; fsal_status_t fsal_status; cache_inode_create_arg_t create_arg; cache_inode_file_type_t type; int hrc = 0; fsal_attrib_list_t fsal_attributes; cache_inode_fsal_data_t *ppoolfsdata = NULL; /* Set the return default to CACHE_INODE_SUCCESS */ *pstatus = CACHE_INODE_SUCCESS; /* stats */ pclient->stat.nb_call_total += 1; pclient->stat.func_stats.nb_call[CACHE_INODE_GET] += 1; /* Turn the input to a hash key */ if(cache_inode_fsaldata_2_key(&key, pfsdata, pclient)) { *pstatus = CACHE_INODE_UNAPPROPRIATED_KEY; /* stats */ pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_GET] += 1; ppoolfsdata = (cache_inode_fsal_data_t *) key.pdata; RELEASE_PREALLOC(ppoolfsdata, pclient->pool_key, next_alloc); return NULL; } switch (hrc = HashTable_Get(ht, &key, &value)) { case HASHTABLE_SUCCESS: /* Entry exists in the cache and was found */ pentry = (cache_entry_t *) value.pdata; /* return attributes additionally */ cache_inode_get_attributes(pentry, pattr); break; case HASHTABLE_ERROR_NO_SUCH_KEY: /* Cache miss, allocate a new entry */ /* If we ask for a dir cont (in this 
case pfsdata.cookie != FSAL_DIR_BEGINNING, we have * a client who performs a readdir in the middle of a directory, when the direcctories * have been garbbage. we must search for the DIR_BEGIN related to this DIR_CONT */ if(pfsdata->cookie != DIR_START) { /* added for sanity check */ LogDebug(COMPONENT_CACHE_INODE_GC, "=======> Pb cache_inode_get: line %u pfsdata->cookie != DIR_START (=%u) on object whose type is %u", __LINE__, pfsdata->cookie, cache_inode_fsal_type_convert(fsal_attributes.type)); pfsdata->cookie = DIR_START; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); /* redo the call */ return cache_inode_get(pfsdata, pattr, ht, pclient, pcontext, pstatus); } /* First, call FSAL to know what the object is */ fsal_attributes.asked_attributes = pclient->attrmask; fsal_status = FSAL_getattrs(&pfsdata->handle, pcontext, &fsal_attributes); if(FSAL_IS_ERROR(fsal_status)) { *pstatus = cache_inode_error_convert(fsal_status); LogDebug(COMPONENT_CACHE_INODE_GC, "cache_inode_get: line %u cache_inode_status=%u fsal_status=%u,%u ", __LINE__, *pstatus, fsal_status.major, fsal_status.minor); if(fsal_status.major == ERR_FSAL_STALE) { char handle_str[256]; snprintHandle(handle_str, 256, &pfsdata->handle); LogEvent(COMPONENT_CACHE_INODE_GC,"cache_inode_get: Stale FSAL File Handle %s", handle_str); *pstatus = CACHE_INODE_FSAL_ESTALE; } /* stats */ pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_GET] += 1; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); return NULL; } /* The type has to be set in the attributes */ if(!FSAL_TEST_MASK(fsal_attributes.supported_attributes, FSAL_ATTR_TYPE)) { *pstatus = CACHE_INODE_FSAL_ERROR; /* stats */ pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_GET] += 1; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); return NULL; } /* Get the cache_inode file type */ type = cache_inode_fsal_type_convert(fsal_attributes.type); if(type == SYMBOLIC_LINK) { 
FSAL_CLEAR_MASK(fsal_attributes.asked_attributes); FSAL_SET_MASK(fsal_attributes.asked_attributes, pclient->attrmask); fsal_status = FSAL_readlink(&pfsdata->handle, pcontext, &create_arg.link_content, &fsal_attributes); if(FSAL_IS_ERROR(fsal_status)) { *pstatus = cache_inode_error_convert(fsal_status); /* stats */ pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_GET] += 1; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); if(fsal_status.major == ERR_FSAL_STALE) { cache_inode_status_t kill_status; LogDebug(COMPONENT_CACHE_INODE_GC, "cache_inode_get: Stale FSAL File Handle detected for pentry = %p", pentry); if(cache_inode_kill_entry(pentry, ht, pclient, &kill_status) != CACHE_INODE_SUCCESS) LogCrit(COMPONENT_CACHE_INODE_GC,"cache_inode_get: Could not kill entry %p, status = %u", pentry, kill_status); *pstatus = CACHE_INODE_FSAL_ESTALE; } return NULL; } } /* Add the entry to the cache */ if((pentry = cache_inode_new_entry(pfsdata, &fsal_attributes, type, &create_arg, NULL, /* never used to add a new DIR_CONTINUE within the scope of this function */ ht, pclient, pcontext, FALSE, /* This is a population, not a creation */ pstatus)) == NULL) { /* stats */ pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_GET] += 1; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); return NULL; } /* Set the returned attributes */ *pattr = fsal_attributes; /* Now, exit the switch/case and returns */ break; default: /* This should not happened */ *pstatus = CACHE_INODE_INVALID_ARGUMENT; /* stats */ pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_GET] += 1; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); return NULL; break; } *pstatus = CACHE_INODE_SUCCESS; /* valid the found entry, if this is not feasable, returns nothing to the client */ P_w(&pentry->lock); if((*pstatus = cache_inode_valid(pentry, CACHE_INODE_OP_GET, pclient)) != CACHE_INODE_SUCCESS) { V_w(&pentry->lock); pentry = NULL; } V_w(&pentry->lock); /* 
stats */ pclient->stat.func_stats.nb_success[CACHE_INODE_GET] += 1; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); return pentry; } /* cache_inode_get */
int nfs3_Readdirplus(nfs_arg_t * parg, exportlist_t * pexport, fsal_op_context_t * pcontext, cache_inode_client_t * pclient, hash_table_t * ht, struct svc_req *preq, nfs_res_t * pres) { static char __attribute__ ((__unused__)) funcName[] = "nfs3_Readdirplus"; typedef char entry_name_array_item_t[FSAL_MAX_NAME_LEN]; typedef char fh3_buffer_item_t[NFS3_FHSIZE]; unsigned int delta = 0; cache_entry_t *dir_pentry = NULL; cache_entry_t *pentry_dot_dot = NULL; unsigned long dircount; unsigned long maxcount; fsal_attrib_list_t dir_attr; fsal_attrib_list_t entry_attr; unsigned int begin_cookie; unsigned int end_cookie; unsigned int cache_inode_cookie; cache_inode_dir_entry_t *dirent_array = NULL; unsigned int *cookie_array = NULL; cookieverf3 cookie_verifier; int rc; unsigned int i = 0; unsigned int num_entries; unsigned long space_used; unsigned long estimated_num_entries; unsigned long asked_num_entries; cache_inode_file_type_t dir_filetype; cache_inode_endofdir_t eod_met = UNASSIGNED_EOD; cache_inode_status_t cache_status; cache_inode_status_t cache_status_gethandle; fsal_handle_t *pfsal_handle = NULL; entry_name_array_item_t *entry_name_array = NULL; fh3_buffer_item_t *fh3_array = NULL; /* to avoid setting it on each error case */ pres->res_readdir3.READDIR3res_u.resfail.dir_attributes.attributes_follow = FALSE; dircount = parg->arg_readdirplus3.dircount; maxcount = parg->arg_readdirplus3.maxcount; begin_cookie = (unsigned int)parg->arg_readdirplus3.cookie; space_used = sizeof(READDIRPLUS3resok); estimated_num_entries = dircount / sizeof(entryplus3); LogFullDebug(COMPONENT_NFS_READDIR, "---> nfs3_Readdirplus: dircount=%d maxcount=%d begin_cookie=%d space_used=%d estimated_num_entries=%d\n", dircount, maxcount, begin_cookie, space_used, estimated_num_entries); /* Is this a xattr FH ? 
*/ if(nfs3_Is_Fh_Xattr(&(parg->arg_readdirplus3.dir))) return nfs3_Readdirplus_Xattr(parg, pexport, pcontext, pclient, ht, preq, pres); /* Convert file handle into a vnode */ /* BUGAZOMEU : rajouter acces direct au DIR_CONTINUE */ if((dir_pentry = nfs_FhandleToCache(preq->rq_vers, NULL, &(parg->arg_readdirplus3.dir), NULL, NULL, &(pres->res_readdirplus3.status), NULL, &dir_attr, pcontext, pclient, ht, &rc)) == NULL) { /* return NFS_REQ_DROP ; */ return rc; } /* Extract the filetype */ dir_filetype = cache_inode_fsal_type_convert(dir_attr.type); /* Sanity checks -- must be a directory */ if((dir_filetype != DIR_BEGINNING) && (dir_filetype != DIR_CONTINUE)) { pres->res_readdirplus3.status = NFS3ERR_NOTDIR; return NFS_REQ_OK; } /* switch */ memset(cookie_verifier, 0, sizeof(cookieverf3)); /* * If cookie verifier is used, then an non-trivial value is * returned to the client This value is the mtime of * the directory. If verifier is unused (as in many NFS * Servers) then only a set of zeros is returned (trivial * value) */ if(pexport->UseCookieVerifier) memcpy(cookie_verifier, &(dir_attr.mtime), sizeof(dir_attr.mtime)); /* * nothing to do if != 0 because the area is already full of * zero */ if(pexport->UseCookieVerifier && (begin_cookie != 0)) { /* * Not the first call, so we have to check the cookie * verifier */ if(memcmp(cookie_verifier, parg->arg_readdirplus3.cookieverf, NFS3_COOKIEVERFSIZE) != 0) { pres->res_readdirplus3.status = NFS3ERR_BAD_COOKIE; return NFS_REQ_OK; } } #ifdef _DEBUG_MEMLEAKS /* For debugging memory leaks */ BuddySetDebugLabel("cache_inode_dir_entry_t in nfs3_Readdirplus"); #endif if((dirent_array = (cache_inode_dir_entry_t *) Mem_Alloc(estimated_num_entries * sizeof(cache_inode_dir_entry_t))) == NULL) { pres->res_readdirplus3.status = NFS3ERR_IO; return NFS_REQ_DROP; } #ifdef _DEBUG_MEMLEAKS /* For debugging memory leaks */ BuddySetDebugLabel("cookie array in nfs3_Readdirplus"); #endif if((cookie_array = (unsigned int 
*)Mem_Alloc(estimated_num_entries * sizeof(unsigned int))) == NULL) { Mem_Free((char *)dirent_array); pres->res_readdirplus3.status = NFS3ERR_IO; return NFS_REQ_DROP; } pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries = NULL; pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.eof = FALSE; /* How many entries will we retry from cache_inode ? */ if(begin_cookie > 1) { asked_num_entries = estimated_num_entries; cache_inode_cookie = begin_cookie - 2; } else { asked_num_entries = ((estimated_num_entries > 2) ? estimated_num_entries - 2 : 0); /* Keep space for '.' and '..' */ cache_inode_cookie = 0; } /* A definition that will be very useful to avoid very long names for variables */ #define RES_READDIRPLUS_REPLY pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply /* Call readdir */ if(cache_inode_readdir(dir_pentry, cache_inode_cookie, asked_num_entries, &num_entries, &end_cookie, &eod_met, dirent_array, cookie_array, ht, pclient, pcontext, &cache_status) == CACHE_INODE_SUCCESS) { LogFullDebug(COMPONENT_NFS_READDIR, "-- Readdirplus3 -> Call to cache_inode_readdir( cookie=%d, asked=%d ) -> num_entries = %d\n", cache_inode_cookie, asked_num_entries, num_entries); if(eod_met == END_OF_DIR) { LogFullDebug(COMPONENT_NFS_READDIR, "+++++++++++++++++++++++++++++++++++++++++> EOD MET \n"); } /* If nothing was found, return nothing, but if cookie=0, we should return . and .. */ if((num_entries == 0) && (asked_num_entries != 0) && (begin_cookie > 1)) { pres->res_readdirplus3.status = NFS3_OK; pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries = NULL; pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.eof = TRUE; nfs_SetPostOpAttr(pcontext, pexport, dir_pentry, NULL, &(pres->res_readdirplus3.READDIRPLUS3res_u.resok. 
dir_attributes)); memcpy(pres->res_readdirplus3.READDIRPLUS3res_u.resok.cookieverf, cookie_verifier, sizeof(cookieverf3)); } else { #ifdef _DEBUG_MEMLEAKS /* For debugging memory leaks */ BuddySetDebugLabel("entry_name_array in nfs3_Readdirplus"); #endif /* Allocation of the structure for reply */ entry_name_array = (entry_name_array_item_t *) Mem_Alloc(estimated_num_entries * (FSAL_MAX_NAME_LEN + 1)); if(entry_name_array == NULL) { Mem_Free((char *)dirent_array); Mem_Free((char *)cookie_array); return NFS_REQ_DROP; } #ifdef _DEBUG_MEMLEAKS /* For debugging memory leaks */ BuddySetDebugLabel("READDIRPLUS3res_u.resok.reply.entries"); #endif pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries = (entryplus3 *) Mem_Alloc(estimated_num_entries * sizeof(entryplus3)); if(pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries == NULL) { Mem_Free((char *)dirent_array); Mem_Free((char *)cookie_array); Mem_Free((char *)entry_name_array); return NFS_REQ_DROP; } /* Allocation of the file handles */ #ifdef _DEBUG_MEMLEAKS /* For debugging memory leaks */ BuddySetDebugLabel("Filehandle V3 in nfs3_Readdirplus"); #endif fh3_array = (fh3_buffer_item_t *) Mem_Alloc(estimated_num_entries * NFS3_FHSIZE); #ifdef _DEBUG_MEMLEAKS /* For debugging memory leaks */ BuddySetDebugLabel("N/A"); #endif if(fh3_array == NULL) { Mem_Free((char *)dirent_array); Mem_Free((char *)cookie_array); Mem_Free((char *)entry_name_array); return NFS_REQ_DROP; } delta = 0; /* manage . and .. */ if(begin_cookie == 0) { /* Fill in '.' 
*/ if(estimated_num_entries > 0) { if((pfsal_handle = cache_inode_get_fsal_handle(dir_pentry, &cache_status_gethandle)) == NULL) { Mem_Free((char *)dirent_array); Mem_Free((char *)cookie_array); Mem_Free((char *)entry_name_array); Mem_Free((char *)fh3_array); pres->res_readdirplus3.status = nfs3_Errno(cache_status_gethandle); return NFS_REQ_OK; } FSAL_DigestHandle(FSAL_GET_EXP_CTX(pcontext), FSAL_DIGEST_FILEID3, pfsal_handle, (caddr_t) & (RES_READDIRPLUS_REPLY.entries[0]. fileid)); RES_READDIRPLUS_REPLY.entries[0].name = entry_name_array[0]; strcpy(RES_READDIRPLUS_REPLY.entries[0].name, "."); RES_READDIRPLUS_REPLY.entries[0].cookie = 1; pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries[0]. name_handle.post_op_fh3_u.handle.data.data_val = (char *)fh3_array[0]; if(nfs3_FSALToFhandle (&pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries[0]. name_handle.post_op_fh3_u.handle, pfsal_handle, pexport) == 0) { Mem_Free((char *)dirent_array); Mem_Free((char *)cookie_array); Mem_Free((char *)entry_name_array); Mem_Free((char *)fh3_array); pres->res_readdirplus3.status = NFS3ERR_BADHANDLE; return NFS_REQ_OK; } RES_READDIRPLUS_REPLY.entries[0].name_attributes.attributes_follow = FALSE; RES_READDIRPLUS_REPLY.entries[0].name_handle.handle_follows = FALSE; cache_inode_get_attributes(dir_pentry, &entry_attr); /* Set PostPoFh3 structure */ pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries[0]. name_handle.handle_follows = TRUE; pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries[0]. name_handle.post_op_fh3_u.handle.data.data_len = sizeof(file_handle_v3_t); nfs_SetPostOpAttr(pcontext, pexport, dir_pentry, &entry_attr, &(pres->res_readdirplus3.READDIRPLUS3res_u.resok. reply.entries[0].name_attributes)); delta += 1; } } /* Fill in '..' 
*/ if(begin_cookie <= 1) { if(estimated_num_entries > delta) { if((pentry_dot_dot = cache_inode_lookupp(dir_pentry, ht, pclient, pcontext, &cache_status_gethandle)) == NULL) { Mem_Free((char *)dirent_array); Mem_Free((char *)cookie_array); Mem_Free((char *)entry_name_array); Mem_Free((char *)fh3_array); pres->res_readdirplus3.status = nfs3_Errno(cache_status_gethandle); return NFS_REQ_OK; } if((pfsal_handle = cache_inode_get_fsal_handle(pentry_dot_dot, &cache_status_gethandle)) == NULL) { Mem_Free((char *)dirent_array); Mem_Free((char *)cookie_array); Mem_Free((char *)entry_name_array); Mem_Free((char *)fh3_array); pres->res_readdirplus3.status = nfs3_Errno(cache_status_gethandle); return NFS_REQ_OK; } FSAL_DigestHandle(FSAL_GET_EXP_CTX(pcontext), FSAL_DIGEST_FILEID3, pfsal_handle, (caddr_t) & (RES_READDIRPLUS_REPLY.entries[delta]. fileid)); RES_READDIRPLUS_REPLY.entries[delta].name = entry_name_array[delta]; strcpy(RES_READDIRPLUS_REPLY.entries[delta].name, ".."); /* Getting a file handle */ pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries[delta]. name_handle.post_op_fh3_u.handle.data.data_val = (char *)fh3_array[delta]; if(nfs3_FSALToFhandle (&pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries[0]. name_handle.post_op_fh3_u.handle, pfsal_handle, pexport) == 0) { Mem_Free((char *)dirent_array); Mem_Free((char *)cookie_array); Mem_Free((char *)entry_name_array); Mem_Free((char *)fh3_array); pres->res_readdirplus3.status = NFS3ERR_BADHANDLE; return NFS_REQ_OK; } RES_READDIRPLUS_REPLY.entries[delta].cookie = 2; RES_READDIRPLUS_REPLY.entries[delta].name_attributes.attributes_follow = FALSE; RES_READDIRPLUS_REPLY.entries[delta].name_handle.handle_follows = FALSE; cache_inode_get_attributes(pentry_dot_dot, &entry_attr); /* Set PostPoFh3 structure */ pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries[delta]. name_handle.handle_follows = TRUE; pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries[delta]. 
name_handle.post_op_fh3_u.handle.data.data_len = sizeof(file_handle_v3_t); nfs_SetPostOpAttr(pcontext, pexport, pentry_dot_dot, &entry_attr, &(pres->res_readdirplus3.READDIRPLUS3res_u.resok. reply.entries[delta].name_attributes)); } RES_READDIRPLUS_REPLY.entries[0].nextentry = &(RES_READDIRPLUS_REPLY.entries[delta]); if(num_entries > delta + 1) /* not 0 ??? */ RES_READDIRPLUS_REPLY.entries[delta].nextentry = &(RES_READDIRPLUS_REPLY.entries[delta + 1]); else RES_READDIRPLUS_REPLY.entries[delta].nextentry = NULL; delta += 1; } /* if( begin_cookie == 0 ) */ for(i = delta; i < num_entries + delta; i++) { unsigned long needed; /* dircount is the size without the FH and attributes overhead, so entry3 is used intead of entryplus3 */ needed = sizeof(entry3) + ((strlen(dirent_array[i - delta].name.name) + 3) & ~3); /* LogFullDebug(COMPONENT_NFS_READDIR, "==============> i=%d sizeof(entryplus3)=%d needed=%d space_used=%d maxcount=%d num_entries=%d asked_num_entries=%d\n", i, sizeof( entryplus3 ), needed, space_used, maxcount, num_entries, asked_num_entries ) ; */ if((space_used += needed) > maxcount) { if(i == delta) { /* * Not enough room to make even a single reply */ Mem_Free((char *)dirent_array); Mem_Free((char *)cookie_array); Mem_Free((char *)entry_name_array); Mem_Free((char *)fh3_array); pres->res_readdirplus3.status = NFS3ERR_TOOSMALL; return NFS_REQ_OK; } break; /* Make post traitement */ } /* * Get information specific to this entry */ if((pfsal_handle = cache_inode_get_fsal_handle(dirent_array[i - delta].pentry, &cache_status_gethandle)) == NULL) { Mem_Free((char *)dirent_array); Mem_Free((char *)cookie_array); Mem_Free((char *)entry_name_array); Mem_Free((char *)fh3_array); pres->res_readdirplus3.status = nfs3_Errno(cache_status_gethandle); return NFS_REQ_OK; } /* Now fill in the replyed entryplus3 list */ FSAL_DigestHandle(FSAL_GET_EXP_CTX(pcontext), FSAL_DIGEST_FILEID3, pfsal_handle, (caddr_t) & (RES_READDIRPLUS_REPLY.entries[i].fileid)); 
FSAL_name2str(&dirent_array[i - delta].name, entry_name_array[i], FSAL_MAX_NAME_LEN); RES_READDIRPLUS_REPLY.entries[i].name = entry_name_array[i]; if(i != num_entries + delta - 1) RES_READDIRPLUS_REPLY.entries[i].cookie = cookie_array[i + 1 - delta] + 2; else RES_READDIRPLUS_REPLY.entries[i].cookie = end_cookie + 2; RES_READDIRPLUS_REPLY.entries[i].name_attributes.attributes_follow = FALSE; RES_READDIRPLUS_REPLY.entries[i].name_handle.handle_follows = FALSE; cache_inode_get_attributes(dirent_array[i - delta].pentry, &entry_attr); pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries[i].name_handle. post_op_fh3_u.handle.data.data_val = (char *)fh3_array[i]; /* Compute the NFSv3 file handle */ if(nfs3_FSALToFhandle (&pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries[i]. name_handle.post_op_fh3_u.handle, pfsal_handle, pexport) == 0) { Mem_Free((char *)dirent_array); Mem_Free((char *)cookie_array); Mem_Free((char *)entry_name_array); Mem_Free((char *)fh3_array); pres->res_readdirplus3.status = NFS3ERR_BADHANDLE; return NFS_REQ_OK; } LogFullDebug(COMPONENT_NFS_READDIR, "-- Readdirplus3 -> i=%d num_entries=%d needed=%d space_used=%lu maxcount=%lu Name=%s FileId=%llu Cookie=%llu\n", i, num_entries, needed, space_used, maxcount, dirent_array[i - delta].name.name, RES_READDIRPLUS_REPLY.entries[i].fileid, RES_READDIRPLUS_REPLY.entries[i].cookie); /* Set PostPoFh3 structure */ pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries[i].name_handle. handle_follows = TRUE; pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.entries[i].name_handle. post_op_fh3_u.handle.data.data_len = sizeof(file_handle_v3_t); nfs_SetPostOpAttr(pcontext, pexport, dirent_array[i - delta].pentry, &entry_attr, &(pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply. 
entries[i].name_attributes)); RES_READDIRPLUS_REPLY.entries[i].nextentry = NULL; if(i != 0) RES_READDIRPLUS_REPLY.entries[i - 1].nextentry = &(RES_READDIRPLUS_REPLY.entries[i]); } pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.eof = FALSE; } nfs_SetPostOpAttr(pcontext, pexport, dir_pentry, &dir_attr, &(pres->res_readdirplus3.READDIRPLUS3res_u.resok.dir_attributes)); memcpy(pres->res_readdirplus3.READDIRPLUS3res_u.resok.cookieverf, cookie_verifier, sizeof(cookieverf3)); pres->res_readdirplus3.status = NFS3_OK; if((eod_met == END_OF_DIR) && (i == num_entries + delta)) { /* End of directory */ LogFullDebug(COMPONENT_NFS_READDIR, "============================================================> EOD MET !!!!!!\n"); pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.eof = TRUE; } else pres->res_readdirplus3.READDIRPLUS3res_u.resok.reply.eof = FALSE; nfs_SetPostOpAttr(pcontext, pexport, dir_pentry, &dir_attr, &(pres->res_readdirplus3.READDIRPLUS3res_u.resok.dir_attributes)); memcpy(pres->res_readdirplus3.READDIRPLUS3res_u.resok.cookieverf, cookie_verifier, sizeof(cookieverf3)); LogFullDebug(COMPONENT_NFS_READDIR,"============================================================\n"); /* Free the memory */ Mem_Free((char *)dirent_array); Mem_Free((char *)cookie_array); return NFS_REQ_OK; } /* If we are here, there was an error */ /* Free the memory */ Mem_Free((char *)dirent_array); Mem_Free((char *)cookie_array); Mem_Free((char *)entry_name_array); Mem_Free((char *)fh3_array); /* Is this a retryable error */ if(nfs_RetryableError(cache_status)) return NFS_REQ_DROP; /* Set failed status */ nfs_SetFailedStatus(pcontext, pexport, NFS_V3, cache_status, NULL, &pres->res_readdirplus3.status, dir_pentry, &(pres->res_readdirplus3.READDIRPLUS3res_u.resfail.dir_attributes), NULL, NULL, NULL, NULL, NULL, NULL); return NFS_REQ_OK; } /* nfs3_Readdirplus */
/**
 * nfs_Read: The NFS READ procedure (NFSv2 and NFSv3).
 *
 * Reads up to 'count' bytes at 'offset' from the file designated by the
 * request's file handle and returns them in the reply, clamped by the
 * export's MaxRead / MaxOffsetRead limits where those options are set.
 *
 * @param parg     [IN]  NFS arguments (read2 / read3 depending on version)
 * @param pexport  [IN]  export entry for this file system
 * @param pcontext [IN]  FSAL credentials
 * @param pworker  [IN]  worker thread data
 * @param preq     [IN]  RPC request (supplies the NFS version)
 * @param pres     [OUT] NFS reply
 *
 * @return NFS_REQ_OK (success or NFS-level error carried in pres),
 *         NFS_REQ_DROP on allocation failure or retryable cache error.
 */
int nfs_Read(nfs_arg_t *parg,
             exportlist_t *pexport,
             fsal_op_context_t *pcontext,
             nfs_worker_data_t *pworker,
             struct svc_req *preq,
             nfs_res_t *pres)
{
  cache_entry_t *pentry;
  fsal_attrib_list_t attr;        /* attributes after the read */
  fsal_attrib_list_t pre_attr;    /* attributes fetched with the handle */
  cache_inode_status_t cache_status = CACHE_INODE_SUCCESS;
  size_t size = 0;
  size_t read_size = 0;
  fsal_off_t offset = 0;
  void *data = NULL;
  cache_inode_file_type_t filetype;
  fsal_boolean_t eof_met=FALSE;
  int rc = NFS_REQ_OK;

  if(isDebug(COMPONENT_NFSPROTO))
    {
      char str[LEN_FH_STR];

      /* offset/size extracted here only for the debug trace; they are
       * re-extracted below on the real path */
      switch (preq->rq_vers)
        {
        case NFS_V2:
          offset = parg->arg_read2.offset;
          size = parg->arg_read2.count;
          break;
        case NFS_V3:
          offset = parg->arg_read3.offset;
          size = parg->arg_read3.count;
        }

      nfs_FhandleToStr(preq->rq_vers,
                       &(parg->arg_read2.file),
                       &(parg->arg_read3.file),
                       NULL,
                       str);
      LogDebug(COMPONENT_NFSPROTO,
               "REQUEST PROCESSING: Calling nfs_Read handle: %s start: %llx len: %llx",
               str,
               (unsigned long long) offset,
               (unsigned long long) size);
    }

  if(preq->rq_vers == NFS_V3)
    {
      /* to avoid setting it on each error case */
      pres->res_read3.READ3res_u.resfail.file_attributes.attributes_follow = FALSE;
      /* initialize for read of size 0 */
      pres->res_read3.READ3res_u.resok.eof = FALSE;
      pres->res_read3.READ3res_u.resok.count = 0;
      pres->res_read3.READ3res_u.resok.data.data_val = NULL;
      pres->res_read3.READ3res_u.resok.data.data_len = 0;
      pres->res_read3.status = NFS3_OK;
    }
  else if(preq->rq_vers == NFS_V2)
    {
      /* initialize for read of size 0 */
      pres->res_read2.READ2res_u.readok.data.nfsdata2_val = NULL;
      pres->res_read2.READ2res_u.readok.data.nfsdata2_len = 0;
      pres->res_attr2.status = NFS_OK;
    }

  /* Convert file handle into a cache entry */
  if((pentry = nfs_FhandleToCache(preq->rq_vers,
                                  &(parg->arg_read2.file),
                                  &(parg->arg_read3.file),
                                  NULL,
                                  &(pres->res_read2.status),
                                  &(pres->res_read3.status),
                                  NULL, &pre_attr, pcontext, &rc)) == NULL)
    {
      /* Stale NFS FH ? */
      goto out;
    }

  /* xattr pseudo-handles get their own read path */
  if((preq->rq_vers == NFS_V3) && (nfs3_Is_Fh_Xattr(&(parg->arg_read3.file))))
    {
      rc = nfs3_Read_Xattr(parg, pexport, pcontext, preq, pres);
      goto out;
    }

  if(cache_inode_access(pentry,
                        FSAL_READ_ACCESS,
                        pcontext,
                        &cache_status) != CACHE_INODE_SUCCESS)
    {
      switch (preq->rq_vers)
        {
        case NFS_V2:
          pres->res_attr2.status = nfs2_Errno(cache_status);
          break;
        case NFS_V3:
          pres->res_read3.status = nfs3_Errno(cache_status);
          break;
        }
      rc = NFS_REQ_OK;
      goto out;
    }

  /* Extract the filetype */
  filetype = cache_inode_fsal_type_convert(pre_attr.type);

  /* Sanity check: read only from a regular file */
  if(filetype != REGULAR_FILE)
    {
      switch (preq->rq_vers)
        {
        case NFS_V2:
          /*
           * In the RFC tell it not good but it does
           * not tell what to do ...
           */
          pres->res_attr2.status = NFSERR_ISDIR;
          break;
        case NFS_V3:
          if(filetype == DIRECTORY)
            pres->res_read3.status = NFS3ERR_ISDIR;
          else
            pres->res_read3.status = NFS3ERR_INVAL;
          break;
        }
      rc = NFS_REQ_OK;
      goto out;
    }

  /* For MDONLY export, reject write operation */
  /* Request of type MDONLY_RO were rejected at the nfs_rpc_dispatcher level */
  /* This is done by replying EDQUOT (this error is known for not disturbing
   * the client's requests cache) */
  if(pexport->access_type == ACCESSTYPE_MDONLY
     || pexport->access_type == ACCESSTYPE_MDONLY_RO)
    {
      switch (preq->rq_vers)
        {
        case NFS_V2:
          pres->res_attr2.status = NFSERR_DQUOT;
          break;
        case NFS_V3:
          pres->res_read3.status = NFS3ERR_DQUOT;
          break;
        }

      nfs_SetFailedStatus(pcontext, pexport,
                          preq->rq_vers,
                          cache_status,
                          &pres->res_read2.status,
                          &pres->res_read3.status,
                          pentry,
                          &(pres->res_read3.READ3res_u.resfail.file_attributes),
                          NULL, NULL, NULL, NULL, NULL, NULL);

      rc = NFS_REQ_OK;
      goto out;
    }

  /* Extract the argument from the request */
  switch (preq->rq_vers)
    {
    case NFS_V2:
      offset = parg->arg_read2.offset;  /* beginoffset is obsolete */
      size = parg->arg_read2.count;     /* totalcount is obsolete */
      break;
    case NFS_V3:
      offset = parg->arg_read3.offset;
      size = parg->arg_read3.count;
      break;
    }

  /* do not exceed maximum READ offset if set */
  if((pexport->options & EXPORT_OPTION_MAXOFFSETREAD) == EXPORT_OPTION_MAXOFFSETREAD)
    {
      LogFullDebug(COMPONENT_NFSPROTO,
                   "-----> Read offset=%llu count=%llu MaxOffSet=%llu",
                   (unsigned long long) offset,
                   (unsigned long long) size,
                   (unsigned long long) pexport->MaxOffsetRead);

      if((fsal_off_t) (offset + size) > pexport->MaxOffsetRead)
        {
          LogEvent(COMPONENT_NFSPROTO,
                   "NFS READ: A client tryed to violate max file size %llu for exportid #%hu",
                   (unsigned long long) pexport->MaxOffsetRead, pexport->id);

          switch (preq->rq_vers)
            {
            case NFS_V2:
              pres->res_attr2.status = NFSERR_DQUOT;
              break;
            case NFS_V3:
              pres->res_read3.status = NFS3ERR_INVAL;
              break;
            }

          nfs_SetFailedStatus(pcontext, pexport,
                              preq->rq_vers,
                              cache_status,
                              &pres->res_read2.status,
                              &pres->res_read3.status,
                              pentry,
                              &(pres->res_read3.READ3res_u.resfail.file_attributes),
                              NULL, NULL, NULL, NULL, NULL, NULL);

          rc = NFS_REQ_OK;
          goto out;
        }
    }

  /*
   * We should not exceed the FSINFO rtmax field for the size: the client
   * asked for too much, normally this should not happen because the
   * client calls nfs_Fsinfo at mount time and so is aware of the server
   * maximum read size — clamp silently.
   */
  if(((pexport->options & EXPORT_OPTION_MAXREAD) == EXPORT_OPTION_MAXREAD)
     && size > pexport->MaxRead)
    {
      size = pexport->MaxRead;
    }

  if(size == 0)
    {
      /* zero-length read: reply OK with the pre-op attributes */
      nfs_read_ok(pexport, preq, pres, NULL, 0, &pre_attr, 0);
      rc = NFS_REQ_OK;
      goto out;
    }
  else
    {
      data = gsh_malloc(size);
      if(data == NULL)
        {
          rc = NFS_REQ_DROP;
          goto out;
        }

      if((cache_inode_rdwr(pentry,
                           CACHE_INODE_READ,
                           offset,
                           size,
                           &read_size,
                           data,
                           &eof_met,
                           pcontext,
                           CACHE_INODE_SAFE_WRITE_TO_FS,
                           &cache_status) == CACHE_INODE_SUCCESS) &&
         (cache_inode_getattr(pentry, &attr, pcontext,
                              &cache_status)) == CACHE_INODE_SUCCESS)
        {
          /* eof flag: the read reached (or passed) the current file size */
          nfs_read_ok(pexport, preq, pres, data, read_size, &attr,
                      ((offset + read_size) >= attr.filesize));
          rc = NFS_REQ_OK;
          goto out;
        }
      /* read or getattr failed: release the data buffer before the
       * common error path */
      gsh_free(data);
    }

  /* If we are here, there was an error */
  if(nfs_RetryableError(cache_status))
    {
      rc = NFS_REQ_DROP;
      goto out;
    }

  nfs_SetFailedStatus(pcontext, pexport,
                      preq->rq_vers,
                      cache_status,
                      &pres->res_read2.status,
                      &pres->res_read3.status,
                      pentry,
                      &(pres->res_read3.READ3res_u.resfail.file_attributes),
                      NULL, NULL, NULL, NULL, NULL, NULL);

  rc = NFS_REQ_OK;

out:
  /* return references */
  if (pentry)
    cache_inode_put(pentry);

  return (rc);
}                               /* nfs_Read */
/**
 *
 * @brief Gets an entry by using its fsdata as a key and caches it if needed.
 *
 * Gets an entry by using its fsdata as a key and caches it if needed.
 *
 * If a cache entry is returned, its refcount is incremented by one.
 *
 * It turns out we do need cache_inode_get_located functionality for
 * cases like lookupp on an entry returning itself when it isn't a
 * root. Therefore, if the 'associated' parameter is equal to the got
 * cache entry, a reference count is incremented but the structure
 * pointed to by attr is NOT filled in.
 *
 * @param[in]  fsdata     File system data
 * @param[out] attr       The attributes of the got entry
 * @param[in]  context    FSAL credentials
 * @param[in]  associated Entry that may be equal to the got entry
 * @param[out] status     Returned status
 *
 * @return If successful, the pointer to the entry; NULL otherwise
 *
 */
cache_entry_t *
cache_inode_get(cache_inode_fsal_data_t *fsdata,
                fsal_attrib_list_t *attr,
                fsal_op_context_t *context,
                cache_entry_t *associated,
                cache_inode_status_t *status)
{
     hash_buffer_t key, value;
     cache_entry_t *entry = NULL;
     fsal_status_t fsal_status = {0, 0};
     cache_inode_create_arg_t create_arg = {
          .newly_created_dir = FALSE
     };
     cache_inode_file_type_t type = UNASSIGNED;
     hash_error_t hrc = 0;
     fsal_attrib_list_t fsal_attributes;
     fsal_handle_t *file_handle;
     struct hash_latch latch;

     /* Set the return default to CACHE_INODE_SUCCESS */
     *status = CACHE_INODE_SUCCESS;

     /* Turn the input to a hash key on our own: the raw handle
      * descriptor is the key */
     key.pdata = fsdata->fh_desc.start;
     key.len = fsdata->fh_desc.len;

     /* Latched lookup: the latch holds the bucket lock so the ref below
      * is taken before anyone can remove the entry */
     hrc = HashTable_GetLatch(fh_to_cache_entry_ht, &key, &value,
                              FALSE, &latch);

     if ((hrc != HASHTABLE_SUCCESS) &&
         (hrc != HASHTABLE_ERROR_NO_SUCH_KEY)) {
          /* This should not happened */
          *status = CACHE_INODE_HASH_TABLE_ERROR;
          LogCrit(COMPONENT_CACHE_INODE,
                  "Hash access failed with code %d"
                  " - this should not have happened",
                  hrc);
          return NULL;
     }

     if (hrc == HASHTABLE_SUCCESS) {
          /* Entry exists in the cache and was found */
          entry = value.pdata;
          /* take an extra reference within the critical section */
          if (cache_inode_lru_ref(entry, LRU_REQ_INITIAL) !=
              CACHE_INODE_SUCCESS) {
               /* Dead entry. Treat like a lookup failure. */
               entry = NULL;
          } else {
               if (entry == associated) {
                    /* Take a quick exit so we don't invert lock
                       ordering. Note: attr is intentionally NOT
                       filled in on this path (see brief above). */
                    HashTable_ReleaseLatched(fh_to_cache_entry_ht, &latch);
                    return entry;
               }
          }
     }
     HashTable_ReleaseLatched(fh_to_cache_entry_ht, &latch);

     if (!context) {
          /* Upcalls have no access to fsal_op_context_t,
             so just return the entry without revalidating it or
             creating a new one. */
          if (entry == NULL) {
               *status = CACHE_INODE_NOT_FOUND;
          }
          return entry;
     }

     if (!entry) {
          /* Cache miss, allocate a new entry */
          file_handle = (fsal_handle_t *) fsdata->fh_desc.start;
          /* First, call FSAL to know what the object is */
          fsal_attributes.asked_attributes = cache_inode_params.attrmask;
          fsal_status = FSAL_getattrs(file_handle, context,
                                      &fsal_attributes);
          if (FSAL_IS_ERROR(fsal_status)) {
               *status = cache_inode_error_convert(fsal_status);
               LogDebug(COMPONENT_CACHE_INODE,
                        "cache_inode_get: cache_inode_status=%u "
                        "fsal_status=%u,%u ", *status,
                        fsal_status.major,
                        fsal_status.minor);
               return NULL;
          }

          /* The type has to be set in the attributes */
          if (!FSAL_TEST_MASK(fsal_attributes.supported_attributes,
                              FSAL_ATTR_TYPE)) {
               *status = CACHE_INODE_FSAL_ERROR;
               return NULL;
          }

          /* Get the cache_inode file type */
          type = cache_inode_fsal_type_convert(fsal_attributes.type);
          if (type == SYMBOLIC_LINK) {
               /* Symlink targets are cached with the entry, so fetch
                * the link content now */
               fsal_attributes.asked_attributes =
                    cache_inode_params.attrmask;
               fsal_status =
                    FSAL_readlink(file_handle, context,
                                  &create_arg.link_content,
                                  &fsal_attributes);
               if (FSAL_IS_ERROR(fsal_status)) {
                    *status = cache_inode_error_convert(fsal_status);
                    return NULL;
               }
          }
          if ((entry = cache_inode_new_entry(fsdata,
                                             &fsal_attributes,
                                             type,
                                             &create_arg,
                                             status)) == NULL) {
               return NULL;
          }
     }

     *status = CACHE_INODE_SUCCESS;

     /* This is the replacement for cache_inode_renew_entry.  Rather
        than calling that function at the start of every cache_inode
        call with the inode locked, we call cache_inode_check trust to
        perform 'heavyweight' (timed expiration of cached attributes,
        getattr-based directory trust) checks the first time after
        getting an inode.  It does all of the checks read-locked and
        only acquires a write lock if there's something requiring a
        change.

        There is a second light-weight check done before use of cached
        data that checks whether the bits saying that inode attributes
        or inode content are trustworthy have been cleared by, for
        example, FSAL_CB.

        To summarize, the current implementation is that policy-based
        trust of validity is checked once per logical series of
        operations at cache_inode_get, and asynchronous trust is
        checked with use (when the attributes are locked for reading,
        for example.) */

     if ((*status = cache_inode_check_trust(entry, context))
         != CACHE_INODE_SUCCESS) {
          goto out_put;
     }

     /* Set the returned attributes */
     *status = cache_inode_lock_trust_attrs(entry, context);

     /* cache_inode_lock_trust_attrs may fail, in that case, the
        attributes are wrong and pthread_rwlock_unlock can't be called
        again */
     if(*status != CACHE_INODE_SUCCESS)
       {
         goto out_put;
       }

     *attr = entry->attributes;
     pthread_rwlock_unlock(&entry->attr_lock);

     return entry;

 out_put:
     /* drop the reference taken above before reporting failure */
     cache_inode_put(entry);
     entry = NULL;
     return entry;
} /* cache_inode_get */
cache_entry_t *cache_inode_get_located(cache_inode_fsal_data_t * pfsdata, cache_entry_t * plocation, cache_inode_policy_t policy, fsal_attrib_list_t * pattr, hash_table_t * ht, cache_inode_client_t * pclient, fsal_op_context_t * pcontext, cache_inode_status_t * pstatus) { hash_buffer_t key, value; cache_entry_t *pentry = NULL; fsal_status_t fsal_status; cache_inode_create_arg_t create_arg; cache_inode_file_type_t type; int hrc = 0; fsal_attrib_list_t fsal_attributes; cache_inode_fsal_data_t *ppoolfsdata = NULL; memset(&create_arg, 0, sizeof(create_arg)); /* Set the return default to CACHE_INODE_SUCCESS */ *pstatus = CACHE_INODE_SUCCESS; /* stats */ /* cache_invalidate calls this with no context or client */ if (pclient) { pclient->stat.nb_call_total += 1; pclient->stat.func_stats.nb_call[CACHE_INODE_GET] += 1; } /* Turn the input to a hash key */ if(cache_inode_fsaldata_2_key(&key, pfsdata, pclient)) { *pstatus = CACHE_INODE_UNAPPROPRIATED_KEY; /* stats */ /* cache_invalidate calls this with no context or client */ if (pclient) { pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_GET] += 1; ppoolfsdata = (cache_inode_fsal_data_t *) key.pdata; ReleaseToPool(ppoolfsdata, &pclient->pool_key); } return NULL; } switch (hrc = HashTable_Get(ht, &key, &value)) { case HASHTABLE_SUCCESS: /* Entry exists in the cache and was found */ pentry = (cache_entry_t *) value.pdata; /* return attributes additionally */ *pattr = pentry->attributes; if ( !pclient ) { /* invalidate. Just return it to mark it stale and go on. */ return( pentry ); } break; case HASHTABLE_ERROR_NO_SUCH_KEY: if ( !pclient ) { /* invalidate. 
Just return */ return( NULL ); } /* Cache miss, allocate a new entry */ /* XXX I do not think this can happen with avl dirent cache */ if(pfsdata->cookie != DIR_START) { /* added for sanity check */ LogDebug(COMPONENT_CACHE_INODE, "cache_inode_get: pfsdata->cookie != DIR_START (=%"PRIu64") on object whose type is %u", pfsdata->cookie, cache_inode_fsal_type_convert(fsal_attributes.type)); pfsdata->cookie = DIR_START; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); /* redo the call */ return cache_inode_get(pfsdata, policy, pattr, ht, pclient, pcontext, pstatus); } /* First, call FSAL to know what the object is */ fsal_attributes.asked_attributes = pclient->attrmask; fsal_status = FSAL_getattrs(&pfsdata->handle, pcontext, &fsal_attributes); if(FSAL_IS_ERROR(fsal_status)) { *pstatus = cache_inode_error_convert(fsal_status); LogDebug(COMPONENT_CACHE_INODE, "cache_inode_get: cache_inode_status=%u fsal_status=%u,%u ", *pstatus, fsal_status.major, fsal_status.minor); if(fsal_status.major == ERR_FSAL_STALE) { char handle_str[256]; snprintHandle(handle_str, 256, &pfsdata->handle); LogEvent(COMPONENT_CACHE_INODE, "cache_inode_get: Stale FSAL File Handle %s, fsal_status=(%u,%u)", handle_str, fsal_status.major, fsal_status.minor); *pstatus = CACHE_INODE_FSAL_ESTALE; } /* stats */ pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_GET] += 1; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); return NULL; } /* The type has to be set in the attributes */ if(!FSAL_TEST_MASK(fsal_attributes.supported_attributes, FSAL_ATTR_TYPE)) { *pstatus = CACHE_INODE_FSAL_ERROR; /* stats */ pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_GET] += 1; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); return NULL; } /* Get the cache_inode file type */ type = cache_inode_fsal_type_convert(fsal_attributes.type); if(type == SYMBOLIC_LINK) { if( CACHE_INODE_KEEP_CONTENT( policy ) ) { FSAL_CLEAR_MASK(fsal_attributes.asked_attributes); 
FSAL_SET_MASK(fsal_attributes.asked_attributes, pclient->attrmask); fsal_status = FSAL_readlink(&pfsdata->handle, pcontext, &create_arg.link_content, &fsal_attributes); } else { fsal_status.major = ERR_FSAL_NO_ERROR ; fsal_status.minor = 0 ; } if(FSAL_IS_ERROR(fsal_status)) { *pstatus = cache_inode_error_convert(fsal_status); /* stats */ pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_GET] += 1; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); if(fsal_status.major == ERR_FSAL_STALE) { cache_inode_status_t kill_status; LogEvent(COMPONENT_CACHE_INODE, "cache_inode_get: Stale FSAL File Handle detected for pentry = %p, fsal_status=(%u,%u)", pentry, fsal_status.major, fsal_status.minor); if(cache_inode_kill_entry(pentry, NO_LOCK, ht, pclient, &kill_status) != CACHE_INODE_SUCCESS) LogCrit(COMPONENT_CACHE_INODE, "cache_inode_get: Could not kill entry %p, status = %u, fsal_status=(%u,%u)", pentry, kill_status, fsal_status.major, fsal_status.minor); *pstatus = CACHE_INODE_FSAL_ESTALE; } return NULL; } } /* Add the entry to the cache */ if ( type == 1) LogCrit(COMPONENT_CACHE_INODE,"inode get"); if((pentry = cache_inode_new_entry( pfsdata, &fsal_attributes, type, policy, &create_arg, NULL, /* never used to add a new DIR_CONTINUE within this function */ ht, pclient, pcontext, FALSE, /* This is a population, not a creation */ pstatus ) ) == NULL ) { /* stats */ pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_GET] += 1; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); return NULL; } /* Set the returned attributes */ *pattr = fsal_attributes; /* Now, exit the switch/case and returns */ break; default: /* This should not happened */ *pstatus = CACHE_INODE_INVALID_ARGUMENT; LogCrit(COMPONENT_CACHE_INODE, "cache_inode_get returning CACHE_INODE_INVALID_ARGUMENT - this should not have happened"); if ( !pclient ) { /* invalidate. 
Just return */ return( NULL ); } /* stats */ pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_GET] += 1; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); return NULL; break; } /* Want to ASSERT pclient at this point */ *pstatus = CACHE_INODE_SUCCESS; if (pentry->object.symlink != NULL) { int stop_here; stop_here = 1; if (stop_here) { stop_here = 2; } } /* valid the found entry, if this is not feasable, returns nothing to the client */ if( plocation != NULL ) { if( plocation != pentry ) { P_w(&pentry->lock); if((*pstatus = cache_inode_valid(pentry, CACHE_INODE_OP_GET, pclient)) != CACHE_INODE_SUCCESS) { V_w(&pentry->lock); pentry = NULL; } V_w(&pentry->lock); } } /* stats */ pclient->stat.func_stats.nb_success[CACHE_INODE_GET] += 1; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); return pentry; } /* cache_inode_get_located */
/**
 * nfs_Mkdir: the NFSPROC_MKDIR (V2) and NFSPROC3_MKDIR (V3) handler.
 *
 * Creates a directory named by the client inside the directory designated
 * by the request's file handle, then builds the version-specific reply:
 * a file handle + attributes for V2, a post-op file handle, post-op
 * attributes and weak-cache-coherency (wcc) data for V3.
 *
 * @param parg     [IN]    decoded NFS arguments (arg_mkdir2 / arg_mkdir3 arm)
 * @param pexport  [IN]    export entry the request was addressed to
 * @param pcontext [IN]    FSAL credentials of the caller
 * @param pclient  [INOUT] per-worker cache_inode client resources
 * @param ht       [INOUT] cache inode hash table
 * @param preq     [IN]    RPC request; preq->rq_vers selects V2 vs V3 paths
 * @param pres     [OUT]   NFS reply to fill in
 *
 * @return NFS_REQ_OK when a reply (success or protocol error) was built,
 *         NFS_REQ_DROP when the error is retryable and the request should
 *         be dropped so the client retransmits.
 */
int nfs_Mkdir(nfs_arg_t * parg,
              exportlist_t * pexport,
              fsal_op_context_t * pcontext,
              cache_inode_client_t * pclient,
              hash_table_t * ht, struct svc_req *preq, nfs_res_t * pres)
{
  static char __attribute__ ((__unused__)) funcName[] = "nfs_Mkdir";

  char *str_dir_name = NULL;
  fsal_accessmode_t mode = 0;
  cache_entry_t *dir_pentry = NULL;
  cache_entry_t *parent_pentry = NULL;
  int rc = 0;
  fsal_attrib_list_t parent_attr;
  fsal_attrib_list_t attr;
  fsal_attrib_list_t *ppre_attr;
  fsal_attrib_list_t attr_parent_after;
  cache_inode_file_type_t parent_filetype;
  fsal_handle_t *pfsal_handle;
  fsal_name_t dir_name;
  cache_inode_status_t cache_status = CACHE_INODE_SUCCESS;
  cache_inode_status_t cache_status_lookup;

  if(isDebug(COMPONENT_NFSPROTO))
    {
      char str[LEN_FH_STR];

      /* Pick the name out of whichever union arm matches the RPC version */
      switch (preq->rq_vers)
        {
        case NFS_V2:
          str_dir_name = parg->arg_mkdir2.where.name;
          break;
        case NFS_V3:
          str_dir_name = parg->arg_mkdir3.where.name;
          break;
        }

      nfs_FhandleToStr(preq->rq_vers,
                       &(parg->arg_mkdir2.where.dir),
                       &(parg->arg_mkdir3.where.dir), NULL, str);
      LogDebug(COMPONENT_NFSPROTO,
               "REQUEST PROCESSING: Calling nfs_Mkdir handle: %s name: %s",
               str, str_dir_name);
    }

  if(preq->rq_vers == NFS_V3)
    {
      /* to avoid setting it on each error case */
      pres->res_mkdir3.MKDIR3res_u.resfail.dir_wcc.before.attributes_follow = FALSE;
      pres->res_mkdir3.MKDIR3res_u.resfail.dir_wcc.after.attributes_follow = FALSE;
      ppre_attr = NULL;
    }

  /* Resolve the parent directory handle to a cache entry; on failure the
   * per-version status fields have already been filled in and rc tells us
   * whether to reply or drop. */
  if((parent_pentry = nfs_FhandleToCache(preq->rq_vers,
                                         &(parg->arg_mkdir2.where.dir),
                                         &(parg->arg_mkdir3.where.dir),
                                         NULL,
                                         &(pres->res_dirop2.status),
                                         &(pres->res_mkdir3.status),
                                         NULL,
                                         &parent_attr,
                                         pcontext, pclient, ht, &rc)) == NULL)
    {
      /* Stale NFS FH ? */
      return rc;
    }

  /* get directory attributes before action (for V3 reply) */
  ppre_attr = &parent_attr;

  /* Extract the filetype */
  parent_filetype = cache_inode_fsal_type_convert(parent_attr.type);

  /*
   * Sanity checks:
   */
  if(parent_filetype != DIRECTORY)
    {
      switch (preq->rq_vers)
        {
        case NFS_V2:
          pres->res_dirop2.status = NFSERR_NOTDIR;
          break;

        case NFS_V3:
          pres->res_mkdir3.status = NFS3ERR_NOTDIR;
          break;
        }

      return NFS_REQ_OK;
    }

  /* Extract the new directory's name and requested mode.  When the client
   * supplies no mode ((unsigned)-1 for V2, set_it == FALSE for V3) we fall
   * back to mode 0; the client is expected to follow up with a SETATTR. */
  switch (preq->rq_vers)
    {
    case NFS_V2:
      str_dir_name = parg->arg_mkdir2.where.name;

      if(parg->arg_mkdir2.attributes.mode != (unsigned int)-1)
        {
          mode = (fsal_accessmode_t) parg->arg_mkdir2.attributes.mode;
        }
      else
        {
          mode = (fsal_accessmode_t) 0;
        }
      break;

    case NFS_V3:
      str_dir_name = parg->arg_mkdir3.where.name;

      if(parg->arg_mkdir3.attributes.mode.set_it == TRUE)
        mode = (fsal_accessmode_t) parg->arg_mkdir3.attributes.mode.set_mode3_u.mode;
      else
        mode = (fsal_accessmode_t) 0;
      break;
    }

  //if(str_dir_name == NULL || strlen(str_dir_name) == 0)
  if(str_dir_name == NULL || *str_dir_name == '\0' )
    {
      /* Empty or missing name: report a per-version error; the common
       * failure path at the bottom of the function builds the reply. */
      if(preq->rq_vers == NFS_V2)
        pres->res_dirop2.status = NFSERR_IO;
      if(preq->rq_vers == NFS_V3)
        pres->res_mkdir3.status = NFS3ERR_INVAL;
    }
  else
    {
      /* Make the directory */
      if((cache_status = cache_inode_error_convert(FSAL_str2name(str_dir_name,
                                                                 FSAL_MAX_NAME_LEN,
                                                                 &dir_name))) ==
         CACHE_INODE_SUCCESS)
        {
          /*
           * Lookup file to see if it exists.  If so, use it.  Otherwise
           * create a new one.
           */
          dir_pentry = cache_inode_lookup( parent_pentry,
                                           &dir_name,
                                           pexport->cache_inode_policy,
                                           &attr,
                                           ht,
                                           pclient,
                                           pcontext,
                                           &cache_status_lookup);

          if(cache_status_lookup == CACHE_INODE_NOT_FOUND)
            {
              /* Create the directory */
              if((dir_pentry = cache_inode_create(parent_pentry,
                                                  &dir_name,
                                                  DIRECTORY,
                                                  pexport->cache_inode_policy,
                                                  mode,
                                                  NULL,
                                                  &attr,
                                                  ht, pclient, pcontext,
                                                  &cache_status)) != NULL)
                {
                  /*
                   * Get the FSAL handle for this entry
                   */
                  pfsal_handle = cache_inode_get_fsal_handle(dir_pentry, &cache_status);

                  if(cache_status == CACHE_INODE_SUCCESS)
                    {
                      switch (preq->rq_vers)
                        {
                        case NFS_V2:
                          /* Build file handle */
                          if(!nfs2_FSALToFhandle
                             (&(pres->res_dirop2.DIROP2res_u.diropok.file), pfsal_handle,
                              pexport))
                            pres->res_dirop2.status = NFSERR_IO;
                          else
                            {
                              /*
                               * Build entry
                               * attributes
                               */
                              if(nfs2_FSALattr_To_Fattr(pexport, &attr,
                                                        &(pres->res_dirop2.DIROP2res_u.
                                                          diropok.attributes)) == 0)
                                pres->res_dirop2.status = NFSERR_IO;
                              else
                                pres->res_dirop2.status = NFS_OK;
                            }
                          break;

                        case NFS_V3:
                          /* Build file handle; the buffer is freed below on
                           * conversion failure, otherwise it is owned by the
                           * reply and released after encoding. */
                          if((pres->res_mkdir3.MKDIR3res_u.resok.obj.post_op_fh3_u.
                              handle.data.data_val =
                              Mem_Alloc_Label(NFS3_FHSIZE,
                                              "Filehandle V3 in nfs3_mkdir")) == NULL)
                            {
                              pres->res_mkdir3.status = NFS3ERR_IO;
                              return NFS_REQ_OK;
                            }

                          if(nfs3_FSALToFhandle
                             (&pres->res_mkdir3.MKDIR3res_u.resok.obj.post_op_fh3_u.
                              handle, pfsal_handle, pexport) == 0)
                            {
                              Mem_Free((char *)pres->res_mkdir3.MKDIR3res_u.resok.obj.
                                       post_op_fh3_u.handle.data.data_val);
                              pres->res_mkdir3.status = NFS3ERR_INVAL;
                              return NFS_REQ_OK;
                            }
                          else
                            {
                              /* Set Post Op Fh3 structure */
                              pres->res_mkdir3.MKDIR3res_u.resok.obj.handle_follows = TRUE;
                              pres->res_mkdir3.MKDIR3res_u.resok.obj.post_op_fh3_u.handle.
                                  data.data_len = sizeof(file_handle_v3_t);

                              /*
                               * Build entry
                               * attributes
                               */
                              nfs_SetPostOpAttr(pcontext, pexport,
                                                dir_pentry,
                                                &attr,
                                                &(pres->res_mkdir3.MKDIR3res_u.resok.
                                                  obj_attributes));

                              /* Get the attributes of the parent after the operation */
                              cache_inode_get_attributes(parent_pentry,
                                                         &attr_parent_after);

                              /*
                               * Build Weak Cache
                               * Coherency data
                               */
                              nfs_SetWccData(pcontext, pexport,
                                             parent_pentry,
                                             ppre_attr,
                                             &attr_parent_after,
                                             &(pres->res_mkdir3.MKDIR3res_u.resok.
                                               dir_wcc));

                              pres->res_mkdir3.status = NFS3_OK;
                            }
                          break;
                        }
                      return NFS_REQ_OK;
                    }
                }
            }                   /* If( cache_status_lookup == CACHE_INODE_NOT_FOUND ) */
          else
            {
              /* object already exists or failure during lookup */
              if(cache_status_lookup == CACHE_INODE_SUCCESS)
                {
                  /* Trying to create a file that already exists */
                  cache_status = CACHE_INODE_ENTRY_EXISTS;

                  switch (preq->rq_vers)
                    {
                    case NFS_V2:
                      pres->res_dirop2.status = NFSERR_EXIST;
                      break;

                    case NFS_V3:
                      pres->res_mkdir3.status = NFS3ERR_EXIST;
                      break;
                    }
                }
              else
                {
                  /* Server fault.
                   * NOTE(review): a failed lookup is reported as NFS3ERR_INVAL;
                   * NFS3ERR_IO would seem more natural here — confirm against
                   * the other handlers' conventions before changing. */
                  cache_status = cache_status_lookup;

                  switch (preq->rq_vers)
                    {
                    case NFS_V2:
                      pres->res_dirop2.status = NFSERR_IO;
                      break;

                    case NFS_V3:
                      pres->res_mkdir3.status = NFS3ERR_INVAL;
                      break;
                    }
                }

              nfs_SetFailedStatus(pcontext, pexport,
                                  preq->rq_vers,
                                  cache_status,
                                  &pres->res_dirop2.status,
                                  &pres->res_mkdir3.status,
                                  NULL, NULL,
                                  parent_pentry,
                                  ppre_attr,
                                  &(pres->res_mkdir3.MKDIR3res_u.resfail.dir_wcc),
                                  NULL, NULL, NULL);

              return NFS_REQ_OK;
            }
        }
    }

  /* If we are here, there was an error */
  if(nfs_RetryableError(cache_status))
    {
      return NFS_REQ_DROP;
    }

  nfs_SetFailedStatus(pcontext, pexport,
                      preq->rq_vers,
                      cache_status,
                      &pres->res_dirop2.status,
                      &pres->res_mkdir3.status,
                      NULL, NULL,
                      parent_pentry,
                      ppre_attr,
                      &(pres->res_mkdir3.MKDIR3res_u.resfail.dir_wcc),
                      NULL, NULL, NULL);

  return NFS_REQ_OK;
}
/** * * cache_inode_lookup_sw: looks up for a name in a directory indicated by a * cached entry. * * Looks up for a name in a directory indicated by a cached entry. The directory * should have been cached before. * * @param pentry_parent [IN] entry for the parent directory to be managed. * @param name [IN] name of the entry that we are looking for in the * cache. * @param pattr [OUT] attributes for the entry that we have found. * @param ht [IN] hash table used for the cache, unused in this * call. * @param pclient [INOUT] ressource allocated by the client for the nfs * management. * @param pcontext [IN] FSAL credentials * @param pstatus [OUT] returned status. * @param use_mutex [IN] if TRUE, mutex management is done, not if equal * to FALSE. * * @return CACHE_INODE_SUCCESS if operation is a success \n * @return CACHE_INODE_LRU_ERROR if allocation error occured when validating the * entry * */ cache_entry_t *cache_inode_lookup_sw(cache_entry_t * pentry_parent, fsal_name_t * pname, cache_inode_policy_t policy, fsal_attrib_list_t * pattr, hash_table_t * ht, cache_inode_client_t * pclient, fsal_op_context_t * pcontext, cache_inode_status_t * pstatus, int use_mutex) { cache_inode_dir_entry_t dirent_key[1], *dirent; struct avltree_node *dirent_node; cache_inode_dir_entry_t *new_dir_entry; cache_entry_t *pentry = NULL; fsal_status_t fsal_status; #ifdef _USE_MFSL mfsl_object_t object_handle; #else fsal_handle_t object_handle; #endif fsal_handle_t dir_handle; fsal_attrib_list_t object_attributes; cache_inode_create_arg_t create_arg; cache_inode_file_type_t type; cache_inode_status_t cache_status; cache_inode_fsal_data_t new_entry_fsdata; fsal_accessflags_t access_mask = 0; memset(&create_arg, 0, sizeof(create_arg)); memset( (char *)&new_entry_fsdata, 0, sizeof( new_entry_fsdata ) ) ; /* Set the return default to CACHE_INODE_SUCCESS */ *pstatus = CACHE_INODE_SUCCESS; /* stats */ (pclient->stat.nb_call_total)++; (pclient->stat.func_stats.nb_call[CACHE_INODE_LOOKUP])++; /* We 
should not renew entries when !use_mutex (because unless we * make the flag explicit (shared vs. exclusive), we don't know * whether a mutating operation is safe--and, the caller should have * already renewed the entry */ if(use_mutex == TRUE) { P_w(&pentry_parent->lock); cache_status = cache_inode_renew_entry(pentry_parent, pattr, ht, pclient, pcontext, pstatus); if(cache_status != CACHE_INODE_SUCCESS) { V_w(&pentry_parent->lock); inc_func_err_retryable(pclient, CACHE_INODE_GETATTR); LogDebug(COMPONENT_CACHE_INODE, "cache_inode_lookup: returning %d(%s) from cache_inode_renew_entry", *pstatus, cache_inode_err_str(*pstatus)); return NULL; } /* RW Lock goes for writer to reader */ rw_lock_downgrade(&pentry_parent->lock); } if(pentry_parent->internal_md.type != DIRECTORY) { /* Parent is no directory base, return NULL */ *pstatus = CACHE_INODE_NOT_A_DIRECTORY; /* stats */ (pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_LOOKUP])++; if(use_mutex == TRUE) V_r(&pentry_parent->lock); return NULL; } /* if name is ".", use the input value */ if(!FSAL_namecmp(pname, (fsal_name_t *) & FSAL_DOT)) { pentry = pentry_parent; } else if(!FSAL_namecmp(pname, (fsal_name_t *) & FSAL_DOT_DOT)) { /* Directory do only have exactly one parent. This a limitation in all FS, * which implies that hard link are forbidden on directories (so that * they exists only in one dir). Because of this, the parent list is * always limited to one element for a dir. Clients SHOULD never * 'lookup( .. )' in something that is no dir. */ pentry = cache_inode_lookupp_no_mutex(pentry_parent, ht, pclient, pcontext, pstatus); } else { /* This is a "regular lookup" (not on "." 
or "..") */ /* Check is user (as specified by the credentials) is authorized to * lookup the directory or not */ access_mask = FSAL_MODE_MASK_SET(FSAL_X_OK) | FSAL_ACE4_MASK_SET(FSAL_ACE_PERM_LIST_DIR); if(cache_inode_access_no_mutex(pentry_parent, access_mask, ht, pclient, pcontext, pstatus) != CACHE_INODE_SUCCESS) { if(use_mutex == TRUE) V_r(&pentry_parent->lock); (pclient->stat.func_stats.nb_err_retryable[CACHE_INODE_GETATTR])++; return NULL; } /* We first try avltree_lookup by name. If that fails, we dispatch to * the fsal. */ FSAL_namecpy(&dirent_key->name, pname); dirent_node = avltree_lookup(&dirent_key->node_n, &pentry_parent->object.dir.dentries); if (dirent_node) { dirent = avltree_container_of(dirent_node, cache_inode_dir_entry_t, node_n); pentry = dirent->pentry; } if(pentry == NULL) { LogDebug(COMPONENT_CACHE_INODE, "Cache Miss detected"); dir_handle = pentry_parent->handle; object_attributes.asked_attributes = pclient->attrmask; #ifdef _USE_MFSL #ifdef _USE_MFSL_ASYNC if(!mfsl_async_is_object_asynchronous(&pentry_parent->mobject)) { /* If the parent is asynchronous, rely on the content of the cache * inode parent entry. * * /!\ If the fs behind the FSAL is touched in a non-nfs way, * there will be huge incoherencies. 
*/ #endif /* _USE_MFSL_ASYNC */ fsal_status = MFSL_lookup(&pentry_parent->mobject, pname, pcontext, &pclient->mfsl_context, &object_handle, &object_attributes, NULL); #ifdef _USE_MFSL_ASYNC } else { LogMidDebug(COMPONENT_CACHE_INODE, "cache_inode_lookup chose to bypass FSAL and trusted his cache for name=%s", pname->name); fsal_status.major = ERR_FSAL_NOENT; fsal_status.minor = ENOENT; } #endif /* _USE_MFSL_ASYNC */ #else fsal_status = FSAL_lookup(&dir_handle, pname, pcontext, &object_handle, &object_attributes); #endif /* _USE_MFSL */ if(FSAL_IS_ERROR(fsal_status)) { *pstatus = cache_inode_error_convert(fsal_status); if(use_mutex == TRUE) V_r(&pentry_parent->lock); /* Stale File Handle to be detected and managed */ if(fsal_status.major == ERR_FSAL_STALE) { cache_inode_status_t kill_status; LogEvent(COMPONENT_CACHE_INODE, "cache_inode_lookup: Stale FSAL File Handle detected for pentry = %p, fsal_status=(%u,%u)", pentry_parent, fsal_status.major, fsal_status.minor); if(cache_inode_kill_entry(pentry_parent, NO_LOCK, ht, pclient, &kill_status) != CACHE_INODE_SUCCESS) LogCrit(COMPONENT_CACHE_INODE, "cache_inode_pentry_parent: Could not kill entry %p, status = %u", pentry_parent, kill_status); *pstatus = CACHE_INODE_FSAL_ESTALE; } /* stats */ (pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_LOOKUP])++; return NULL; } type = cache_inode_fsal_type_convert(object_attributes.type); /* If entry is a symlink, this value for be cached */ if(type == SYMBOLIC_LINK) { if( CACHE_INODE_KEEP_CONTENT( policy ) ) #ifdef _USE_MFSL { fsal_status = MFSL_readlink(&object_handle, pcontext, &pclient->mfsl_context, &create_arg.link_content, &object_attributes, NULL); } #else { fsal_status = FSAL_readlink(&object_handle, pcontext, &create_arg.link_content, &object_attributes); } else { fsal_status.major = ERR_FSAL_NO_ERROR ; fsal_status.minor = 0 ; } #endif if(FSAL_IS_ERROR(fsal_status)) { *pstatus = cache_inode_error_convert(fsal_status); if(use_mutex == TRUE) 
V_r(&pentry_parent->lock); /* Stale File Handle to be detected and managed */ if(fsal_status.major == ERR_FSAL_STALE) { cache_inode_status_t kill_status; LogEvent(COMPONENT_CACHE_INODE, "cache_inode_lookup: Stale FSAL File Handle detected for pentry = %p, fsal_status=(%u,%u)", pentry_parent, fsal_status.major, fsal_status.minor); if(cache_inode_kill_entry(pentry_parent, NO_LOCK, ht, pclient, &kill_status) != CACHE_INODE_SUCCESS) LogCrit(COMPONENT_CACHE_INODE, "cache_inode_pentry_parent: Could not kill entry %p, status = %u", pentry_parent, kill_status); *pstatus = CACHE_INODE_FSAL_ESTALE; } /* stats */ (pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_LOOKUP])++; return NULL; } } /* Allocation of a new entry in the cache */ #ifdef _USE_MFSL new_entry_fsdata.handle = object_handle.handle; #else new_entry_fsdata.handle = object_handle; #endif new_entry_fsdata.cookie = 0; if((pentry = cache_inode_new_entry( &new_entry_fsdata, &object_attributes, type, policy, &create_arg, NULL, ht, pclient, pcontext, FALSE, /* This is a population and not a creation */ pstatus ) ) == NULL ) { if(use_mutex == TRUE) V_r(&pentry_parent->lock); /* stats */ (pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_LOOKUP])++; return NULL; } /* Entry was found in the FSAL, add this entry to the parent * directory */ cache_status = cache_inode_add_cached_dirent(pentry_parent, pname, pentry, ht, &new_dir_entry, pclient, pcontext, pstatus); if(cache_status != CACHE_INODE_SUCCESS && cache_status != CACHE_INODE_ENTRY_EXISTS) { if(use_mutex == TRUE) V_r(&pentry_parent->lock); /* stats */ (pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_LOOKUP])++; return NULL; } } /* cached lookup fail (try fsal) */
int nfs_Symlink(nfs_arg_t *parg, exportlist_t *pexport, fsal_op_context_t *pcontext, nfs_worker_data_t *pworker, struct svc_req *preq, nfs_res_t *pres) { char *str_symlink_name = NULL; fsal_name_t symlink_name; char *str_target_path = NULL; cache_inode_create_arg_t create_arg; fsal_accessmode_t mode = 0777; cache_entry_t *symlink_pentry = NULL; cache_entry_t *parent_pentry; cache_inode_file_type_t parent_filetype; fsal_attrib_list_t parent_attr; fsal_attrib_list_t attr_symlink; fsal_attrib_list_t attributes_symlink; fsal_attrib_list_t attr_parent_after; fsal_attrib_list_t *ppre_attr; cache_inode_status_t cache_status; cache_inode_status_t cache_status_parent; fsal_handle_t *pfsal_handle; int rc = NFS_REQ_OK; #ifdef _USE_QUOTA fsal_status_t fsal_status ; #endif memset(&create_arg, 0, sizeof(create_arg)); if(isDebug(COMPONENT_NFSPROTO)) { char str[LEN_FH_STR]; switch (preq->rq_vers) { case NFS_V2: str_symlink_name = parg->arg_symlink2.from.name; str_target_path = parg->arg_symlink2.to; break; case NFS_V3: str_symlink_name = parg->arg_symlink3.where.name; str_target_path = parg->arg_symlink3.symlink.symlink_data; break; } nfs_FhandleToStr(preq->rq_vers, &(parg->arg_symlink2.from.dir), &(parg->arg_symlink3.where.dir), NULL, str); LogDebug(COMPONENT_NFSPROTO, "REQUEST PROCESSING: Calling nfs_Symlink handle: %s name: %s target: %s", str, str_symlink_name, str_target_path); } if(preq->rq_vers == NFS_V3) { /* to avoid setting it on each error case */ pres->res_symlink3.SYMLINK3res_u.resfail.dir_wcc.before.attributes_follow = FALSE; pres->res_symlink3.SYMLINK3res_u.resfail.dir_wcc.after.attributes_follow = FALSE; ppre_attr = NULL; } /* Convert directory file handle into a vnode */ if((parent_pentry = nfs_FhandleToCache(preq->rq_vers, &(parg->arg_symlink2.from.dir), &(parg->arg_symlink3.where.dir), NULL, &(pres->res_stat2), &(pres->res_symlink3.status), NULL, &parent_attr, pcontext, &rc)) == NULL) { /* Stale NFS FH ? 
*/ goto out;; } /* get directory attributes before action (for V3 reply) */ ppre_attr = &parent_attr; /* Extract the filetype */ parent_filetype = cache_inode_fsal_type_convert(parent_attr.type); /* * Sanity checks: new directory name must be non-null; parent must be * a directory. */ if(parent_filetype != DIRECTORY) { switch (preq->rq_vers) { case NFS_V2: pres->res_stat2 = NFSERR_NOTDIR; break; case NFS_V3: pres->res_symlink3.status = NFS3ERR_NOTDIR; break; } rc = NFS_REQ_OK; goto out; } #ifdef _USE_QUOTA /* if quota support is active, then we should check is the FSAL allows inode creation or not */ fsal_status = FSAL_check_quota( pexport->fullpath, FSAL_QUOTA_INODES, FSAL_OP_CONTEXT_TO_UID( pcontext ) ) ; if( FSAL_IS_ERROR( fsal_status ) ) { switch (preq->rq_vers) { case NFS_V2: pres->res_stat2 = NFSERR_DQUOT ; break; case NFS_V3: pres->res_symlink3.status = NFS3ERR_DQUOT; break; } rc = NFS_REQ_OK; goto out; } #endif /* _USE_QUOTA */ switch (preq->rq_vers) { case NFS_V2: str_symlink_name = parg->arg_symlink2.from.name; str_target_path = parg->arg_symlink2.to; break; case NFS_V3: str_symlink_name = parg->arg_symlink3.where.name; str_target_path = parg->arg_symlink3.symlink.symlink_data; break; } if(str_symlink_name == NULL || *str_symlink_name == '\0'|| str_target_path == NULL || *str_target_path == '\0' || FSAL_IS_ERROR(FSAL_str2name(str_symlink_name, 0, &symlink_name)) || FSAL_IS_ERROR(FSAL_str2path(str_target_path, 0, &create_arg.link_content))) { cache_status = CACHE_INODE_INVALID_ARGUMENT; } else { /* Make the symlink */ if((symlink_pentry = cache_inode_create(parent_pentry, &symlink_name, SYMBOLIC_LINK, mode, &create_arg, &attr_symlink, pcontext, &cache_status)) != NULL) { switch (preq->rq_vers) { case NFS_V2: pres->res_stat2 = NFS_OK; break; case NFS_V3: /* Build file handle */ pfsal_handle = &symlink_pentry->handle; /* Some clients (like the Spec NFS benchmark) set attributes with the NFSPROC3_SYMLINK request */ 
if(nfs3_Sattr_To_FSALattr(&attributes_symlink, &parg->arg_symlink3.symlink.symlink_attributes) == 0) { pres->res_create3.status = NFS3ERR_INVAL; rc = NFS_REQ_OK; goto out; } /* Mode is managed above (in cache_inode_create), there is no need * to manage it */ if(attributes_symlink.asked_attributes & FSAL_ATTR_MODE) attributes_symlink.asked_attributes &= ~FSAL_ATTR_MODE; /* Some clients (like Solaris 10) try to set the size of the file to 0 * at creation time. The FSAL create empty file, so we ignore this */ if(attributes_symlink.asked_attributes & FSAL_ATTR_SIZE) attributes_symlink.asked_attributes &= ~FSAL_ATTR_SIZE; if(attributes_symlink.asked_attributes & FSAL_ATTR_SPACEUSED) attributes_symlink.asked_attributes &= ~FSAL_ATTR_SPACEUSED; /* If owner or owner_group are set, and the credential was * squashed, then we must squash the set owner and owner_group. */ squash_setattr(&pworker->export_perms, &pworker->user_credentials, &attributes_symlink); /* Are there attributes to be set (additional to the mode) ? */ if(attributes_symlink.asked_attributes != 0ULL && attributes_symlink.asked_attributes != FSAL_ATTR_MODE) { /* A call to cache_inode_setattr is required */ if(cache_inode_setattr(symlink_pentry, &attributes_symlink, pcontext, FALSE, &cache_status) != CACHE_INODE_SUCCESS) { goto out_error; } } if ((pres->res_symlink3.status = (nfs3_AllocateFH(&pres->res_symlink3.SYMLINK3res_u .resok.obj.post_op_fh3_u.handle))) != NFS3_OK) { pres->res_symlink3.status = NFS3ERR_IO; rc = NFS_REQ_OK; goto out; } if(nfs3_FSALToFhandle (&pres->res_symlink3.SYMLINK3res_u.resok.obj.post_op_fh3_u.handle, pfsal_handle, pexport) == 0) { gsh_free(pres->res_symlink3.SYMLINK3res_u.resok.obj. 
post_op_fh3_u.handle.data.data_val); pres->res_symlink3.status = NFS3ERR_BADHANDLE; rc = NFS_REQ_OK; goto out; } /* The the parent pentry attributes for building Wcc Data */ if(cache_inode_getattr(parent_pentry, &attr_parent_after, pcontext, &cache_status_parent) != CACHE_INODE_SUCCESS) { gsh_free(pres->res_symlink3.SYMLINK3res_u.resok.obj. post_op_fh3_u.handle.data.data_val); pres->res_symlink3.status = NFS3ERR_BADHANDLE; rc = NFS_REQ_OK; goto out; } /* Set Post Op Fh3 structure */ pres->res_symlink3.SYMLINK3res_u.resok.obj.handle_follows = TRUE; /* Build entry attributes */ nfs_SetPostOpAttr(pexport, &attr_symlink, &(pres->res_symlink3.SYMLINK3res_u .resok.obj_attributes)); /* Build Weak Cache Coherency data */ nfs_SetWccData(pexport, ppre_attr, &attr_parent_after, &(pres->res_symlink3.SYMLINK3res_u.resok.dir_wcc)); pres->res_symlink3.status = NFS3_OK; break; } /* switch */ rc = NFS_REQ_OK; goto out; } } out_error: rc = nfs_SetFailedStatus(pexport, preq->rq_vers, cache_status, &pres->res_stat2, &pres->res_symlink3.status, NULL, ppre_attr, &(pres->res_symlink3.SYMLINK3res_u.resfail.dir_wcc), NULL, NULL); out: /* return references */ if (parent_pentry) cache_inode_put(parent_pentry); if (symlink_pentry) cache_inode_put(symlink_pentry); return (rc); } /* nfs_Symlink */
int nfs_Remove(nfs_arg_t *parg, exportlist_t *pexport, fsal_op_context_t *pcontext, nfs_worker_data_t *pworker, struct svc_req *preq, nfs_res_t *pres) { cache_entry_t *parent_pentry = NULL; cache_entry_t *pentry_child = NULL; fsal_attrib_list_t pre_parent_attr; fsal_attrib_list_t pentry_child_attr; fsal_attrib_list_t parent_attr; fsal_attrib_list_t *pparent_attr = NULL; cache_inode_file_type_t filetype; cache_inode_file_type_t childtype; cache_inode_status_t cache_status; char *file_name = NULL; fsal_name_t name; int rc = NFS_REQ_OK; if(isDebug(COMPONENT_NFSPROTO)) { char str[LEN_FH_STR]; switch (preq->rq_vers) { case NFS_V2: file_name = parg->arg_remove2.name; break; case NFS_V3: file_name = parg->arg_remove3.object.name; break; } nfs_FhandleToStr(preq->rq_vers, &(parg->arg_create2.where.dir), &(parg->arg_create3.where.dir), NULL, str); LogDebug(COMPONENT_NFSPROTO, "REQUEST PROCESSING: Calling nfs_Remove handle: %s name: %s", str, file_name); } if(preq->rq_vers == NFS_V3) { /* to avoid setting it on each error case */ pres->res_remove3.REMOVE3res_u.resfail.dir_wcc.before.attributes_follow = FALSE; pres->res_remove3.REMOVE3res_u.resfail.dir_wcc.after.attributes_follow = FALSE; pparent_attr = NULL; } /* Convert file handle into a pentry */ if((parent_pentry = nfs_FhandleToCache(preq->rq_vers, &(parg->arg_remove2.dir), &(parg->arg_remove3.object.dir), NULL, &(pres->res_dirop2.status), &(pres->res_remove3.status), NULL, &pre_parent_attr, pcontext, &rc)) == NULL) { /* Stale NFS FH ? */ goto out; } if((preq->rq_vers == NFS_V3) && (nfs3_Is_Fh_Xattr(&(parg->arg_remove3.object.dir)))) { rc = nfs3_Remove_Xattr(parg, pexport, pcontext, preq, pres); goto out; } /* get directory attributes before action (for V3 reply) */ pparent_attr = &pre_parent_attr; /* Extract the filetype */ filetype = cache_inode_fsal_type_convert(pre_parent_attr.type); /* * Sanity checks: new directory name must be non-null; parent must be * a directory. 
*/ if(filetype != DIRECTORY) { switch (preq->rq_vers) { case NFS_V2: pres->res_stat2 = NFSERR_NOTDIR; break; case NFS_V3: pres->res_remove3.status = NFS3ERR_NOTDIR; break; } rc = NFS_REQ_OK; goto out; } switch (preq->rq_vers) { case NFS_V2: file_name = parg->arg_remove2.name; break; case NFS_V3: file_name = parg->arg_remove3.object.name; break; } //if(file_name == NULL || strlen(file_name) == 0) if(file_name == NULL || *file_name == '\0' ) { cache_status = CACHE_INODE_INVALID_ARGUMENT; /* for lack of better... */ } else { if((cache_status = cache_inode_error_convert(FSAL_str2name(file_name, 0, &name))) == CACHE_INODE_SUCCESS) { /* * Lookup to the child entry to check if it is a directory * */ if((pentry_child = cache_inode_lookup(parent_pentry, &name, &pentry_child_attr, pcontext, &cache_status)) != NULL) { /* Extract the filetype */ childtype = cache_inode_fsal_type_convert(pentry_child_attr.type); /* * Sanity check: make sure we are about to remove a directory */ if(childtype == DIRECTORY) { switch (preq->rq_vers) { case NFS_V2: pres->res_stat2 = NFSERR_ISDIR; break; case NFS_V3: pres->res_remove3.status = NFS3ERR_ISDIR; break; } rc = NFS_REQ_OK; goto out; } LogFullDebug(COMPONENT_NFSPROTO, "==== NFS REMOVE ====> Trying to remove file %s", name.name); /* * Remove the entry. 
*/ if(cache_inode_remove(parent_pentry, &name, &parent_attr, pcontext, &cache_status) == CACHE_INODE_SUCCESS) { switch (preq->rq_vers) { case NFS_V2: pres->res_stat2 = NFS_OK; break; case NFS_V3: /* Build Weak Cache Coherency data */ nfs_SetWccData(pexport, pparent_attr, &parent_attr, &(pres->res_remove3.REMOVE3res_u.resok.dir_wcc)); pres->res_remove3.status = NFS3_OK; break; } rc = NFS_REQ_OK; goto out; } } } } /* If we are here, there was an error */ rc = nfs_SetFailedStatus(pexport, preq->rq_vers, cache_status, &pres->res_stat2, &pres->res_remove3.status, NULL, pparent_attr, &(pres->res_remove3.REMOVE3res_u.resfail.dir_wcc), NULL, NULL); out: /* return references */ if (pentry_child) cache_inode_put(pentry_child); if (parent_pentry) cache_inode_put(parent_pentry); return (rc); } /* nfs_Remove */
/**
 * nfs_Write: the NFSPROC_WRITE (V2) and NFSPROC3_WRITE (V3) handler.
 *
 * Writes client data to a regular file through the cache inode layer,
 * enforcing export restrictions (MDONLY, MaxOffsetWrite, MaxWrite) and
 * selecting the storage-stability mode (FILE_SYNC vs UNSTABLE buffering)
 * from the export options and the V3 "stable" argument.
 *
 * @param parg     [IN]    decoded NFS arguments (arg_write2 / arg_write3)
 * @param pexport  [IN]    export entry the request was addressed to
 * @param pcontext [IN]    FSAL credentials of the caller
 * @param pclient  [INOUT] per-worker cache_inode client resources
 * @param ht       [INOUT] cache inode hash table
 * @param preq     [IN]    RPC request; preq->rq_vers selects V2 vs V3 paths
 * @param pres     [OUT]   NFS reply to fill in
 *
 * @return NFS_REQ_OK when a reply was built, NFS_REQ_DROP on a retryable
 *         error (client will retransmit).
 */
int nfs_Write(nfs_arg_t * parg,
              exportlist_t * pexport,
              fsal_op_context_t * pcontext,
              cache_inode_client_t * pclient,
              hash_table_t * ht, struct svc_req *preq, nfs_res_t * pres)
{
  static char __attribute__ ((__unused__)) funcName[] = "nfs_Write";

  cache_entry_t *pentry;
  fsal_attrib_list_t attr;
  fsal_attrib_list_t pre_attr;
  fsal_attrib_list_t *ppre_attr;
  int rc;
  cache_inode_status_t cache_status = CACHE_INODE_SUCCESS;
  cache_content_status_t content_status;
  fsal_seek_t seek_descriptor;
  fsal_size_t size = 0;
  fsal_size_t written_size;
  fsal_off_t offset = 0;
  caddr_t data = NULL;
  enum stable_how stable;       /* NFS V3 storage stability, see RFC1813 page 50 */
  cache_inode_file_type_t filetype;
  fsal_boolean_t eof_met;
  uint64_t stable_flag = FSAL_SAFE_WRITE_TO_FS;

  if(isDebug(COMPONENT_NFSPROTO))
    {
      char str[LEN_FH_STR], *stables = "";

      switch (preq->rq_vers)
        {
        case NFS_V2:
          offset = parg->arg_write2.offset;
          size = parg->arg_write2.data.nfsdata2_len;
          stables = "FILE_SYNC";
          break;
        case NFS_V3:
          offset = parg->arg_write3.offset;
          size = parg->arg_write3.count;
          switch (parg->arg_write3.stable)
            {
            case UNSTABLE:
              stables = "UNSTABLE";
              break;
            case DATA_SYNC:
              stables = "DATA_SYNC";
              break;
            case FILE_SYNC:
              stables = "FILE_SYNC";
              break;
            }
        }

      nfs_FhandleToStr(preq->rq_vers,
                       &(parg->arg_write2.file),
                       &(parg->arg_write3.file), NULL, str);
      LogDebug(COMPONENT_NFSPROTO,
               "REQUEST PROCESSING: Calling nfs_Write handle: %s start: %llx len: %llx %s",
               str, (unsigned long long) offset, (unsigned long long) size,
               stables);
    }

  /* Data-cache policy for this request; filled in further below when an
   * actual write is prepared. */
  cache_content_policy_data_t datapol;

  datapol.UseMaxCacheSize = FALSE;

  if(preq->rq_vers == NFS_V3)
    {
      /* to avoid setting it on each error case */
      pres->res_write3.WRITE3res_u.resfail.file_wcc.before.attributes_follow = FALSE;
      pres->res_write3.WRITE3res_u.resfail.file_wcc.after.attributes_follow = FALSE;
      ppre_attr = NULL;
    }

  /* Convert file handle into a cache entry */
  if((pentry = nfs_FhandleToCache(preq->rq_vers,
                                  &(parg->arg_write2.file),
                                  &(parg->arg_write3.file),
                                  NULL,
                                  &(pres->res_attr2.status),
                                  &(pres->res_write3.status),
                                  NULL, &pre_attr, pcontext, pclient, ht,
                                  &rc)) == NULL)
    {
      /* Stale NFS FH ? */
      return rc;
    }

  /* Extended-attribute pseudo-handles are served by a dedicated path */
  if((preq->rq_vers == NFS_V3) && (nfs3_Is_Fh_Xattr(&(parg->arg_write3.file))))
    return nfs3_Write_Xattr(parg, pexport, pcontext, pclient, ht, preq, pres);

  /* get directory attributes before action (for V3 reply) */
  ppre_attr = &pre_attr;

  /* Extract the filetype */
  filetype = cache_inode_fsal_type_convert(pre_attr.type);

  /* Sanity check: write only a regular file */
  if(filetype != REGULAR_FILE)
    {
      switch (preq->rq_vers)
        {
        case NFS_V2:
          /*
           * In the RFC tell it not good but it does
           * not tell what to do ...
           * We use NFSERR_ISDIR for lack of better
           */
          pres->res_attr2.status = NFSERR_ISDIR;
          break;

        case NFS_V3:
          if(filetype == DIR_BEGINNING || filetype == DIR_CONTINUE)
            pres->res_write3.status = NFS3ERR_ISDIR;
          else
            pres->res_write3.status = NFS3ERR_INVAL;
          break;
        }
      return NFS_REQ_OK;
    }

  /* For MDONLY export, reject write operation */
  /* Request of type MDONLY_RO were rejected at the nfs_rpc_dispatcher level */
  /* This is done by replying EDQUOT (this error is known for not disturbing the client's requests cache */
  if(pexport->access_type == ACCESSTYPE_MDONLY)
    {
      switch (preq->rq_vers)
        {
        case NFS_V2:
          pres->res_attr2.status = NFSERR_DQUOT;
          break;

        case NFS_V3:
          pres->res_write3.status = NFS3ERR_DQUOT;
          break;
        }

      nfs_SetFailedStatus(pcontext, pexport,
                          preq->rq_vers,
                          cache_status,
                          &pres->res_attr2.status,
                          &pres->res_write3.status,
                          NULL, NULL,
                          pentry,
                          ppre_attr,
                          &(pres->res_write3.WRITE3res_u.resfail.file_wcc),
                          NULL, NULL, NULL);

      return NFS_REQ_OK;
    }

  /* Extract the argument from the request */
  switch (preq->rq_vers)
    {
    case NFS_V2:
      if(ppre_attr && ppre_attr->filesize > NFS2_MAX_FILESIZE)
        {
          /*
           *  V2 clients don't understand filesizes >
           *  2GB, so we don't allow them to alter
           *  them in any way. BJP 6/26/2001
           */
          pres->res_attr2.status = NFSERR_FBIG;
          return NFS_REQ_OK;
        }

      offset = parg->arg_write2.offset;         /* beginoffset is obsolete */
      size = parg->arg_write2.data.nfsdata2_len;        /* totalcount is obsolete  */
      data = parg->arg_write2.data.nfsdata2_val;
      /* NOTE(review): 'stable' is assigned here and in the V3 arm but never
       * read afterwards (only stable_flag is used) — likely a leftover. */
      stable = FILE_SYNC;
      if (pexport->use_commit == TRUE)
        stable_flag = FSAL_SAFE_WRITE_TO_FS;
      break;

    case NFS_V3:
      offset = parg->arg_write3.offset;
      size = parg->arg_write3.count;

      if(size > parg->arg_write3.data.data_len)
        {
          /* should never happen */
          pres->res_write3.status = NFS3ERR_INVAL;
          return NFS_REQ_OK;
        }

      /* UNSTABLE writes may be buffered (FS buffer or Ganesha's own
       * buffer) depending on export options; everything else is written
       * through to stable storage. */
      if((pexport->use_commit == TRUE) &&
         (pexport->use_ganesha_write_buffer == FALSE) &&
         (parg->arg_write3.stable == UNSTABLE))
        {
          stable_flag = FSAL_UNSAFE_WRITE_TO_FS_BUFFER;
        }
      else if((pexport->use_commit == TRUE) &&
              (pexport->use_ganesha_write_buffer == TRUE) &&
              (parg->arg_write3.stable == UNSTABLE))
        {
          stable_flag = FSAL_UNSAFE_WRITE_TO_GANESHA_BUFFER;
        }
      else
        {
          stable_flag = FSAL_SAFE_WRITE_TO_FS;
        }
      data = parg->arg_write3.data.data_val;
      stable = parg->arg_write3.stable;
      break;
    }

  /*
   * do not exceed maxium WRITE offset if set
   */
  if((pexport->options & EXPORT_OPTION_MAXOFFSETWRITE) == EXPORT_OPTION_MAXOFFSETWRITE)
    {
      LogFullDebug(COMPONENT_NFSPROTO,
                   "-----> Write offset=%llu count=%llu MaxOffSet=%llu",
                   (unsigned long long) offset,
                   (unsigned long long) size,
                   (unsigned long long) pexport->MaxOffsetWrite);

      if((fsal_off_t) (offset + size) > pexport->MaxOffsetWrite)
        {
          LogEvent(COMPONENT_NFSPROTO,
                   "NFS WRITE: A client tryed to violate max file size %llu for exportid #%hu",
                   (unsigned long long) pexport->MaxOffsetWrite, pexport->id);

          switch (preq->rq_vers)
            {
            case NFS_V2:
              pres->res_attr2.status = NFSERR_DQUOT;
              break;

            case NFS_V3:
              pres->res_write3.status = NFS3ERR_INVAL;
              break;
            }

          nfs_SetFailedStatus(pcontext, pexport,
                              preq->rq_vers,
                              cache_status,
                              &pres->res_attr2.status,
                              &pres->res_write3.status,
                              NULL, NULL,
                              pentry,
                              ppre_attr,
                              &(pres->res_write3.WRITE3res_u.resfail.file_wcc),
                              NULL, NULL, NULL);

          return NFS_REQ_OK;
        }
    }

  /*
   * We should take care not to exceed FSINFO wtmax
   *  field for the size
   */
  if(((pexport->options & EXPORT_OPTION_MAXWRITE) == EXPORT_OPTION_MAXWRITE) &&
     size > pexport->MaxWrite)
    {
      /*
       * The client asked for too much data, we
       * must restrict him
       */
      size = pexport->MaxWrite;
    }

  if(size == 0)
    {
      /* NOTE(review): a zero-length write falls through to the
       * failure-reporting path below with cache_status == SUCCESS —
       * verify that nfs_SetFailedStatus maps this to NFS_OK/NFS3_OK. */
      cache_status = CACHE_INODE_SUCCESS;
      written_size = 0;
    }
  else
    {
      /* An actual write is to be made, prepare it */
      /* If entry is not cached, cache it now */
      datapol.UseMaxCacheSize = pexport->options & EXPORT_OPTION_MAXCACHESIZE;
      datapol.MaxCacheSize = pexport->MaxCacheSize;

      if((pexport->options & EXPORT_OPTION_USE_DATACACHE) &&
         (cache_content_cache_behaviour(pentry,
                                        &datapol,
                                        (cache_content_client_t *)
                                        pclient->pcontent_client,
                                        &content_status) == CACHE_CONTENT_FULLY_CACHED)
         && (pentry->object.file.pentry_content == NULL))
        {
          /* Entry is not in datacache, but should be in, cache it .
           * Several threads may call this function at the first time and a race condition can occur here
           * in order to avoid this, cache_inode_add_data_cache is "mutex protected"
           * The first call will create the file content cache entry, the further will return
           * with error CACHE_INODE_CACHE_CONTENT_EXISTS which is not a pathological thing here */

          /* Status is set in last argument */
          cache_inode_add_data_cache(pentry, ht, pclient, pcontext, &cache_status);

          if((cache_status != CACHE_INODE_SUCCESS) &&
             (cache_status != CACHE_INODE_CACHE_CONTENT_EXISTS))
            {
              /* If we are here, there was an error */
              if(nfs_RetryableError(cache_status))
                {
                  return NFS_REQ_DROP;
                }

              nfs_SetFailedStatus(pcontext, pexport,
                                  preq->rq_vers,
                                  cache_status,
                                  &pres->res_attr2.status,
                                  &pres->res_write3.status,
                                  NULL, NULL,
                                  pentry,
                                  ppre_attr,
                                  &(pres->res_write3.WRITE3res_u.resfail.file_wcc),
                                  NULL, NULL, NULL);

              return NFS_REQ_OK;
            }
        }

      /* only FILE_SYNC mode is supported */
      /* Set up uio to define the transfer */
      seek_descriptor.whence = FSAL_SEEK_SET;
      seek_descriptor.offset = offset;

      if(cache_inode_rdwr(pentry,
                          CACHE_INODE_WRITE,
                          &seek_descriptor,
                          size,
                          &written_size,
                          &attr,
                          data,
                          &eof_met,
                          ht,
                          pclient,
                          pcontext, stable_flag,
                          &cache_status) == CACHE_INODE_SUCCESS)
        {
          switch (preq->rq_vers)
            {
            case NFS_V2:
              nfs2_FSALattr_To_Fattr(pexport, &attr,
                                     &(pres->res_attr2.ATTR2res_u.attributes));
              pres->res_attr2.status = NFS_OK;
              break;

            case NFS_V3:
              /* Build Weak Cache Coherency data */
              nfs_SetWccData(pcontext, pexport,
                             pentry,
                             ppre_attr,
                             &attr,
                             &(pres->res_write3.WRITE3res_u.resok.file_wcc));

              /* Set the written size */
              pres->res_write3.WRITE3res_u.resok.count = written_size;

              /* How do we commit data ? */
              if(stable_flag == FSAL_SAFE_WRITE_TO_FS)
                {
                  pres->res_write3.WRITE3res_u.resok.committed = FILE_SYNC;
                }
              else
                {
                  pres->res_write3.WRITE3res_u.resok.committed = UNSTABLE;
                }

              /* Set the write verifier */
              memcpy(pres->res_write3.WRITE3res_u.resok.verf,
                     NFS3_write_verifier, sizeof(writeverf3));

              pres->res_write3.status = NFS3_OK;
              break;
            }

          return NFS_REQ_OK;
        }
    }

  LogFullDebug(COMPONENT_NFSPROTO,
               "---> failed write: cache_status=%d", cache_status);

  /* If we are here, there was an error */
  if(nfs_RetryableError(cache_status))
    {
      return NFS_REQ_DROP;
    }

  nfs_SetFailedStatus(pcontext, pexport,
                      preq->rq_vers,
                      cache_status,
                      &pres->res_attr2.status,
                      &pres->res_write3.status,
                      NULL, NULL,
                      pentry,
                      ppre_attr,
                      &(pres->res_write3.WRITE3res_u.resfail.file_wcc),
                      NULL, NULL, NULL);

  return NFS_REQ_OK;
}                               /* nfs_Write.c */