/**
 * remove_nfs4_owner: removes an NFSv4 owner from the owner hash table,
 * releasing its resources once the reference count reaches zero.
 *
 * @param pclient [INOUT] per-client resource pools
 * @param powner  [IN]    owner to remove
 * @param str     [IN]    display string for the owner (logging only)
 */
void remove_nfs4_owner(cache_inode_client_t * pclient,
                       state_owner_t        * powner,
                       const char           * str)
{
  hash_buffer_t           buffkey, old_key, old_value;
  state_nfs4_owner_name_t oname;
  int                     rc;

  /* Rebuild the hash key from the owner's identifying fields */
  oname.son_clientid  = powner->so_owner.so_nfs4_owner.so_clientid;
  oname.son_owner_len = powner->so_owner_len;
  oname.son_islock    = powner->so_type == STATE_LOCK_OWNER_NFSV4;
  memcpy(oname.son_owner_val, powner->so_owner_val, powner->so_owner_len);

  buffkey.pdata = (caddr_t) &oname;
  /* BUG FIX: the key is the owner *name* structure that pdata points at,
   * not the owner itself; the original used sizeof(*powner)
   * (state_owner_t), which is inconsistent with the key stored at insert
   * time. NOTE(review): verify nfs4_owner_Set uses the same length. */
  buffkey.len = sizeof(oname);

  rc = HashTable_DelRef(ht_nfs4_owner, &buffkey,
                        &old_key, &old_value,
                        Hash_dec_state_owner_ref);

  switch(rc)
    {
    case HASHTABLE_SUCCESS:
      /* A lock owner holds a reference on its related open owner */
      if(powner->so_type == STATE_LOCK_OWNER_NFSV4)
        dec_state_owner_ref(powner->so_owner.so_nfs4_owner.so_related_owner,
                            pclient);

      /* Release the owner_name (key) and owner (data) back to
       * appropriate pools */
      LogFullDebug(COMPONENT_STATE, "Free %s", str);

      nfs4_Compound_FreeOne(&powner->so_owner.so_nfs4_owner.so_resp);
      ReleaseToPool(old_value.pdata, &pclient->pool_state_owner);
      ReleaseToPool(old_key.pdata, &pclient->pool_nfs4_owner_name);
      break;

    case HASHTABLE_NOT_DELETED:
      /* ref count didn't end up at 0, don't free. */
      LogDebug(COMPONENT_STATE,
               "HashTable_DelRef didn't reduce refcount to 0 for %s",
               str);
      break;

    default:
      /* some problem occurred */
      LogDebug(COMPONENT_STATE,
               "HashTable_DelRef failed (%s) for %s",
               hash_table_err_to_str(rc), str);
      break;
    }
}
/**
 * nfs_ip_stats_remove: removes an entry from the ip_stats cache.
 *
 * The stored key was heap-allocated at insert time and the stored value
 * came from the preallocated pool; both are given back here.
 *
 * @param ht_ip_stats   [INOUT] the ip_stats hash table
 * @param ipaddr        [IN]    the ip address to be uncached
 * @param ip_stats_pool [INOUT] values pool for the hash table
 *
 * @return IP_STATS_SUCCESS or IP_STATS_NOT_FOUND
 */
int nfs_ip_stats_remove(hash_table_t * ht_ip_stats,
                        sockaddr_t * ipaddr,
                        struct prealloc_pool *ip_stats_pool)
{
  hash_buffer_t key, stored_key, stored_value;
  nfs_ip_stats_t *pstats;

  key.pdata = (caddr_t) ipaddr;
  key.len = sizeof(sockaddr_t);

  /* Do nothing if configuration disables IP_Stats */
  if(nfs_param.core_param.dump_stats_per_client == 0)
    return IP_STATS_SUCCESS;

  if(HashTable_Del(ht_ip_stats, &key, &stored_key, &stored_value)
     != HASHTABLE_SUCCESS)
    return IP_STATS_NOT_FOUND;

  /* Free the Mem_Alloc'd key copy, return the stats record to its pool */
  Mem_Free((sockaddr_t *) stored_key.pdata);
  pstats = (nfs_ip_stats_t *) stored_value.pdata;
  ReleaseToPool(pstats, ip_stats_pool);

  return IP_STATS_SUCCESS;
}                               /* nfs_ip_stats_remove */
/**
 * ___cache_content_invalidate_flushed: LRU callback that invalidates a
 * data-cache entry once it has been flushed to the filesystem.
 *
 * @param plru_entry [IN]    LRU entry wrapping the data-cache entry
 * @param addparam   [INOUT] the cache_content_client_t owning the pool
 *
 * @return LRU_LIST_SET_INVALID when the entry was released,
 *         LRU_LIST_DO_NOT_SET_INVALID when it is not yet synced.
 */
int ___cache_content_invalidate_flushed(LRU_entry_t * plru_entry, void *addparam)
{
  cache_content_entry_t *pentry =
      (cache_content_entry_t *) plru_entry->buffdata.pdata;
  cache_content_client_t *pclient = (cache_content_client_t *) addparam;

  /* Only entries whose data made it to stable storage may be dropped */
  if(pentry->local_fs_entry.sync_state != SYNC_OK)
    return LRU_LIST_DO_NOT_SET_INVALID;

  /* Detach the data-cache entry from its inode under the write lock */
  P_w(&pentry->pentry_inode->lock);
  pentry->pentry_inode->object.file.pentry_content = NULL;
  V_w(&pentry->pentry_inode->lock);

  /* Hand the entry back to the client's pool */
  ReleaseToPool(pentry, &pclient->content_pool);

  return LRU_LIST_SET_INVALID;
}                               /* cache_content_invalidate_flushed */
/* Scrubs a node and returns it to the global node pool (pool is
 * protected by node_pool_mutex). */
static void node_free(fsnode_t * p_node)
{
  /* wipe the record before recycling it */
  memset(p_node, 0, sizeof(fsnode_t));

  P(node_pool_mutex);
  ReleaseToPool(p_node, &node_pool);
  V(node_pool_mutex);
}
/* Scrubs a lookup peer and returns it to the global peer pool (pool is
 * protected by peer_pool_mutex). */
static void peer_free(lookup_peer_t * p_peer)
{
  /* wipe the record before recycling it */
  memset(p_peer, 0, sizeof(lookup_peer_t));

  P(peer_pool_mutex);
  ReleaseToPool(p_peer, &peer_pool);
  V(peer_pool_mutex);
}
/* Returns an ACL to the global ACL pool; a NULL argument is a no-op.
 * The pool is protected by fsal_acl_pool_mutex. */
static void nfs4_acl_free(fsal_acl_t *pacl)
{
  if(pacl == NULL)
    return;

  P(fsal_acl_pool_mutex);
  ReleaseToPool(pacl, &fsal_acl_pool);
  V(fsal_acl_pool_mutex);
}
/**
 * nfs_ip_stats_add: adds a zeroed per-client statistics entry to the
 * ip_stats cache, keyed by a heap-allocated copy of the client address.
 *
 * @param ht_ip_stats   [INOUT] the ip_stats hash table
 * @param ipaddr        [IN]    client address to key the entry by
 * @param ip_stats_pool [INOUT] values pool for the hash table
 *
 * @return IP_STATS_SUCCESS or IP_STATS_INSERT_MALLOC_ERROR
 */
int nfs_ip_stats_add(hash_table_t * ht_ip_stats,
                     sockaddr_t * ipaddr,
                     struct prealloc_pool *ip_stats_pool)
{
  hash_buffer_t buffkey;
  hash_buffer_t buffdata;
  nfs_ip_stats_t *pnfs_ip_stats = NULL;
  sockaddr_t *pipaddr = NULL;

  /* Do nothing if configuration disables IP_Stats */
  if(nfs_param.core_param.dump_stats_per_client == 0)
    return IP_STATS_SUCCESS;

  /* Entry to be cached */
  GetFromPool(pnfs_ip_stats, ip_stats_pool, nfs_ip_stats_t);
  if(pnfs_ip_stats == NULL)
    return IP_STATS_INSERT_MALLOC_ERROR;

  if((pipaddr = (sockaddr_t *) Mem_Alloc(sizeof(sockaddr_t))) == NULL)
    {
      ReleaseToPool(pnfs_ip_stats, ip_stats_pool);
      return IP_STATS_INSERT_MALLOC_ERROR;
    }

  /* The table owns a private copy of the address as its key */
  memcpy(pipaddr, ipaddr, sizeof(sockaddr_t));

  buffkey.pdata = (caddr_t) pipaddr;
  buffkey.len = sizeof(sockaddr_t);

  /* Fresh entry: all counters start at zero */
  pnfs_ip_stats->nb_call = 0;
  pnfs_ip_stats->nb_req_nfs2 = 0;
  pnfs_ip_stats->nb_req_nfs3 = 0;
  pnfs_ip_stats->nb_req_nfs4 = 0;
  pnfs_ip_stats->nb_req_mnt1 = 0;
  pnfs_ip_stats->nb_req_mnt3 = 0;

  memset(pnfs_ip_stats->req_mnt1, 0, MNT_V1_NB_COMMAND * sizeof(int));
  memset(pnfs_ip_stats->req_mnt3, 0, MNT_V3_NB_COMMAND * sizeof(int));
  memset(pnfs_ip_stats->req_nfs2, 0, NFS_V2_NB_COMMAND * sizeof(int));
  memset(pnfs_ip_stats->req_nfs3, 0, NFS_V3_NB_COMMAND * sizeof(int));

  buffdata.pdata = (caddr_t) pnfs_ip_stats;
  buffdata.len = sizeof(nfs_ip_stats_t);

  if(HashTable_Set(ht_ip_stats, &buffkey, &buffdata) != HASHTABLE_SUCCESS)
    {
      /* BUG FIX: on insertion failure the hash table owns neither the
       * key copy nor the stats record -- release both instead of
       * leaking them. */
      Mem_Free(pipaddr);
      ReleaseToPool(pnfs_ip_stats, ip_stats_pool);
      return IP_STATS_INSERT_MALLOC_ERROR;
    }

  return IP_STATS_SUCCESS;
}                               /* nfs_ip_stats_add */
/**
 * cache_inode_release_dirent: releases dirents allocated by
 * cache_inode_readdir_nonamecache.
 *
 * To be called only when the related entry has a policy that prevents
 * names from being cached. The array is populated contiguously, so the
 * first NULL slot marks the end of the allocated entries.
 *
 * @param dirent_array [INOUT] array of dirent pointers to be released
 * @param howmuch      [IN]    size of dirent_array
 * @param pclient      [INOUT] per-client cache resources
 */
void cache_inode_release_dirent(cache_inode_dir_entry_t ** dirent_array,
                                unsigned int howmuch,
                                cache_inode_client_t * pclient)
{
  unsigned int i;

  for(i = 0; i < howmuch && dirent_array[i] != NULL; i++)
    ReleaseToPool(dirent_array[i], &pclient->pool_dir_entry);
}                               /* cache_inode_release_dirent */
/* Returns an ACL hash key's payload to the key pool. Both the buffer
 * wrapper and its pdata may legitimately be NULL, in which case nothing
 * is done. The pool is protected by fsal_acl_key_pool_mutex. */
static void nfs4_release_acldata_key(hash_buffer_t *pkey)
{
  fsal_acl_key_t *pacl_key;

  if(pkey == NULL || pkey->pdata == NULL)
    return;

  pacl_key = (fsal_acl_key_t *) pkey->pdata;

  P(fsal_acl_key_pool_mutex);
  ReleaseToPool(pacl_key, &fsal_acl_key_pool);
  V(fsal_acl_key_pool_mutex);
}
/** * * cache_inode_add_cached_dirent: Adds a directory entry to a cached directory. * * Adds a directory entry to a cached directory. This is use when creating a * new entry through nfs and keep it to the cache. It also allocates and caches * the entry. This function can be call iteratively, within a loop (like what * is done in cache_inode_readdir_populate). In this case, pentry_parent should * be set to the value returned in *pentry_next. This function should never be * used for managing a junction. * * @param pentry_parent [INOUT] cache entry representing the directory to be * managed. * @param name [IN] name of the entry to add. * @param pentry_added [IN] the pentry added to the dirent array * @param pentry_next [OUT] the next pentry to use for next call. * @param ht [IN] hash table used for the cache, unused in this * call. * @param pclient [INOUT] resource allocated by the client for the nfs * management. * @param pstatus [OUT] returned status. * * @return the DIRECTORY that contain this entry in its array_dirent\n * @return NULL if failed, see *pstatus for error's meaning. 
* */ cache_inode_status_t cache_inode_add_cached_dirent( cache_entry_t * pentry_parent, fsal_name_t * pname, cache_entry_t * pentry_added, hash_table_t * ht, cache_inode_dir_entry_t **pnew_dir_entry, cache_inode_client_t * pclient, fsal_op_context_t * pcontext, cache_inode_status_t * pstatus) { fsal_status_t fsal_status; cache_inode_parent_entry_t *next_parent_entry = NULL; cache_inode_dir_entry_t *new_dir_entry = NULL; struct avltree_node *tmpnode; *pstatus = CACHE_INODE_SUCCESS; /* Sanity check */ if(pentry_parent->internal_md.type != DIRECTORY) { *pstatus = CACHE_INODE_BAD_TYPE; return *pstatus; } /* in cache inode avl, we always insert on pentry_parent */ GetFromPool(new_dir_entry, &pclient->pool_dir_entry, cache_inode_dir_entry_t); if(new_dir_entry == NULL) { *pstatus = CACHE_INODE_MALLOC_ERROR; return *pstatus; } fsal_status = FSAL_namecpy(&new_dir_entry->name, pname); if(FSAL_IS_ERROR(fsal_status)) { *pstatus = CACHE_INODE_FSAL_ERROR; return *pstatus; } /* still need the parent list */ GetFromPool(next_parent_entry, &pclient->pool_parent, cache_inode_parent_entry_t); if(next_parent_entry == NULL) { *pstatus = CACHE_INODE_MALLOC_ERROR; return *pstatus; } /* Init the next_parent_entry variable */ next_parent_entry->parent = NULL; next_parent_entry->next_parent = NULL; /* add to avl */ tmpnode = avltree_insert(&new_dir_entry->node_n, &pentry_parent->object.dir.dentries); if (tmpnode) { /* collision, tree not updated--release both pool objects and return * err */ ReleaseToPool(next_parent_entry, &pclient->pool_parent); ReleaseToPool(new_dir_entry, &pclient->pool_dir_entry); *pstatus = CACHE_INODE_ENTRY_EXISTS; return *pstatus; } *pnew_dir_entry = new_dir_entry; /* we're going to succeed */ pentry_parent->object.dir.nbactive++; new_dir_entry->pentry = pentry_added; /* link with the parent entry (insert as first entry) */ next_parent_entry->parent = pentry_parent; next_parent_entry->next_parent = pentry_added->parent_list; pentry_added->parent_list = 
next_parent_entry; return *pstatus; } /* cache_inode_add_cached_dirent */
/**
 * cache_inode_operate_cached_dirent: locates a dirent in the cached
 * dirents and performs an operation (LOOKUP, REMOVE, or RENAME) on it.
 *
 * Searches only the entries present in the directory's AVL tree; entries
 * that exist in the filesystem but were never cached (e.g. no readdir was
 * ever performed) will not be found. On any successful mutating operation
 * the cached readdir cookies are invalidated, since directory mutation
 * makes outstanding cookies stale.
 *
 * @param pentry_parent [IN]    directory entry to be searched
 * @param pname         [IN]    name of the searched entry
 * @param newname       [IN]    new name when the operation is RENAME
 * @param pclient       [INOUT] per-client cache resources
 * @param dirent_op     [IN]    operation (LOOKUP, REMOVE, or RENAME)
 * @param pstatus       [OUT]   returned status
 *
 * @return the found entry if it exists, NULL if it is not in the dirent
 *         cache. NOTE: REMOVE also returns the (now removed) entry so the
 *         caller can fix up its parent list.
 */
cache_entry_t *cache_inode_operate_cached_dirent(cache_entry_t * pentry_parent,
                                                 fsal_name_t * pname,
                                                 fsal_name_t * newname,
                                                 cache_inode_client_t * pclient,
                                                 cache_inode_dirent_op_t dirent_op,
                                                 cache_inode_status_t * pstatus)
{
  cache_entry_t *pentry = NULL;
  cache_inode_dir_entry_t dirent_key[1], *dirent;  /* stack key for AVL lookup */
  struct avltree_node *dirent_node, *tmpnode;
  LRU_List_state_t vstate;

  /* Directory mutation generally invalidates outstanding
   * readdirs, hence any cached cookies, so in these cases we
   * clear the cookie avl */

  /* Set the return default to CACHE_INODE_SUCCESS */
  *pstatus = CACHE_INODE_SUCCESS;

  /* Sanity check */
  if(pentry_parent->internal_md.type != DIRECTORY)
    {
      *pstatus = CACHE_INODE_BAD_TYPE;
      return NULL;
    }

  /* If no active entry, do nothing */
  if (pentry_parent->object.dir.nbactive == 0)
    {
      *pstatus = CACHE_INODE_NOT_FOUND;
      return NULL;
    }

  /* Build the lookup key: the AVL tree is ordered by dirent name */
  FSAL_namecpy(&dirent_key->name, pname);
  dirent_node = avltree_lookup(&dirent_key->node_n,
                               &pentry_parent->object.dir.dentries);
  if (! dirent_node)
    {
      *pstatus = CACHE_INODE_NOT_FOUND;  /* Right error code (see above)? */
      return NULL;
    }

  /* unpack avl node */
  dirent = avltree_container_of(dirent_node, cache_inode_dir_entry_t,
                                node_n);

  /* check state of cached dirent -- a STALE entry is still surfaced
   * (with a debug trace) so the caller can deal with it */
  vstate = dirent->pentry->internal_md.valid_state;
  if (vstate == VALID || vstate == STALE)
    {
      if (vstate == STALE)
        LogDebug(COMPONENT_NFS_READDIR,
                 "DIRECTORY: found STALE cache entry");

      /* Entry was found */
      pentry = dirent->pentry;
      *pstatus = CACHE_INODE_SUCCESS;
    }

  /* Did we find something */
  if(pentry != NULL)
    {
      /* Yes, we did ! */
      switch (dirent_op)
        {
        case CACHE_INODE_DIRENT_OP_REMOVE:
          /* Detach from the tree, then recycle the dirent */
          avltree_remove(&dirent->node_n,
                         &pentry_parent->object.dir.dentries);
          /* release to pool */
          ReleaseToPool(dirent, &pclient->pool_dir_entry);
          pentry_parent->object.dir.nbactive--;
          *pstatus = CACHE_INODE_SUCCESS;
          break;

        case CACHE_INODE_DIRENT_OP_RENAME:
          /* change the installed inode only the rename can succeed */
          FSAL_namecpy(&dirent_key->name, newname);
          tmpnode = avltree_lookup(&dirent_key->node_n,
                                   &pentry_parent->object.dir.dentries);
          if (tmpnode)
            {
              /* rename would cause a collision */
              *pstatus = CACHE_INODE_ENTRY_EXISTS;
            }
          else
            {
              /* remove, rename, and re-insert the object with new keys */
              avltree_remove(&dirent->node_n,
                             &pentry_parent->object.dir.dentries);
              FSAL_namecpy(&dirent->name, newname);
              tmpnode = avltree_insert(&dirent->node_n,
                                       &pentry_parent->object.dir.dentries);
              if (tmpnode)
                {
                  /* collision, tree state unchanged--this won't happen */
                  *pstatus = CACHE_INODE_ENTRY_EXISTS;
                  /* still, try to revert the change in place */
                  FSAL_namecpy(&dirent->name, pname);
                  tmpnode = avltree_insert(&dirent->node_n,
                                           &pentry_parent->object.dir.dentries);
                }
              else
                {
                  *pstatus = CACHE_INODE_SUCCESS;
                }
            }                   /* !found */
          break;

        default:
          /* Should never occurs, in any case, it cost nothing to handle
           * this situation */
          *pstatus = CACHE_INODE_INVALID_ARGUMENT;
          break;
        }                       /* switch */
    }

  if (*pstatus == CACHE_INODE_SUCCESS)
    {
      /* As noted, if a mutating operation was performed, we must
       * invalidate cached cookies. */
      cache_inode_release_dirents(pentry_parent, pclient,
                                  CACHE_INODE_AVL_COOKIES);

      /* Someone has to repopulate the avl cookie cache. Populating it
       * lazily is ok, but the logic to do it makes supporting simultaneous
       * readers more involved. Another approach would be to do it in the
       * background, scheduled from here. */
    }

  return pentry;
}                               /* cache_inode_operate_cached_dirent */
/**
 * cache_inode_clean_internal: removes a pentry from the cache hash table
 * and releases related resources (LRU gc entry, hash keys, parent list).
 *
 * @param to_remove_entry [IN]    entry to be deleted from cache
 * @param ht              [IN]    the cache hash table
 * @param pclient         [INOUT] per-client cache resources
 *
 * @return CACHE_INODE_SUCCESS, or an error status when the FSAL handle
 *         cannot be obtained, LRU invalidation fails, or the hash state
 *         is inconsistent.
 */
cache_inode_status_t cache_inode_clean_internal(cache_entry_t * to_remove_entry,
                                                hash_table_t * ht,
                                                cache_inode_client_t * pclient)
{
  fsal_handle_t *pfsal_handle_remove;
  cache_inode_parent_entry_t *parent_iter = NULL;
  cache_inode_parent_entry_t *parent_iter_next = NULL;
  cache_inode_fsal_data_t fsaldata;
  cache_inode_status_t status;
  hash_buffer_t key, old_key, old_value;
  int rc;

  memset( (char *)&fsaldata, 0, sizeof( fsaldata ) ) ;

  /* The FSAL handle is needed to rebuild the entry's hash key */
  if((pfsal_handle_remove =
      cache_inode_get_fsal_handle(to_remove_entry, &status)) == NULL)
    {
      return status;
    }

  /* Invalidate the related LRU gc entry (no more required) */
  if(to_remove_entry->gc_lru_entry != NULL)
    {
      if(LRU_invalidate(to_remove_entry->gc_lru,
                        to_remove_entry->gc_lru_entry) != LRU_LIST_SUCCESS)
        {
          return CACHE_INODE_LRU_ERROR;
        }
    }

  /* delete the entry from the cache */
  fsaldata.handle = *pfsal_handle_remove;

  /* XXX always DIR_START */
  fsaldata.cookie = DIR_START;

  /* Build a transient key for the delete; it must be released on every
   * path below via cache_inode_release_fsaldata_key */
  if(cache_inode_fsaldata_2_key(&key, &fsaldata, pclient))
    {
      return CACHE_INODE_INCONSISTENT_ENTRY;
    }

  /* use the key to delete the entry */
  rc = HashTable_Del(ht, &key, &old_key, &old_value);

  if(rc)
    LogCrit(COMPONENT_CACHE_INODE,
            "HashTable_Del error %d in cache_inode_clean_internal", rc);

  /* NO_SUCH_KEY is tolerated: the entry may already be gone */
  if((rc != HASHTABLE_SUCCESS) && (rc != HASHTABLE_ERROR_NO_SUCH_KEY))
    {
      cache_inode_release_fsaldata_key(&key, pclient);
      return CACHE_INODE_INCONSISTENT_ENTRY;
    }

  /* release the key that was stored in hash table */
  if(rc != HASHTABLE_ERROR_NO_SUCH_KEY)
    {
      cache_inode_release_fsaldata_key(&old_key, pclient);

      /* Sanity check: old_value.pdata is expected to be equal to pentry,
       * and is released later in this function */
      if((cache_entry_t *) old_value.pdata != to_remove_entry)
        {
          LogCrit(COMPONENT_CACHE_INODE,
                  "cache_inode_remove: unexpected pdata %p from hash table (pentry=%p)",
                  old_value.pdata, to_remove_entry);
        }
    }

  /* release the key used for hash query */
  cache_inode_release_fsaldata_key(&key, pclient);

  /* Free the parent list entries */
  parent_iter = to_remove_entry->parent_list;
  while(parent_iter != NULL)
    {
      parent_iter_next = parent_iter->next_parent;
      ReleaseToPool(parent_iter, &pclient->pool_parent);
      parent_iter = parent_iter_next;
    }

  return CACHE_INODE_SUCCESS;
}                               /* cache_inode_clean_internal */
cache_entry_t *cache_inode_get_located(cache_inode_fsal_data_t * pfsdata, cache_entry_t * plocation, cache_inode_policy_t policy, fsal_attrib_list_t * pattr, hash_table_t * ht, cache_inode_client_t * pclient, fsal_op_context_t * pcontext, cache_inode_status_t * pstatus) { hash_buffer_t key, value; cache_entry_t *pentry = NULL; fsal_status_t fsal_status; cache_inode_create_arg_t create_arg; cache_inode_file_type_t type; int hrc = 0; fsal_attrib_list_t fsal_attributes; cache_inode_fsal_data_t *ppoolfsdata = NULL; memset(&create_arg, 0, sizeof(create_arg)); /* Set the return default to CACHE_INODE_SUCCESS */ *pstatus = CACHE_INODE_SUCCESS; /* stats */ /* cache_invalidate calls this with no context or client */ if (pclient) { pclient->stat.nb_call_total += 1; pclient->stat.func_stats.nb_call[CACHE_INODE_GET] += 1; } /* Turn the input to a hash key */ if(cache_inode_fsaldata_2_key(&key, pfsdata, pclient)) { *pstatus = CACHE_INODE_UNAPPROPRIATED_KEY; /* stats */ /* cache_invalidate calls this with no context or client */ if (pclient) { pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_GET] += 1; ppoolfsdata = (cache_inode_fsal_data_t *) key.pdata; ReleaseToPool(ppoolfsdata, &pclient->pool_key); } return NULL; } switch (hrc = HashTable_Get(ht, &key, &value)) { case HASHTABLE_SUCCESS: /* Entry exists in the cache and was found */ pentry = (cache_entry_t *) value.pdata; /* return attributes additionally */ *pattr = pentry->attributes; if ( !pclient ) { /* invalidate. Just return it to mark it stale and go on. */ return( pentry ); } break; case HASHTABLE_ERROR_NO_SUCH_KEY: if ( !pclient ) { /* invalidate. 
Just return */ return( NULL ); } /* Cache miss, allocate a new entry */ /* XXX I do not think this can happen with avl dirent cache */ if(pfsdata->cookie != DIR_START) { /* added for sanity check */ LogDebug(COMPONENT_CACHE_INODE, "cache_inode_get: pfsdata->cookie != DIR_START (=%"PRIu64") on object whose type is %u", pfsdata->cookie, cache_inode_fsal_type_convert(fsal_attributes.type)); pfsdata->cookie = DIR_START; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); /* redo the call */ return cache_inode_get(pfsdata, policy, pattr, ht, pclient, pcontext, pstatus); } /* First, call FSAL to know what the object is */ fsal_attributes.asked_attributes = pclient->attrmask; fsal_status = FSAL_getattrs(&pfsdata->handle, pcontext, &fsal_attributes); if(FSAL_IS_ERROR(fsal_status)) { *pstatus = cache_inode_error_convert(fsal_status); LogDebug(COMPONENT_CACHE_INODE, "cache_inode_get: cache_inode_status=%u fsal_status=%u,%u ", *pstatus, fsal_status.major, fsal_status.minor); if(fsal_status.major == ERR_FSAL_STALE) { char handle_str[256]; snprintHandle(handle_str, 256, &pfsdata->handle); LogEvent(COMPONENT_CACHE_INODE, "cache_inode_get: Stale FSAL File Handle %s, fsal_status=(%u,%u)", handle_str, fsal_status.major, fsal_status.minor); *pstatus = CACHE_INODE_FSAL_ESTALE; } /* stats */ pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_GET] += 1; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); return NULL; } /* The type has to be set in the attributes */ if(!FSAL_TEST_MASK(fsal_attributes.supported_attributes, FSAL_ATTR_TYPE)) { *pstatus = CACHE_INODE_FSAL_ERROR; /* stats */ pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_GET] += 1; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); return NULL; } /* Get the cache_inode file type */ type = cache_inode_fsal_type_convert(fsal_attributes.type); if(type == SYMBOLIC_LINK) { if( CACHE_INODE_KEEP_CONTENT( policy ) ) { FSAL_CLEAR_MASK(fsal_attributes.asked_attributes); 
FSAL_SET_MASK(fsal_attributes.asked_attributes, pclient->attrmask); fsal_status = FSAL_readlink(&pfsdata->handle, pcontext, &create_arg.link_content, &fsal_attributes); } else { fsal_status.major = ERR_FSAL_NO_ERROR ; fsal_status.minor = 0 ; } if(FSAL_IS_ERROR(fsal_status)) { *pstatus = cache_inode_error_convert(fsal_status); /* stats */ pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_GET] += 1; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); if(fsal_status.major == ERR_FSAL_STALE) { cache_inode_status_t kill_status; LogEvent(COMPONENT_CACHE_INODE, "cache_inode_get: Stale FSAL File Handle detected for pentry = %p, fsal_status=(%u,%u)", pentry, fsal_status.major, fsal_status.minor); if(cache_inode_kill_entry(pentry, NO_LOCK, ht, pclient, &kill_status) != CACHE_INODE_SUCCESS) LogCrit(COMPONENT_CACHE_INODE, "cache_inode_get: Could not kill entry %p, status = %u, fsal_status=(%u,%u)", pentry, kill_status, fsal_status.major, fsal_status.minor); *pstatus = CACHE_INODE_FSAL_ESTALE; } return NULL; } } /* Add the entry to the cache */ if ( type == 1) LogCrit(COMPONENT_CACHE_INODE,"inode get"); if((pentry = cache_inode_new_entry( pfsdata, &fsal_attributes, type, policy, &create_arg, NULL, /* never used to add a new DIR_CONTINUE within this function */ ht, pclient, pcontext, FALSE, /* This is a population, not a creation */ pstatus ) ) == NULL ) { /* stats */ pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_GET] += 1; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); return NULL; } /* Set the returned attributes */ *pattr = fsal_attributes; /* Now, exit the switch/case and returns */ break; default: /* This should not happened */ *pstatus = CACHE_INODE_INVALID_ARGUMENT; LogCrit(COMPONENT_CACHE_INODE, "cache_inode_get returning CACHE_INODE_INVALID_ARGUMENT - this should not have happened"); if ( !pclient ) { /* invalidate. 
Just return */ return( NULL ); } /* stats */ pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_GET] += 1; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); return NULL; break; } /* Want to ASSERT pclient at this point */ *pstatus = CACHE_INODE_SUCCESS; if (pentry->object.symlink != NULL) { int stop_here; stop_here = 1; if (stop_here) { stop_here = 2; } } /* valid the found entry, if this is not feasable, returns nothing to the client */ if( plocation != NULL ) { if( plocation != pentry ) { P_w(&pentry->lock); if((*pstatus = cache_inode_valid(pentry, CACHE_INODE_OP_GET, pclient)) != CACHE_INODE_SUCCESS) { V_w(&pentry->lock); pentry = NULL; } V_w(&pentry->lock); } } /* stats */ pclient->stat.func_stats.nb_success[CACHE_INODE_GET] += 1; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); return pentry; } /* cache_inode_get_located */
/**
 * nfs_dupreq_add_not_finished: registers a request in the duplicate-request
 * cache, marked as "being processed".
 *
 * Inserts a new entry keyed by (client address, xid). If an entry with the
 * same key already exists, reports whether the original request is still in
 * flight (DUPREQ_BEING_PROCESSED) or already completed (in which case the
 * cached result is copied into *res_nfs and DUPREQ_ALREADY_EXISTS is
 * returned).
 *
 * @param xid         [IN]  RPC transaction id of the request
 * @param ptr_req     [IN]  the svc_req being processed
 * @param xprt        [IN]  transport, used to obtain the client address
 * @param dupreq_pool [INOUT] pool for dupreq entries
 * @param res_nfs     [OUT] receives the cached result on ALREADY_EXISTS
 *
 * @return DUPREQ_SUCCESS, DUPREQ_BEING_PROCESSED, DUPREQ_ALREADY_EXISTS,
 *         DUPREQ_NOT_FOUND or DUPREQ_INSERT_MALLOC_ERROR
 */
int nfs_dupreq_add_not_finished(long xid,
                                struct svc_req *ptr_req,
                                SVCXPRT *xprt,
                                struct prealloc_pool *dupreq_pool,
                                nfs_res_t *res_nfs)
{
  hash_buffer_t buffkey;
  hash_buffer_t buffval;
  hash_buffer_t buffdata;
  dupreq_entry_t *pdupreq = NULL;
  int status = 0;
  dupreq_key_t *pdupkey = NULL;

  /* Entry to be cached */
  GetFromPool(pdupreq, dupreq_pool, dupreq_entry_t);
  if(pdupreq == NULL)
    return DUPREQ_INSERT_MALLOC_ERROR;

  if((pdupkey = (dupreq_key_t *) Mem_Alloc(sizeof(dupreq_key_t))) == NULL)
    {
      ReleaseToPool(pdupreq, dupreq_pool);
      return DUPREQ_INSERT_MALLOC_ERROR;
    }

  /* Get the socket address for the key and the request */
  if(copy_xprt_addr(&pdupkey->addr, xprt) == 0 ||
     copy_xprt_addr(&pdupreq->addr, xprt) == 0)
    {
      Mem_Free(pdupkey);
      ReleaseToPool(pdupreq, dupreq_pool);
      return DUPREQ_INSERT_MALLOC_ERROR;
    }

  pdupkey->xid = xid;
  pdupreq->xid = xid;

  /* Checksum the request */
  pdupkey->checksum = 0;
  pdupreq->checksum = 0;

  /* I have to keep an integer as key, I wil use the pointer
   * buffkey->pdata for this, this also means that buffkey->len will be 0 */
  buffkey.pdata = (caddr_t) pdupkey;
  buffkey.len = sizeof(dupreq_key_t);

  /* I build the data with the request pointer that should be in state
   * 'IN USE' */
  pdupreq->rq_prog = ptr_req->rq_prog;
  pdupreq->rq_vers = ptr_req->rq_vers;
  pdupreq->rq_proc = ptr_req->rq_proc;
  pdupreq->timestamp = time(NULL);
  pdupreq->processing = 1;
  buffdata.pdata = (caddr_t) pdupreq;
  buffdata.len = sizeof(dupreq_entry_t);

  LogDupReq("Add Not Finished", &pdupreq->addr, pdupreq->xid,
            pdupreq->rq_prog);

  status = HashTable_Test_And_Set(ht_dupreq, &buffkey, &buffdata,
                                  HASHTABLE_SET_HOW_SET_NO_OVERWRITE);

  if (status == HASHTABLE_ERROR_KEY_ALREADY_EXISTS)
    {
      if(HashTable_Get(ht_dupreq, &buffkey, &buffval) == HASHTABLE_SUCCESS)
        {
          P(((dupreq_entry_t *)buffval.pdata)->dupreq_mutex);
          if ( ((dupreq_entry_t *)buffval.pdata)->processing == 1)
            {
              V(((dupreq_entry_t *)buffval.pdata)->dupreq_mutex);
              status = DUPREQ_BEING_PROCESSED;
            }
          else
            {
              *res_nfs = ((dupreq_entry_t *) buffval.pdata)->res_nfs;
              V(((dupreq_entry_t *)buffval.pdata)->dupreq_mutex);
              status = DUPREQ_ALREADY_EXISTS;
            }
        }
      else
        status = DUPREQ_NOT_FOUND;
    }
  else if (status == HASHTABLE_INSERT_MALLOC_ERROR)
    status = DUPREQ_INSERT_MALLOC_ERROR;
  else
    status = DUPREQ_SUCCESS;

  if (status != DUPREQ_SUCCESS)
    {
      /* BUG FIX: on every non-success path the hash table did not take
       * ownership of the key, so pdupkey must be freed along with the
       * entry -- the original leaked it. */
      Mem_Free(pdupkey);
      ReleaseToPool(pdupreq, dupreq_pool);
    }

  return status;
}                               /* nfs_dupreq_add_not_finished */
/**
 * create_nfs4_owner: allocates and initializes a new NFSv4 state owner
 * and registers it in the owner hash table.
 *
 * @param pclient       [INOUT] per-client resource pools
 * @param pname         [IN]    owner name used as the hash key
 * @param type          [IN]    owner type (open owner, lock owner, ...)
 * @param related_owner [IN]    related owner (e.g. the open owner of a
 *                              lock owner), may be NULL
 * @param init_seqid    [IN]    initial sequence id for the owner
 *
 * @return the new owner, or NULL on allocation/registration failure
 */
state_owner_t *create_nfs4_owner(cache_inode_client_t    * pclient,
                                 state_nfs4_owner_name_t * pname,
                                 state_owner_type_t        type,
                                 state_owner_t           * related_owner,
                                 unsigned int              init_seqid)
{
  state_owner_t           * powner;
  state_nfs4_owner_name_t * powner_name;

  /* This lock owner is not known yet, allocated and set up a new one */
  GetFromPool(powner, &pclient->pool_state_owner, state_owner_t);
  if(powner == NULL)
    return NULL;

  GetFromPool(powner_name, &pclient->pool_nfs4_owner_name,
              state_nfs4_owner_name_t);
  if(powner_name == NULL)
    {
      ReleaseToPool(powner, &pclient->pool_state_owner);
      return NULL;
    }

  /* The hash table keeps its own copy of the name as the key */
  *powner_name = *pname;

  /* set up the content of the open_owner */
  memset(powner, 0, sizeof(*powner));
  powner->so_type                                 = type;
  powner->so_owner.so_nfs4_owner.so_seqid         = init_seqid;
  powner->so_owner.so_nfs4_owner.so_related_owner = related_owner;
  powner->so_owner.so_nfs4_owner.so_clientid      = pname->son_clientid;
  powner->so_owner_len                            = pname->son_owner_len;
  powner->so_owner.so_nfs4_owner.so_resp.resop    = NFS4_OP_ILLEGAL;
  powner->so_owner.so_nfs4_owner.so_args.argop    = NFS4_OP_ILLEGAL;
  powner->so_refcount                             = 1;

  init_glist(&powner->so_lock_list);

  /* NOTE(review): assumes son_owner_len < sizeof(so_owner_val) so the
   * terminating NUL fits -- confirm callers validate the length. */
  memcpy(powner->so_owner_val, pname->son_owner_val, pname->son_owner_len);
  powner->so_owner_val[powner->so_owner_len] = '\0';

  /* BUG FIX: pthread_mutex_init() returns 0 on success and a positive
   * errno value on failure; it never returns -1, so the original
   * "== -1" test could never detect a failed initialization. */
  if(pthread_mutex_init(&powner->so_mutex, NULL) != 0)
    {
      ReleaseToPool(powner, &pclient->pool_state_owner);
      ReleaseToPool(powner_name, &pclient->pool_nfs4_owner_name);
      return NULL;
    }

  if(!nfs4_owner_Set(powner_name, powner))
    {
      /* clean up the mutex created above before recycling the owner */
      pthread_mutex_destroy(&powner->so_mutex);
      ReleaseToPool(powner, &pclient->pool_state_owner);
      ReleaseToPool(powner_name, &pclient->pool_nfs4_owner_name);
      return NULL;
    }

  if(isFullDebug(COMPONENT_STATE))
    {
      char str[HASHTABLE_DISPLAY_STRLEN];

      DisplayOwner(powner, str);
      LogFullDebug(COMPONENT_STATE, "New Open Owner %s", str);
    }

  return powner;
}
/** * * cache_inode_remove_cached_dirent: Removes a directory entry to a cached * directory. * * Removes a directory entry to a cached directory. No MT safety managed here !! * * @param pentry_parent [INOUT] cache entry representing the directory to be * managed. * @param name [IN] name of the entry to remove. * @param ht [IN] hash table used for the cache, unused in this call. * @param pclient [INOUT] ressource allocated by the client for the nfs * management. * @param pstatus [OUT] returned status. * * @return the same as *pstatus * */ cache_inode_status_t cache_inode_remove_cached_dirent( cache_entry_t * pentry_parent, fsal_name_t * pname, hash_table_t * ht, cache_inode_client_t * pclient, cache_inode_status_t * pstatus) { cache_entry_t *removed_pentry = NULL; cache_inode_parent_entry_t *parent_iter = NULL; cache_inode_parent_entry_t *previous_iter = NULL; int found = 0; /* Set the return default to CACHE_INODE_SUCCESS */ *pstatus = CACHE_INODE_SUCCESS; /* Sanity check */ if(pentry_parent->internal_md.type != DIRECTORY) { *pstatus = CACHE_INODE_BAD_TYPE; return *pstatus; } /* BUGAZOMEU: Ne pas oublier de jarter un dir dont toutes les entrees sont * inactives */ if((removed_pentry = cache_inode_operate_cached_dirent(pentry_parent, pname, NULL, pclient, CACHE_INODE_DIRENT_OP_REMOVE, pstatus)) == NULL) return *pstatus; /* Remove the parent entry from the entry whose dirent is removed */ for(previous_iter = NULL, parent_iter = removed_pentry->parent_list; (parent_iter != NULL) && (parent_iter->parent != NULL); previous_iter = parent_iter, parent_iter = parent_iter->next_parent) { if(parent_iter->parent == pentry_parent) { found = 1; break; } } /* Check for pentry cache inconsistency */ if(!found) { *pstatus = CACHE_INODE_INCONSISTENT_ENTRY; } else { if(previous_iter == NULL) { /* this is the first parent */ removed_pentry->parent_list = parent_iter->next_parent; } else { /* This is not the first parent */ previous_iter->next_parent = parent_iter->next_parent; } /* 
It is now time to put parent_iter back to its pool */ ReleaseToPool(parent_iter, &pclient->pool_parent); } return CACHE_INODE_SUCCESS; } /* cache_inode_remove_cached_dirent */
/**
 * _remove_dupreq: removes a duplicate-request entry from the dupreq hash
 * table and returns it to its pool, freeing the cached response when the
 * request completed successfully.
 *
 * @param buffkey        [IN]    hash key identifying the cached request.
 * @param pdupreq        [IN]    the cached request entry to dispose of.
 * @param dupreq_pool    [INOUT] pool the entry is released back to.
 * @param nfs_req_status [IN]    status of the original request; the cached
 *                               response is freed only for NFS_REQ_OK.
 *
 * @return 1 if the hash delete failed, 0 if the entry had already been
 *         garbage-collected by another thread, DUPREQ_SUCCESS otherwise.
 */
static int _remove_dupreq(hash_buffer_t *buffkey, dupreq_entry_t *pdupreq,
                          struct prealloc_pool *dupreq_pool, int nfs_req_status)
{
  int rc;
  /* Fix: resolve the function descriptor through a pointer that stays NULL
   * when the program/version/procedure cannot be identified, instead of
   * pre-seeding with nfs2_func_desc[0] and then calling a possibly wrong
   * free_function on a response it never encoded (memory corruption risk). */
  const nfs_function_desc_t *pfuncdesc = NULL;

  rc = HashTable_Del(ht_dupreq, buffkey, NULL, NULL);

  /* if hashtable no such key => dupreq garbaged by another thread */
  if(rc != HASHTABLE_SUCCESS && rc != HASHTABLE_ERROR_NO_SUCH_KEY)
    return 1;                   /* Error while cleaning */
  else if(rc == HASHTABLE_ERROR_NO_SUCH_KEY)
    return 0;                   /* don't free the dupreq twice */

  /* Locate the function descriptor associated with this cached request */
  if(pdupreq->rq_prog == nfs_param.core_param.program[P_NFS])
    {
      switch (pdupreq->rq_vers)
        {
        case NFS_V2:
          pfuncdesc = &nfs2_func_desc[pdupreq->rq_proc];
          break;

        case NFS_V3:
          pfuncdesc = &nfs3_func_desc[pdupreq->rq_proc];
          break;

        case NFS_V4:
          pfuncdesc = &nfs4_func_desc[pdupreq->rq_proc];
          break;

        default:
          /* We should never go there (this situation is filtered in nfs_rpc_getreq) */
          LogMajor(COMPONENT_DUPREQ,
                   "NFS Protocol version %d unknown in dupreq_gc",
                   (int)pdupreq->rq_vers);
        }
    }
  else if(pdupreq->rq_prog == nfs_param.core_param.program[P_MNT])
    {
      switch (pdupreq->rq_vers)
        {
        case MOUNT_V1:
          pfuncdesc = &mnt1_func_desc[pdupreq->rq_proc];
          break;

        case MOUNT_V3:
          pfuncdesc = &mnt3_func_desc[pdupreq->rq_proc];
          break;

        default:
          /* We should never go there (this situation is filtered in nfs_rpc_getreq) */
          LogMajor(COMPONENT_DUPREQ,
                   "MOUNT Protocol version %d unknown in dupreq_gc",
                   (int)pdupreq->rq_vers);
          break;
        }                       /* switch( pdupreq->vers ) */
    }
#ifdef _USE_NLM
  else if(pdupreq->rq_prog == nfs_param.core_param.program[P_NLM])
    {
      switch (pdupreq->rq_vers)
        {
        case NLM4_VERS:
          pfuncdesc = &nlm4_func_desc[pdupreq->rq_proc];
          break;
        }                       /* switch( pdupreq->vers ) */
    }
#endif                          /* _USE_NLM */
#ifdef _USE_QUOTA
  else if(pdupreq->rq_prog == nfs_param.core_param.program[P_RQUOTA])
    {
      switch (pdupreq->rq_vers)
        {
        case RQUOTAVERS:
          pfuncdesc = &rquota1_func_desc[pdupreq->rq_proc];
          break;

        case EXT_RQUOTAVERS:
          pfuncdesc = &rquota2_func_desc[pdupreq->rq_proc];
          break;
        }                       /* switch( pdupreq->vers ) */
    }
#endif
  else
    {
      /* We should never go there (this situation is filtered in nfs_rpc_getreq) */
      LogMajor(COMPONENT_DUPREQ, "protocol %d is not managed", (int)pdupreq->rq_prog);
    }

  /* Call the free function, but only if a matching descriptor was found;
   * otherwise leak-log rather than free through a mismatched XDR routine. */
  if(nfs_req_status == NFS_REQ_OK)
    {
      if(pfuncdesc != NULL)
        pfuncdesc->free_function(&(pdupreq->res_nfs));
      else
        LogMajor(COMPONENT_DUPREQ,
                 "No function descriptor for cached request, response not freed");
    }

  /* Send the entry back to the pool */
  ReleaseToPool(pdupreq, dupreq_pool);

  return DUPREQ_SUCCESS;
}
cache_entry_t *cache_inode_get_located(cache_inode_fsal_data_t * pfsdata, cache_entry_t * plocation, fsal_attrib_list_t * pattr, hash_table_t * ht, cache_inode_client_t * pclient, fsal_op_context_t * pcontext, cache_inode_status_t * pstatus) { hash_buffer_t key, value; cache_entry_t *pentry = NULL; fsal_status_t fsal_status; cache_inode_create_arg_t create_arg; cache_inode_file_type_t type; int hrc = 0; fsal_attrib_list_t fsal_attributes; cache_inode_fsal_data_t *ppoolfsdata = NULL; /* Set the return default to CACHE_INODE_SUCCESS */ *pstatus = CACHE_INODE_SUCCESS; /* stats */ pclient->stat.nb_call_total += 1; pclient->stat.func_stats.nb_call[CACHE_INODE_GET] += 1; /* Turn the input to a hash key */ if(cache_inode_fsaldata_2_key(&key, pfsdata, pclient)) { *pstatus = CACHE_INODE_UNAPPROPRIATED_KEY; /* stats */ pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_GET] += 1; ppoolfsdata = (cache_inode_fsal_data_t *) key.pdata; ReleaseToPool(ppoolfsdata, &pclient->pool_key); return NULL; } switch (hrc = HashTable_Get(ht, &key, &value)) { case HASHTABLE_SUCCESS: /* Entry exists in the cache and was found */ pentry = (cache_entry_t *) value.pdata; /* return attributes additionally */ cache_inode_get_attributes(pentry, pattr); break; case HASHTABLE_ERROR_NO_SUCH_KEY: /* Cache miss, allocate a new entry */ /* If we ask for a dir cont (in this case pfsdata.cookie != FSAL_DIR_BEGINNING, we have * a client who performs a readdir in the middle of a directory, when the direcctories * have been garbbage. 
we must search for the DIR_BEGIN related to this DIR_CONT */ if(pfsdata->cookie != DIR_START) { /* added for sanity check */ LogFullDebug(COMPONENT_CACHE_INODE, "cache_inode_get: pfsdata->cookie != DIR_START (=%u) on object whose type is %u", pfsdata->cookie, cache_inode_fsal_type_convert(fsal_attributes.type)); pfsdata->cookie = DIR_START; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); /* redo the call */ return cache_inode_get(pfsdata, pattr, ht, pclient, pcontext, pstatus); } /* First, call FSAL to know what the object is */ fsal_attributes.asked_attributes = pclient->attrmask; fsal_status = FSAL_getattrs(&pfsdata->handle, pcontext, &fsal_attributes); if(FSAL_IS_ERROR(fsal_status)) { *pstatus = cache_inode_error_convert(fsal_status); LogDebug(COMPONENT_CACHE_INODE, "cache_inode_get: cache_inode_status=%u fsal_status=%u,%u ", *pstatus, fsal_status.major, fsal_status.minor); if(fsal_status.major == ERR_FSAL_STALE) { char handle_str[256]; snprintHandle(handle_str, 256, &pfsdata->handle); LogEvent(COMPONENT_CACHE_INODE, "cache_inode_get: Stale FSAL File Handle %s", handle_str); *pstatus = CACHE_INODE_FSAL_ESTALE; } /* stats */ pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_GET] += 1; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); return NULL; } /* The type has to be set in the attributes */ if(!FSAL_TEST_MASK(fsal_attributes.supported_attributes, FSAL_ATTR_TYPE)) { *pstatus = CACHE_INODE_FSAL_ERROR; /* stats */ pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_GET] += 1; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); return NULL; } /* Get the cache_inode file type */ type = cache_inode_fsal_type_convert(fsal_attributes.type); if(type == SYMBOLIC_LINK) { FSAL_CLEAR_MASK(fsal_attributes.asked_attributes); FSAL_SET_MASK(fsal_attributes.asked_attributes, pclient->attrmask); fsal_status = FSAL_readlink(&pfsdata->handle, pcontext, &create_arg.link_content, &fsal_attributes); 
if(FSAL_IS_ERROR(fsal_status)) { *pstatus = cache_inode_error_convert(fsal_status); /* stats */ pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_GET] += 1; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); if(fsal_status.major == ERR_FSAL_STALE) { cache_inode_status_t kill_status; LogEvent(COMPONENT_CACHE_INODE, "cache_inode_get: Stale FSAL File Handle detected for pentry = %p", pentry); if(cache_inode_kill_entry(pentry, ht, pclient, &kill_status) != CACHE_INODE_SUCCESS) LogCrit(COMPONENT_CACHE_INODE, "cache_inode_get: Could not kill entry %p, status = %u", pentry, kill_status); *pstatus = CACHE_INODE_FSAL_ESTALE; } return NULL; } } /* Add the entry to the cache */ if((pentry = cache_inode_new_entry(pfsdata, &fsal_attributes, type, &create_arg, NULL, /* never used to add a new DIR_CONTINUE within the scope of this function */ ht, pclient, pcontext, FALSE, /* This is a population, not a creation */ pstatus)) == NULL) { /* stats */ pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_GET] += 1; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); return NULL; } /* Set the returned attributes */ *pattr = fsal_attributes; /* Now, exit the switch/case and returns */ break; default: /* This should not happened */ *pstatus = CACHE_INODE_INVALID_ARGUMENT; LogCrit(COMPONENT_CACHE_INODE, "cache_inode_get returning CACHE_INODE_INVALID_ARGUMENT - this should not have happened"); /* stats */ pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_GET] += 1; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); return NULL; break; } *pstatus = CACHE_INODE_SUCCESS; /* valid the found entry, if this is not feasable, returns nothing to the client */ if( plocation != NULL ) { if( plocation != pentry ) { P_w(&pentry->lock); if((*pstatus = cache_inode_valid(pentry, CACHE_INODE_OP_GET, pclient)) != CACHE_INODE_SUCCESS) { V_w(&pentry->lock); pentry = NULL; } V_w(&pentry->lock); } } /* stats */ 
pclient->stat.func_stats.nb_success[CACHE_INODE_GET] += 1; /* Free this key */ cache_inode_release_fsaldata_key(&key, pclient); return pentry; } /* cache_inode_get_located */
/**
 *
 * cache_inode_gc_clean_entry: cleans a entry in the cache_inode.
 *
 * cleans an entry in the cache_inode: removes it from the hash table,
 * releases its FSAL resources, its parent-list nodes, any directory data,
 * and finally the entry itself back to the client's pools. The entry's
 * write lock is released here (V_w), so the caller is expected to hold it
 * on entry.
 *
 * @param pentry   [INOUT] entry to be cleaned.
 * @param pgcparam [IN]    additional parameter used for cleaning (hash
 *                         table, client resources, purge counter).
 *
 * @return LRU_LIST_SET_INVALID if ok, LRU_LIST_DO_NOT_SET_INVALID otherwise
 *
 */
static int cache_inode_gc_clean_entry(cache_entry_t * pentry,
                                      cache_inode_param_gc_t * pgcparam)
{
  fsal_handle_t *pfsal_handle = NULL;
  cache_inode_parent_entry_t *parent_iter = NULL;
  cache_inode_parent_entry_t *parent_iter_next = NULL;
  cache_inode_fsal_data_t fsaldata;
  cache_inode_status_t status;
  fsal_status_t fsal_status;
  hash_buffer_t key, old_key, old_value;
  int rc;

  LogFullDebug(COMPONENT_CACHE_INODE_GC,
               "(pthread_self=%p): About to remove pentry=%p, type=%d",
               (caddr_t)pthread_self(), pentry, pentry->internal_md.type);

  /* sanity check: the LRU entry attached to this pentry should point back
   * at it; a mismatch is logged but cleaning proceeds anyway */
  if((pentry->gc_lru_entry != NULL) &&
     ((cache_entry_t *) pentry->gc_lru_entry->buffdata.pdata) != pentry)
    {
      LogCrit(COMPONENT_CACHE_INODE_GC,
              "cache_inode_gc_clean_entry: LRU entry pointed by this pentry doesn't match the GC LRU");
    }

  /* Get the FSAL handle */
  if((pfsal_handle = cache_inode_get_fsal_handle(pentry, &status)) == NULL)
    {
      LogCrit(COMPONENT_CACHE_INODE_GC,
              "cache_inode_gc_clean_entry: unable to retrieve pentry's specific filesystem info");
      return LRU_LIST_DO_NOT_SET_INVALID;
    }

  fsaldata.handle = *pfsal_handle;

  /* DIR_CONTINUE entries are keyed by their position within the directory;
   * everything else uses the DIR_START cookie */
  if(pentry->internal_md.type != DIR_CONTINUE)
    fsaldata.cookie = DIR_START;
  else
    fsaldata.cookie = pentry->object.dir_cont.dir_cont_pos;

  /* Use the handle to build the key */
  if(cache_inode_fsaldata_2_key(&key, &fsaldata, pgcparam->pclient))
    {
      LogCrit(COMPONENT_CACHE_INODE_GC,
              "cache_inode_gc_clean_entry: could not build hashtable key");

      /* key release is still needed on this path — presumably
       * cache_inode_fsaldata_2_key can partially allocate; same pattern is
       * used elsewhere in this file */
      cache_inode_release_fsaldata_key(&key, pgcparam->pclient);
      return LRU_LIST_DO_NOT_SET_INVALID;
    }

  /* use the key to delete the entry */
  rc = HashTable_Del(pgcparam->ht, &key, &old_key, &old_value);

  if((rc != HASHTABLE_SUCCESS) && (rc != HASHTABLE_ERROR_NO_SUCH_KEY))
    {
      LogCrit(COMPONENT_CACHE_INODE_GC,
              "cache_inode_gc_clean_entry: entry could not be deleted, status = %d",
              rc);

      cache_inode_release_fsaldata_key(&key, pgcparam->pclient);
      return LRU_LIST_DO_NOT_SET_INVALID;
    }
  else if(rc == HASHTABLE_ERROR_NO_SUCH_KEY)
    {
      /* Already removed by a concurrent path; treat as success so the LRU
       * slot is invalidated */
      LogEvent(COMPONENT_CACHE_INODE_GC,
               "cache_inode_gc_clean_entry: entry already deleted, type=%d, status=%d",
               pentry->internal_md.type, rc);

      cache_inode_release_fsaldata_key(&key, pgcparam->pclient);
      return LRU_LIST_SET_INVALID;
    }

  /* Clean up the associated ressources in the FSAL */
  if(FSAL_IS_ERROR(fsal_status = FSAL_CleanObjectResources(pfsal_handle)))
    {
      LogCrit(COMPONENT_CACHE_INODE_GC,
              "cache_inode_gc_clean_entry: Could'nt free FSAL ressources fsal_status.major=%u",
              fsal_status.major);
    }

  LogFullDebug(COMPONENT_CACHE_INODE_GC,
               "++++> pentry %p deleted from HashTable", pentry);

  /* Release the hash key data (the key stored in the table at insert time) */
  cache_inode_release_fsaldata_key(&old_key, pgcparam->pclient);

  /* Sanity check: old_value.pdata is expected to be equal to pentry,
   * and is released later in this function */
  if((cache_entry_t *) old_value.pdata != pentry)
    {
      LogCrit(COMPONENT_CACHE_INODE_GC,
              "cache_inode_gc_clean_entry: unexpected pdata %p from hash table (pentry=%p)",
              old_value.pdata, pentry);
    }

  /* Release the lookup key built above (distinct from old_key) */
  cache_inode_release_fsaldata_key(&key, pgcparam->pclient);

  /* Recover the parent list entries: return every parent-link node to the
   * client's parent pool */
  parent_iter = pentry->parent_list;
  while(parent_iter != NULL)
    {
      parent_iter_next = parent_iter->next_parent;

      ReleaseToPool(parent_iter, &pgcparam->pclient->pool_parent);

      parent_iter = parent_iter_next;
    }

  LogFullDebug(COMPONENT_CACHE_INODE_GC,
               "++++> parent directory sent back to pool");

  /* If entry is a DIR_CONTINUE or a DIR_BEGINNING, release pdir_data */
  if(pentry->internal_md.type == DIR_BEGINNING)
    {
      /* Put the pentry back to the pool */
      ReleaseToPool(pentry->object.dir_begin.pdir_data,
                    &pgcparam->pclient->pool_dir_data);
    }

  if(pentry->internal_md.type == DIR_CONTINUE)
    {
      /* Put the pentry back to the pool */
      ReleaseToPool(pentry->object.dir_cont.pdir_data,
                    &pgcparam->pclient->pool_dir_data);
    }

  LogFullDebug(COMPONENT_CACHE_INODE_GC,
               "++++> pdir_data (if needed) sent back to pool");

#ifdef _USE_NFS4_ACL
  /* If entry has NFS4 ACL, release it. */
  cache_inode_gc_acl(pentry);
#endif                          /* _USE_NFS4_ACL */

  /* Free and Destroy the mutex associated with the pentry. The write lock
   * released here was taken by the caller. */
  V_w(&pentry->lock);

  cache_inode_mutex_destroy(pentry);

  /* Put the pentry back to the pool */
  ReleaseToPool(pentry, &pgcparam->pclient->pool_entry);

  /* Regular exit: one fewer entry left to purge */
  pgcparam->nb_to_be_purged = pgcparam->nb_to_be_purged - 1;

  LogFullDebug(COMPONENT_CACHE_INODE_GC,
               "++++> pentry %p: clean entry is ok", pentry);

  return LRU_LIST_SET_INVALID;  /* Cleaning ok */
}
/**
 *
 * cache_inode_remove_sw: removes a pentry addressed by its parent pentry and
 * its FSAL name. Mutex management is switched.
 *
 * Removes a pentry addressed by its parent pentry and its FSAL name. Mutex
 * management is switched (use_mutex selects whether this function takes and
 * releases the parent's and target's write locks itself).
 *
 * @param pentry     [IN]    entry for the parent directory to be managed.
 * @param pnode_name [IN]    name of the entry that we are looking for in the cache.
 * @param pattr      [OUT]   attributes for the entry that we have found.
 * @param ht         [IN]    hash table used for the cache, unused in this call.
 * @param pclient    [INOUT] ressource allocated by the client for the nfs management.
 * @param pcontext   [IN]    FSAL credentials
 * @param pstatus    [OUT]   returned status.
 * @param use_mutex  [IN]    if non-zero, this function handles the locking.
 *
 * @return CACHE_INODE_SUCCESS if operation is a success \n
 * @return CACHE_INODE_LRU_ERROR if allocation error occured when validating the entry
 *
 */
cache_inode_status_t cache_inode_remove_sw(cache_entry_t * pentry, /**< Parent entry */
                                           fsal_name_t * pnode_name,
                                           fsal_attrib_list_t * pattr,
                                           hash_table_t * ht,
                                           cache_inode_client_t * pclient,
                                           fsal_op_context_t * pcontext,
                                           cache_inode_status_t * pstatus, int use_mutex)
{
  fsal_status_t fsal_status;
  cache_entry_t *to_remove_entry;
  fsal_handle_t fsal_handle_parent;
  fsal_attrib_list_t remove_attr;
  fsal_attrib_list_t after_attr;
  cache_inode_status_t status;
  cache_content_status_t cache_content_status;
  int to_remove_numlinks = 0;   /* link count left on the target after unlink; 0 => evict it */
  fsal_accessflags_t access_mask = 0;

  /* stats */
  (pclient->stat.nb_call_total)++;
  (pclient->stat.func_stats.nb_call[CACHE_INODE_REMOVE])++;

  /* pentry is a directory */
  if(use_mutex)
    P_w(&pentry->lock);

  /* Check if caller is allowed to perform the operation */
  access_mask = FSAL_MODE_MASK_SET(FSAL_W_OK) |
                FSAL_ACE4_MASK_SET(FSAL_ACE_PERM_DELETE_CHILD);
  if((status = cache_inode_access_sw(pentry,
                                     access_mask,
                                     ht, pclient, pcontext, &status, FALSE)) != CACHE_INODE_SUCCESS)
    {
      *pstatus = status;

      /* pentry is a directory */
      if(use_mutex)
        V_w(&pentry->lock);

      return *pstatus;
    }

  /* Looks up for the entry to remove */
  if((to_remove_entry = cache_inode_lookup_sw( pentry,
                                               pnode_name,
                                               CACHE_INODE_JOKER_POLICY,
                                               &remove_attr,
                                               ht,
                                               pclient,
                                               pcontext,
                                               &status,
                                               FALSE)) == NULL)
    {
      *pstatus = status;

      /* pentry is a directory */
      if(use_mutex)
        V_w(&pentry->lock);

      return *pstatus;
    }

  /* lock it */
  if(use_mutex)
    P_w(&to_remove_entry->lock);

  if(pentry->internal_md.type != DIRECTORY)
    {
      if(use_mutex)
        {
          V_w(&to_remove_entry->lock);
          V_w(&pentry->lock);
        }

      *pstatus = CACHE_INODE_BAD_TYPE;
      return *pstatus;
    }

  LogDebug(COMPONENT_CACHE_INODE,
           "---> Cache_inode_remove : %s", pnode_name->name);

  /* Non-empty directories should not be removed. */
  if(to_remove_entry->internal_md.type == DIRECTORY &&
     to_remove_entry->object.dir.has_been_readdir == CACHE_INODE_YES)
    {
      if(cache_inode_is_dir_empty(to_remove_entry) != CACHE_INODE_SUCCESS)
        {
          if(use_mutex)
            {
              V_w(&to_remove_entry->lock);
              V_w(&pentry->lock);
            }

          *pstatus = CACHE_INODE_DIR_NOT_EMPTY;
          return *pstatus;
        }
    }

  /* pentry->internal_md.type == DIRECTORY */
  fsal_handle_parent = pentry->object.dir.handle;

  if(status == CACHE_INODE_SUCCESS)
    {
      /* Remove the file from FSAL */
      after_attr.asked_attributes = pclient->attrmask;
#ifdef _USE_MFSL
      cache_inode_get_attributes(pentry, &after_attr);
#ifdef _USE_PNFS
      after_attr.numlinks = remove_attr.numlinks ; /* Hook used to pass nlinks to MFSL_unlink */
      if( to_remove_entry->internal_md.type == REGULAR_FILE )
        fsal_status = MFSL_unlink(&pentry->mobject,
                                  pnode_name,
                                  &to_remove_entry->mobject,
                                  pcontext, &pclient->mfsl_context, &after_attr,
                                  &to_remove_entry->object.file.pnfs_file );
      else
#endif /* _USE_PNFS */
        fsal_status = MFSL_unlink(&pentry->mobject,
                                  pnode_name,
                                  &to_remove_entry->mobject,
                                  pcontext, &pclient->mfsl_context, &after_attr,
                                  NULL);
#else
      fsal_status = FSAL_unlink(&fsal_handle_parent, pnode_name, pcontext, &after_attr);
#endif

      /* Set the 'after' attr */
      if(pattr != NULL)
        *pattr = after_attr;

      if(FSAL_IS_ERROR(fsal_status))
        {
          if(fsal_status.major == ERR_FSAL_STALE)
            {
              cache_inode_status_t kill_status;

              LogDebug(COMPONENT_CACHE_INODE,
                       "cache_inode_remove: Stale FSAL FH detected for pentry %p",
                       pentry);

              if(cache_inode_kill_entry(pentry, WT_LOCK, ht, pclient, &kill_status) !=
                 CACHE_INODE_SUCCESS)
                LogCrit(COMPONENT_CACHE_INODE,
                        "cache_inode_remove: Could not kill entry %p, status = %u",
                        pentry, kill_status);

              /* NOTE(review): this ESTALE assignment is immediately
               * overwritten by the convert below — dead store unless
               * cache_inode_error_convert maps ESTALE to the same value;
               * verify and consider swapping the two statements. */
              *pstatus = CACHE_INODE_FSAL_ESTALE;
            }
          *pstatus = cache_inode_error_convert(fsal_status);
          if(use_mutex)
            {
              V_w(&to_remove_entry->lock);
              V_w(&pentry->lock);
            }
          return *pstatus;
        }
    } /* CACHE_INODE_SUCCESS */
  else
    {
      if(use_mutex)
        {
          V_w(&to_remove_entry->lock);
          V_w(&pentry->lock);
        }
      (pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_REMOVE])++;
      return status;
    }

  /* Remove the entry from parent dir_entries avl */
  cache_inode_remove_cached_dirent(pentry, pnode_name, ht, pclient, &status);

  LogFullDebug(COMPONENT_CACHE_INODE,
               "cache_inode_remove_cached_dirent: status=%d", status);

  /* Update the cached attributes */
  pentry->object.dir.attributes = after_attr;

  /* Update the attributes for the removed entry: decrement the cached link
   * count and refresh ctime when other hard links remain */
  if(remove_attr.type != FSAL_TYPE_DIR)
    {
      if(remove_attr.numlinks > 1)
        {
          switch (to_remove_entry->internal_md.type)
            {
            case SYMBOLIC_LINK:
              assert(to_remove_entry->object.symlink);
              to_remove_entry->object.symlink->attributes.numlinks -= 1;
              cache_inode_set_time_current( &to_remove_entry->object.symlink->attributes.ctime ) ;
              to_remove_numlinks = to_remove_entry->object.symlink->attributes.numlinks;
              break;

            case REGULAR_FILE:
              to_remove_entry->object.file.attributes.numlinks -= 1;
              cache_inode_set_time_current( &to_remove_entry->object.file.attributes.ctime ) ;
              to_remove_numlinks = to_remove_entry->object.file.attributes.numlinks;
              break;

            case CHARACTER_FILE:
            case BLOCK_FILE:
            case SOCKET_FILE:
            case FIFO_FILE:
              to_remove_entry->object.special_obj.attributes.numlinks -= 1;
              cache_inode_set_time_current( &to_remove_entry->object.special_obj.attributes.ctime ) ;
              to_remove_numlinks = to_remove_entry->object.special_obj.attributes.numlinks;
              break;

            default:
              /* Other objects should not be hard linked */
              if(use_mutex)
                {
                  V_w(&to_remove_entry->lock);
                  V_w(&pentry->lock);
                }

              *pstatus = CACHE_INODE_BAD_TYPE;
              return *pstatus;
              break;
            }
        }
    }
  else
    {
      /* No hardlink counter to be decremented for a directory: hardlink are not allowed for them */
    }

  /* Now, delete "to_remove_entry" from the cache inode and free its associated resources, but only if
   * numlinks == 0 */
  if(to_remove_numlinks == 0)
    {
      /* If pentry is a regular file, data cached, the related data cache entry should be removed as well */
      if(to_remove_entry->internal_md.type == REGULAR_FILE)
        {
          if(to_remove_entry->object.file.pentry_content != NULL)
            {
              /* Something is to be deleted, release the cache data entry */
              if(cache_content_release_entry
                 ((cache_content_entry_t *) to_remove_entry->object.file.pentry_content,
                  (cache_content_client_t *) pclient->pcontent_client,
                  &cache_content_status) != CACHE_CONTENT_SUCCESS)
                {
                  LogEvent(COMPONENT_CACHE_INODE,
                           "pentry %p, named %s could not be released from data cache, status=%d",
                           to_remove_entry, pnode_name->name,
                           cache_content_status);
                }
            }
        }

      if((*pstatus = cache_inode_clean_internal(to_remove_entry, ht,
                                                pclient)) != CACHE_INODE_SUCCESS)
        {
          if(use_mutex)
            {
              V_w(&pentry->lock);
              V_w(&to_remove_entry->lock);
            }
          LogCrit(COMPONENT_CACHE_INODE,
                  "cache_inode_clean_internal ERROR %d", *pstatus);
          return *pstatus;
        }

      /* Finally put the main pentry back to pool */
      if(use_mutex)
        V_w(&to_remove_entry->lock);

      /* Destroy the mutex associated with the pentry */
      cache_inode_mutex_destroy(to_remove_entry);

      ReleaseToPool(to_remove_entry, &pclient->pool_entry);
    } /* to_remove->numlinks == 0 */

  /* Validate the entries */
  *pstatus = cache_inode_valid(pentry, CACHE_INODE_OP_SET, pclient);

  /* Regular exit */
  if(use_mutex)
    {
      if(to_remove_numlinks != 0)
        V_w(&to_remove_entry->lock);    /* This was not release yet, it should be done here */

      V_w(&pentry->lock);
    }

  if(status == CACHE_INODE_SUCCESS)
    (pclient->stat.func_stats.nb_success[CACHE_INODE_REMOVE])++;
  else
    (pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_REMOVE])++;

  return status;
}                               /* cache_inode_remove */
/**
 * cache_inode_kill_entry: forcibly invalidates a cache entry (typically on
 * a stale FSAL handle): removes it from the hash table, drops its FSAL
 * resources, parent links, data-cache entry and dirents, then releases the
 * entry back to the pool.
 *
 * The caller holds the entry's lock in the mode described by lock_how; it
 * is released via free_lock() on every early-exit path.
 *
 * @param pentry   [INOUT] entry to be killed.
 * @param lock_how [IN]    how the caller holds the entry's lock.
 * @param ht       [IN]    hash table used for the cache.
 * @param pclient  [INOUT] per-client resources (pools).
 * @param pstatus  [OUT]   returned status.
 *
 * @return the same as *pstatus.
 */
cache_inode_status_t cache_inode_kill_entry( cache_entry_t * pentry,
                                             cache_inode_lock_how_t lock_how,
                                             hash_table_t * ht,
                                             cache_inode_client_t * pclient,
                                             cache_inode_status_t * pstatus )
{
  fsal_handle_t *pfsal_handle = NULL;
  cache_inode_fsal_data_t fsaldata;
  cache_inode_parent_entry_t *parent_iter = NULL;
  cache_inode_parent_entry_t *parent_iter_next = NULL;
  hash_buffer_t key, old_key;
  hash_buffer_t old_value;
  int rc;
  fsal_status_t fsal_status;

  memset( (char *)&fsaldata, 0, sizeof( fsaldata ) ) ;

  LogInfo(COMPONENT_CACHE_INODE,
          "Using cache_inode_kill_entry for entry %p", pentry);

  /* Invalidation is not for junctions or special files */
  /* NOTE(review): pentry and pstatus are dereferenced here, but the NULL
   * checks for both only happen further below — the argument checks should
   * arguably be hoisted to the top of the function; confirm with callers. */
  if( ( pentry->internal_md.type == FS_JUNCTION ) ||
      ( pentry->internal_md.type == SOCKET_FILE ) ||
      ( pentry->internal_md.type == FIFO_FILE ) ||
      ( pentry->internal_md.type == CHARACTER_FILE ) ||
      ( pentry->internal_md.type == BLOCK_FILE ) )
    {
      free_lock( pentry, lock_how ) ;

      *pstatus = CACHE_INODE_SUCCESS;
      return *pstatus;
    }

#if 0
  /** @todo: BUGAZOMEU : directory invalidation seems quite tricky, temporarily avoid it */
  if( pentry->internal_md.type == DIRECTORY )
    {
      free_lock( pentry, lock_how ) ;

      *pstatus = CACHE_INODE_SUCCESS;
      return *pstatus;
    }

  /** @todo: BUGAZOMEU : file invalidation seems quite tricky, temporarily avoid it */
  /* We need to know how to manage how to deal with "files with states" */
  if( pentry->internal_md.type == REGULAR_FILE )
    {
      free_lock( pentry, lock_how ) ;

      *pstatus = CACHE_INODE_SUCCESS;
      return *pstatus;
    }
#endif

  /* Argument sanity checks (see NOTE above about their placement) */
  if(pstatus == NULL)
    return CACHE_INODE_INVALID_ARGUMENT;

  if(pentry == NULL || pclient == NULL || ht == NULL)
    {
      free_lock( pentry, lock_how ) ;

      *pstatus = CACHE_INODE_INVALID_ARGUMENT;
      return *pstatus;
    }

  /* Get the FSAL handle */
  if((pfsal_handle = cache_inode_get_fsal_handle(pentry, pstatus)) == NULL)
    {
      free_lock( pentry, lock_how ) ;

      LogCrit(COMPONENT_CACHE_INODE,
              "cache_inode_kill_entry: unable to retrieve pentry's specific filesystem info");
      return *pstatus;
    }

  /* Invalidate the related LRU gc entry (no more required) */
  if(pentry->gc_lru_entry != NULL)
    {
      if(LRU_invalidate(pentry->gc_lru, pentry->gc_lru_entry) != LRU_LIST_SUCCESS)
        {
          free_lock( pentry, lock_how ) ;

          *pstatus = CACHE_INODE_LRU_ERROR;
          return *pstatus;
        }
    }

  fsaldata.handle = *pfsal_handle;
  fsaldata.cookie = DIR_START;

  /* Use the handle to build the key */
  if(cache_inode_fsaldata_2_key(&key, &fsaldata, pclient))
    {
      free_lock( pentry, lock_how ) ;

      LogCrit(COMPONENT_CACHE_INODE,
              "cache_inode_kill_entry: could not build hashtable key");

      cache_inode_release_fsaldata_key(&key, pclient);

      *pstatus = CACHE_INODE_NOT_FOUND;
      return *pstatus;
    }

  /* use the key to delete the entry */
  if((rc = HashTable_Del(ht, &key, &old_key, &old_value)) != HASHTABLE_SUCCESS)
    {
      if( rc != HASHTABLE_ERROR_NO_SUCH_KEY) /* rc=3 => Entry was previously removed */
        LogCrit( COMPONENT_CACHE_INODE,
                 "cache_inode_kill_entry: entry could not be deleted, status = %d",
                 rc);

      cache_inode_release_fsaldata_key(&key, pclient);

      *pstatus = CACHE_INODE_NOT_FOUND;
      return *pstatus;
    }

  /* Release the hash key data (the key stored at insert time) */
  cache_inode_release_fsaldata_key(&old_key, pclient);

  /* Clean up the associated ressources in the FSAL */
  if(FSAL_IS_ERROR(fsal_status = FSAL_CleanObjectResources(pfsal_handle)))
    {
      LogCrit(COMPONENT_CACHE_INODE,
              "cache_inode_kill_entry: Couldn't free FSAL ressources fsal_status.major=%u",
              fsal_status.major);
    }

  /* Sanity check: old_value.pdata is expected to be equal to pentry,
   * and is released later in this function */
  if((cache_entry_t *) old_value.pdata != pentry)
    {
      LogCrit(COMPONENT_CACHE_INODE,
              "cache_inode_kill_entry: unexpected pdata %p from hash table (pentry=%p)",
              old_value.pdata, pentry);
    }

  /* Release the current key (the lookup key built above) */
  cache_inode_release_fsaldata_key(&key, pclient);

  /* Recover the parent list entries: return all parent-link nodes to the
   * client's parent pool */
  parent_iter = pentry->parent_list;
  while(parent_iter != NULL)
    {
      parent_iter_next = parent_iter->next_parent;

      ReleaseToPool(parent_iter, &pclient->pool_parent);

      parent_iter = parent_iter_next;
    }

  /* If entry is datacached, remove it from the cache */
  if(pentry->internal_md.type == REGULAR_FILE)
    {
      cache_content_status_t cache_content_status;

      if(pentry->object.file.pentry_content != NULL)
        if(cache_content_release_entry
           ((cache_content_entry_t *) pentry->object.file.pentry_content,
            (cache_content_client_t *) pclient->pcontent_client,
            &cache_content_status) != CACHE_CONTENT_SUCCESS)
          LogCrit(COMPONENT_CACHE_INODE,
                  "Could not removed datacached entry for pentry %p", pentry);
    }

  /* If entry is a DIRECTORY, invalidate dirents */
  if(pentry->internal_md.type == DIRECTORY)
    {
      cache_inode_invalidate_related_dirents(pentry, pclient);
    }

  // free_lock( pentry, lock_how ) ; /* Really needed ? The pentry is unaccessible now and will be destroyed */

  /* Destroy the mutex associated with the pentry */
  cache_inode_mutex_destroy(pentry);

  /* Put the pentry back to the pool */
  ReleaseToPool(pentry, &pclient->pool_entry);

  *pstatus = CACHE_INODE_SUCCESS;
  return *pstatus;
}                               /* cache_inode_kill_entry */
void *fsal_up_thread(void *Arg) { fsal_status_t status; int rc; fsal_up_arg_t *fsal_up_args = (fsal_up_arg_t *)Arg; fsal_up_event_bus_context_t fsal_up_context; fsal_up_event_bus_parameter_t fsal_up_bus_param; fsal_up_event_bus_filter_t * pupebfilter = NULL; fsal_up_filter_list_t *filter = NULL; fsal_up_event_t *pevent_head, *event, *tmpevent; fsal_up_event_functions_t *event_func; fsal_count_t nb_events_found, event_nb; fsal_time_t timeout; char thr_name[40]; memset(&fsal_up_bus_param, 0, sizeof(fsal_up_event_bus_parameter_t)); memset(&fsal_up_context, 0, sizeof(fsal_up_event_bus_context_t)); snprintf(thr_name, sizeof(thr_name), "FSAL UP Thread for filesystem %llu.%llu", fsal_up_args->export_entry->filesystem_id.major, fsal_up_args->export_entry->filesystem_id.minor); SetNameFunction(thr_name); #ifndef _NO_BUDDY_SYSTEM if((rc = BuddyInit(&nfs_param.buddy_param_fsal_up)) != BUDDY_SUCCESS) { /* Failed init */ LogFatal(COMPONENT_FSAL_UP, "FSAL_UP: Memory manager could not be initialized"); Fatal(); } LogInfo(COMPONENT_FSAL_UP, "FSAL_UP: Memory manager for filesystem %llu.%llu export id %d" " successfully initialized", fsal_up_args->export_entry->filesystem_id.major, fsal_up_args->export_entry->filesystem_id.minor, fsal_up_args->export_entry->id); #endif /* Set the FSAL UP functions that will be used to process events. */ event_func = get_fsal_up_functions(fsal_up_args->export_entry->fsal_up_type); if (event_func == NULL) { LogCrit(COMPONENT_FSAL_UP, "Error: FSAL UP TYPE: %s does not exist. " "Exiting FSAL UP thread.", fsal_up_args->export_entry->fsal_up_type); Mem_Free(Arg); return NULL; } /* Get fsal up context from FSAL */ /* It is expected that the export entry and event_pool will be referenced * in the returned callback context structure. 
*/ memcpy(&fsal_up_context.FS_export_context, &fsal_up_args->export_entry->FS_export_context, sizeof(fsal_export_context_t)); fsal_up_context.event_pool = &nfs_param.fsal_up_param.event_pool; LogDebug(COMPONENT_FSAL_UP, "Initializing FSAL Callback context."); status = FSAL_UP_Init(&fsal_up_bus_param, &fsal_up_context); if (FSAL_IS_ERROR(status)) { LogCrit(COMPONENT_FSAL_UP, "Error: Could not initialize FSAL UP for" " filesystem %llu.%llu export %d. Exiting FSAL UP thread.", fsal_up_args->export_entry->filesystem_id.major, fsal_up_args->export_entry->filesystem_id.minor, fsal_up_args->export_entry->id); } /* Add filters ... later if needed we could add arguments to filters * configurable from configuration files. */ for(filter = fsal_up_args->export_entry->fsal_up_filter_list; filter != NULL; filter = filter->next) { LogEvent(COMPONENT_FSAL_UP, "Applying filter \"%s\" to FSAL UP thread " "for filesystem id %llu.%llu export id %d.", filter->name, fsal_up_args->export_entry->filesystem_id.major, fsal_up_args->export_entry->filesystem_id.minor, fsal_up_args->export_entry->id); /* Find predefined filter */ pupebfilter = find_filter(filter->name); if (pupebfilter == NULL) { LogCrit(COMPONENT_FSAL_UP, "Error: Could not find filter named \"%s\".", filter->name); } /* Applying filter */ FSAL_UP_AddFilter(pupebfilter, &fsal_up_context); } /* Set the timeout for getting events. */ timeout = fsal_up_args->export_entry->fsal_up_timeout; /* Start querying for events and processing. */ while(1) { /* pevent is passed in as a single empty node, it's expected the * FSAL will use the event_pool in the bus_context to populate * this array by adding to the pevent_head->next attribute. 
*/ event_nb = 0; nb_events_found = 0; pevent_head = NULL; LogDebug(COMPONENT_FSAL_UP, "Requesting event from FSAL Callback interface."); status = FSAL_UP_GetEvents(&pevent_head, /* out */ &event_nb, /* in/out */ timeout, /* in */ &nb_events_found, /* out */ &fsal_up_context);/* in */ if (FSAL_IS_ERROR(status)) { if (status.major == ERR_FSAL_TIMEOUT) LogDebug(COMPONENT_FSAL_UP, "FSAL_UP_EB_GetEvents() hit the timeout" " limit of %u.%u seconds for filesystem id %llu.%llu export id" " %d.", timeout.seconds, timeout.nseconds, fsal_up_args->export_entry->filesystem_id.major, fsal_up_args->export_entry->filesystem_id.minor, fsal_up_args->export_entry->id); else if (status.major == ERR_FSAL_NOTSUPP) { LogCrit(COMPONENT_FSAL_UP, "Exiting FSAL UP Thread for filesystem" " id %llu.%llu export id %u because the FSAL Callback" " Interface is not supported for this FSAL type.", fsal_up_args->export_entry->filesystem_id.major, fsal_up_args->export_entry->filesystem_id.minor, fsal_up_args->export_entry->id); return NULL; } else LogDebug(COMPONENT_FSAL_UP, "Error: FSAL_UP_EB_GetEvents() " "failed"); } LogDebug(COMPONENT_FSAL_UP, "Received %lu events to process for filesystem" " id %llu.%llu export id %u.", event_nb, fsal_up_args->export_entry->filesystem_id.major, fsal_up_args->export_entry->filesystem_id.minor, fsal_up_args->export_entry->id); /* process the list of events */ for(event = pevent_head; event != NULL;) { status = process_event(event, event_func); if (FSAL_IS_ERROR(status)) { LogDebug(COMPONENT_FSAL_UP, "Error: Event could not be processed " "for filesystem %llu.%llu export id %u.", fsal_up_args->export_entry->filesystem_id.major, fsal_up_args->export_entry->filesystem_id.minor, fsal_up_args->export_entry->id); } tmpevent = event; event = event->next_event; ReleaseToPool(tmpevent, &nfs_param.fsal_up_param.event_pool); event_nb--; } LogDebug(COMPONENT_FSAL_UP, "%lu events not found for filesystem" " %llu.%llu export id %u", event_nb, 
fsal_up_args->export_entry->filesystem_id.major, fsal_up_args->export_entry->filesystem_id.minor, fsal_up_args->export_entry->id); } Mem_Free(Arg); return NULL; } /* fsal_up_thread */