static bool lookup_by_uname(const struct gsh_buffdesc *name, struct cache_info **info) { struct cache_info prototype = { .uname = *name }; struct avltree_node *found_node = avltree_lookup(&prototype.uname_node, &uname_tree); struct cache_info *found_info; void **cache_slot; if (unlikely(!found_node)) return false; found_info = avltree_container_of(found_node, struct cache_info, uname_node); /* I assume that if someone likes this user enough to look it up by name, they'll like it enough to look it up by ID later. */ cache_slot = (void **) &uid_grplist_cache[found_info->uid % id_cache_size]; atomic_store_voidptr(cache_slot, &found_info->uid_node); *info = found_info; return true; }
static bool lookup_by_uid(const uid_t uid, struct cache_info **info) { struct cache_info prototype = { .uid = uid }; void **cache_slot = (void **) &uid_grplist_cache[prototype.uid % id_cache_size]; struct avltree_node *found_node = atomic_fetch_voidptr(cache_slot); struct cache_info *found_info; bool found = false; /* Verify that the node found in the cache array is in fact what we * want. */ if (likely(found_node)) { found_info = avltree_container_of(found_node, struct cache_info, uid_node); if (found_info->uid == uid) found = true; } if (unlikely(!found)) { found_node = avltree_lookup(&prototype.uid_node, &uid_tree); if (unlikely(!found_node)) return false; atomic_store_voidptr(cache_slot, found_node); found_info = avltree_container_of(found_node, struct cache_info, uid_node); }
/**
 * Delete a weak reference from the table.
 *
 * Removes the node for ref->ptr iff it is present in the owning
 * partition's tree AND its generation matches ref->gen (a generation
 * mismatch means the address was recycled for a newer object).  The
 * partition's pointer-cache slot for the address is cleared as well.
 *
 * @param wt    weakref table
 * @param ref   reference (ptr, gen) identifying the entry to delete
 * @param flags pass GWR_FLAG_WLOCKED when the caller already holds the
 *              partition write lock; otherwise it is taken here
 */
static inline void gweakref_delete_impl(gweakref_table_t *wt, gweakref_t *ref,
					uint32_t flags)
{
	struct avltree_node *node;
	gweakref_priv_t refk, *tref;
	gweakref_partition_t *wp;

	/* lookup up ref.ptr, delete iff ref.ptr is found and
	 * ref.gen == found.gen */
	refk.k = *ref;
	wp = gwt_partition_of_addr_k(wt, refk.k.ptr);
	if (!(flags & GWR_FLAG_WLOCKED))
		pthread_rwlock_wrlock(&wp->lock);
	node = avltree_lookup(&refk.node_k, &wp->t);
	if (node) {
		/* found it, maybe -- the generation still has to match */
		tref = avltree_container_of(node, gweakref_priv_t, node_k);
		/* XXX generation mismatch would be in error, we think */
		if (tref->k.gen == ref->gen) {
			/* unhook it from the tree, then free the node */
			avltree_remove(node, &wp->t);
			gsh_free(tref);
			/* invalidate the cached pointer for this address */
			if (wp->cache)
				wp->cache[cache_offsetof(wt, refk.k.ptr)] = NULL;
		}
	}
	if (!(flags & GWR_FLAG_WLOCKED))
		pthread_rwlock_unlock(&wp->lock);
}
void check_delete_1(void) { struct avltree_node *node; avl_unit_val_t *v, *v2; avl_unit_clear_and_destroy_tree(&avl_tree_1); avltree_init(&avl_tree_1, avl_unit_cmpf, 0 /* flags */); insert_long_val(&avl_tree_1, 4); insert_long_val(&avl_tree_1, 1); insert_long_val(&avl_tree_1, 10010); insert_long_val(&avl_tree_1, 267); insert_long_val(&avl_tree_1, 3382); insert_long_val(&avl_tree_1, 22); insert_long_val(&avl_tree_1, 82); insert_long_val(&avl_tree_1, 3); node = avltree_first(&avl_tree_1); v2 = avltree_container_of(node, avl_unit_val_t, node_k); CU_ASSERT(v2->val == (1 + 1)); delete_long_val(&avl_tree_1, 1); /* new key */ v = avl_unit_new_val(4); v->key = 4; node = avltree_lookup(&v->node_k, &avl_tree_1); v2 = avltree_container_of(node, avl_unit_val_t, node_k); CU_ASSERT(v2->val == (4 + 1)); delete_long_val(&avl_tree_1, 267); v->key = 3382; node = avltree_lookup(&v->node_k, &avl_tree_1); v2 = avltree_container_of(node, avl_unit_val_t, node_k); CU_ASSERT(v2->val == (3382 + 1)); avl_unit_free_val(v); }
/* Insert key l into tree t only if no node with that key exists yet;
 * when the key is already present, the freshly allocated value is
 * released instead of inserted. */
void insert_long_val_safe(struct avltree *t, unsigned long l)
{
	avl_unit_val_t *val = avl_unit_new_val(l);

	val->key = l;
	if (avltree_lookup(&val->node_k, t) != NULL)
		avl_unit_free_val(val);	/* duplicate: discard */
	else
		avltree_insert(&val->node_k, t);
}
/* Look up every one of the 10000 mappings in avl_tree_10000 and verify
 * that each stored value equals key + 1.  A single search-key object is
 * reused across iterations and freed at the end. */
void lookups_tree_10000(void)
{
	struct avltree_node *node;
	avl_unit_val_t *v2, *v = avl_unit_new_val(0);
	int ix;

	/* Fix: the loop bound was 'ix < 2', so only key 1 was ever
	 * checked; cover the full 1..10000 range like the companion
	 * deletes_tree_10000() does. */
	for (ix = 1; ix < 10001; ++ix) {
		/* reuse v as the search key */
		v->key = ix;
		/* lookup mapping */
		node = avltree_lookup(&v->node_k, &avl_tree_10000);
		v2 = avltree_container_of(node, avl_unit_val_t, node_k);
		CU_ASSERT((unsigned long)v2->val == (ix + 1));
	}

	/* free the search key */
	avl_unit_free_val(v);
}
/**
 * Resolve a weak reference, donating the partition lock on success.
 *
 * Returns ref->ptr iff the address is present in the partition tree (or
 * its pointer cache) AND the stored generation matches ref->gen;
 * otherwise returns NULL.
 *
 * Lock donation: on a successful lookup the partition READ lock is
 * still held and *lock is set to it -- the caller MUST release it when
 * done with the object.  On failure the lock is released here and *lock
 * is not written.
 */
void *gweakref_lookupex(gweakref_table_t *wt, gweakref_t *ref,
			pthread_rwlock_t **lock)
{
	struct avltree_node *node = NULL;
	gweakref_priv_t refk, *tref;
	gweakref_partition_t *wp;
	void *ret = NULL;

	/* look up ref.ptr--return !NULL iff ref.ptr is found and
	 * ref.gen == found.gen */
	refk.k = *ref;
	wp = gwt_partition_of_addr_k(wt, refk.k.ptr);
	pthread_rwlock_rdlock(&wp->lock);

	/* check the per-partition pointer cache first */
	if (wp->cache)
		node = wp->cache[cache_offsetof(wt, refk.k.ptr)];

	/* cache miss: fall back to the AVL tree */
	if (! node)
		node = avltree_lookup(&refk.node_k, &wp->t);

	if (node) {
		/* found it, maybe -- generation must still match */
		tref = avltree_container_of(node, gweakref_priv_t, node_k);
		if (tref->k.gen == ref->gen) {
			ret = ref->ptr;
			/* refresh the cache slot on a confirmed hit */
			if (wp->cache)
				wp->cache[cache_offsetof(wt, refk.k.ptr)] = node;
		}
	}

	if (ret) {
		/* success: keep the rdlock, hand it to the caller */
		*lock = &wp->lock;
	} else {
		pthread_rwlock_unlock(&wp->lock);
	}

	return (ret);
}
/* Remove the mapping for key l from tree t.  Asserts that the key is
 * actually present, then frees both the stored node and the temporary
 * search key. */
void delete_long_val(struct avltree *t, unsigned long l)
{
	struct avltree_node *node;
	avl_unit_val_t *probe, *stored;

	/* temporary value used only as a search key */
	probe = avl_unit_new_val(l);
	probe->key = l;

	node = avltree_lookup(&probe->node_k, t);
	stored = avltree_container_of(node, avl_unit_val_t, node_k);
	CU_ASSERT(stored->key == l);

	/* unlink and free the stored mapping */
	avltree_remove(&stored->node_k, t);
	avl_unit_free_val(stored);

	/* free the search key */
	avl_unit_free_val(probe);
}
void deletes_tree_10000(void) { struct avltree_node *node; avl_unit_val_t *v2, *v = avl_unit_new_val(0); int ix; for (ix = 1; ix < 10001; ++ix) { /* reuse key */ v->key = ix; /* find mapping */ node = avltree_lookup(&v->node_k, &avl_tree_10000); v2 = avltree_container_of(node, avl_unit_val_t, node_k); CU_ASSERT(v2->val == (ix + 1)); /* and remove it */ avltree_remove(&v2->node_k, &avl_tree_10000); avl_unit_free_val(v2); } /* free search k */ avl_unit_free_val(v); }
/** * * cache_inode_lookup_sw: looks up for a name in a directory indicated by a * cached entry. * * Looks up for a name in a directory indicated by a cached entry. The directory * should have been cached before. * * @param pentry_parent [IN] entry for the parent directory to be managed. * @param name [IN] name of the entry that we are looking for in the * cache. * @param pattr [OUT] attributes for the entry that we have found. * @param ht [IN] hash table used for the cache, unused in this * call. * @param pclient [INOUT] ressource allocated by the client for the nfs * management. * @param pcontext [IN] FSAL credentials * @param pstatus [OUT] returned status. * @param use_mutex [IN] if TRUE, mutex management is done, not if equal * to FALSE. * * @return CACHE_INODE_SUCCESS if operation is a success \n * @return CACHE_INODE_LRU_ERROR if allocation error occured when validating the * entry * */ cache_entry_t *cache_inode_lookup_sw(cache_entry_t * pentry_parent, fsal_name_t * pname, cache_inode_policy_t policy, fsal_attrib_list_t * pattr, hash_table_t * ht, cache_inode_client_t * pclient, fsal_op_context_t * pcontext, cache_inode_status_t * pstatus, int use_mutex) { cache_inode_dir_entry_t dirent_key[1], *dirent; struct avltree_node *dirent_node; cache_inode_dir_entry_t *new_dir_entry; cache_entry_t *pentry = NULL; fsal_status_t fsal_status; #ifdef _USE_MFSL mfsl_object_t object_handle; #else fsal_handle_t object_handle; #endif fsal_handle_t dir_handle; fsal_attrib_list_t object_attributes; cache_inode_create_arg_t create_arg; cache_inode_file_type_t type; cache_inode_status_t cache_status; cache_inode_fsal_data_t new_entry_fsdata; fsal_accessflags_t access_mask = 0; memset(&create_arg, 0, sizeof(create_arg)); memset( (char *)&new_entry_fsdata, 0, sizeof( new_entry_fsdata ) ) ; /* Set the return default to CACHE_INODE_SUCCESS */ *pstatus = CACHE_INODE_SUCCESS; /* stats */ (pclient->stat.nb_call_total)++; (pclient->stat.func_stats.nb_call[CACHE_INODE_LOOKUP])++; /* We 
should not renew entries when !use_mutex (because unless we * make the flag explicit (shared vs. exclusive), we don't know * whether a mutating operation is safe--and, the caller should have * already renewed the entry */ if(use_mutex == TRUE) { P_w(&pentry_parent->lock); cache_status = cache_inode_renew_entry(pentry_parent, pattr, ht, pclient, pcontext, pstatus); if(cache_status != CACHE_INODE_SUCCESS) { V_w(&pentry_parent->lock); inc_func_err_retryable(pclient, CACHE_INODE_GETATTR); LogDebug(COMPONENT_CACHE_INODE, "cache_inode_lookup: returning %d(%s) from cache_inode_renew_entry", *pstatus, cache_inode_err_str(*pstatus)); return NULL; } /* RW Lock goes for writer to reader */ rw_lock_downgrade(&pentry_parent->lock); } if(pentry_parent->internal_md.type != DIRECTORY) { /* Parent is no directory base, return NULL */ *pstatus = CACHE_INODE_NOT_A_DIRECTORY; /* stats */ (pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_LOOKUP])++; if(use_mutex == TRUE) V_r(&pentry_parent->lock); return NULL; } /* if name is ".", use the input value */ if(!FSAL_namecmp(pname, (fsal_name_t *) & FSAL_DOT)) { pentry = pentry_parent; } else if(!FSAL_namecmp(pname, (fsal_name_t *) & FSAL_DOT_DOT)) { /* Directory do only have exactly one parent. This a limitation in all FS, * which implies that hard link are forbidden on directories (so that * they exists only in one dir). Because of this, the parent list is * always limited to one element for a dir. Clients SHOULD never * 'lookup( .. )' in something that is no dir. */ pentry = cache_inode_lookupp_no_mutex(pentry_parent, ht, pclient, pcontext, pstatus); } else { /* This is a "regular lookup" (not on "." 
or "..") */ /* Check is user (as specified by the credentials) is authorized to * lookup the directory or not */ access_mask = FSAL_MODE_MASK_SET(FSAL_X_OK) | FSAL_ACE4_MASK_SET(FSAL_ACE_PERM_LIST_DIR); if(cache_inode_access_no_mutex(pentry_parent, access_mask, ht, pclient, pcontext, pstatus) != CACHE_INODE_SUCCESS) { if(use_mutex == TRUE) V_r(&pentry_parent->lock); (pclient->stat.func_stats.nb_err_retryable[CACHE_INODE_GETATTR])++; return NULL; } /* We first try avltree_lookup by name. If that fails, we dispatch to * the fsal. */ FSAL_namecpy(&dirent_key->name, pname); dirent_node = avltree_lookup(&dirent_key->node_n, &pentry_parent->object.dir.dentries); if (dirent_node) { dirent = avltree_container_of(dirent_node, cache_inode_dir_entry_t, node_n); pentry = dirent->pentry; } if(pentry == NULL) { LogDebug(COMPONENT_CACHE_INODE, "Cache Miss detected"); dir_handle = pentry_parent->handle; object_attributes.asked_attributes = pclient->attrmask; #ifdef _USE_MFSL #ifdef _USE_MFSL_ASYNC if(!mfsl_async_is_object_asynchronous(&pentry_parent->mobject)) { /* If the parent is asynchronous, rely on the content of the cache * inode parent entry. * * /!\ If the fs behind the FSAL is touched in a non-nfs way, * there will be huge incoherencies. 
*/ #endif /* _USE_MFSL_ASYNC */ fsal_status = MFSL_lookup(&pentry_parent->mobject, pname, pcontext, &pclient->mfsl_context, &object_handle, &object_attributes, NULL); #ifdef _USE_MFSL_ASYNC } else { LogMidDebug(COMPONENT_CACHE_INODE, "cache_inode_lookup chose to bypass FSAL and trusted his cache for name=%s", pname->name); fsal_status.major = ERR_FSAL_NOENT; fsal_status.minor = ENOENT; } #endif /* _USE_MFSL_ASYNC */ #else fsal_status = FSAL_lookup(&dir_handle, pname, pcontext, &object_handle, &object_attributes); #endif /* _USE_MFSL */ if(FSAL_IS_ERROR(fsal_status)) { *pstatus = cache_inode_error_convert(fsal_status); if(use_mutex == TRUE) V_r(&pentry_parent->lock); /* Stale File Handle to be detected and managed */ if(fsal_status.major == ERR_FSAL_STALE) { cache_inode_status_t kill_status; LogEvent(COMPONENT_CACHE_INODE, "cache_inode_lookup: Stale FSAL File Handle detected for pentry = %p, fsal_status=(%u,%u)", pentry_parent, fsal_status.major, fsal_status.minor); if(cache_inode_kill_entry(pentry_parent, NO_LOCK, ht, pclient, &kill_status) != CACHE_INODE_SUCCESS) LogCrit(COMPONENT_CACHE_INODE, "cache_inode_pentry_parent: Could not kill entry %p, status = %u", pentry_parent, kill_status); *pstatus = CACHE_INODE_FSAL_ESTALE; } /* stats */ (pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_LOOKUP])++; return NULL; } type = cache_inode_fsal_type_convert(object_attributes.type); /* If entry is a symlink, this value for be cached */ if(type == SYMBOLIC_LINK) { if( CACHE_INODE_KEEP_CONTENT( policy ) ) #ifdef _USE_MFSL { fsal_status = MFSL_readlink(&object_handle, pcontext, &pclient->mfsl_context, &create_arg.link_content, &object_attributes, NULL); } #else { fsal_status = FSAL_readlink(&object_handle, pcontext, &create_arg.link_content, &object_attributes); } else { fsal_status.major = ERR_FSAL_NO_ERROR ; fsal_status.minor = 0 ; } #endif if(FSAL_IS_ERROR(fsal_status)) { *pstatus = cache_inode_error_convert(fsal_status); if(use_mutex == TRUE) 
V_r(&pentry_parent->lock); /* Stale File Handle to be detected and managed */ if(fsal_status.major == ERR_FSAL_STALE) { cache_inode_status_t kill_status; LogEvent(COMPONENT_CACHE_INODE, "cache_inode_lookup: Stale FSAL File Handle detected for pentry = %p, fsal_status=(%u,%u)", pentry_parent, fsal_status.major, fsal_status.minor); if(cache_inode_kill_entry(pentry_parent, NO_LOCK, ht, pclient, &kill_status) != CACHE_INODE_SUCCESS) LogCrit(COMPONENT_CACHE_INODE, "cache_inode_pentry_parent: Could not kill entry %p, status = %u", pentry_parent, kill_status); *pstatus = CACHE_INODE_FSAL_ESTALE; } /* stats */ (pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_LOOKUP])++; return NULL; } } /* Allocation of a new entry in the cache */ #ifdef _USE_MFSL new_entry_fsdata.handle = object_handle.handle; #else new_entry_fsdata.handle = object_handle; #endif new_entry_fsdata.cookie = 0; if((pentry = cache_inode_new_entry( &new_entry_fsdata, &object_attributes, type, policy, &create_arg, NULL, ht, pclient, pcontext, FALSE, /* This is a population and not a creation */ pstatus ) ) == NULL ) { if(use_mutex == TRUE) V_r(&pentry_parent->lock); /* stats */ (pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_LOOKUP])++; return NULL; } /* Entry was found in the FSAL, add this entry to the parent * directory */ cache_status = cache_inode_add_cached_dirent(pentry_parent, pname, pentry, ht, &new_dir_entry, pclient, pcontext, pstatus); if(cache_status != CACHE_INODE_SUCCESS && cache_status != CACHE_INODE_ENTRY_EXISTS) { if(use_mutex == TRUE) V_r(&pentry_parent->lock); /* stats */ (pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_LOOKUP])++; return NULL; } } /* cached lookup fail (try fsal) */
/**
 *
 * cache_inode_operate_cached_dirent: locates a dirent in the cached dirents,
 * and performs an operation on it.
 *
 * Looks up a dirent among the cached dirents only (entries may exist in
 * the filesystem but not be cached, e.g. if no readdir was ever performed
 * on the directory), and applies the requested operation to it.
 *
 * @param pentry_parent [IN] directory entry to be searched.
 * @param pname [IN] name for the searched entry.
 * @param newname [IN] new name if function is used to rename a dirent
 * @param pclient [INOUT] resource allocated by the client for the nfs management.
 * @param dirent_op [IN] operation (ADD, LOOKUP or REMOVE) to do on the dirent
 * if found.
 * @param pstatus [OUT] returned status.
 *
 * @return the found entry if it exists and NULL if it is not in the dirent
 * cache. REMOVE always returns NULL.
 *
 */
cache_entry_t *cache_inode_operate_cached_dirent(cache_entry_t * pentry_parent,
                                                 fsal_name_t * pname,
                                                 fsal_name_t * newname,
                                                 cache_inode_client_t * pclient,
                                                 cache_inode_dirent_op_t dirent_op,
                                                 cache_inode_status_t * pstatus)
{
    cache_entry_t *pentry = NULL;
    cache_inode_dir_entry_t dirent_key[1], *dirent;
    struct avltree_node *dirent_node, *tmpnode;
    LRU_List_state_t vstate;

    /* Directory mutation generally invalidates outstanding
     * readdirs, hence any cached cookies, so in these cases we
     * clear the cookie avl */

    /* Set the return default to CACHE_INODE_SUCCESS */
    *pstatus = CACHE_INODE_SUCCESS;

    /* Sanity check: only directories carry dirents */
    if(pentry_parent->internal_md.type != DIRECTORY)
        {
            *pstatus = CACHE_INODE_BAD_TYPE;
            return NULL;
        }

    /* If no active entry, do nothing */
    if (pentry_parent->object.dir.nbactive == 0)
        {
            *pstatus = CACHE_INODE_NOT_FOUND;
            return NULL;
        }

    /* build a stack search key and probe the name-keyed AVL */
    FSAL_namecpy(&dirent_key->name, pname);
    dirent_node = avltree_lookup(&dirent_key->node_n,
                                 &pentry_parent->object.dir.dentries);
    if (! dirent_node) {
        *pstatus = CACHE_INODE_NOT_FOUND; /* Right error code (see above)? */
        return NULL;
    }

    /* unpack avl node */
    dirent = avltree_container_of(dirent_node, cache_inode_dir_entry_t,
                                  node_n);

    /* check state of cached dirent -- STALE entries are still returned,
     * with a debug trace */
    vstate = dirent->pentry->internal_md.valid_state;
    if (vstate == VALID || vstate == STALE) {

        if (vstate == STALE)
            LogDebug(COMPONENT_NFS_READDIR,
                     "DIRECTORY: found STALE cache entry");

        /* Entry was found */
        pentry = dirent->pentry;
        *pstatus = CACHE_INODE_SUCCESS;
    }

    /* Did we find something */
    if(pentry != NULL)
        {
            /* Yes, we did ! */
            switch (dirent_op)
                {
                case CACHE_INODE_DIRENT_OP_REMOVE:
                    /* unhook from the name AVL, return the dirent to the
                     * client pool, and drop the active count */
                    avltree_remove(&dirent->node_n,
                                   &pentry_parent->object.dir.dentries);
                    /* release to pool */
                    ReleaseToPool(dirent, &pclient->pool_dir_entry);
                    pentry_parent->object.dir.nbactive--;
                    *pstatus = CACHE_INODE_SUCCESS;
                    break;

                case CACHE_INODE_DIRENT_OP_RENAME:
                    /* change the installed inode only if the rename can
                     * succeed (i.e., the new name is not already taken) */
                    FSAL_namecpy(&dirent_key->name, newname);
                    tmpnode = avltree_lookup(&dirent_key->node_n,
                                             &pentry_parent->object.dir.dentries);
                    if (tmpnode) {
                        /* rename would cause a collision */
                        *pstatus = CACHE_INODE_ENTRY_EXISTS;
                    } else {
                        /* remove, rename, and re-insert the object with
                         * new keys */
                        avltree_remove(&dirent->node_n,
                                       &pentry_parent->object.dir.dentries);
                        FSAL_namecpy(&dirent->name, newname);
                        tmpnode = avltree_insert(&dirent->node_n,
                                                 &pentry_parent->object.dir.dentries);
                        if (tmpnode) {
                            /* collision, tree state unchanged--this won't
                             * happen */
                            *pstatus = CACHE_INODE_ENTRY_EXISTS;
                            /* still, try to revert the change in place */
                            FSAL_namecpy(&dirent->name, pname);
                            tmpnode = avltree_insert(&dirent->node_n,
                                                     &pentry_parent->object.dir.dentries);
                        } else {
                            *pstatus = CACHE_INODE_SUCCESS;
                        }
                    } /* !found */
                    break;

                default:
                    /* Should never occur; in any case, it costs nothing to
                     * handle this situation */
                    *pstatus = CACHE_INODE_INVALID_ARGUMENT;
                    break;

                } /* switch */
        }

    if (*pstatus == CACHE_INODE_SUCCESS) {
        /* As noted, if a mutating operation was performed, we must
         * invalidate cached cookies.
         * NOTE(review): this also fires for plain successful lookups,
         * not only mutations -- confirm that is intended. */
        cache_inode_release_dirents(
            pentry_parent, pclient, CACHE_INODE_AVL_COOKIES);

        /* Someone has to repopulate the avl cookie cache.  Populating it
         * lazily is ok, but the logic to do it makes supporting
         * simultaneous readers more involved.  Another approach would be
         * to do it in the background, scheduled from here. */
    }

    return pentry;
}                               /* cache_inode_operate_cached_dirent */
/**
 *
 * cache_inode_readdir: Reads a directory.
 *
 * Looks up for a name in a directory indicated by a cached entry. The
 * directory should have been cached before.
 *
 * NEW: pending new (C-language) callback based dirent unpacking into caller
 * structures, we eliminate copies by returning dir entries by pointer. To
 * permit this, we introduce lock donation. If new int pointer argument
 * unlock is 1 on return, the calling thread holds pentry read-locked and
 * must release this lock after dirent processing.
 *
 * This is the only function in the cache_inode_readdir.c file that manages MT
 * safety on a directory cache entry.
 *
 * @param pentry [IN] entry for the parent directory to be read.
 * @param cookie [IN] cookie for the readdir operation (basically the offset).
 * @param nbwanted [IN] Maximum number of directory entries wanted.
 * @param peod_met [OUT] A flag to know if end of directory was met during this call.
 * @param dirent_array [OUT] the resulting array of found directory entries.
 * @param ht [IN] hash table used for the cache, unused in this call.
 * @param unlock [OUT] the caller shall release read-lock on pentry when done
 * @param pclient [INOUT] resource allocated by the client for the nfs management.
 * @param pcontext [IN] FSAL credentials
 * @param pstatus [OUT] returned status.
 *
 * @return CACHE_INODE_SUCCESS if operation is a success \n
 * @return CACHE_INODE_BAD_TYPE if entry is not related to a directory\n
 * @return CACHE_INODE_LRU_ERROR if allocation error occurred when validating the entry
 *
 */
cache_inode_status_t cache_inode_readdir(cache_entry_t * dir_pentry,
                                         cache_inode_policy_t policy,
                                         uint64_t cookie,
                                         unsigned int nbwanted,
                                         unsigned int *pnbfound,
                                         uint64_t *pend_cookie,
                                         cache_inode_endofdir_t *peod_met,
                                         cache_inode_dir_entry_t **dirent_array,
                                         hash_table_t *ht,
                                         int *unlock,
                                         cache_inode_client_t *pclient,
                                         fsal_op_context_t *pcontext,
                                         cache_inode_status_t *pstatus)
{
  cache_inode_dir_entry_t dirent_key[1], *dirent;
  struct avltree_node *dirent_node;
  fsal_accessflags_t access_mask = 0;
  uint64_t inoff = cookie;
  int i = 0;

  /* Guide to parameters:
   * the first cookie is parameter 'cookie'
   * number of entries queried is set by parameter 'nbwanted'
   * number of found entries before eod is return is '*pnbfound'
   * '*peod_met' is set if end of directory is encountered */

  /* Set the return default to CACHE_INODE_SUCCESS */
  *pstatus = CACHE_INODE_SUCCESS;
  dirent = NULL;

  /* Set initial value of unlock: no lock donated until success */
  *unlock = FALSE;

  /* end cookie initial value is the begin cookie */
  LogFullDebug(COMPONENT_NFS_READDIR,
               "--> Cache_inode_readdir: setting pend_cookie to cookie=%"
               PRIu64,
               cookie);
  *pend_cookie = cookie;

  /* stats */
  pclient->stat.nb_call_total++;
  (pclient->stat.func_stats.nb_call[CACHE_INODE_READDIR])++;

  LogFullDebug(COMPONENT_NFS_READDIR,
               "--> Cache_inode_readdir: parameters are cookie=%"PRIu64
               "nbwanted=%u",
               cookie, nbwanted);

  /* Sanity check */
  if(nbwanted == 0)
    {
      /* Asking for nothing is not a crime !!!!!
       * build a 'dummy' return in this case */
      *pstatus = CACHE_INODE_SUCCESS;
      *pnbfound = 0;
      *peod_met = TO_BE_CONTINUED;

      /* stats */
      (pclient->stat.func_stats.nb_success[CACHE_INODE_READDIR])++;

      return *pstatus;
    }

  /* Force dir content invalidation if policy enforced no name cache */
  if( !CACHE_INODE_KEEP_CONTENT( dir_pentry->policy ) )
    return cache_inode_readdir_nonamecache( dir_pentry,
                                            policy,
                                            cookie,
                                            nbwanted,
                                            pnbfound,
                                            pend_cookie,
                                            peod_met,
                                            dirent_array,
                                            ht,
                                            unlock,
                                            pclient,
                                            pcontext,
                                            pstatus ) ;

  /* Take the write lock: renew/populate below may mutate the entry */
  P_w(&dir_pentry->lock);

  /* Renew the entry (to avoid having it being garbagged */
  if(cache_inode_renew_entry(dir_pentry, NULL, ht,
                             pclient, pcontext, pstatus) != CACHE_INODE_SUCCESS)
    {
      /* NOTE(review): stat bucket is CACHE_INODE_GETATTR here, not
       * CACHE_INODE_READDIR -- confirm this is intentional */
      (pclient->stat.func_stats.nb_err_retryable[CACHE_INODE_GETATTR])++;
      V_w(&dir_pentry->lock);
      return *pstatus;
    }

  /* readdir can be done only with a directory */
  if(dir_pentry->internal_md.type != DIRECTORY)
    {
      V_w(&dir_pentry->lock);
      *pstatus = CACHE_INODE_BAD_TYPE;

      /* stats */
      (pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_READDIR])++;

      return *pstatus;
    }

  /* Check is user (as specified by the credentials) is authorized to read
   * the directory or not */
  access_mask = FSAL_MODE_MASK_SET(FSAL_R_OK) |
                FSAL_ACE4_MASK_SET(FSAL_ACE_PERM_LIST_DIR);
  if(cache_inode_access_no_mutex(dir_pentry,
                                 access_mask,
                                 ht, pclient, pcontext, pstatus) != CACHE_INODE_SUCCESS)
    {
      V_w(&dir_pentry->lock);

      (pclient->stat.func_stats.nb_err_retryable[CACHE_INODE_READDIR])++;
      return *pstatus;
    }

  /* Is the directory fully cached (this is done if a readdir call is done on the directory) */
  if(dir_pentry->object.dir.has_been_readdir != CACHE_INODE_YES)
    {
       /* populate the cache */
       if(cache_inode_readdir_populate(dir_pentry,
                                       policy,
                                       ht,
                                       pclient, pcontext, pstatus) != CACHE_INODE_SUCCESS)
         {
           /* stats */
           (pclient->stat.func_stats.nb_err_unrecover[CACHE_INODE_READDIR])++;

           V_w(&dir_pentry->lock);
           return *pstatus;
         }
    }

  /* deal with dentry cache invalidates */
  revalidate_cookie_cache(dir_pentry, pclient);

  /* Downgrade Writer lock to a reader one:
   * from here on only read access is needed, and on success the read
   * lock is donated to the caller (see *unlock). */
  rw_lock_downgrade(&dir_pentry->lock);

  /* deal with initial cookie value:
   * 1. cookie is invalid (-should- be checked by caller)
   * 2. cookie is 0 (first cookie) -- ok
   * 3. cookie is > than highest dirent position (error)
   * 4. cookie <= highest dirent position but > highest cached cookie
   * (currently equivalent to #2, because we pre-populate the cookie avl)
   * 5. cookie is in cached range -- ok */

  if (cookie > 0) {

      /* cookies 1 and 2 are reserved (".", "..") */
      if (cookie < 3) {
	  *pstatus = CACHE_INODE_BAD_COOKIE;
	  V_r(&dir_pentry->lock);
	  return *pstatus;
      }

      if ((inoff-3) > avltree_size(&dir_pentry->object.dir.dentries)) {
	  LogCrit(COMPONENT_NFS_V4, "Bad initial cookie %"PRIu64,
		  inoff);
	  *pstatus = CACHE_INODE_BAD_COOKIE;
	  V_r(&dir_pentry->lock);
	  return *pstatus;
      }

      /* we assert this can now succeed */
      dirent_key->cookie = inoff;
      dirent_node = avltree_lookup(&dirent_key->node_c,
				   &dir_pentry->object.dir.cookies);
      if (! dirent_node) {
	  LogCrit(COMPONENT_NFS_READDIR,
		  "%s: seek to cookie=%"PRIu64" fail",
		  __func__, inoff);
	  *pstatus = CACHE_INODE_NOT_FOUND;
	  V_r(&dir_pentry->lock);
	  return *pstatus;
      }

      /* switch avls: we found the node in the cookie tree, continue the
       * walk in the name-ordered tree */
      dirent = avltree_container_of(dirent_node,
				    cache_inode_dir_entry_t,
				    node_c);
      dirent_node = &dirent->node_n;

      /* client wants the cookie -after- the last we sent, and
       * the Linux 3.0 and 3.1.0-rc7 clients misbehave if we
       * resend the last one */
      dirent_node = avltree_next(dirent_node);
  } else {
      /* initial readdir */
      dirent_node = avltree_first(&dir_pentry->object.dir.dentries);
  }

  LogFullDebug(COMPONENT_NFS_READDIR,
               "About to readdir in  cache_inode_readdir: pentry=%p "
	       "cookie=%"PRIu64,
               dir_pentry,
               cookie);

  /* Now satisfy the request from the cached readdir--stop when either
   * the requested sequence or dirent sequence is exhausted */
  *pnbfound = 0;
  *peod_met = TO_BE_CONTINUED;

  for(i = 0; i < nbwanted; ++i)
    {
      if (!dirent_node)
	  break;

      dirent = avltree_container_of(dirent_node,
				    cache_inode_dir_entry_t,
				    node_n);

      dirent_array[i] = dirent;
      (*pnbfound)++;

      dirent_node = avltree_next(dirent_node);
    }

  if (*pnbfound > 0)
    {
      if (!dirent)
	{
         LogCrit(COMPONENT_CACHE_INODE, "cache_inode_readdir: "
                 "UNEXPECTED CASE: dirent is NULL whereas nbfound>0");
	 *pstatus = CACHE_INODE_INCONSISTENT_ENTRY;
         /* NOTE(review): this path returns without V_r(&dir_pentry->lock)
          * and without setting *unlock -- possible read-lock leak;
          * confirm */
	 return CACHE_INODE_INCONSISTENT_ENTRY;
	}

      /* next readdir should start after the last entry we returned */
      *pend_cookie = dirent->cookie;
    }

  /* if the end of the dirent sequence was reached, flag eod */
  if (! dirent_node)
      *peod_met = END_OF_DIR;

  *pstatus = cache_inode_valid(dir_pentry, CACHE_INODE_OP_GET, pclient);

  /* stats: on success the read lock is donated to the caller, on error
   * it is released here */
  if(*pstatus != CACHE_INODE_SUCCESS)
    {
      (pclient->stat.func_stats.nb_err_retryable[CACHE_INODE_READDIR])++;
      V_r(&dir_pentry->lock);
    }
  else
    {
      (pclient->stat.func_stats.nb_success[CACHE_INODE_READDIR])++;
      *unlock = TRUE;
    }

  return *pstatus;
}                               /* cache_inode_readdir */
/* Self-test for the GAUL AVL tree routines.
 *
 * Inserts 62 single characters (A-Z, a-z, 0-9), prints tree statistics
 * and traversals, removes the 26 uppercase letters, then looks up one
 * removed key ('X') and one key never inserted ('x').  Output is printed
 * to stdout; returns the global 'failed' flag set by the traversal
 * callbacks.
 *
 * NOTE(review): the dangling #endif below closes a conditional that
 * starts before this chunk (presumably an alternate prototype) --
 * kept verbatim. */
GAULFUNC boolean avltree_test(void)
#endif
  {
  int           i, j;
  AVLTree       *tree;
  char          chars[62];	/* 26 + 26 + 10 test keys */
  char		chx='x', chX='X', *ch;

  printf("Testing my dodgy AVL tree routines.\n");

  tree = avltree_new(test_avltree_generate);

  /* populate: uppercase, lowercase, then digits */
  i = 0;
  for (j = 0; j < 26; j++, i++)
    {
    chars[i] = 'A' + (char) j;
    avltree_insert(tree, &chars[i]);
    }
  for (j = 0; j < 26; j++, i++)
    {
    chars[i] = 'a' + (char) j;
    avltree_insert(tree, &chars[i]);
    }
  for (j = 0; j < 10; j++, i++)
    {
    chars[i] = '0' + (char) j;
    avltree_insert(tree, &chars[i]);
    }

  printf("height: %d\n", avltree_height(tree));
  printf("num nodes: %d\n", avltree_num_nodes(tree));

  printf("tree: ");
  avltree_traverse(tree, test_avltree_traverse, NULL);
  printf("\n");

  /* non-NULL userdata makes the traversal callback stop early */
  printf("tree to 'S' then foo: ");
  avltree_traverse(tree, test_avltree_traverse, "foo");
  printf("\n");

  /* remove the 26 uppercase letters (chars[0..25]) */
  for (i = 0; i < 26; i++)
    if ( !avltree_remove(tree, &chars[i]) )
      printf("%c not found.\n", chars[i]);

  printf("height: %d\n", avltree_height(tree));
  printf("num nodes: %d\n", avltree_num_nodes(tree));

  printf("tree: ");
  avltree_traverse(tree, test_avltree_traverse, NULL);
  printf("\n");

  /* 'x' (lowercase) was inserted and kept: expect a hit */
  printf("Lookup for 'x': ");
  ch = (char *) avltree_lookup(tree, (vpointer) &chx);
  if (ch)
    printf("Found '%c'\n", *ch);
  else
    printf("Not found.\n");

  /* 'X' (uppercase) was removed above: expect a miss */
  printf("Lookup for 'X': ");
  ch = (char *) avltree_lookup(tree, (vpointer) &chX);
  if (ch)
    printf("Found '%c'\n", *ch);
  else
    printf("Not found.\n");

  printf("Tests: %s\n", failed?"FAILED":"PASSED");

  avltree_delete(tree);

  return failed;
  }
&(client_by_ip.cache[eip_cache_offsetof(&client_by_ip, ipaddr)]); node = (struct avltree_node *)atomic_fetch_voidptr(cache_slot); if (node) { if (client_ip_cmpf(&v.node_k, node) == 0) { /* got it in 1 */ LogDebug(COMPONENT_HASHTABLE_CACHE, "client_mgr cache hit slot %d\n", eip_cache_offsetof(&client_by_ip, ipaddr)); cl = avltree_container_of(node, struct gsh_client, node_k); goto out; } } /* fall back to AVL */ node = avltree_lookup(&v.node_k, &client_by_ip.t); if (node) { cl = avltree_container_of(node, struct gsh_client, node_k); /* update cache */ atomic_store_voidptr(cache_slot, node); goto out; } else if (lookup_only) { PTHREAD_RWLOCK_unlock(&client_by_ip.lock); return NULL; } PTHREAD_RWLOCK_unlock(&client_by_ip.lock); server_st = gsh_calloc((sizeof(struct server_stats) + addr_len), 1); if (server_st == NULL) return NULL;