static cache_entry_t *choose_pentry(hash_table_t *ht)
{
    struct rbt_node *it;
    struct rbt_head *tete_rbt;
    cache_entry_t *pentry = NULL;
    unsigned int i = 0;
    hash_data_t *pdata = NULL;
    unsigned int counter = 0;

    /* Sanity check */
    if(ht == NULL)
        return NULL;

    for(i = 0; i < ht->parameter.index_size; i++) {
        tete_rbt = &((ht->array_rbt)[i]);

        RBT_LOOP(tete_rbt, it) {
            counter += 1;
            pdata = (hash_data_t *) it->rbt_opaq;
            pentry = (cache_entry_t *) (pdata->buffval.pdata);

            /* No file invalidation for the moment (file can handle state) */
            if(counter >= 10)
                return pentry;

            RBT_INCREMENT(it);
        }
    }

    /* Fewer than 10 entries were seen: return the last one visited
     * (or NULL if the table is empty). */
    return pentry;
}
void hashtable_log(log_components_t component, struct hash_table *ht)
{
    /* The current position in the hash table */
    struct rbt_node *it = NULL;
    /* The root of the tree currently being inspected */
    struct rbt_head *root;
    /* Buffer descriptors for the key and value */
    struct hash_data *data = NULL;
    /* String representation of the key */
    char dispkey[HASHTABLE_DISPLAY_STRLEN];
    /* String representation of the stored value */
    char dispval[HASHTABLE_DISPLAY_STRLEN];
    /* Index for traversing the partitions */
    uint32_t i = 0;
    /* Running count of entries */
    size_t nb_entries = 0;
    /* Recomputed partition index */
    uint32_t index = 0;
    /* Recomputed hash for the red-black tree */
    uint64_t rbt_hash = 0;

    LogFullDebug(component, "The hash is partitioned into %d trees",
                 ht->parameter.index_size);

    for (i = 0; i < ht->parameter.index_size; i++)
        nb_entries += ht->partitions[i].count;

    LogFullDebug(component, "The hash contains %zd entries", nb_entries);

    for (i = 0; i < ht->parameter.index_size; i++) {
        root = &ht->partitions[i].rbt;
        LogFullDebug(component,
                     "The partition in position %" PRIu32
                     " contains: %u entries",
                     i, root->rbt_num_node);

        PTHREAD_RWLOCK_rdlock(&ht->partitions[i].lock);

        RBT_LOOP(root, it) {
            data = it->rbt_opaq;

            ht->parameter.key_to_str(&(data->key), dispkey);
            ht->parameter.val_to_str(&(data->val), dispval);

            if (compute(ht, &data->key, &index, &rbt_hash)
                != HASHTABLE_SUCCESS) {
                LogCrit(component,
                        "Possible implementation error in hash_func_both");
                index = 0;
                rbt_hash = 0;
            }

            LogFullDebug(component,
                         "%s => %s; index=%" PRIu32 " rbt_hash=%" PRIu64,
                         dispkey, dispval, index, rbt_hash);

            RBT_INCREMENT(it);
        }

        PTHREAD_RWLOCK_unlock(&ht->partitions[i].lock);
    }
}
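/*
 * Most of the functions collected here repeat the same bucket walk. The
 * following is a minimal sketch of that pattern for the partitioned
 * hash_table layout used by hashtable_log() above, built only from the
 * RBT_LOOP/RBT_OPAQ/RBT_INCREMENT macros and fields seen in these
 * snippets; walk_all_entries() and its visit_entry() callback are
 * hypothetical and introduced only for illustration.
 */
static void walk_all_entries(struct hash_table *ht,
                             void (*visit_entry)(struct hash_data *data))
{
    struct rbt_head *root;
    struct rbt_node *it;
    struct hash_data *data;
    uint32_t i;

    /* Visit each partition in turn */
    for (i = 0; i < ht->parameter.index_size; i++) {
        root = &ht->partitions[i].rbt;

        /* Readers hold the partition's rwlock for the whole walk */
        PTHREAD_RWLOCK_rdlock(&ht->partitions[i].lock);

        RBT_LOOP(root, it) {
            /* RBT_OPAQ yields the hash_data stored in the node */
            data = RBT_OPAQ(it);
            visit_entry(data);
            RBT_INCREMENT(it);
        }

        PTHREAD_RWLOCK_unlock(&ht->partitions[i].lock);
    }
}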
void print_node(struct rbt_head *head)
{
    struct rbt_node *it;

    printf("header 0x%lx : root 0x%lx lm 0x%lx rm 0x%lx num = %d\n",
           head, head->root, head->leftmost, head->rightmost,
           head->rbt_num_node);

    RBT_LOOP(head, it) {
        printf("node 0x%lx : flags 0%o p 0x%lx a 0x%lx "
               "l 0x%lx r 0x%lx val = %d\n",
               it, it->rbt_flags, it->parent, it->anchor,
               it->left, it->next, it->rbt_value);
        RBT_INCREMENT(it);
    }
}
/**
 *
 * HashTable_Log: Prints information about the hashtable (mostly for debugging purposes).
 *
 * @param component the component debugging config to use.
 * @param ht        the hashtable to be used.
 *
 * @return Nothing (void function).
 *
 * @see HashTable_Set
 * @see HashTable_Init
 * @see HashTable_Get
 */
void HashTable_Log(log_components_t component, hash_table_t *ht)
{
    struct rbt_node *it;
    struct rbt_head *tete_rbt;
    hash_data_t *pdata = NULL;
    char dispkey[HASHTABLE_DISPLAY_STRLEN];
    char dispval[HASHTABLE_DISPLAY_STRLEN];
    unsigned int i = 0;
    int nb_entries = 0;
    unsigned long rbtval;
    unsigned long hashval;

    /* Sanity check */
    if(ht == NULL)
        return;

    LogFullDebug(COMPONENT_HASHTABLE,
                 "The hash has %d nodes (this number MUST be a prime integer for performance reasons)",
                 ht->parameter.index_size);

    for(i = 0; i < ht->parameter.index_size; i++)
        nb_entries += ht->stat_dynamic[i].nb_entries;

    LogFullDebug(COMPONENT_HASHTABLE, "The hash contains %d entries",
                 nb_entries);

    for(i = 0; i < ht->parameter.index_size; i++) {
        tete_rbt = &((ht->array_rbt)[i]);
        LogFullDebug(COMPONENT_HASHTABLE,
                     "The node in position %d contains: %d entries",
                     i, tete_rbt->rbt_num_node);

        RBT_LOOP(tete_rbt, it) {
            pdata = (hash_data_t *) it->rbt_opaq;

            ht->parameter.key_to_str(&(pdata->buffkey), dispkey);
            ht->parameter.val_to_str(&(pdata->buffval), dispval);

            hashval = (*(ht->parameter.hash_func_key)) (&ht->parameter,
                                                        &(pdata->buffkey));
            rbtval = (*(ht->parameter.hash_func_rbt)) (&ht->parameter,
                                                       &(pdata->buffkey));

            LogFullDebug(component, "%s => %s; hashval=%lu rbtval=%lu",
                         dispkey, dispval, hashval, rbtval);

            RBT_INCREMENT(it);
        }
    }
}
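/*
 * For orientation, a minimal sketch of a call site for HashTable_Log();
 * the table is assumed to have been built elsewhere with
 * HashTable_Init/HashTable_Set (not shown), and dump_table_for_debug()
 * is a hypothetical helper name used only for this sketch.
 */
static void dump_table_for_debug(hash_table_t *ht)
{
    /* Only walk the table when full-debug logging is enabled,
     * since HashTable_Log visits every entry. */
    if(isFullDebug(COMPONENT_HASHTABLE))
        HashTable_Log(COMPONENT_HASHTABLE, ht);
}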
/**
 *
 * Key_Locate: Locates a buffer key in the hash table, as a rbt node.
 *
 * This function is for internal use only.
 *
 * @param ht        the hashtable to be used.
 * @param buffkey   a pointer to an object of type hash_buffer_t which describes the key location in memory.
 * @param hashval   hash value associated with the key (in order to avoid computing it a second time).
 * @param rbt_value rbt value associated with the key (in order to avoid computing it a second time).
 * @param ppnode    if successful, will point to the pointer to the rbt node to be used.
 *
 * @return HASHTABLE_SUCCESS if successful.
 * @return HASHTABLE_ERROR_NO_SUCH_KEY if the key was not found.
 *
 */
static int Key_Locate(hash_table_t *ht, hash_buffer_t *buffkey,
                      unsigned int hashval, int rbt_value,
                      struct rbt_node **ppnode)
{
    struct rbt_head *tete_rbt;
    hash_data_t *pdata = NULL;
    struct rbt_node *pn;
    int found = 0;

    /* Sanity check */
    if(ht == NULL || buffkey == NULL || ppnode == NULL)
        return HASHTABLE_ERROR_INVALID_ARGUMENT;

    /* Find the head of the rbt */
    tete_rbt = &(ht->array_rbt[hashval]);

    /* Get the leftmost node with this value (the first node with this
     * value in the rbtree) */
    RBT_FIND_LEFT(tete_rbt, pn, rbt_value);

    /* Was the find successful? */
    if(pn == NULL)
        return HASHTABLE_ERROR_NO_SUCH_KEY;

    /* For each entry with this value, compare the key value */
    while((pn != 0) && (RBT_VALUE(pn) == rbt_value)) {
        pdata = (hash_data_t *) RBT_OPAQ(pn);

        /* Verify the key value: this function returns 0 if the keys are identical */
        if(!ht->parameter.compare_key(buffkey, &(pdata->buffkey))) {
            found = 1;
            break;  /* exit the while loop */
        }

        RBT_INCREMENT(pn);
    }  /* while */

    /* We didn't find anything */
    if(!found)
        return HASHTABLE_ERROR_NO_SUCH_KEY;

    /* Key was found */
    *ppnode = pn;

    return HASHTABLE_SUCCESS;
}  /* Key_Locate */
static bool nfs_rpc_cbsim_get_v40_client_ids(DBusMessageIter *args,
                                             DBusMessage *reply)
{
    uint32_t i;
    hash_table_t *ht = ht_confirmed_client_id;
    struct rbt_head *head_rbt;
    struct hash_data *pdata = NULL;
    struct rbt_node *pn;
    nfs_client_id_t *pclientid;
    uint64_t clientid;
    DBusMessageIter iter, sub_iter;
    struct timespec ts;

    /* create a reply from the message */
    now(&ts);
    dbus_message_iter_init_append(reply, &iter);
    dbus_append_timestamp(&iter, &ts);

    dbus_message_iter_open_container(&iter, DBUS_TYPE_ARRAY,
                                     DBUS_TYPE_UINT64_AS_STRING, &sub_iter);

    /* For each bucket of the hashtable */
    for (i = 0; i < ht->parameter.index_size; i++) {
        head_rbt = &(ht->partitions[i].rbt);

        /* acquire mutex */
        PTHREAD_RWLOCK_wrlock(&(ht->partitions[i].lock));

        /* go through all entries in the red-black-tree */
        RBT_LOOP(head_rbt, pn) {
            pdata = RBT_OPAQ(pn);
            pclientid = pdata->val.addr;
            clientid = pclientid->cid_clientid;
            dbus_message_iter_append_basic(&sub_iter, DBUS_TYPE_UINT64,
                                           &clientid);
            RBT_INCREMENT(pn);
        }

        PTHREAD_RWLOCK_unlock(&(ht->partitions[i].lock));
    }

    /* close the array of client ids appended to the reply */
    dbus_message_iter_close_container(&iter, &sub_iter);

    return true;
}
static DBusHandlerResult nfs_rpc_cbsim_get_client_ids(DBusConnection *conn,
                                                      DBusMessage *msg,
                                                      void *user_data)
{
    DBusMessage *reply;
    static uint32_t i, serial = 1;
    hash_table_t *ht = ht_confirmed_client_id;
    struct rbt_head *head_rbt;
    hash_data_t *pdata = NULL;
    struct rbt_node *pn;
    nfs_client_id_t *pclientid;
    uint64_t clientid;
    DBusMessageIter iter, sub_iter;

    /* create a reply from the message */
    reply = dbus_message_new_method_return(msg);
    dbus_message_iter_init_append(reply, &iter);
    dbus_message_iter_open_container(&iter, DBUS_TYPE_ARRAY,
                                     DBUS_TYPE_UINT64_AS_STRING, &sub_iter);

    /* For each bucket of the hashtable */
    for(i = 0; i < ht->parameter.index_size; i++) {
        head_rbt = &(ht->partitions[i].rbt);

        /* acquire mutex */
        pthread_rwlock_wrlock(&(ht->partitions[i].lock));

        /* go through all entries in the red-black-tree */
        RBT_LOOP(head_rbt, pn) {
            pdata = RBT_OPAQ(pn);
            pclientid = (nfs_client_id_t *)pdata->buffval.pdata;
            clientid = pclientid->cid_clientid;
            dbus_message_iter_append_basic(&sub_iter, DBUS_TYPE_UINT64,
                                           &clientid);
            RBT_INCREMENT(pn);
        }

        pthread_rwlock_unlock(&(ht->partitions[i].lock));
    }

    /* close the array of client ids, send the reply and clean up */
    dbus_message_iter_close_container(&iter, &sub_iter);
    dbus_connection_send(conn, reply, &serial);
    dbus_message_unref(reply);

    return DBUS_HANDLER_RESULT_HANDLED;
}
/**
 * @brief Locate a key within a partition
 *
 * This function traverses the red-black tree within a hash table
 * partition and returns, if one exists, a pointer to a node matching
 * the supplied key.
 *
 * @param[in]  ht      The hashtable to be used
 * @param[in]  key     The key to look up
 * @param[in]  index   Index into RBT array
 * @param[in]  rbthash Hash in red-black tree
 * @param[out] node    On success, the found node, NULL otherwise
 *
 * @retval HASHTABLE_SUCCESS if successful
 * @retval HASHTABLE_ERROR_NO_SUCH_KEY if the key was not found
 */
static hash_error_t key_locate(struct hash_table *ht,
                               const struct gsh_buffdesc *key,
                               uint32_t index, uint64_t rbthash,
                               struct rbt_node **node)
{
    /* The current partition */
    struct hash_partition *partition = &(ht->partitions[index]);
    /* The root of the red black tree matching this index */
    struct rbt_head *root = NULL;
    /* A pair of buffer descriptors locating key and value for this entry */
    struct hash_data *data = NULL;
    /* The node in the red-black tree currently being traversed */
    struct rbt_node *cursor = NULL;
    /* true if we have located the key */
    int found = false;

    *node = NULL;

    if (partition->cache) {
        void **cache_slot = (void **)
            &(partition->cache[cache_offsetof(ht, rbthash)]);

        cursor = atomic_fetch_voidptr(cache_slot);
        LogFullDebug(COMPONENT_HASHTABLE_CACHE,
                     "hash %s index %" PRIu32 " slot %d",
                     (cursor) ? "hit" : "miss",
                     index, cache_offsetof(ht, rbthash));
        if (cursor) {
            data = RBT_OPAQ(cursor);
            if (ht->parameter.compare_key((struct gsh_buffdesc *)key,
                                          &(data->key)) == 0)
                goto out;
        }
    }

    root = &(ht->partitions[index].rbt);

    /* The leftmost occurrence of the value is the one from which we
       may start iteration to visit all nodes containing a value. */
    RBT_FIND_LEFT(root, cursor, rbthash);

    if (cursor == NULL) {
        if (isFullDebug(COMPONENT_HASHTABLE)
            && isFullDebug(ht->parameter.ht_log_component))
            LogFullDebug(ht->parameter.ht_log_component,
                         "Key not found: rbthash = %" PRIu64, rbthash);
        return HASHTABLE_ERROR_NO_SUCH_KEY;
    }

    while ((cursor != NULL) && (RBT_VALUE(cursor) == rbthash)) {
        data = RBT_OPAQ(cursor);
        if (ht->parameter.compare_key((struct gsh_buffdesc *)key,
                                      &(data->key)) == 0) {
            if (partition->cache) {
                void **cache_slot = (void **)
                    &(partition->cache[cache_offsetof(ht, rbthash)]);

                atomic_store_voidptr(cache_slot, cursor);
            }
            found = true;
            break;
        }
        RBT_INCREMENT(cursor);
    }

    if (!found) {
        if (isFullDebug(COMPONENT_HASHTABLE)
            && isFullDebug(ht->parameter.ht_log_component))
            LogFullDebug(ht->parameter.ht_log_component,
                         "Matching hash found, but no matching key.");
        return HASHTABLE_ERROR_NO_SUCH_KEY;
    }

 out:
    *node = cursor;

    return HASHTABLE_SUCCESS;
}
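/*
 * A hedged sketch of how an internal caller might combine the compute()
 * helper (used in hashtable_log() above) with key_locate(): compute the
 * partition index and RBT hash once, take the partition's read lock,
 * then let key_locate() walk the tree.  lookup_sketch() is a hypothetical
 * name; the real callers also handle latches and copy out the value,
 * which is omitted here.
 */
static hash_error_t lookup_sketch(struct hash_table *ht,
                                  const struct gsh_buffdesc *key,
                                  struct hash_data **data_out)
{
    uint32_t index = 0;
    uint64_t rbt_hash = 0;
    struct rbt_node *located = NULL;
    hash_error_t rc;

    /* compute() fills in both the partition index and the RBT hash,
     * exactly as in hashtable_log() above */
    rc = compute(ht, (struct gsh_buffdesc *)key, &index, &rbt_hash);
    if (rc != HASHTABLE_SUCCESS)
        return rc;

    PTHREAD_RWLOCK_rdlock(&ht->partitions[index].lock);
    rc = key_locate(ht, key, index, rbt_hash, &located);
    if (rc == HASHTABLE_SUCCESS)
        *data_out = RBT_OPAQ(located);
    PTHREAD_RWLOCK_unlock(&ht->partitions[index].lock);

    return rc;
}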
/**
 *
 * nfs_ip_stats_dump: Dumps the IP stats for each client to a file per client.
 *
 * @param ht_ip_stats [IN] hash tables to be dumped (one per worker)
 * @param nb_worker   [IN] number of worker threads
 * @param path_stat   [IN] pattern used to build the path used for dumping stats
 *
 * @return Nothing (void function).
 *
 */
void nfs_ip_stats_dump(hash_table_t **ht_ip_stats,
                       unsigned int nb_worker,
                       char *path_stat)
{
    struct rbt_node *it;
    struct rbt_head *tete_rbt;
    hash_data_t *pdata = NULL;
    unsigned int i = 0;
    unsigned int j = 0;
    unsigned int k = 0;
    nfs_ip_stats_t *pnfs_ip_stats[NB_MAX_WORKER_THREAD];
    nfs_ip_stats_t ip_stats_aggreg;
    /* enough to hold an IPv4 or IPv6 address as a string */
    char ipaddrbuf[40];
    char ifpathdump[MAXPATHLEN];
    sockaddr_t *ipaddr;
    time_t current_time;
    struct tm current_time_struct;
    char strdate[1024];
    FILE *flushipstat = NULL;

    /* Do nothing if the configuration disables IP stats */
    if(nfs_param.core_param.dump_stats_per_client == 0)
        return;

    /* Compute the current time */
    current_time = time(NULL);
    memcpy(&current_time_struct, localtime(&current_time),
           sizeof(current_time_struct));
    snprintf(strdate, 1024, "%u, %.2d/%.2d/%.4d %.2d:%.2d:%.2d ",
             (unsigned int)current_time,
             current_time_struct.tm_mday,
             current_time_struct.tm_mon + 1,
             1900 + current_time_struct.tm_year,
             current_time_struct.tm_hour,
             current_time_struct.tm_min,
             current_time_struct.tm_sec);

    /* Every client is assumed to have called worker #0 at least once,
     * so we loop over every client in that worker's hash table. */
    for(i = 0; i < ht_ip_stats[0]->parameter.index_size; i++) {
        tete_rbt = &((ht_ip_stats[0]->array_rbt)[i]);

        RBT_LOOP(tete_rbt, it) {
            pdata = (hash_data_t *) it->rbt_opaq;

            ipaddr = (sockaddr_t *) pdata->buffkey.pdata;

            sprint_sockaddr(ipaddr, ipaddrbuf, sizeof(ipaddrbuf));

            snprintf(ifpathdump, MAXPATHLEN, "%s/stats_nfs-%s",
                     path_stat, ipaddrbuf);

            if((flushipstat = fopen(ifpathdump, "a")) == NULL)
                return;

            /* Collect stats for each worker and aggregate them */
            memset(&ip_stats_aggreg, 0, sizeof(ip_stats_aggreg));
            for(j = 0; j < nb_worker; j++) {
                if(nfs_ip_stats_get(ht_ip_stats[j], ipaddr,
                                    &pnfs_ip_stats[j]) != IP_STATS_SUCCESS) {
                    fclose(flushipstat);
                    return;
                }

                ip_stats_aggreg.nb_call += (pnfs_ip_stats[j])->nb_call;

                ip_stats_aggreg.nb_req_nfs2 += (pnfs_ip_stats[j])->nb_req_nfs2;
                ip_stats_aggreg.nb_req_nfs3 += (pnfs_ip_stats[j])->nb_req_nfs3;
                ip_stats_aggreg.nb_req_nfs4 += (pnfs_ip_stats[j])->nb_req_nfs4;
                ip_stats_aggreg.nb_req_mnt1 += (pnfs_ip_stats[j])->nb_req_mnt1;
                ip_stats_aggreg.nb_req_mnt3 += (pnfs_ip_stats[j])->nb_req_mnt3;

                for(k = 0; k < MNT_V1_NB_COMMAND; k++)
                    ip_stats_aggreg.req_mnt1[k] += (pnfs_ip_stats[j])->req_mnt1[k];

                for(k = 0; k < MNT_V3_NB_COMMAND; k++)
                    ip_stats_aggreg.req_mnt3[k] += (pnfs_ip_stats[j])->req_mnt3[k];

                for(k = 0; k < NFS_V2_NB_COMMAND; k++)
                    ip_stats_aggreg.req_nfs2[k] += (pnfs_ip_stats[j])->req_nfs2[k];

                for(k = 0; k < NFS_V3_NB_COMMAND; k++)
                    ip_stats_aggreg.req_nfs3[k] += (pnfs_ip_stats[j])->req_nfs3[k];
            }

            /* Write stats to file */
            fprintf(flushipstat, "NFS/MOUNT STATISTICS,%s;%u|%u,%u,%u,%u,%u\n",
                    strdate,
                    ip_stats_aggreg.nb_call,
                    ip_stats_aggreg.nb_req_mnt1,
                    ip_stats_aggreg.nb_req_mnt3,
                    ip_stats_aggreg.nb_req_nfs2,
                    ip_stats_aggreg.nb_req_nfs3,
                    ip_stats_aggreg.nb_req_nfs4);

            fprintf(flushipstat, "MNT V1 REQUEST,%s;%u|",
                    strdate, ip_stats_aggreg.nb_req_mnt1);
            for(k = 0; k < MNT_V1_NB_COMMAND - 1; k++)
                fprintf(flushipstat, "%u,", ip_stats_aggreg.req_mnt1[k]);
            fprintf(flushipstat, "%u\n",
                    ip_stats_aggreg.req_mnt1[MNT_V1_NB_COMMAND - 1]);

            fprintf(flushipstat, "MNT V3 REQUEST,%s;%u|",
                    strdate, ip_stats_aggreg.nb_req_mnt3);
            for(k = 0; k < MNT_V3_NB_COMMAND - 1; k++)
                fprintf(flushipstat, "%u,", ip_stats_aggreg.req_mnt3[k]);
            fprintf(flushipstat, "%u\n",
                    ip_stats_aggreg.req_mnt3[MNT_V3_NB_COMMAND - 1]);

            fprintf(flushipstat, "NFS V2 REQUEST,%s;%u|",
                    strdate, ip_stats_aggreg.nb_req_nfs2);
            for(k = 0; k < NFS_V2_NB_COMMAND - 1; k++)
                fprintf(flushipstat, "%u,", ip_stats_aggreg.req_nfs2[k]);
            fprintf(flushipstat, "%u\n",
                    ip_stats_aggreg.req_nfs2[NFS_V2_NB_COMMAND - 1]);

            fprintf(flushipstat, "NFS V3 REQUEST,%s;%u|",
                    strdate, ip_stats_aggreg.nb_req_nfs3);
            for(k = 0; k < NFS_V3_NB_COMMAND - 1; k++)
                fprintf(flushipstat, "%u,", ip_stats_aggreg.req_nfs3[k]);
            fprintf(flushipstat, "%u\n",
                    ip_stats_aggreg.req_nfs3[NFS_V3_NB_COMMAND - 1]);

            fprintf(flushipstat, "END, ----- NO MORE STATS FOR THIS PASS ----\n");
            fflush(flushipstat);
            fclose(flushipstat);

            /* Check next client */
            RBT_INCREMENT(it);
        }
    }
}
static void reap_hash_table(hash_table_t *ht_reap)
{
    struct rbt_head *head_rbt;
    hash_data_t *pdata = NULL;
    uint32_t i;
    int v4, rc;
    struct rbt_node *pn;
    nfs_client_id_t *pclientid;
    nfs_client_record_t *precord;

    /* For each bucket of the requested hashtable */
    for(i = 0; i < ht_reap->parameter.index_size; i++) {
        head_rbt = &ht_reap->partitions[i].rbt;

 restart:
        /* acquire mutex */
        pthread_rwlock_wrlock(&ht_reap->partitions[i].lock);

        /* go through all entries in the red-black-tree */
        RBT_LOOP(head_rbt, pn) {
            pdata = RBT_OPAQ(pn);

            pclientid = (nfs_client_id_t *)pdata->buffval.pdata;
            /*
             * little hack: only want to reap v4 clients
             * 4.1 initializes this field to '1'
             */
            v4 = (pclientid->cid_create_session_sequence == 0);

            P(pclientid->cid_mutex);

            if(!valid_lease(pclientid) && v4) {
                inc_client_id_ref(pclientid);

                /* Take a reference to the client record */
                precord = pclientid->cid_client_record;
                inc_client_record_ref(precord);

                V(pclientid->cid_mutex);

                pthread_rwlock_unlock(&ht_reap->partitions[i].lock);

                if(isDebug(COMPONENT_CLIENTID)) {
                    char str[HASHTABLE_DISPLAY_STRLEN];

                    display_client_id_rec(pclientid, str);

                    LogFullDebug(COMPONENT_CLIENTID,
                                 "Expire index %d %s", i, str);
                }

                /* Take cr_mutex and expire clientid */
                P(precord->cr_mutex);
                rc = nfs_client_id_expire(pclientid);
                V(precord->cr_mutex);

                dec_client_id_ref(pclientid);
                dec_client_record_ref(precord);
                if(rc)
                    goto restart;
            } else {
                V(pclientid->cid_mutex);
            }

            RBT_INCREMENT(pn);
        }

        pthread_rwlock_unlock(&ht_reap->partitions[i].lock);
    }
}
void *reaper_thread(void *unused)
{
    hash_table_t *ht = ht_client_id;
    struct rbt_head *head_rbt;
    hash_data_t *pdata = NULL;
    int i, v4;
    struct rbt_node *pn;
    nfs_client_id_t *clientp;

#ifndef _NO_BUDDY_SYSTEM
    if((i = BuddyInit(&nfs_param.buddy_param_admin)) != BUDDY_SUCCESS) {
        /* Failed init */
        LogFatal(COMPONENT_MAIN, "Memory manager could not be initialized");
    }
    LogInfo(COMPONENT_MAIN, "Memory manager successfully initialized");
#endif

    SetNameFunction("reaper_thr");

    while(1) {
        /* Initial wait */
        /* TODO: should this be configurable? */
        /* sleep(nfs_param.core_param.reaper_delay); */
        sleep(reaper_delay);

        LogFullDebug(COMPONENT_MAIN, "NFS reaper : now checking clients");

        /* For each bucket of the hashtable */
        for(i = 0; i < ht->parameter.index_size; i++) {
            head_rbt = &(ht->array_rbt[i]);

 restart:
            /* acquire mutex */
            P_w(&(ht->array_lock[i]));

            /* go through all entries in the red-black-tree */
            RBT_LOOP(head_rbt, pn) {
                pdata = RBT_OPAQ(pn);
                clientp = (nfs_client_id_t *)pdata->buffval.pdata;
                /*
                 * little hack: only want to reap v4 clients
                 * 4.1 initializes this field to '1'
                 */
                v4 = (clientp->create_session_sequence == 0);

                if (clientp->confirmed != EXPIRED_CLIENT_ID &&
                    nfs4_is_lease_expired(clientp) && v4) {
                    V_w(&(ht->array_lock[i]));
                    LogDebug(COMPONENT_MAIN,
                             "NFS reaper: expire client %s",
                             clientp->client_name);
                    nfs_client_id_expire(clientp);
                    goto restart;
                }

                if (clientp->confirmed == EXPIRED_CLIENT_ID) {
                    LogDebug(COMPONENT_MAIN,
                             "reaper: client %s already expired",
                             clientp->client_name);
                }

                RBT_INCREMENT(pn);
            }

            V_w(&(ht->array_lock[i]));
        }
    }  /* while ( 1 ) */
}
static int reap_hash_table(hash_table_t *ht_reap)
{
    struct rbt_head *head_rbt;
    struct hash_data *addr = NULL;
    uint32_t i;
    int rc;
    struct rbt_node *pn;
    nfs_client_id_t *pclientid;
    nfs_client_record_t *precord;
    int count = 0;
    struct req_op_context req_ctx;
    struct user_cred creds;

    /* We need a real context. Make all reaping done
     * by root,root */
    memset(&creds, 0, sizeof(creds));
    req_ctx.creds = &creds;

    /* For each bucket of the requested hashtable */
    for (i = 0; i < ht_reap->parameter.index_size; i++) {
        head_rbt = &ht_reap->partitions[i].rbt;

 restart:
        /* acquire mutex */
        PTHREAD_RWLOCK_wrlock(&ht_reap->partitions[i].lock);

        /* go through all entries in the red-black-tree */
        RBT_LOOP(head_rbt, pn) {
            addr = RBT_OPAQ(pn);

            pclientid = addr->val.addr;
            count++;

            pthread_mutex_lock(&pclientid->cid_mutex);

            if (!valid_lease(pclientid)) {
                inc_client_id_ref(pclientid);

                /* Take a reference to the client record */
                precord = pclientid->cid_client_record;
                inc_client_record_ref(precord);

                pthread_mutex_unlock(&pclientid->cid_mutex);

                PTHREAD_RWLOCK_unlock(&ht_reap->partitions[i].lock);

                if (isDebug(COMPONENT_CLIENTID)) {
                    char str[HASHTABLE_DISPLAY_STRLEN];

                    display_client_id_rec(pclientid, str);

                    LogFullDebug(COMPONENT_CLIENTID,
                                 "Expire index %d %s", i, str);
                }

                /* Take cr_mutex and expire clientid */
                pthread_mutex_lock(&precord->cr_mutex);

                /**
                 * @TODO This is incomplete! The context has to be
                 * filled in from somewhere.
                 */
                memset(&req_ctx, 0, sizeof(req_ctx));

                rc = nfs_client_id_expire(pclientid, &req_ctx);

                pthread_mutex_unlock(&precord->cr_mutex);

                dec_client_id_ref(pclientid);
                dec_client_record_ref(precord);
                if (rc)
                    goto restart;
            } else {
                pthread_mutex_unlock(&pclientid->cid_mutex);
            }

            RBT_INCREMENT(pn);
        }

        PTHREAD_RWLOCK_unlock(&ht_reap->partitions[i].lock);
    }

    return count;
}