/**
 * mono_conc_hashtable_insert
 *
 * @Returns the old value if key is already present or null
 */
gpointer
mono_conc_hashtable_insert (MonoConcurrentHashTable *hash_table, gpointer key, gpointer value)
{
	conc_table *table;
	key_value_pair *kvs;
	int hash, i, table_mask;

	g_assert (key != NULL && key != TOMBSTONE);
	g_assert (value != NULL);

	hash = mix_hash (hash_table->hash_func (key));

	mono_mutex_lock (hash_table->mutex);

	if (hash_table->element_count >= hash_table->overflow_count)
		expand_table (hash_table);

	table = (conc_table*)hash_table->table;
	kvs = table->kvs;
	table_mask = table->table_size - 1;
	i = hash & table_mask;

	if (!hash_table->equal_func) {
		for (;;) {
			if (!kvs [i].key || kvs [i].key == TOMBSTONE) {
				kvs [i].value = value;
				/* The write to the key must only become visible after the write to the value */
				mono_memory_barrier ();
				kvs [i].key = key;
				++hash_table->element_count;
				mono_mutex_unlock (hash_table->mutex);
				return NULL;
			}
			if (key == kvs [i].key) {
				gpointer value = kvs [i].value;
				mono_mutex_unlock (hash_table->mutex);
				return value;
			}
			i = (i + 1) & table_mask;
		}
	} else {
		GEqualFunc equal = hash_table->equal_func;

		for (;;) {
			if (!kvs [i].key || kvs [i].key == TOMBSTONE) {
				kvs [i].value = value;
				/* The write to the key must only become visible after the write to the value */
				mono_memory_barrier ();
				kvs [i].key = key;
				++hash_table->element_count;
				mono_mutex_unlock (hash_table->mutex);
				return NULL;
			}
			if (equal (key, kvs [i].key)) {
				gpointer value = kvs [i].value;
				mono_mutex_unlock (hash_table->mutex);
				return value;
			}
			i = (i + 1) & table_mask;
		}
	}
}
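/*
 * Illustrative caller for the insert above (not part of the original source).
 * It relies only on the documented contract: if the key is already present
 * the table is left unchanged and the existing value is returned, otherwise
 * NULL is returned and the new pair is stored.  Creation of the table itself
 * is assumed to happen elsewhere, since the constructor signature differs
 * between Mono versions.
 */
static gpointer
intern_value (MonoConcurrentHashTable *table, gpointer key, gpointer value)
{
	gpointer old = mono_conc_hashtable_insert (table, key, value);

	if (old != NULL)
		return old;	/* key already present: keep using the existing value */
	return value;		/* key was new: value is now stored in the table */
}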
// insert key into hash table
void hash_put(hash_table *h, int key)
{
    int index = get_pos(h, key);

    // disallow insertion of duplicates into the hash table
    if (h->table[index] != NULL)
        return;

    // allocate space for the new key
    h->table[index] = malloc(sizeof(int));

    // copy the key into the hash table
    *h->table[index] = key;

    // record keeping :)
    h->size++;

    // expand the table if it's over half full
    if (h->size > h->capacity / 2)
        expand_table(h);
}
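// A minimal sketch of the expand_table() that hash_put() above relies on but
// does not show (not from the original source).  It assumes the hash_table
// layout implied by the caller -- table: array of int*, size, capacity -- and
// the same get_pos() probe function; malloc error handling is omitted.
static void expand_table(hash_table *h)
{
    int old_capacity = h->capacity;
    int **old_table = h->table;

    // double the bucket array and start counting from an empty table
    h->capacity = old_capacity * 2;
    h->table = malloc(h->capacity * sizeof(int *));
    for (int i = 0; i < h->capacity; i++)
        h->table[i] = NULL;
    h->size = 0;

    // re-insert every existing key so it lands in its slot for the new capacity
    for (int i = 0; i < old_capacity; i++) {
        if (old_table[i] != NULL) {
            int index = get_pos(h, *old_table[i]);
            h->table[index] = old_table[i];   // reuse the existing allocation
            h->size++;
        }
    }
    free(old_table);
}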
static bool search_slot(dhash_tbl_pt table, _key_t key, ulong *p_hashcode, slot_pt **pp_slot)
{
    //--------------------------------------------------
    if (D_LOAD_FACTOR(table) > DEFAULT_LOAD_FACTOR) {
        expand_table(table);
    }
    //--------------------------------------------------
    ulong hashcode = table->hash(key);
    ulong slot_num = locate_slot(table, hashcode);
    ulong seg_idx = slot_num >> table->seg_shift;
    ulong loc = MOD_POW2(slot_num, DEFAULT_SEG_SIZE);
    slot_pt *p_slot = &table->seg_arr[seg_idx]->slot_arr[loc];

    while (*p_slot != NIL) {
        if (hashcode == (*p_slot)->hashcode && table->is_equal(key, (*p_slot)->key)) {
            break;
        } else {
            p_slot = &(*p_slot)->next;
        }
    }

    *p_hashcode = hashcode;
    *pp_slot = p_slot;
    return *p_slot != NIL;
}
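// Sketch of how a put operation could build on search_slot() above (not from
// the original source).  Because pp_slot points at the link that terminated
// the chain walk, a new node can be spliced in without re-traversing.  The
// node's value field, the table's count field, and the dhash_put name are
// assumptions for illustration only.
static bool dhash_put(dhash_tbl_pt table, _key_t key, void *value)
{
    ulong hashcode;
    slot_pt *pp_slot;

    if (search_slot(table, key, &hashcode, &pp_slot)) {
        // key already present: *pp_slot is the matching node
        (*pp_slot)->value = value;            // value field assumed
        return false;
    }

    // *pp_slot is the NIL link at the end of the chain: hang a new node there
    slot_pt node = malloc(sizeof(*node));
    if (node == NULL)
        return false;
    node->hashcode = hashcode;
    node->key = key;
    node->value = value;                      // value field assumed
    node->next = NIL;
    *pp_slot = node;
    table->count++;                           // counter name assumed (whatever D_LOAD_FACTOR reads)
    return true;
}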
uLong oz_knl_handle_assign (void *object, OZ_Procmode procmode, OZ_Handle *handle_r)

{
  Handleent *he;
  OZ_Handle hi;
  OZ_Handletbl *handletbl;
  OZ_Objtype objtype;
  OZ_Secaccmsk secaccmsk;
  OZ_Secattr *secattr;
  OZ_Seckeys *seckeys;
  uLong sts;

#if 000
  handletbl = OZ_SYS_PDATA_FROM_KNL (OZ_PROCMODE_KNL) -> handletbl;
#else
  {
    OZ_Pdata *pdata;
    pdata = OZ_SYS_PDATA_FROM_KNL (OZ_PROCMODE_KNL);
    handletbl = pdata -> handletbl;
    // oz_knl_printk ("oz_knl_handle_assign*: %s pdata %p, handletbl %p\n", oz_knl_process_getname (NULL), pdata, handletbl);
  }
#endif
  OZ_KNL_CHKOBJTYPE (handletbl, OZ_OBJTYPE_HANDLETBL);

  /* Make sure object type is ok and get its security attributes */

  objtype = OZ_KNL_GETOBJTYPE (object);
  switch (objtype) {
    case OZ_OBJTYPE_EVENT:   { secattr = oz_knl_event_getsecattr   (object); break; }
    case OZ_OBJTYPE_PROCESS: { secattr = oz_knl_process_getsecattr (object); break; }
    case OZ_OBJTYPE_SECTION: { secattr = oz_knl_section_getsecattr (object); break; }
    case OZ_OBJTYPE_THREAD:  { secattr = oz_knl_thread_getsecattr  (object); break; }
    case OZ_OBJTYPE_IOCHAN:  { secattr = oz_knl_iochan_getsecattr  (object); break; }
    case OZ_OBJTYPE_IMAGE:   { secattr = oz_knl_image_getsecattr   (object); break; }
    case OZ_OBJTYPE_LOGNAME: { secattr = oz_knl_logname_getsecattr (object); break; }
    case OZ_OBJTYPE_USER:    { secattr = oz_knl_logname_getsecattr (oz_knl_user_getlognamdir (object)); break; }
    case OZ_OBJTYPE_JOB:     { secattr = oz_knl_logname_getsecattr (oz_knl_job_getlognamdir (object)); break; }
    case OZ_OBJTYPE_DEVUNIT: { secattr = oz_knl_devunit_getsecattr (object); break; }
    default: return (OZ_BADHANDOBJTYPE);
  }
  seckeys   = oz_knl_thread_getseckeys (NULL);
  secaccmsk = oz_knl_security_getsecaccmsk (seckeys, secattr);
  oz_knl_secattr_increfc (secattr, -1);
  oz_knl_seckeys_increfc (seckeys, -1);

  /* Find a usable entry in the handle table, expand it if full */

  LOCKTABLE_EX (handletbl);
  hi = handletbl -> free_h;                             // get first free entry
  if (hi == 0) {
    sts = expand_table (handletbl);                     // expand table
    if (sts != OZ_SUCCESS) {
      UNLOCKTABLE_EX (handletbl);                       // failed, unlock table
      return (sts);                                     // return error status
    }
    hi = handletbl -> free_h;                           // expanded, get first free entry
  }
  he = handletbl -> ents + hi;                          // point to entry we are about to assign
  handletbl -> free_h = he -> nextfree;                 // unlink it from free list
  if (handletbl -> free_h == 0) handletbl -> free_t = 0;
  CVTLOCKTABLE_EX_SH (handletbl);                       // convert from exclusive to shared access

  /* Set up the handle table entry */

  he -> object    = object;                             // save object pointer
  he -> objtype   = objtype;                            // save object type
  he -> thread    = oz_knl_thread_getcur ();            // save thread that allocated it
  he -> procmode  = procmode;                           // save processor mode that allocated it
  he -> secaccmsk = secaccmsk;                          // save granted security access bits
  he -> refcount  = 1;                                  // initialize entry's reference count
  *handle_r = hi + he -> reuse;                         // return handle including the re-use counter
  OBJINCREFC (objtype, object, 1);                      // increment object's reference count
  UNLOCKTABLE_SH (handletbl);                           // release shared lock

  return (OZ_SUCCESS);                                  // return completion status
}
/*
 * hash_search -- look up key in table and perform action
 *
 * action is one of HASH_FIND/HASH_ENTER/HASH_REMOVE
 *
 * RETURNS: NULL if table is corrupted, a pointer to the element
 *	found/removed/entered if applicable, TRUE otherwise.
 *	foundPtr is TRUE if we found an element in the table
 *	(FALSE if we entered one).
 */
long *
hash_search(HTAB *hashp,
			char *keyPtr,
			HASHACTION action,	/*
								 * HASH_FIND / HASH_ENTER / HASH_REMOVE
								 * HASH_FIND_SAVE / HASH_REMOVE_SAVED
								 */
			bool *foundPtr)
{
	uint32		bucket;
	long		segment_num;
	long		segment_ndx;
	SEGMENT		segp;
	register ELEMENT *curr;
	HHDR	   *hctl;
	BUCKET_INDEX currIndex;
	BUCKET_INDEX *prevIndexPtr;
	char	   *destAddr;
	static struct State
	{
		ELEMENT    *currElem;
		BUCKET_INDEX currIndex;
		BUCKET_INDEX *prevIndex;
	}			saveState;

	Assert((hashp && keyPtr));
	Assert((action == HASH_FIND) || (action == HASH_REMOVE) ||
		   (action == HASH_ENTER) || (action == HASH_FIND_SAVE) ||
		   (action == HASH_REMOVE_SAVED));

	hctl = hashp->hctl;

#if HASH_STATISTICS
	hash_accesses++;
	hashp->hctl->accesses++;
#endif

	if (action == HASH_REMOVE_SAVED)
	{
		curr = saveState.currElem;
		currIndex = saveState.currIndex;
		prevIndexPtr = saveState.prevIndex;

		/*
		 * Try to catch subsequent errors
		 */
		Assert(saveState.currElem && !(saveState.currElem = 0));
	}
	else
	{
		bucket = call_hash(hashp, keyPtr, hctl->keysize);
		segment_num = bucket >> hctl->sshift;
		segment_ndx = bucket & (hctl->ssize - 1);

		segp = GET_SEG(hashp, segment_num);

		Assert(segp);

		prevIndexPtr = &segp[segment_ndx];
		currIndex = *prevIndexPtr;

		/*
		 * Follow collision chain
		 */
		for (curr = NULL; currIndex != INVALID_INDEX;)
		{
			/* coerce bucket index into a pointer */
			curr = GET_BUCKET(hashp, currIndex);

			if (!memcmp((char *) &(curr->key), keyPtr, hctl->keysize))
				break;
			prevIndexPtr = &(curr->next);
			currIndex = *prevIndexPtr;
#if HASH_STATISTICS
			hash_collisions++;
			hashp->hctl->collisions++;
#endif
		}
	}

	/*
	 * if we found an entry or if we weren't trying to insert, we're done
	 * now.
	 */
	*foundPtr = (bool) (currIndex != INVALID_INDEX);

	switch (action)
	{
		case HASH_ENTER:
			if (currIndex != INVALID_INDEX)
				return &(curr->key);
			break;
		case HASH_REMOVE:
		case HASH_REMOVE_SAVED:
			if (currIndex != INVALID_INDEX)
			{
				Assert(hctl->nkeys > 0);
				hctl->nkeys--;

				/* add the bucket to the freelist for this table. */
				*prevIndexPtr = curr->next;
				curr->next = hctl->freeBucketIndex;
				hctl->freeBucketIndex = currIndex;

				/*
				 * better hope the caller is synchronizing access to this
				 * element, because someone else is going to reuse it the
				 * next time something is added to the table
				 */
				return &(curr->key);
			}
			return (long *) TRUE;
		case HASH_FIND:
			if (currIndex != INVALID_INDEX)
				return &(curr->key);
			return (long *) TRUE;
		case HASH_FIND_SAVE:
			if (currIndex != INVALID_INDEX)
			{
				saveState.currElem = curr;
				saveState.prevIndex = prevIndexPtr;
				saveState.currIndex = currIndex;
				return &(curr->key);
			}
			return (long *) TRUE;
		default:
			/* can't get here */
			return NULL;
	}

	/*
	 * If we got here, then we didn't find the element and we have to insert
	 * it into the hash table
	 */
	Assert(currIndex == INVALID_INDEX);

	/* get the next free bucket */
	currIndex = hctl->freeBucketIndex;
	if (currIndex == INVALID_INDEX)
	{
		/* no free elements.  allocate another chunk of buckets */
		if (!bucket_alloc(hashp))
			return NULL;
		currIndex = hctl->freeBucketIndex;
	}
	Assert(currIndex != INVALID_INDEX);

	curr = GET_BUCKET(hashp, currIndex);
	hctl->freeBucketIndex = curr->next;

	/* link into chain */
	*prevIndexPtr = currIndex;

	/* copy key and data */
	destAddr = (char *) &(curr->key);
	memmove(destAddr, keyPtr, hctl->keysize);
	curr->next = INVALID_INDEX;

	/*
	 * let the caller initialize the data field after hash_search returns.
	 */
	/* memmove(destAddr,keyPtr,hctl->keysize+hctl->datasize); */

	/*
	 * Check if it is time to split the segment
	 */
	if (++hctl->nkeys / (hctl->max_bucket + 1) > hctl->ffactor)
	{
		/*
		 * fprintf(stderr,"expanding on '%s'\n",keyPtr);
		 * hash_stats("expanded table",hashp);
		 */
		if (!expand_table(hashp))
			return NULL;
	}
	return &(curr->key);
}
void *
hash_search_with_hash_value(HTAB *hashp,
							const void *keyPtr,
							uint32 hashvalue,
							HASHACTION action,
							bool *foundPtr)
{
	HASHHDR    *hctl = hashp->hctl;
	Size		keysize;
	uint32		bucket;
	long		segment_num;
	long		segment_ndx;
	HASHSEGMENT segp;
	HASHBUCKET	currBucket;
	HASHBUCKET *prevBucketPtr;
	HashCompareFunc match;

#if HASH_STATISTICS
	hash_accesses++;
	hctl->accesses++;
#endif

	/*
	 * If inserting, check if it is time to split a bucket.
	 *
	 * NOTE: failure to expand table is not a fatal error, it just means we
	 * have to run at higher fill factor than we wanted.  However, if we're
	 * using the palloc allocator then it will throw error anyway on
	 * out-of-memory, so we must do this before modifying the table.
	 */
	if (action == HASH_ENTER || action == HASH_ENTER_NULL)
	{
		/*
		 * Can't split if running in partitioned mode, nor if frozen, nor if
		 * table is the subject of any active hash_seq_search scans.  Strange
		 * order of these tests is to try to check cheaper conditions first.
		 */
		if (!IS_PARTITIONED(hctl) && !hashp->frozen &&
			hctl->nentries / (long) (hctl->max_bucket + 1) >= hctl->ffactor &&
			!has_seq_scans(hashp))
			(void) expand_table(hashp);
	}

	/*
	 * Do the initial lookup
	 */
	bucket = calc_bucket(hctl, hashvalue);
	segment_num = bucket >> hashp->sshift;
	segment_ndx = MOD(bucket, hashp->ssize);

	segp = hashp->dir[segment_num];

	if (segp == NULL)
		hash_corrupted(hashp);

	prevBucketPtr = &segp[segment_ndx];
	currBucket = *prevBucketPtr;

	/*
	 * Follow collision chain looking for matching key
	 */
	match = hashp->match;		/* save one fetch in inner loop */
	keysize = hashp->keysize;	/* ditto */

	while (currBucket != NULL)
	{
		if (currBucket->hashvalue == hashvalue &&
			match(ELEMENTKEY(currBucket), keyPtr, keysize) == 0)
			break;
		prevBucketPtr = &(currBucket->link);
		currBucket = *prevBucketPtr;
#if HASH_STATISTICS
		hash_collisions++;
		hctl->collisions++;
#endif
	}

	if (foundPtr)
		*foundPtr = (bool) (currBucket != NULL);

	/*
	 * OK, now what?
	 */
	switch (action)
	{
		case HASH_FIND:
			if (currBucket != NULL)
				return (void *) ELEMENTKEY(currBucket);
			return NULL;

		case HASH_REMOVE:
			if (currBucket != NULL)
			{
				/* use volatile pointer to prevent code rearrangement */
				volatile HASHHDR *hctlv = hctl;

				/* if partitioned, must lock to touch nentries and freeList */
				if (IS_PARTITIONED(hctlv))
					SpinLockAcquire(&hctlv->mutex);

				Assert(hctlv->nentries > 0);
				hctlv->nentries--;

				/* remove record from hash bucket's chain. */
				*prevBucketPtr = currBucket->link;

				/* add the record to the freelist for this table.  */
				currBucket->link = hctlv->freeList;
				hctlv->freeList = currBucket;

				if (IS_PARTITIONED(hctlv))
					SpinLockRelease(&hctlv->mutex);

				/*
				 * better hope the caller is synchronizing access to this
				 * element, because someone else is going to reuse it the next
				 * time something is added to the table
				 */
				return (void *) ELEMENTKEY(currBucket);
			}
			return NULL;

		case HASH_ENTER_NULL:
			/* ENTER_NULL does not work with palloc-based allocator */
			Assert(hashp->alloc != DynaHashAlloc);
			/* FALL THRU */

		case HASH_ENTER:
			/* Return existing element if found, else create one */
			if (currBucket != NULL)
				return (void *) ELEMENTKEY(currBucket);

			/* disallow inserts if frozen */
			if (hashp->frozen)
				elog(ERROR, "cannot insert into frozen hashtable \"%s\"",
					 hashp->tabname);

			currBucket = get_hash_entry(hashp);
			if (currBucket == NULL)
			{
				/* out of memory */
				if (action == HASH_ENTER_NULL)
					return NULL;
				/* report a generic message */
				if (hashp->isshared)
					ereport(ERROR,
							(errcode(ERRCODE_OUT_OF_MEMORY),
							 errmsg("out of shared memory")));
				else
					ereport(ERROR,
							(errcode(ERRCODE_OUT_OF_MEMORY),
							 errmsg("out of memory")));
			}

			/* link into hashbucket chain */
			*prevBucketPtr = currBucket;
			currBucket->link = NULL;

			/* copy key into record */
			currBucket->hashvalue = hashvalue;
			hashp->keycopy(ELEMENTKEY(currBucket), keyPtr, keysize);

			/*
			 * Caller is expected to fill the data field on return.  DO NOT
			 * insert any code that could possibly throw error here, as doing
			 * so would leave the table entry incomplete and hence corrupt the
			 * caller's data structure.
			 */
			return (void *) ELEMENTKEY(currBucket);
	}

	elog(ERROR, "unrecognized hash action code: %d", (int) action);

	return NULL;				/* keep compiler quiet */
}
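/*
 * Typical calling pattern for the function above (illustrative, not from the
 * original source).  Computing the hash once with get_hash_value() lets the
 * caller reuse it, e.g. to choose a partition lock for a partitioned table,
 * before passing it to hash_search_with_hash_value().  The entry struct and
 * table variable names are assumptions.
 */
typedef struct
{
	Oid			relid;			/* hash key: must be the first field */
	int			refcount;
} DemoEntry;

static DemoEntry *
demo_lookup_or_insert(HTAB *demo_table, Oid relid)
{
	uint32		hashvalue = get_hash_value(demo_table, &relid);
	bool		found;
	DemoEntry  *entry;

	/* the same hashvalue could also be used here to pick a partition lock */
	entry = (DemoEntry *) hash_search_with_hash_value(demo_table, &relid,
													  hashvalue, HASH_ENTER,
													  &found);
	if (!found)
		entry->refcount = 0;	/* caller must fill in the non-key fields */
	entry->refcount++;
	return entry;
}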
/*----------
 * hash_search -- look up key in table and perform action
 *
 * action is one of:
 *		HASH_FIND: look up key in table
 *		HASH_ENTER: look up key in table, creating entry if not present
 *		HASH_REMOVE: look up key in table, remove entry if present
 *		HASH_FIND_SAVE: look up key in table, also save in static var
 *		HASH_REMOVE_SAVED: remove entry saved by HASH_FIND_SAVE
 *
 * Return value is a pointer to the element found/entered/removed if any,
 * or NULL if no match was found.  (NB: in the case of the REMOVE actions,
 * the result is a dangling pointer that shouldn't be dereferenced!)
 * A NULL result for HASH_ENTER implies we ran out of memory.
 *
 * If foundPtr isn't NULL, then *foundPtr is set TRUE if we found an
 * existing entry in the table, FALSE otherwise.  This is needed in the
 * HASH_ENTER case, but is redundant with the return value otherwise.
 *
 * The HASH_FIND_SAVE/HASH_REMOVE_SAVED interface is a hack to save one
 * table lookup in a find/process/remove scenario.  Note that no other
 * addition or removal in the table can safely happen in between.
 *----------
 */
void *
hash_search(HTAB *hashp,
			const void *keyPtr,
			HASHACTION action,
			bool *foundPtr)
{
	HASHHDR    *hctl = hashp->hctl;
	uint32		hashvalue = 0;
	uint32		bucket;
	long		segment_num;
	long		segment_ndx;
	HASHSEGMENT segp;
	HASHBUCKET	currBucket;
	HASHBUCKET *prevBucketPtr;
	static struct State
	{
		HASHBUCKET	currBucket;
		HASHBUCKET *prevBucketPtr;
	}			saveState;

#if HASH_STATISTICS
	hash_accesses++;
	hctl->accesses++;
#endif

	/*
	 * Do the initial lookup (or recall result of prior lookup)
	 */
	if (action == HASH_REMOVE_SAVED)
	{
		currBucket = saveState.currBucket;
		prevBucketPtr = saveState.prevBucketPtr;

		/*
		 * Try to catch subsequent errors
		 */
		Assert(currBucket);
		saveState.currBucket = NULL;
	}
	else
	{
		HashCompareFunc match;
		Size		keysize = hctl->keysize;

		hashvalue = hashp->hash(keyPtr, keysize);
		bucket = calc_bucket(hctl, hashvalue);

		segment_num = bucket >> hctl->sshift;
		segment_ndx = MOD(bucket, hctl->ssize);

		segp = hashp->dir[segment_num];

		if (segp == NULL)
			hash_corrupted(hashp);

		prevBucketPtr = &segp[segment_ndx];
		currBucket = *prevBucketPtr;

		/*
		 * Follow collision chain looking for matching key
		 */
		match = hashp->match;	/* save one fetch in inner loop */

		while (currBucket != NULL)
		{
			if (currBucket->hashvalue == hashvalue &&
				match(ELEMENTKEY(currBucket), keyPtr, keysize) == 0)
				break;
			prevBucketPtr = &(currBucket->link);
			currBucket = *prevBucketPtr;
#if HASH_STATISTICS
			hash_collisions++;
			hctl->collisions++;
#endif
		}
	}

	if (foundPtr)
		*foundPtr = (bool) (currBucket != NULL);

	/*
	 * OK, now what?
	 */
	switch (action)
	{
		case HASH_FIND:
			if (currBucket != NULL)
				return (void *) ELEMENTKEY(currBucket);
			return NULL;

		case HASH_FIND_SAVE:
			if (currBucket != NULL)
			{
				saveState.currBucket = currBucket;
				saveState.prevBucketPtr = prevBucketPtr;
				return (void *) ELEMENTKEY(currBucket);
			}
			return NULL;

		case HASH_REMOVE:
		case HASH_REMOVE_SAVED:
			if (currBucket != NULL)
			{
				Assert(hctl->nentries > 0);
				hctl->nentries--;

				/* remove record from hash bucket's chain. */
				*prevBucketPtr = currBucket->link;

				/* add the record to the freelist for this table.  */
				currBucket->link = hctl->freeList;
				hctl->freeList = currBucket;

				/*
				 * better hope the caller is synchronizing access to this
				 * element, because someone else is going to reuse it the
				 * next time something is added to the table
				 */
				return (void *) ELEMENTKEY(currBucket);
			}
			return NULL;

		case HASH_ENTER:
			/* Return existing element if found, else create one */
			if (currBucket != NULL)
				return (void *) ELEMENTKEY(currBucket);

			/* get the next free element */
			currBucket = hctl->freeList;
			if (currBucket == NULL)
			{
				/* no free elements.  allocate another chunk of buckets */
				if (!element_alloc(hashp))
					return NULL;	/* out of memory */
				currBucket = hctl->freeList;
				Assert(currBucket != NULL);
			}
			hctl->freeList = currBucket->link;

			/* link into hashbucket chain */
			*prevBucketPtr = currBucket;
			currBucket->link = NULL;

			/* copy key into record */
			currBucket->hashvalue = hashvalue;
			hashp->keycopy(ELEMENTKEY(currBucket), keyPtr, hctl->keysize);

			/* caller is expected to fill the data field on return */

			/* Check if it is time to split the segment */
			if (++hctl->nentries / (long) (hctl->max_bucket + 1) > hctl->ffactor)
			{
				/*
				 * NOTE: failure to expand table is not a fatal error, it
				 * just means we have to run at higher fill factor than we
				 * wanted.
				 */
				expand_table(hashp);
			}

			return (void *) ELEMENTKEY(currBucket);
	}

	elog(ERROR, "unrecognized hash action code: %d", (int) action);

	return NULL;				/* keep compiler quiet */
}
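/*
 * Illustrative caller for hash_search() above (not part of the original
 * source).  The table is assumed to have been created with hash_create()
 * using an entry struct whose first field is the key; the struct and
 * variable names here are examples only.
 */
typedef struct
{
	Oid			relid;			/* hash key: must be the first field */
	long		num_calls;
} StatsEntry;

static void
count_call(HTAB *stats_table, Oid relid)
{
	bool		found;
	StatsEntry *entry;

	entry = (StatsEntry *) hash_search(stats_table, &relid,
									   HASH_ENTER, &found);
	if (entry == NULL)
		elog(ERROR, "out of memory");	/* this version returns NULL on OOM */
	if (!found)
		entry->num_calls = 0;	/* new entry: initialize the data fields */
	entry->num_calls++;
}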
void test_table_expansion()
{
    automaton a = {0};   /* start from an empty, zero-initialized automaton */

    /* assumes expand_table() grows the automaton's table in place, so the
       new size is visible through `a` after the call */
    expand_table(a);
    printf("Table size: %d\n", a.table_size);
}
static base_n addtotable( token_n *tokens, token_n *end_token, action_n *actions,
                          token_n default_token, token_n parent_token )
{
    base_n      start, i;
    token_n     max;
    action_n    default_action;
    token_n     *r;
    a_table     *t;
    a_table     *tstart;
    action_n    actval;
    token_n     tokval;

    if( compactflag ) {
        start = used++;
        expand_table( used );       // Leave room for parent & default
        default_action = ACTION_NULL;
        for( r = tokens; r < end_token; ++r ) {
            tokval = *r;
            if( tokval == default_token ) {
                default_action = actions[tokval];
            } else if( tokval != parent_token ) {
                actval = actions[tokval];
                if( tokval > 0x0FFF ) {
                    printf( "Error: token index 0x%X for item %d is higher than 0x0FFF !\n", tokval, start );
                }
                if( actval > 0x0FFF ) {
                    printf( "Error: token action 0x%X for item %d is higher than 0x0FFF !\n", actval, start );
                }
                expand_table( used + 1 );
                table[used].token = tokval;
                table[used].action = actval;
                ++used;
            }
        }
        actval = actions[parent_token];
        if( actval > 0x0FFF ) {
            printf( "Error: parent action 0x%X for item %d is higher than 0x0FFF !\n", actval, start );
        }
        if( default_action > 0x0FFF ) {
            printf( "Error: default action 0x%X for item %d is higher than 0x0FFF !\n", default_action, start );
        }
        table[start].token = (token_n)actval;
        table[start].action = default_action;
    } else {
        max = *tokens;
        for( r = tokens + 1; r < end_token; ++r ) {
            if( *r > max ) {
                max = *r;
            }
        }
        for( start = 0; ; ++start ) {
            i = avail;
            expand_table( start + max + 1 );
            while( i < avail ) {
                table[i].token = TOKEN_IMPOSSIBLE;
                table[i].action = ACTION_NULL;
                ++i;
            }
            tstart = table + start;
            if( !IsBase( tstart ) ) {
                for( r = tokens; r < end_token; ++r ) {
                    if( IsUsed( tstart + *r ) ) {
                        break;
                    }
                }
                if( r >= end_token ) {
                    break;
                }
            }
        }
        SetBase( tstart );
        for( r = tokens; r < end_token; ++r ) {
            tokval = *r;
            t = tstart + tokval;
            if( !bigflag ) {
                if( tokval >= UCHAR_MAX ) {
                    msg( "too many tokens!\n" );
                }
            }
            SetToken( t, tokval );
            actval = actions[tokval];
            if( (actval & ACTION_MASK) != actval ) {
                printf( "Error: token action 0x%X for token %d is higher than 0x3FFF !\n", actval, tokval );
            }
            SetAction( t, actval );
        }
        i = start + max + 1;
        if( i > used ) {
            used = i;
        }
    }
    return( start );
}