/*
 * Append new_dbuf to the record stored under key, creating the record
 * if it does not exist.
 *
 * Returns 0 on success, -1 on failure (tdb->ecode is set on error).
 *
 * Fix: the original assigned realloc()'s result straight back to
 * dbuf.dptr, so on allocation failure the buffer returned by
 * tdb_fetch() was leaked (SAFE_FREE then saw only NULL).  A temporary
 * pointer preserves the original buffer so the cleanup path frees it.
 */
int tdb_append(struct tdb_context *tdb, TDB_DATA key, TDB_DATA new_dbuf)
{
	u32 hash;
	TDB_DATA dbuf;
	int ret = -1;

	/* find which hash bucket it is in */
	hash = tdb->hash_fn(&key);
	if (tdb_lock(tdb, BUCKET(hash), F_WRLCK) == -1)
		return -1;

	dbuf = tdb_fetch(tdb, key);

	if (dbuf.dptr == NULL) {
		/* No existing record: start a fresh buffer. */
		dbuf.dptr = (char *)malloc(new_dbuf.dsize);
		if (dbuf.dptr == NULL) {
			tdb->ecode = TDB_ERR_OOM;
			goto failed;
		}
	} else {
		/* Grow the existing data via a temporary so the old
		 * buffer is not leaked if realloc fails. */
		char *tmp = (char *)realloc(dbuf.dptr,
					    dbuf.dsize + new_dbuf.dsize);
		if (tmp == NULL) {
			tdb->ecode = TDB_ERR_OOM;
			goto failed;
		}
		dbuf.dptr = tmp;
	}

	memcpy(dbuf.dptr + dbuf.dsize, new_dbuf.dptr, new_dbuf.dsize);
	dbuf.dsize += new_dbuf.dsize;

	ret = tdb_store(tdb, key, dbuf, 0);

failed:
	tdb_unlock(tdb, BUCKET(hash), F_WRLCK);
	SAFE_FREE(dbuf.dptr);
	return ret;
}
/*
 * Return a freed chunk to the allocator's free structures.
 * Chunks of <= 1016 bytes are pushed onto the head of the lookaside
 * list for their size bucket; larger chunks are inserted into a list
 * (rooted at cgc_lookaside[0]) kept in ascending length order.
 */
void cgc_link( pmeta linkme )
{
    pmeta walker = cgc_lookaside[0];

    if ( linkme == NULL ) {
        return;
    }

    /// Handle the case where this is <= 1016
    if ( linkme->length <= 1016 ) {
        //cgc_printf("Adding into bucket: $d\n", BUCKET( linkme->length) );
        /* Head-insert into the size bucket.
         * NOTE(review): prev is not set here, so small-bucket lists are
         * only singly linked -- confirm unlink code never reads prev. */
        linkme->next = cgc_lookaside[ BUCKET( linkme->length ) ];
        cgc_lookaside[ BUCKET( linkme->length ) ] = linkme;
        return;
    }

    /* Large chunk: walk the sorted list to find the insertion point. */
    while ( walker ) {
        if ( walker->next == NULL ) {
            /* Reached the tail: append. */
            walker->next = linkme;
            linkme->prev = walker;
            linkme->next = NULL;
            return;
        } else if ( linkme->length < walker->next->length ) {
            /* Insert before the first strictly larger neighbour. */
            linkme->next = walker->next;
            linkme->prev = walker;
            walker->next->prev = linkme;
            walker->next = linkme;
            return;
        } else {
            walker = walker->next;
        }
    }

    return;
}
/* find the next entry in the database, returning its key */
TDB_DATA tdb_nextkey(struct tdb_context *tdb, TDB_DATA oldkey)
{
	u32 oldhash;
	TDB_DATA key = tdb_null;
	struct list_struct rec;
	char *k = NULL;

	/* Is locked key the old key?  If so, traverse will be reliable. */
	if (tdb->travlocks.off) {
		if (tdb_lock(tdb,tdb->travlocks.hash,tdb->travlocks.lock_rw))
			return tdb_null;
		/* Re-read the cached record and compare its key bytes with
		 * oldkey to confirm the traversal position still refers to
		 * the caller's record. */
		if (tdb_rec_read(tdb, tdb->travlocks.off, &rec) == -1
		    || !(k = tdb_alloc_read(tdb,tdb->travlocks.off+sizeof(rec),
					    rec.key_len))
		    || memcmp(k, oldkey.dptr, oldkey.dsize) != 0) {
			/* No, it wasn't: unlock it and start from scratch */
			if (tdb_unlock_record(tdb, tdb->travlocks.off) != 0) {
				SAFE_FREE(k);
				return tdb_null;
			}
			if (tdb_unlock(tdb, tdb->travlocks.hash,
				       tdb->travlocks.lock_rw) != 0) {
				SAFE_FREE(k);
				return tdb_null;
			}
			/* Cleared offset forces the fresh lookup below. */
			tdb->travlocks.off = 0;
		}
		SAFE_FREE(k);
	}

	if (!tdb->travlocks.off) {
		/* No previous element: do normal find, and lock record */
		tdb->travlocks.off = tdb_find_lock_hash(tdb, oldkey,
							tdb->hash_fn(&oldkey),
							tdb->travlocks.lock_rw,
							&rec);
		if (!tdb->travlocks.off)
			return tdb_null;
		tdb->travlocks.hash = BUCKET(rec.full_hash);
		if (tdb_lock_record(tdb, tdb->travlocks.off) != 0) {
			TDB_LOG((tdb, TDB_DEBUG_FATAL,
				 "tdb_nextkey: lock_record failed (%s)!\n",
				 strerror(errno)));
			return tdb_null;
		}
	}
	/* Remember which chain we currently hold: tdb_next_lock() may
	 * advance travlocks.hash to a different chain. */
	oldhash = tdb->travlocks.hash;

	/* Grab next record: locks chain and returned record,
	   unlocks old record */
	if (tdb_next_lock(tdb, &tdb->travlocks, &rec) > 0) {
		key.dsize = rec.key_len;
		key.dptr = tdb_alloc_read(tdb, tdb->travlocks.off+sizeof(rec),
					  key.dsize);
		/* Unlock the chain of this new record */
		if (tdb_unlock(tdb, tdb->travlocks.hash,
			       tdb->travlocks.lock_rw) != 0)
			TDB_LOG((tdb, TDB_DEBUG_FATAL,
				 "tdb_nextkey: WARNING tdb_unlock failed!\n"));
	}
	/* Unlock the chain of old record */
	if (tdb_unlock(tdb, BUCKET(oldhash), tdb->travlocks.lock_rw) != 0)
		TDB_LOG((tdb, TDB_DEBUG_FATAL,
			 "tdb_nextkey: WARNING tdb_unlock failed!\n"));
	return key;
}
/* Check that an in-use record is valid. */
static bool tdb_check_used_record(struct tdb_context *tdb,
				  tdb_off_t off,
				  const struct tdb_record *rec,
				  unsigned char **hashes,
				  int (*check)(TDB_DATA, TDB_DATA, void *),
				  void *private_data)
{
	TDB_DATA key, data;

	if (!tdb_check_record(tdb, off, rec))
		return false;

	/* key + data + tailer must fit in record.
	 * NOTE(review): key_len + data_len is computed without an explicit
	 * overflow guard -- confirm the field widths make wraparound
	 * impossible here. */
	if (rec->key_len + rec->data_len + sizeof(tdb_off_t) > rec->rec_len) {
		TDB_LOG((tdb, TDB_DEBUG_ERROR,
			 "Record offset %d too short for contents\n", off));
		return false;
	}

	key = get_bytes(tdb, off + sizeof(*rec), rec->key_len);
	if (!key.dptr)
		return false;

	/* The stored full_hash must match the hash of the stored key. */
	if (tdb->hash_fn(&key) != rec->full_hash) {
		TDB_LOG((tdb, TDB_DEBUG_ERROR,
			 "Record offset %d has incorrect hash\n", off));
		goto fail_put_key;
	}

	/* Mark this offset as a known value for this hash bucket. */
	record_offset(hashes[BUCKET(rec->full_hash)+1], off);

	/* And similarly if the next pointer is valid. */
	if (rec->next)
		record_offset(hashes[BUCKET(rec->full_hash)+1], rec->next);

	/* If they supply a check function and this record isn't dead,
	   get data and feed it. */
	if (check && rec->magic != TDB_DEAD_MAGIC) {
		data = get_bytes(tdb, off + sizeof(*rec) + rec->key_len,
				 rec->data_len);
		if (!data.dptr)
			goto fail_put_key;
		if (check(key, data, private_data) == -1)
			goto fail_put_data;
		put_bytes(tdb, data);
	}

	put_bytes(tdb, key);
	return true;

fail_put_data:
	put_bytes(tdb, data);
fail_put_key:
	put_bytes(tdb, key);
	return false;
}
/* As tdb_find, but if you succeed, keep the lock */
tdb_off_t tdb_find_lock_hash(struct tdb_context *tdb, TDB_DATA key, u32 hash,
			     int locktype, struct list_struct *rec)
{
	u32 rec_ptr = 0;

	if (tdb_lock(tdb, BUCKET(hash), locktype) == -1) {
		return 0;
	}

	rec_ptr = tdb_find(tdb, key, hash, rec);
	if (rec_ptr == 0) {
		/* Not found: do not hold the chain lock. */
		tdb_unlock(tdb, BUCKET(hash), locktype);
	}
	return rec_ptr;
}
/* delete an entry in the database given a key */
static int tdb_delete_hash(struct tdb_context *tdb, TDB_DATA key, u32 hash)
{
	tdb_off_t rec_ptr;
	struct list_struct rec;
	int ret;

	if (tdb->max_dead_records != 0) {

		/*
		 * Allow for some dead records per hash chain, mainly for
		 * tdb's with a very high create/delete rate like locking.tdb.
		 */

		if (tdb_lock(tdb, BUCKET(hash), F_WRLCK) == -1)
			return -1;

		if (tdb_count_dead(tdb, hash) >= tdb->max_dead_records) {
			/*
			 * Don't let the per-chain freelist grow too large,
			 * delete all existing dead records
			 */
			tdb_purge_dead(tdb, hash);
		}

		if (!(rec_ptr = tdb_find(tdb, key, hash, &rec))) {
			tdb_unlock(tdb, BUCKET(hash), F_WRLCK);
			return -1;
		}

		/*
		 * Just mark the record as dead.
		 */
		rec.magic = TDB_DEAD_MAGIC;

		ret = tdb_rec_write(tdb, rec_ptr, &rec);
	}
	else {
		/* No dead-record pooling: find, lock, and really delete. */
		if (!(rec_ptr = tdb_find_lock_hash(tdb, key, hash, F_WRLCK,
						   &rec)))
			return -1;

		ret = tdb_do_delete(tdb, rec_ptr, &rec);
	}

	if (ret == 0) {
		tdb_increment_seqnum(tdb);
	}

	/* Both branches above left the chain write-locked; rec.full_hash
	 * names that chain. */
	if (tdb_unlock(tdb, BUCKET(rec.full_hash), F_WRLCK) != 0)
		TDB_LOG((tdb, TDB_DEBUG_WARNING,
			 "tdb_delete: WARNING tdb_unlock failed!\n"));
	return ret;
}
/*
 * Find the record for 'key' and hand its raw data to the caller's
 * parser while the chain is read-locked.  Returns the parser's result,
 * or -1 with ecode = TDB_ERR_NOEXIST when the key is absent.
 */
_PUBLIC_ int tdb_parse_record(struct tdb_context *tdb, TDB_DATA key,
			      int (*parser)(TDB_DATA key, TDB_DATA data,
					    void *private_data),
			      void *private_data)
{
	struct tdb_record rec;
	tdb_off_t rec_ptr;
	uint32_t hash;
	int ret;

	/* find which hash bucket it is in */
	hash = tdb->hash_fn(&key);

	rec_ptr = tdb_find_lock_hash(tdb, key, hash, F_RDLCK, &rec);
	if (rec_ptr == 0) {
		/* record not found */
		tdb_trace_1rec_ret(tdb, "tdb_parse_record", key, -1);
		tdb->ecode = TDB_ERR_NOEXIST;
		return -1;
	}
	tdb_trace_1rec_ret(tdb, "tdb_parse_record", key, 0);

	ret = tdb_parse_data(tdb, key, rec_ptr + sizeof(rec) + rec.key_len,
			     rec.data_len, parser, private_data);

	tdb_unlock(tdb, BUCKET(rec.full_hash), F_RDLCK);
	return ret;
}
/* mark a chain as locked without actually locking it. Warning! use with
   great caution! */
_PUBLIC_ int tdb_chainlock_mark(struct tdb_context *tdb, TDB_DATA key)
{
	uint32_t hash = tdb->hash_fn(&key);
	int ret;

	/* Record the lock as held without taking the OS-level lock. */
	ret = tdb_nest_lock(tdb, lock_offset(BUCKET(hash)), F_WRLCK,
			    TDB_LOCK_MARK_ONLY);
	tdb_trace_1rec(tdb, "tdb_chainlock_mark", key);
	return ret;
}
/* Read-lock the hash chain that 'key' belongs to. */
_PUBLIC_ int tdb_chainlock_read(struct tdb_context *tdb, TDB_DATA key)
{
	uint32_t hash = tdb->hash_fn(&key);
	int ret;

	ret = tdb_lock(tdb, BUCKET(hash), F_RDLCK);
	tdb_trace_1rec(tdb, "tdb_chainlock_read", key);
	return ret;
}
// Construct a hashed timer queue: allocate the bucket table, create one
// BUCKET (itself a timer queue) per slot, and build the iterator.
ACE_Timer_Hash_T<TYPE, FUNCTOR, ACE_LOCK, BUCKET>::ACE_Timer_Hash_T (
    FUNCTOR *upcall_functor,
    ACE_Free_List<ACE_Timer_Node_T <TYPE> > *freelist)
  : ACE_Timer_Queue_T<TYPE, FUNCTOR, ACE_LOCK> (upcall_functor, freelist),
    size_ (0),
    table_ (new BUCKET *[ACE_DEFAULT_TIMER_HASH_TABLE_SIZE]),
    table_size_ (ACE_DEFAULT_TIMER_HASH_TABLE_SIZE),
    table_functor_ (this),
    earliest_position_ (0)
{
  ACE_TRACE ("ACE_Timer_Hash_T::ACE_Timer_Hash_T");

  // Install the default time-of-day hook on this queue...
  this->gettimeofday (ACE_OS::gettimeofday);

  // ...and on every per-slot bucket queue.
  for (size_t i = 0;
       i < this->table_size_;
       i++)
    {
      ACE_NEW (this->table_[i],
               BUCKET (&this->table_functor_, this->free_list_));
      this->table_[i]->gettimeofday (ACE_OS::gettimeofday);
    }

  ACE_NEW (iterator_, HASH_ITERATOR (*this));
}
/* Look up 'key'; returns the matching node or NULL.
 * Side effect: table->last_node is left at the match (or NULL). */
static hashnode* hash_lookup(hashtable table, ulong key)
{
    /* Use table->last_node itself as the scan cursor, exactly as the
     * original did, so callers can resume from it afterwards. */
    for (table->last_node = BUCKET(table, mhash(table, key));
         table->last_node != NULL;
         table->last_node = table->last_node->next) {
        if (table->last_node->value == key) {
            return table->last_node;
        }
    }
    return NULL;
}
/* check if an entry in the database exists

   note that 1 is returned if the key is found and 0 is returned if not
   found this doesn't match the conventions in the rest of this module,
   but is compatible with gdbm
*/
static int tdb_exists_hash(struct tdb_context *tdb, TDB_DATA key, u32 hash)
{
	struct list_struct rec;
	int found;

	found = (tdb_find_lock_hash(tdb, key, hash, F_RDLCK, &rec) != 0);
	if (found) {
		/* tdb_find_lock_hash left the chain locked on success. */
		tdb_unlock(tdb, BUCKET(rec.full_hash), F_RDLCK);
	}
	return found ? 1 : 0;
}
//
// Allocate 'size' bytes from the kernel heap with an allocation tag.
// Requests larger than half a page go straight to the page allocator;
// smaller requests are served from per-size bucket freelists.
//
void *kmalloc_tag(int size, unsigned long tag) {
  struct bucket *b;
  int bucket;
  void *addr;

  // Handle large allocation by allocating pages
  if (size > PAGESIZE / 2) {
    // Allocate pages
    addr = alloc_pages(PAGES(size), tag ? tag : 'ALOC');

    // Set size in pfn entry
    // (page count stored offset by PAGESHIFT so the free path can
    // distinguish page allocations from bucket numbers)
    pfdb[BTOP(virt2phys(addr))].size = PAGES(size) + PAGESHIFT;

    return addr;
  }

  // Otherwise allocate from one of the buckets
  bucket = BUCKET(size);
  b = &buckets[bucket];

  // If bucket is empty the allocate one more page for the bucket
  if (b->mem == 0) {
    char *p;
    int i;

    // Allocate new page
    // NOTE(review): alloc_pages result is not NULL-checked -- confirm
    // the page allocator cannot fail here.
    addr = alloc_pages(1, 'HEAP');

    // Set bucket number in pfn entry
    pfdb[BTOP(virt2phys(addr))].size = bucket;

    // Split page into chunks; each free chunk stores the next-pointer
    // in its own first bytes, forming the bucket freelist
    p = (char *) addr;
    for (i = 0; i < PAGESIZE; i += b->size) {
      *(void **)(p + i) = b->mem;
      b->mem = p + i;
    }

    // Update count of pages used for this bucket
    b->pages++;
  }

  // Allocate chunk from bucket (pop the freelist head)
  addr = b->mem;
  b->mem = *(void **) addr;

  // Return allocated chunk
  return addr;
}
/* insert a non-pending timer into the scheduler */
void __ci_ip_timer_set(ci_netif *netif, ci_ip_timer *ts, ci_iptime_t t)
{
  ci_ni_dllist_t* bucket;
  int w;
  ci_iptime_t stime = IPTIMER_STATE(netif)->sched_ticks;

  /* Firing time must lie in the future relative to the scheduler. */
  ci_assert(TIME_GT(t, stime));
  /* this is absolute time */
  ts->time = t;

  /* Keep the cached earliest-expiry hint up to date. */
  if( TIME_LT(t, IPTIMER_STATE(netif)->closest_timer) )
    IPTIMER_STATE(netif)->closest_timer = t;

  /* Previous error in this code was to choose wheel based on time delta
   * before timer fires (ts->time - stime). This is bogus as the timer
   * wheels work like a clock and we need to find wheel based on the
   * absolute time */

  /* insert in wheel 0 if the top 3 wheels have the same time */
  if ((stime & WHEEL0_MASK) == (t & WHEEL0_MASK))
    w = 0;
  /* else, insert in wheel 1 if the top 2 wheels have the same time */
  else if ((stime & WHEEL1_MASK) == (t & WHEEL1_MASK))
    w = 1;
  /* else, insert in wheel 2 if the top wheel has the same time */
  else if ((stime & WHEEL2_MASK) == (t & WHEEL2_MASK))
    w = 2;
  else
    w = 3;

  bucket = BUCKET(netif, w, t);

  LOG_ITV(log("%s: delta=0x%x (t=0x%x-s=0x%x), w=0x%x, b=0x%x",
              __FUNCTION__, ts->time-stime, ts->time, stime,
              w, BUCKETNO(w, ts->time)));

  /* append onto the correct bucket
  **
  ** NB this might not be stable because a later insert with a
  ** smaller relative time will be before an earlier insert with a
  ** larger relative time. Oh well doesn't really matter
  */
  ci_ni_dllist_push_tail(netif, bucket, &ts->link);
  ci_assert(ci_ip_timer_is_link_valid(netif, ts));
  DETAILED_CHECK_TIMERS(netif);
}
/* If an entry doesn't exist tdb_err will be set to
 * TDB_ERR_NOEXIST. If a key has no data attached
 * then the TDB_DATA will have zero length but
 * a non-zero pointer
 */
TDB_DATA tdb_fetch(struct tdb_context *tdb, TDB_DATA key)
{
	struct list_struct rec;
	TDB_DATA result;
	tdb_off_t rec_ptr;
	u32 hash;

	/* find which hash bucket it is in */
	hash = tdb->hash_fn(&key);

	rec_ptr = tdb_find_lock_hash(tdb, key, hash, F_RDLCK, &rec);
	if (rec_ptr == 0) {
		return tdb_null;
	}

	/* Read the value portion, which sits after the header and key. */
	result.dptr = tdb_alloc_read(tdb, rec_ptr + sizeof(rec) + rec.key_len,
				     rec.data_len);
	result.dsize = rec.data_len;

	tdb_unlock(tdb, BUCKET(rec.full_hash), F_RDLCK);
	return result;
}
/*
 * Allocate at least 'length' bytes.  Sizes are clamped to a minimum of
 * 8 and rounded up to a multiple of 8.  Sizes with a lookaside bucket
 * are served from the per-size lists; everything else falls back to
 * the freelist allocator.
 */
void *malloc( size_t length )
{
    int bucket = 0;
    pmeta outb = NULL;

    // The minimum size for a valid request is 8 bytes
    if ( length < 8 ) {
        length = 8;
    }

    // Round up to nearest 8
    length = (length+7) & 0xfffffff8;

    bucket = BUCKET(length);

    if ( bucket == 0 ) {
        // No bucket covers this size: go straight to the freelist
        outb = freelist_alloc(length);
        return outb;
    } else {
        // Scan upward for the first non-empty bucket that fits
        while ( bucket < 128 ) {
            if ( lookaside[ bucket] != NULL ) {
                break;
            }
            bucket++;
        }
    }

    if ( bucket == 128 ) {
        //printf("No available buckets freelist alloc\n");
        // All candidate buckets empty: fall back to the freelist
        return freelist_alloc( length );
    } else {
        //printf("Found bucket: $d\n", bucket);
        // Pop the head chunk off the chosen lookaside list
        outb = lookaside[ bucket ];
        lookaside[bucket] = outb->next;
        // Skip a 4-byte header before handing the pointer out.
        // NOTE(review): the freelist_alloc paths return their result
        // without this +4 -- confirm freelist_alloc already returns a
        // user pointer.
        return ( (char*)outb ) + 4;
    }

    return NULL;
}
static int nextBTreeItems(SetIteration *i) { if (i->position >= 0) { if (i->position) { DECREF_KEY(i->key); DECREF_VALUE(i->value); } if (BTreeItems_seek(ITEMS(i->set), i->position) >= 0) { Bucket *currentbucket; currentbucket = BUCKET(ITEMS(i->set)->currentbucket); UNLESS(PER_USE(currentbucket)) { /* Mark iteration terminated, so that finiSetIteration doesn't * try to redundantly decref the key and value */ i->position = -1; return -1; } COPY_KEY(i->key, currentbucket->keys[ITEMS(i->set)->currentoffset]); INCREF_KEY(i->key); COPY_VALUE(i->value, currentbucket->values[ITEMS(i->set)->currentoffset]); INCREF_VALUE(i->value); i->position ++; PER_UNUSE(currentbucket); } else {
/*
 * Locate the record for 'key' and feed its data to the caller-supplied
 * parser while the chain is read-locked.  Returns the parser's result,
 * or the NOEXIST error code mapping when the key is absent.
 */
int tdb_parse_record(struct tdb_context *tdb, TDB_DATA key,
		     int (*parser)(TDB_DATA key, TDB_DATA data,
				   void *private_data),
		     void *private_data)
{
	struct list_struct rec;
	tdb_off_t rec_ptr;
	u32 hash;
	int ret;

	/* find which hash bucket it is in */
	hash = tdb->hash_fn(&key);

	rec_ptr = tdb_find_lock_hash(tdb, key, hash, F_RDLCK, &rec);
	if (rec_ptr == 0) {
		return TDB_ERRCODE(TDB_ERR_NOEXIST, 0);
	}

	ret = tdb_parse_data(tdb, key, rec_ptr + sizeof(rec) + rec.key_len,
			     rec.data_len, parser, private_data);

	tdb_unlock(tdb, BUCKET(rec.full_hash), F_RDLCK);
	return ret;
}
/* Release the read lock on the hash chain that 'key' belongs to. */
int tdb_chainunlock_read(struct tdb_context *tdb, TDB_DATA key)
{
	u32 hash = tdb->hash_fn(&key);

	tdb_trace_1rec(tdb, "tdb_chainunlock_read", key);
	return tdb_unlock(tdb, BUCKET(hash), F_RDLCK);
}
/* store an element in the database, replacing any existing element
   with the same key

   return 0 on success, -1 on failure
*/
int tdb_store(struct tdb_context *tdb, TDB_DATA key, TDB_DATA dbuf, int flag)
{
	struct list_struct rec;
	u32 hash;
	tdb_off_t rec_ptr;
	char *p = NULL;
	int ret = -1;

	if (tdb->read_only || tdb->traverse_read) {
		tdb->ecode = TDB_ERR_RDONLY;
		return -1;
	}

	/* find which hash bucket it is in */
	hash = tdb->hash_fn(&key);
	if (tdb_lock(tdb, BUCKET(hash), F_WRLCK) == -1)
		return -1;

	/* check for it existing, on insert. */
	if (flag == TDB_INSERT) {
		if (tdb_exists_hash(tdb, key, hash)) {
			tdb->ecode = TDB_ERR_EXISTS;
			goto fail;
		}
	} else {
		/* first try in-place update, on modify or replace. */
		if (tdb_update_hash(tdb, key, hash, dbuf) == 0) {
			goto done;
		}
		if (tdb->ecode == TDB_ERR_NOEXIST && flag == TDB_MODIFY) {
			/* if the record doesn't exist and we are in
			   TDB_MODIFY mode then we should fail the store */
			goto fail;
		}
	}
	/* reset the error code potentially set by the tdb_update() */
	tdb->ecode = TDB_SUCCESS;

	/* delete any existing record - if it doesn't exist we don't
	   care.  Doing this first reduces fragmentation, and avoids
	   coalescing with `allocated' block before it's updated. */
	if (flag != TDB_INSERT)
		tdb_delete_hash(tdb, key, hash);

	/* Copy key+value *before* allocating free space in case malloc
	   fails and we are left with a dead spot in the tdb. */
	if (!(p = (char *)malloc(key.dsize + dbuf.dsize))) {
		tdb->ecode = TDB_ERR_OOM;
		goto fail;
	}

	memcpy(p, key.dptr, key.dsize);
	if (dbuf.dsize)
		memcpy(p+key.dsize, dbuf.dptr, dbuf.dsize);

	if (tdb->max_dead_records != 0) {
		/*
		 * Allow for some dead records per hash chain, look if we can
		 * find one that can hold the new record. We need enough space
		 * for key, data and tailer. If we find one, we don't have to
		 * consult the central freelist.
		 */
		rec_ptr = tdb_find_dead(
			tdb, hash, &rec,
			key.dsize + dbuf.dsize + sizeof(tdb_off_t));

		if (rec_ptr != 0) {
			/* Reuse the dead record in place. */
			rec.key_len = key.dsize;
			rec.data_len = dbuf.dsize;
			rec.full_hash = hash;
			rec.magic = TDB_MAGIC;
			if (tdb_rec_write(tdb, rec_ptr, &rec) == -1
			    || tdb->methods->tdb_write(
				    tdb, rec_ptr + sizeof(rec),
				    p, key.dsize + dbuf.dsize) == -1) {
				goto fail;
			}
			goto done;
		}
	}

	/*
	 * We have to allocate some space from the freelist, so this means we
	 * have to lock it. Use the chance to purge all the DEAD records from
	 * the hash chain under the freelist lock.
	 */
	if (tdb_lock(tdb, -1, F_WRLCK) == -1) {
		goto fail;
	}

	if ((tdb->max_dead_records != 0)
	    && (tdb_purge_dead(tdb, hash) == -1)) {
		tdb_unlock(tdb, -1, F_WRLCK);
		goto fail;
	}

	/* we have to allocate some space */
	rec_ptr = tdb_allocate(tdb, key.dsize + dbuf.dsize, &rec);

	tdb_unlock(tdb, -1, F_WRLCK);

	if (rec_ptr == 0) {
		goto fail;
	}

	/* Read hash top into next ptr */
	if (tdb_ofs_read(tdb, TDB_HASH_TOP(hash), &rec.next) == -1)
		goto fail;

	rec.key_len = key.dsize;
	rec.data_len = dbuf.dsize;
	rec.full_hash = hash;
	rec.magic = TDB_MAGIC;

	/* write out and point the top of the hash chain at it */
	if (tdb_rec_write(tdb, rec_ptr, &rec) == -1
	    || tdb->methods->tdb_write(tdb, rec_ptr+sizeof(rec), p,
				       key.dsize+dbuf.dsize)==-1
	    || tdb_ofs_write(tdb, TDB_HASH_TOP(hash), &rec_ptr) == -1) {
		/* Need to tdb_unallocate() here */
		goto fail;
	}

done:
	ret = 0;
fail:
	/* Note 'done' falls through: ret == 0 means success here. */
	if (ret == 0) {
		tdb_increment_seqnum(tdb);
	}

	SAFE_FREE(p);
	tdb_unlock(tdb, BUCKET(hash), F_WRLCK);
	return ret;
}
/* Release the write lock on the hash chain that 'key' belongs to. */
_PUBLIC_ int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key)
{
	uint32_t hash = tdb->hash_fn(&key);

	tdb_trace_1rec(tdb, "tdb_chainunlock", key);
	return tdb_unlock(tdb, BUCKET(hash), F_WRLCK);
}
/* unmark a chain as locked without actually locking it. Warning! use
   with great caution! */
_PUBLIC_ int tdb_chainlock_unmark(struct tdb_context *tdb, TDB_DATA key)
{
	uint32_t hash = tdb->hash_fn(&key);

	tdb_trace_1rec(tdb, "tdb_chainlock_unmark", key);
	/* Drop the bookkeeping entry only; no OS-level lock is touched. */
	return tdb_nest_unlock(tdb, lock_offset(BUCKET(hash)), F_WRLCK, true);
}
/* lock/unlock one hash chain, non-blocking. This is meant to be used
   to reduce contention - it cannot guarantee how many records will be
   locked */
_PUBLIC_ int tdb_chainlock_nonblock(struct tdb_context *tdb, TDB_DATA key)
{
	uint32_t hash = tdb->hash_fn(&key);
	int ret;

	ret = tdb_lock_nonblock(tdb, BUCKET(hash), F_WRLCK);
	tdb_trace_1rec_ret(tdb, "tdb_chainlock_nonblock", key, ret);
	return ret;
}
/* Test driver: exercises store/fetch/traverse/delete/transactions on a
 * tdb whose file has been expanded past the 2GB mark, with I/O routed
 * through large_io_methods. */
int main(int argc, char *argv[])
{
	struct tdb_context *tdb;
	TDB_DATA key, orig_data, data;
	uint32_t hashval;
	tdb_off_t rec_ptr;
	struct tdb_record rec;
	int ret;

	plan_tests(24);
	tdb = tdb_open_ex("run-36-file.tdb", 1024, TDB_CLEAR_IF_FIRST,
			  O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL);
	ok1(tdb);
	/* Swap in the wrapped I/O methods for the large-offset checks. */
	tdb->methods = &large_io_methods;

	key.dsize = strlen("hi");
	key.dptr = (void *)"hi";
	orig_data.dsize = strlen("world");
	orig_data.dptr = (void *)"world";

	/* Enlarge the file (internally multiplies by 2). */
	ret = tdb_expand(tdb, 1500000000);

#ifdef HAVE_INCOHERENT_MMAP
	/* This can fail due to mmap failure on 32 bit systems. */
	if (ret == -1) {
		/* These should now fail. */
		ok1(tdb_store(tdb, key, orig_data, TDB_INSERT) == -1);
		data = tdb_fetch(tdb, key);
		ok1(data.dptr == NULL);
		ok1(tdb_traverse(tdb, test_traverse, &orig_data) == -1);
		ok1(tdb_delete(tdb, key) == -1);
		ok1(tdb_traverse(tdb, test_traverse, NULL) == -1);
		/* Skip the rest... */
		for (ret = 0; ret < 24 - 6; ret++)
			ok1(1);
		tdb_close(tdb);
		return exit_status();
	}
#endif
	ok1(ret == 0);

	/* Put an entry in, and check it. */
	ok1(tdb_store(tdb, key, orig_data, TDB_INSERT) == 0);
	data = tdb_fetch(tdb, key);
	ok1(data.dsize == strlen("world"));
	ok1(memcmp(data.dptr, "world", strlen("world")) == 0);
	free(data.dptr);

	/* That currently fills at the end, make sure that's true. */
	hashval = tdb->hash_fn(&key);
	rec_ptr = tdb_find_lock_hash(tdb, key, hashval, F_RDLCK, &rec);
	ok1(rec_ptr);
	ok1(rec_ptr > 2U*1024*1024*1024);
	tdb_unlock(tdb, BUCKET(rec.full_hash), F_RDLCK);

	/* Traverse must work. */
	ok1(tdb_traverse(tdb, test_traverse, &orig_data) == 1);

	/* Delete should work. */
	ok1(tdb_delete(tdb, key) == 0);

	ok1(tdb_traverse(tdb, test_traverse, NULL) == 0);

	/* Transactions should work.
	 */
	ok1(tdb_transaction_start(tdb) == 0);
	ok1(tdb_store(tdb, key, orig_data, TDB_INSERT) == 0);

	data = tdb_fetch(tdb, key);
	ok1(data.dsize == strlen("world"));
	ok1(memcmp(data.dptr, "world", strlen("world")) == 0);
	free(data.dptr);
	ok1(tdb_transaction_commit(tdb) == 0);

	ok1(tdb_traverse(tdb, test_traverse, &orig_data) == 1);
	tdb_close(tdb);

	return exit_status();
}
/* Release the read lock on the hash chain for 'key'. */
int tdb_chainunlock_read(struct tdb_context *tdb, TDB_DATA key)
{
	u32 hash = tdb->hash_fn(&key);

	return tdb_unlock(tdb, BUCKET(hash), F_RDLCK);
}
/* unmark a chain as locked without actually locking it. Warning! use
   with great caution! */
int tdb_chainlock_unmark(struct tdb_context *tdb, TDB_DATA key)
{
	u32 hash = tdb->hash_fn(&key);

	/* TDB_MARK_LOCK keeps this a bookkeeping-only operation. */
	return tdb_unlock(tdb, BUCKET(hash), F_WRLCK | TDB_MARK_LOCK);
}
/* lock/unlock one hash chain, non-blocking. This is meant to be used
   to reduce contention - it cannot guarantee how many records will be
   locked */
int tdb_chainlock_nonblock(struct tdb_context *tdb, TDB_DATA key)
{
	u32 hash = tdb->hash_fn(&key);

	return tdb_lock_nonblock(tdb, BUCKET(hash), F_WRLCK);
}
/* mark a chain as locked without actually locking it. Warning! use with
   great caution! */
int tdb_chainlock_mark(struct tdb_context *tdb, TDB_DATA key)
{
	u32 hash = tdb->hash_fn(&key);
	int ret;

	/* TDB_MARK_LOCK records the lock without taking it. */
	ret = tdb_lock(tdb, BUCKET(hash), F_WRLCK | TDB_MARK_LOCK);
	tdb_trace_1rec(tdb, "tdb_chainlock_mark", key);
	return ret;
}
/* take the bucket corresponding to time t in the given wheel and
** reinsert them back into the wheel (i.e. into wheelno -1)
*/
static int ci_ip_timer_cascadewheel(ci_netif* netif, int wheelno,
                                    ci_iptime_t stime)
{
  ci_ip_timer* ts;
  ci_ni_dllist_t* bucket;
  oo_p curid, buckid;
  int changed = 0;

  ci_assert(wheelno > 0 && wheelno < CI_IPTIME_WHEELS);
  /* check time is on the boundary expected by the wheel number passed
   * in */
  ci_assert( (stime & ((unsigned)(-1) << (CI_IPTIME_BUCKETBITS*wheelno)))
             == stime );

  /* bucket to empty */
  bucket = BUCKET(netif, wheelno, stime);
  buckid = ci_ni_dllist_link_addr(netif, &bucket->l);
  curid = bucket->l.next;

  LOG_ITV(log(LN_FMT "cascading wheel=%u sched_ticks=0x%x bucket=%i",
              LN_PRI_ARGS(netif), wheelno, stime,
              BUCKETNO(wheelno, stime)));

  /* ditch the timers in this dll, pointers held in curid and buckid */
  ci_ni_dllist_init(netif, bucket,
                    ci_ni_dllist_link_addr(netif, &bucket->l), "timw");

  /* Walk the detached chain, pushing each timer into the wheel below. */
  while( ! OO_P_EQ(curid, buckid) ) {
    ts = ADDR2TIMER(netif, curid);

    /* get next in linked list */
    curid = ts->link.next;

#ifndef NDEBUG
    {
      /* if inserting in wheel 0 - top 3 wheels must have the same
       * time */
      if (wheelno == 1)
        ci_assert( (stime & WHEEL0_MASK) == (ts->time & WHEEL0_MASK) );
      /* else, if inserting in wheel 1 - top 2 wheels must have the
       * same time */
      else if (wheelno == 2)
        ci_assert( (stime & WHEEL1_MASK) == (ts->time & WHEEL1_MASK) );
      /* else, if inserting in wheel 2 - the top wheel must have the
       * same time */
      else {
        ci_assert(wheelno == 3);
        ci_assert( (stime & WHEEL2_MASK) == (ts->time & WHEEL2_MASK) );
      }
    }
#endif

    /* insert ts into wheel below */
    bucket = BUCKET(netif, wheelno-1, ts->time);
    changed = 1;

    /* append onto the correct bucket
    **
    ** NB this might not be stable because a later insert with a
    ** smaller relative time will be before an earlier insert with a
    ** larger relative time. Oh well doesn't really matter
    */
    ci_ni_dllist_push_tail(netif, bucket, &ts->link);
    ci_assert(ci_ip_timer_is_link_valid(netif, ts));
  }

  return changed;
}
table->last_node = table->last_node->next; while ( table->last_node != NULL ) { if( table->last_node->value==key) return table->last_node->obj; table->last_node = table->last_node->next; } return NULL; } /* removes the element with key 'key' and returns the object stored on it */ __ptr_t delete(hashtable table,ulong key) { __ptr_t obj; hashnode *b,*prev=NULL; int c=mhash(table,key); b=BUCKET(table,c); /* set a pointer to the first bucket */ while( b!=NULL) { if( b->value==key){ obj=b->obj; if(prev==NULL) /* first element */ BUCKET(table,c)=b->next; else prev->next=b->next; free(b); table->n_entries--; return obj; } prev = b; b = b->next; }; return NULL;