Example #1
0
LIST * list_append( LIST * l, LIST * nl )
{
    if ( list_empty( l ) )
        return nl;
    if ( !list_empty( nl ) )
    {
        int const l_size = list_length( l );
        int const nl_size = list_length( nl );
        int const size = l_size + nl_size;
        unsigned const bucket = get_bucket( size );

        /* Do we need to reallocate? */
        if ( l_size <= ( 1u << ( bucket - 1 ) ) )
        {
            LIST * result = list_alloc( size );
            memcpy( list_begin( result ), list_begin( l ), l_size * sizeof(
                OBJECT * ) );
            list_dealloc( l );
            l = result;
        }

        l->impl.size = size;
        memcpy( list_begin( l ) + l_size, list_begin( nl ), nl_size * sizeof(
            OBJECT * ) );
        list_dealloc( nl );
    }
    return l;
}
Example #2
0
      void fdb_manager::close_buckets(const vector<int> &buckets)
      {
        tbsys::CThreadGuard guard(&stat_lock);
        fdb_buckets_map *temp_buckets_map = new fdb_buckets_map(*buckets_map);
        vector<fdb_bucket *> rm_buckets;

        for(int i = 0; i < (int) buckets.size(); i++) {
          int bucket_number = buckets[i];
          fdb_bucket *bucket = get_bucket(bucket_number);
          if(bucket == NULL) {
            // not exist
            continue;
          }

          // remove the bucket from the new map and remember it for backup/cleanup
          temp_buckets_map->erase(bucket_number);
          rm_buckets.push_back(bucket);
        }

        fdb_buckets_map *old_buckets_map = buckets_map;
        buckets_map = temp_buckets_map;

        sleep(1);
        for(size_t i = 0; i < rm_buckets.size(); ++i) {
          rm_buckets[i]->backup();
          delete rm_buckets[i];
        }

        delete old_buckets_map;
      }
Example #3
0
hash_entry<T, Q> *hashmap<T, Q>::get_qprobe(Q id, unsigned long hash, unsigned int exponent)
{
	unsigned int loc;
	unsigned char sign = 0; // probe direction: 0 = forward (+), 1 = backward (-)
	loc = (hash + exponent) % max_entry;
	hash_entry<T, Q> *ret = (hash_entry<T, Q> *)NULL;
gqloop_top:
	if ((loc > hash && sign == 1) || (loc < hash && sign == 0)) //-/+ and full rollover
		return (hash_entry<T, Q> *)NULL;
	if (table[loc] != NULL)
	{
		if (table[loc]->id == id)
			return table[loc];
		else if ((ret = get_bucket(id, loc)) != NULL)
			return ret;
	}
	if (sign == 0)
	{
		if (exponent > hash)
			loc = ((hash - exponent) * -1) % max_entry;
		else
			loc = (hash - exponent);
		sign = 1;
		goto gqloop_top;
	}
	return get_qprobe(id, hash, exponent == 0 ? 1 : exponent * 2);
}
Example #4
0
void fd_bo_del(struct fd_bo *bo)
{
	struct fd_device *dev = bo->dev;

	if (!atomic_dec_and_test(&bo->refcnt))
		return;

	pthread_mutex_lock(&table_lock);

	if (bo->bo_reuse) {
		struct fd_bo_bucket *bucket = get_bucket(dev, bo->size);

		/* see if we can be green and recycle: */
		if (bucket) {
			struct timespec time;

			clock_gettime(CLOCK_MONOTONIC, &time);

			bo->free_time = time.tv_sec;
			list_addtail(&bo->list, &bucket->list);
			fd_cleanup_bo_cache(dev, time.tv_sec);

			/* bo's in the bucket cache don't have a ref and
			 * don't hold a ref to the dev:
			 */

			goto out;
		}
	}

	bo_del(bo);
out:
	fd_device_del_locked(dev);
	pthread_mutex_unlock(&table_lock);
}
Example #5
0
struct fd_bo * fd_bo_new(struct fd_device *dev,
		uint32_t size, uint32_t flags)
{
	struct fd_bo *bo = NULL;
	struct fd_bo_bucket *bucket;
	uint32_t handle;
	int ret;

	size = ALIGN(size, 4096);
	bucket = get_bucket(dev, size);

	/* see if we can be green and recycle: */
	if (bucket) {
		size = bucket->size;
		bo = find_in_bucket(dev, bucket, flags);
		if (bo) {
			atomic_set(&bo->refcnt, 1);
			fd_device_ref(bo->dev);
			return bo;
		}
	}

	ret = dev->funcs->bo_new_handle(dev, size, flags, &handle);
	if (ret)
		return NULL;

	pthread_mutex_lock(&table_lock);
	bo = bo_from_handle(dev, size, handle);
	bo->bo_reuse = 1;
	pthread_mutex_unlock(&table_lock);

	return bo;
}
Example #6
0
void _tr_blacklist::remove(const sockaddr_storage* addr)
{
  blacklist_bucket* bucket = get_bucket(hashlittle(addr, SA_len(addr), 0)
					& BLACKLIST_HT_MASK);
  bucket->lock();
  bucket->remove(*(const bl_addr*)addr);
  bucket->unlock();
}
Example #7
0
static inline void
burst_insert(TSTNode<CharT>* root, unsigned char** strings, size_t N)
{
	for (size_t i=0; i < N; ++i) {
		unsigned char* str = strings[i];
		size_t depth = 0;
		CharT c = get_char<CharT>(str, depth);
		TSTNode<CharT>* node = root;
		unsigned bucket = get_bucket(c, node->pivot);
		while (node->is_tst[bucket]) {
			if (is_middle_bucket(bucket)) {
				depth += sizeof(CharT);
				c = get_char<CharT>(str, depth);
			}
			node = static_cast<TSTNode<CharT>*>(
					node->buckets[bucket]);
			bucket = get_bucket(c, node->pivot);
		}
		BucketT* buck = static_cast<BucketT*>(node->buckets[bucket]);
		if (not buck)
			node->buckets[bucket] = buck = new BucketT;
		buck->push_back(str);
		if (is_middle_bucket(bucket) && is_end(node->pivot)) {
			continue;
		}
		if (buck->size() > sizeof(CharT)*Threshold
				and buck->size() == buck->capacity()) {
			if (is_middle_bucket(bucket)) {
				depth += sizeof(CharT);
			}
			CharT* oracle = static_cast<CharT*>(
				malloc(buck->size()*sizeof(CharT)));
			for (unsigned j=0; j < buck->size(); ++j) {
				oracle[j] = get_char<CharT>((*buck)[j], depth);
			}
			TSTNode<CharT>* new_node
				= BurstImpl()(*buck, oracle, depth);
			free(oracle);
			delete buck;
			node->buckets[bucket] = new_node;
			node->is_tst[bucket] = true;
		}
	}
}
Example #8
0
int main()
{
	int i;
	user_dir_t * u;
	bucket_t * b;
	object_t * o;
	init_name_space();
	put_user(U1);
	put_user(U2);
	put_user(U3);
	put_user(U4);
	put_user(U5);
	put_user(U6);
	put_user(U7);
	put_user(U8);
	put_user(U9);
	put_user(U10);
	put_user(U11);
	put_user(U12);
	prt_ulist();
	prt_uhash();
	printf("----------------------\n");
	put_bucket(B1,U5);
	put_bucket(B2,U5);
	put_bucket(B3,U5);
	put_bucket(B4,U5);
	put_bucket(B5,U5);
	prt_bhash();
	printf("----------------------\n");
	if(put_object(O1,B3,U5) == 0){
		printf("put o1 ok\n");
	}
	if(put_object(O2,B3,U5) == 0){
		printf("put o2 ok\n");
	}
	if(put_object(O3,B3,U5) == 0){
		printf("put o3 ok\n");
	}
	if(put_object(O4,B3,U5) == 0){
		printf("put o4 ok\n");
	}
	if(put_object(O5,B3,U5) == 0){
		printf("put o5 ok\n");
	}
	if(put_object(O6,B3,U5) == 0){
		printf("put o6 ok\n");
	}
	if(put_object(O7,B3,U5) == 0){
		printf("put o7 ok\n");
	}
	prt_ohash();
	get_bucket(B3,U5,LIST_OBJECT_FILE);
	get_user(U5,LIST_BUCKET_FILE,GU_LIST_BUCKETS);
	get_user(U5,ALL_BUCKETS_OBJECTS_FILE,GU_LIST_ALL_BUCKETS_OBJECTS);
	return 0;
}
Example #9
0
bool _tr_blacklist::exist(const sockaddr_storage* addr)
{
  bool res;
  blacklist_bucket* bucket = get_bucket(hashlittle(addr, SA_len(addr), 0)
					& BLACKLIST_HT_MASK);
  bucket->lock();
  res = bucket->exist(*(const bl_addr*)addr);
  bucket->unlock();

  return res;
}
Example #10
0
 std::size_t hash_buckets<A, G>::bucket_size(std::size_t index) const
 {
     if(!buckets_) return 0;
     bucket_ptr ptr = get_bucket(index)->next_;
     std::size_t count = 0;
     while(ptr) {
         ++count;
         ptr = ptr->next_;
     }
     return count;
 }
Example #11
0
static LIST * list_alloc( unsigned const size )
{
    unsigned const bucket = get_bucket( size );
    if ( freelist[ bucket ] )
    {
        LIST * result = freelist[ bucket ];
        freelist[ bucket ] = result->impl.next;
        return result;
    }
    return (LIST *)BJAM_MALLOC( sizeof( LIST ) + ( 1u << bucket ) *
        sizeof( OBJECT * ) );
}
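None of the bjam LIST examples show get_bucket itself; from list_alloc's `1u << bucket` allocation size and list_append's `1u << ( bucket - 1 )` growth test, it presumably returns the index of the smallest power-of-two capacity that holds `size` elements. A minimal sketch under that assumption, not the actual implementation:

/* Hypothetical reconstruction: index of the smallest power-of-two capacity
 * ( 1 << bucket ) able to hold `size` OBJECT pointers. */
static unsigned get_bucket( unsigned const size )
{
    unsigned bucket = 0;
    while ( size > ( 1u << bucket ) )
        ++bucket;
    return bucket;
}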
Example #12
0
		Tile * find_tile(Key const & key) {
			if (cached_ && cached_->key == key) { return &cached_->value; }

			Bucket * bucket = get_bucket(key);
			if (!bucket) { return 0; }

			Node * node = Node::lower_bound(bucket->start, key);
			node = node ? node->next : bucket->start;
			if (node && node->key == key) { return cache(node); }

			return 0;
		}
Example #13
0
void* htable_store(htable_t* ht, void* node)
{
	htable_node_t* tnode = (htable_node_t*)node;
	dlist_t* bucket = get_bucket(ht, tnode);
	dlist_node_t* old_node = dlist_find(bucket, node, ht->do_compare);
	if (old_node) {
		dlist_remove(bucket, old_node);
	}
	tnode->bucket = bucket;
	dlist_add(bucket, node);

	return old_node;
}
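htable_store and htable_find (Example #30) both rely on a get_bucket(ht, node) that maps a node to its dlist chain; presumably it hashes the node through a table-supplied callback and indexes the bucket array modulo its size. A sketch, where the do_hash, buckets, and bucket_count members are assumptions mirroring the do_compare callback used above:

/* Hypothetical helper: pick the chain for a node. The do_hash callback and
 * the buckets/bucket_count members are assumed, not taken from the original
 * source. */
static dlist_t* get_bucket(htable_t* ht, void* node)
{
	size_t h = ht->do_hash(node);
	return &ht->buckets[h % ht->bucket_count];
}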
Example #14
0
void json_error_log::log(const std::string &json, const std::string &errstr,
			 uint64_t ts_ns, const std::string &uri)
{
	time_t now = ts_ns / ONE_SECOND_IN_NS;

	if(m_json_parse_errors_file != "")
	{
		std::ofstream errs(m_json_parse_errors_file, std::ofstream::out | std::ofstream::app);
		char buf[sizeof("YYYY-MM-DDTHH:MM:SSZ")];
		strftime(buf, sizeof(buf), "%FT%TZ", gmtime(&now));

		errs << "*******************************" << std::endl;
		errs << "URI: " << uri << std::endl;
		errs << "Time (UTC): " << buf << std::endl;
		errs << "Error: " << errstr << std::endl;
		errs << "Json: " << json << std::endl;
		errs << "*******************************" << std::endl;

		errs.close();
	}

	token_bucket &bucket = get_bucket(uri);

	if(bucket.claim(1, ts_ns))
	{
		sinsp_user_event evt;
		sinsp_user_event::tag_map_t tags;
		tags["source"] = "json_parser";
		tags["uri"] = uri;
		tags["json_prefix"] = json.substr(0, 100);
		std::string event_name = "json_parse_error";
		std::string desc = errstr;

		event_scope scope;
		if(m_machine_id.length())
		{
			scope.add("host.mac", m_machine_id);
		}

		// Also emit a custom event noting the json parse failure.
		std::string evtstr = sinsp_user_event::to_string(now,
								 std::move(event_name),
								 std::move(desc),
								 std::move(scope),
								 std::move(tags));

		g_logger.log("Logging user event: " + evtstr, sinsp_logger::SEV_DEBUG);

		user_event_logger::log(evtstr, user_event_logger::SEV_EVT_WARNING);
	}
}
Example #15
0
/// Obtains the value from the key provided.
void *DS_Hash_Map_get(DS_Hash_Map_t *map, const char *key){
	MU_ARG_CHECK(logger, NULL, map, map && map->size, map && map->buckets, key);
	MU_COND_RWLOCK_RDLOCK(map->lock, logger);
	char trunc_key[DS_HASH_MAP_KEY_SIZE + 1];
	snprintf(trunc_key, DS_HASH_MAP_KEY_SIZE + 1, "%s", key);
	DS_Bucket_t *bucket = get_bucket(map->buckets, map->amount_of_buckets, trunc_key);
	if(!bucket_is_valid(bucket)){
		MU_COND_RWLOCK_UNLOCK(map->lock, logger);
		return NULL;
	}
	void *value = get_value_from_bucket(bucket, trunc_key);
	MU_COND_RWLOCK_UNLOCK(map->lock, logger);
	return value;
}
Example #16
0
void _tr_blacklist::insert(const sockaddr_storage* addr, unsigned int duration,
			   const char* reason)
{
  if(!duration)
    return;

  blacklist_bucket* bucket = get_bucket(hashlittle(addr, SA_len(addr), 0)
					& BLACKLIST_HT_MASK);
  bucket->lock();
  if(!bucket->exist(*(const bl_addr*)addr)) {
    bucket->insert(*(const bl_addr*)addr,duration,reason);
  }
  bucket->unlock();
}
Example #17
0
hash_entry<T, Q> *hashmap<T, Q>::get_object(Q id)
{
	unsigned int loc = genhash(id); // home slot where the entry should be
	hash_entry<T, Q> *ret = (hash_entry<T, Q> *)NULL;
	assert(table[loc] != NULL);
	if (table[loc]->id == id)
		return table[loc];
	else
	{
		if ((ret = get_bucket(id, loc)) != NULL)
			return ret;
		else
			return get_qprobe(id, loc);
	}
}
Example #18
0
		Tile * get_tile(Key const & key) {
			if (cached_ && cached_->key == key) { return &cached_->value; }

			Bucket * bucket = get_bucket(key);
			Node * node = Node::lower_bound(bucket->start, key);
			Node ** out = node ? &node->next : &bucket->start;

			if (*out && (*out)->key == key) { return cache(*out); }

			*out = Node::insert(*out, new Node(key, this));
			cache(*out);
			++bucket->size;
			++node_count_;
			if (node_count_ / bucket_count_ > 8) { rebucket(bucket_count_ + 1); }
			return &cached_->value;
		}
Example #19
0
      bool fdb_manager::init_buckets(const vector<int> &buckets)
      {
        tbsys::CThreadGuard guard(&stat_lock);

        fdb_buckets_map *temp_buckets_map = new fdb_buckets_map(*buckets_map);

        for(int i = 0; i < (int) buckets.size(); i++) {
          int bucket_number = buckets[i];
          fdb_bucket *bucket = get_bucket(bucket_number);
          if(bucket != NULL) {
            // already exist
            continue;
          }

          // init new FdbBucket
          fdb_bucket *new_bucket = new fdb_bucket();
          bool sr = new_bucket->start(bucket_number);
          if(!sr) {
            // init FdbBucket failed
            log_error("init bucket[%d] failed", bucket_number);
            delete new_bucket;
            return false;
          }
          (*temp_buckets_map)[bucket_number] = new_bucket;
        }

        fdb_buckets_map *old_buckets_map = buckets_map;
        buckets_map = temp_buckets_map;
        usleep(100);
        delete old_buckets_map;

#ifdef TAIR_DEBUG
        if(buckets.size() > 0) {
          for(size_t i = 0; i < buckets.size(); ++i) {
            log_debug("init bucket: [%d]", buckets[i]);
          }
        }

        log_debug("after init, local bucket:");
        fdb_buckets_map::const_iterator it;
        for(it = buckets_map->begin(); it != buckets_map->end(); ++it) {
          log_debug("bucket[%d] => [%p]", it->first, it->second);
        }

#endif
        return true;
      }
Example #20
0
static void list_dealloc( LIST * l )
{
    unsigned size = list_length( l );
    unsigned bucket;
    LIST * node = l;

    if ( size == 0 ) return;

    bucket = get_bucket( size );

#ifdef BJAM_NO_MEM_CACHE
    BJAM_FREE( node );
#else
    node->impl.next = freelist[ bucket ];
    freelist[ bucket ] = node;
#endif
}
Example #21
0
void hash_table_set(hash_table *table, const char *key, void *value) {
    bucket *bkt = get_bucket(table, key);
    if(bkt != NULL) {
        bkt->value = value;
        return;
    }
    if(table->occupied + 1 > table->size / 2) {
        resize_table(table);
    }
    bucket *new_bkt = malloc(sizeof(bucket));
    new_bkt->key = key;
    new_bkt->value = value;
    new_bkt->hash = hash(key);
    int index = new_bkt->hash % table->size;
    new_bkt->next = table->table[index];
    table->table[index] = new_bkt;
    table->occupied++;
}
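hash_table_set calls a get_bucket(table, key) lookup that is not shown; given the chaining used in the insert path, it would presumably hash the key, index table->table modulo table->size, and walk the chain comparing keys. A sketch under those assumptions:

/* Hypothetical companion lookup for hash_table_set: returns the chain node
 * holding `key`, or NULL if absent. Field and helper names follow the insert
 * path above; this is a sketch, not the original implementation. */
static bucket *get_bucket(hash_table *table, const char *key) {
    bucket *bkt = table->table[hash(key) % table->size];
    while(bkt != NULL) {
        if(strcmp(bkt->key, key) == 0) {
            return bkt;
        }
        bkt = bkt->next;
    }
    return NULL;
}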
Example #22
0
 int TairHelper::create_family(FamilyInfo& family_info)
 {
   int64_t pos = 0;
   char pkey[64] = {'\0'};
   char skey[128] = {'\0'};
   char value[1024] = {'\0'};
   snprintf(pkey, 64, "%s%06d", key_prefix_.c_str(), get_bucket(family_info.family_id_));
   snprintf(skey, 64, "%020"PRI64_PREFIX"d", family_info.family_id_);
   int32_t ret = TFS_SUCCESS != family_info.serialize(value, 1024, pos) ? EXIT_SERIALIZE_ERROR : TFS_SUCCESS;
   if (TFS_SUCCESS == ret)
   {
     tbutil::Mutex::Lock lock(mutex_);
     ret = put_(pkey, skey, value, family_info.length());
     if (TAIR_RETURN_SUCCESS != ret)
     {
       TBSYS_LOG(WARN, "create family : %"PRI64_PREFIX"d error: call tair put error, ret: %d, pkey: %s, skey: %s", family_info.family_id_, ret, pkey, skey);
     }
     ret = (TAIR_RETURN_SUCCESS == ret) ? TFS_SUCCESS : EXIT_OP_TAIR_ERROR;
   }
   return ret;
 }
Example #23
0
 int TairHelper::query_family(FamilyInfo& family_info)
 {
   int64_t pos = 0;
   char pkey[64] = {'\0'};
   char skey[128] = {'\0'};
   char value[1024] = {'\0'};
   snprintf(pkey, 64, "%s%06d", key_prefix_.c_str(), get_bucket(family_info.family_id_));
   snprintf(skey, 64, "%020"PRI64_PREFIX"d", family_info.family_id_);
   tbutil::Mutex::Lock lock(mutex_);
   int32_t ret = get_(pkey, skey, value, 1024);
   if (TAIR_RETURN_SUCCESS != ret)
   {
     TBSYS_LOG(WARN, "query family : %"PRI64_PREFIX"d error: call tair put error, ret: %d, pkey: %s, skey: %s", family_info.family_id_, ret, pkey, skey);
     ret = EXIT_OP_TAIR_ERROR;
   }
   else
   {
     ret = family_info.deserialize(value, 1024, pos);
   }
   return ret;
 }
Example #24
0
      int fdb_manager::get(int bucket_number, data_entry & key,
                           data_entry & value, bool with_stat)
      {

        fdb_bucket *bucket = get_bucket(bucket_number);

        if(bucket == NULL) {
          return TAIR_RETURN_FAILED;
        }

        int rc = TAIR_RETURN_SUCCESS;
        PROFILER_BEGIN("get from cache");
        if(memory_cache->get(bucket_number, key, value) == EXIT_SUCCESS) {
          log_debug("value get from mdb, size: %d", value.get_size());
          value.decode_meta(true);
          key.data_meta = value.data_meta;
          log_debug("memcache ing");
          key.data_meta.log_self();
          log_debug("cache hit...");
          PROFILER_END();
          return rc;
        }
        PROFILER_END();

        rc = bucket->get(key, value);
        log_debug("fdb getting");
        key.data_meta.log_self();
        PROFILER_BEGIN("put into cache");
        if(rc == TAIR_RETURN_SUCCESS) {
          data_entry temp_value = value;
          temp_value.merge_meta();
          log_debug("value put into mdb, size: %d", temp_value.get_size());
          memory_cache->put(bucket_number, key, temp_value, false,
                            key.data_meta.edate);
        }
        PROFILER_END();

        return rc;
      }
Example #25
0
      int fdb_manager::remove(int bucket_number, data_entry & key,
                              bool version_care)
      {

        fdb_bucket *bucket = get_bucket(bucket_number);

        if(bucket == NULL) {
          return TAIR_RETURN_FAILED;
        }

        int rc = TAIR_RETURN_FAILED;
        PROFILER_BEGIN("remove from cache");
        if(memory_cache->remove(bucket_number, key, false) == EXIT_FAILURE) {
          log_error("clear cache failed...");
          PROFILER_END();
          return rc;
        }
        PROFILER_END();

        rc = bucket->remove(key, version_care);
        return rc;
      }
Example #26
0
      int fdb_manager::put(int bucket_number, data_entry & key,
                           data_entry & value, bool version_care,
                           int expire_time)
      {
        fdb_bucket *bucket = get_bucket(bucket_number);
        if(bucket == NULL) {
          // bucket not exist
          log_debug("fdbBucket[%d] not exist", bucket_number);
          return TAIR_RETURN_FAILED;
        }

        int rc = TAIR_RETURN_FAILED;
        PROFILER_BEGIN("remove from cache");
        if(memory_cache->remove(bucket_number, key, false) == EXIT_FAILURE) {
          log_error("clear cache failed...");
          return rc;
        }
        PROFILER_END();
        log_debug("fdb put");
        key.data_meta.log_self();
        rc = bucket->put(key, value, version_care, expire_time);
        return rc;
      }
Example #27
0
 int TairHelper::del_family(const int64_t family_id, const bool del, const bool log, const uint64_t own_ipport)
 {
   char pkey[128] = {'\0'};
   char skey[128] = {'\0'};
   char suffix[64] = {'\0'};
   if (del)
     snprintf(suffix, 64, "_del_%"PRI64_PREFIX"u",own_ipport);
   snprintf(pkey, 128, "%s%06d%s", key_prefix_.c_str(), get_bucket(family_id), del ? suffix : "");
   snprintf(skey, 128, "%020"PRI64_PREFIX"d", family_id);
   data_entry tair_pkey(pkey, false);
   data_entry tair_skey(skey, false);
   int32_t ret = del_(pkey, skey);
   if (TAIR_RETURN_SUCCESS != ret)
   {
     TBSYS_LOG(WARN, "del family : %"PRI64_PREFIX"d error: call tair put error, ret: %d, pkey: %s, skey: %s", family_id, ret, pkey, skey);
     ret = EXIT_OP_TAIR_ERROR;
   }
   else
   {
     if (log)
       ret = insert_del_family_log_(family_id, own_ipport);
   }
   return ret;
 }
Example #28
0
int add(struct config *c, DB *dbp, ReadingSet *rs) {
  int cur_rec, ret;
  DBC *cursorp;
  struct rec_key cur_k;
  struct rec_val cur_v;
  DB_TXN *tid = NULL;
  unsigned char buf[POINT_OFF(MAXBUCKETRECS + NBUCKETSIZES)];
  struct rec_val *v = (struct rec_val *)buf;
  struct point *rec_data = v->data;
  bool_t bucket_dirty = FALSE, bucket_valid = FALSE;
  unsigned long long dirty_start = ULLONG_MAX, dirty_end = 0;

  bzero(&cur_k, sizeof(cur_k));
  bzero(&cur_v, sizeof(cur_v));

  if ((ret = env->txn_begin(env, NULL, &tid, 0)) != 0) {
    error("txn_begin: %s\n", db_strerror(ret));
    return -1;
  }
  
  if ((ret = dbp->cursor(dbp, tid, &cursorp, 0)) != 0) {
    error("db cursor: %s\n", db_strerror(ret));
    goto abort;
  }
  if (cursorp == NULL) {
    dbp->err(dbp, ret, "cursor");
    goto abort;
  }

  for (cur_rec = 0; cur_rec < rs->n_data; cur_rec++) {
    debug("Adding reading ts: 0x%x\n", rs->data[cur_rec]->timestamp);
    if (bucket_valid &&
        v->n_valid > 0 &&
        cur_k.stream_id == rs->streamid &&
        cur_k.timestamp <= rs->data[cur_rec]->timestamp &&
        cur_k.timestamp + v->period_length > rs->data[cur_rec]->timestamp) {
      /* we're already in the right bucket; don't need to do anything */
      debug("In valid bucket.  n_valid: %i\n", v->n_valid);
    } else {
      /* need to find the right bucket */
      debug("Looking up bucket\n");
      assert(!bucket_valid || POINT_OFF(v->n_valid) < sizeof(buf));
      if (bucket_valid == TRUE && 
          (ret = put(dbp, tid, &cur_k, v, POINT_OFF(v->n_valid))) < 0) {
        warn("error writing back data: %s\n", db_strerror(ret));
        // we would lose data; abort the transaction.
        goto abort;
      }
      bucket_valid = FALSE;

      cur_k.stream_id = rs->streamid;
      cur_k.timestamp = rs->data[cur_rec]->timestamp;

      if ((ret = get_bucket(cursorp, &cur_k, &cur_v)) <= 0) {
        /* create a new bucket */

        /* the key has been updated by get_bucket */
        v->n_valid = 0;
        v->period_length = bucket_sizes[-ret];
        v->tail_timestamp = 0;
        debug("Created new bucket anchor: %i length: %i\n", cur_k.timestamp, v->period_length);
      } else {
        debug("Found existing bucket streamid: %i anchor: %i length: %i\n", 
              cur_k.stream_id, cur_k.timestamp, v->period_length);
        if ((ret = get(cursorp, DB_SET | DB_RMW, &cur_k, v, sizeof(buf))) < 0) {
          warn("error reading bucket: %s\n", db_strerror(ret));
          goto abort;
        }
      }
      bucket_valid = TRUE;
    }

    debug("v->: tail_timestamp: %i n_valid: %i\n", v->tail_timestamp, v->n_valid);
    /* start the insert -- we're in the current bucket */
    if (v->tail_timestamp < rs->data[cur_rec]->timestamp ||
        v->n_valid == 0) {
      /* if it's an append or a new bucket we can just write the values */
      /* update the header block */
      v->tail_timestamp = rs->data[cur_rec]->timestamp;
      v->n_valid++;
      /* and the data */
      _rpc_copy_records(&v->data[v->n_valid-1], &rs->data[cur_rec], 1);
      debug("Append detected; inserting at offset: %i\n", POINT_OFF(v->n_valid-1));
    } else {
      struct rec_val *v = (struct rec_val *)buf;
      struct point new_rec;
      int i;
      /* otherwise we have to insert it somewhere. we'll just read out
         all the data and do the insert stupidly. */
      for (i = 0; i < v->n_valid; i++) {
        if (v->data[i].timestamp >= rs->data[cur_rec]->timestamp)
          break;
      }
      debug("Inserting within existing bucket index: %i (%i %i)\n", 
            i, rs->data[cur_rec]->timestamp, v->tail_timestamp);
      /* appends should have been handled without reading back the whole record */
      assert(i < v->n_valid);
      /* we have our insert position */
      if (v->data[i].timestamp == rs->data[cur_rec]->timestamp) {
        /* replace a record */
        debug("Replacing record with timestamp 0x%x\n", rs->data[cur_rec]->timestamp);
        _rpc_copy_records(&v->data[i], &rs->data[cur_rec], 1);
      } else {
        /* shift the existing records back */
        debug("Inserting new record (moving %i recs)\n", v->n_valid - i);
        memmove(&v->data[i+1], &v->data[i], (v->n_valid - i) * sizeof(struct point));
        _rpc_copy_records(&v->data[i], &rs->data[cur_rec], 1);
        v->n_valid++;
        /* and update the header */
      }
    }
    bucket_dirty = TRUE;
    assert(v->n_valid < MAXBUCKETRECS + NBUCKETSIZES);

    if (v->n_valid > MAXBUCKETRECS) {
      debug("Splitting buckets since this one is full!\n");
      /* start by writing the current bucket back */
      assert(POINT_OFF(v->n_valid) < sizeof(buf));
      if (bucket_valid == TRUE && 
          (ret = put(dbp, tid, &cur_k, v, POINT_OFF(v->n_valid))) < 0) {
        bucket_valid = FALSE;
        warn("error writing back data: %s\n", db_strerror(ret));
        goto abort;
      }

      if (split_bucket(dbp, cursorp, tid, &cur_k) < 0)
        goto abort;
      bzero(&cur_k, sizeof(cur_k));
      bzero(&cur_v, sizeof(cur_v));
    }

    /* find the time region this write dirties  */
    if (rs->data[cur_rec]->timestamp < dirty_start) {
      dirty_start = rs->data[cur_rec]->timestamp;
    }
    if (rs->data[cur_rec]->timestamp > dirty_end) {
      dirty_end = rs->data[cur_rec]->timestamp;
    }
  }

  if (bucket_valid && bucket_dirty) {
    debug("flushing bucket back to db\n");
    assert(POINT_OFF(v->n_valid) < sizeof(buf));
    if ((ret = put(dbp, tid, &cur_k, v, POINT_OFF(v->n_valid))) < 0) {
      warn("error writing back data: %s\n", db_strerror(ret));
      goto abort;
    }
  }

  cursorp->close(cursorp);

  if ((ret = tid->commit(tid, 0)) != 0) {
    fatal("transaction commit failed: %s\n", db_strerror(ret));
    // SDH : "If DB_TXN->commit() encounters an error, the transaction
    //  and all child transactions of the transaction are aborted."
    //
    // So, we can just die here.
    // do_shutdown = 1;
    return -1;
  }

  /* mark the sketch dirty over the time range this write touched */
  if (dirty_start != ULLONG_MAX && dirty_end != 0) {
    mark_sketch_dirty(c, rs->streamid, dirty_start, dirty_end);
  }

  return 0;

 abort:
  cursorp->close(cursorp);
  warn("Aborting transaction\n");

  if ((ret = tid->abort(tid)) != 0) {
    fatal("Could not abort transaction: %s\n", db_strerror(ret));
    // do_shutdown = 1;
    // SDH : there are no documented error codes for DB_TXN->abort().
    assert(0);
  }
  return -1;
}
Example #29
0
static void
multikey_block(unsigned char** strings, size_t n, size_t depth)
{
	if (n < 10000) {
		mkqsort(strings, n, depth);
		return;
	}
	assert(n > B);
	static Buckets buckets;
	static std::array<unsigned char*, 32*B> temp_space;
	static FreeBlocks freeblocks;
	const CharT partval = pseudo_median<CharT>(strings, n, depth);
	BackLinks backlinks(n/B+1);
	std::array<size_t, 3> bucketsize;
	bucketsize.fill(0);
	buckets[0].clear();
	buckets[1].clear();
	buckets[2].clear();
	// Initialize our list of free blocks.
	assert(freeblocks.empty());
	for (size_t i=0; i < 32; ++i)
		freeblocks.push_back(&temp_space[i*B]);
	for (size_t i=0; i < n-n%B; i+=B)
		freeblocks.push_back(strings+i);
	// Distribute strings to buckets. Use a small cache to reduce memory
	// stalls. The exact size of the cache is not very important.
	size_t i=0;
	for (; i < n-n%32; i+=32) {
		std::array<CharT, 32> cache;
		for (unsigned j=0; j<32; ++j) {
			cache[j] = get_char<CharT>(strings[i+j], depth);
		}
		for (unsigned j=0; j<32; ++j) {
			const CharT c = cache[j];
			const unsigned b = get_bucket(c, partval);
			if (bucketsize[b] % B == 0) {
				Block block = take_free_block(freeblocks);
				buckets[b].push_back(block);
				// Backlinks must be set for blocks that
				// use the original string array space.
				if (block >= strings && block < strings+n) {
					backlinks[(block-strings)/B] =
						&(buckets[b].back());
				}
			}
			assert(not buckets[b].empty());
			buckets[b].back()[bucketsize[b] % B] = strings[i+j];
			++bucketsize[b];
		}
	}
	for (; i < n; ++i) {
		const CharT c = get_char<CharT>(strings[i], depth);
		const unsigned b = get_bucket(c, partval);
		if (bucketsize[b] % B == 0) {
			Block block = take_free_block(freeblocks);
			buckets[b].push_back(block);
			// Backlinks must be set for blocks that
			// use the original string array space.
			if (block >= strings && block < strings+n) {
				backlinks[(block-strings)/B] =
					&(buckets[b].back());
			}
		}
		assert(not buckets[b].empty());
		buckets[b].back()[bucketsize[b] % B] = strings[i];
		++bucketsize[b];
	}
	assert(bucketsize[0]+bucketsize[1]+bucketsize[2]==n);
	// Process each bucket and copy all strings in that bucket to their
	// proper place in the original string pointer array. Positions still
	// occupied by other blocks must first be moved out to free space.
	size_t pos = 0;
	for (unsigned i=0; i < 3; ++i) {
		if (bucketsize[i] == 0) continue;
		Bucket::const_iterator it = buckets[i].begin();
		for (size_t bucket_pos=0; bucket_pos < bucketsize[i]; ++it, bucket_pos+=B) {
			const size_t block_items = std::min(size_t(B), bucketsize[i]-bucket_pos);
			const size_t block_overlap = (pos+block_items-1)/B;
			if (*it == (strings+pos)) {
				// Already at correct place.
				assert(pos%B==0);
				backlinks[pos/B] = 0;
				pos += block_items;
				continue;
			}
			// Don't overwrite the block at the position we are
			// about to write to; copy it to safety first.
			if (backlinks[block_overlap]) {
				// Take a free block. The block can be 'stale',
				// i.e. it can point to positions we have
				// already copied new strings into. Take free
				// blocks until we get a non-stale one.
				Block tmp = take_free_block(freeblocks);
				while (tmp >= strings && tmp < strings+pos)
					tmp = take_free_block(freeblocks);
				if (tmp >= strings && tmp < strings+n) {
					assert(backlinks[(tmp-strings)/B]==0);
					backlinks[(tmp-strings)/B] = backlinks[block_overlap];
				}
				memcpy(tmp, *(backlinks[block_overlap]), B*sizeof(unsigned char*));
				*(backlinks[block_overlap]) = tmp;
				backlinks[block_overlap] = 0;
			}
			if (*it >= strings && *it < strings+n) {
				assert(*it > strings+pos);
				backlinks[(*it-strings)/B] = 0;
			}
			// Copy string pointers to correct position.
			memcpy(strings+pos, *it, block_items*sizeof(unsigned char*));
			// Return block for later use. Favor those in the
			// temporary space.
			if (*it >= strings && *it < strings+n) {
				freeblocks.push_back(*it);
			} else {
				freeblocks.push_front(*it);
			}
			pos += block_items;
		}
	}
	freeblocks.clear();
	backlinks.clear(); BackLinks().swap(backlinks);
	multikey_block<B, CharT>(strings, bucketsize[0], depth);
	if (not is_end(partval))
		multikey_block<B, CharT>(strings+bucketsize[0], bucketsize[1],
				depth+sizeof(CharT));
	multikey_block<B, CharT>(strings+bucketsize[0]+bucketsize[1],
			bucketsize[2], depth);
}
Example #30
0
void* htable_find(htable_t* ht, void* data)
{
	dlist_node_t* node = dlist_find(get_bucket(ht, data), data, ht->do_compare);
	return node;
}