Example #1
uint32_t index_field_count_query(struct index_field_manager* index_field, struct low_data_struct* data, MEM_POOL* mem_pool)
{
	uint32_t ret = 0;
	if(index_field == NULL)
	{
		log_warn("no storage entity has been created for this column");
		return 0;
	}
	
	if(index_field->index_type != HI_KEY_ALG_FULLTEXT)
	{
		log_warn("只有fulltext列才能根据value查询对应分词的个数");
		return 0;
	}

	/* hash */
	PROFILER_BEGIN("dyhash index query");

	//dyhash index
	if(index_field->index_type == HI_KEY_ALG_FULLTEXT){
		PROFILER_BEGIN("count query");

		ret = dyhash_index_count_query(index_field->dyhash_index, data, mem_pool);

		PROFILER_END();
	}


	PROFILER_END();
	return ret;
}
   int tair_manager::put(int area, data_entry &key, data_entry &value, int expire_time)
   {
      if (key.get_size() >= TAIR_MAX_KEY_SIZE || key.get_size() < 1) {
         return TAIR_RETURN_ITEMSIZE_ERROR;
      }

      if (value.get_size() >= TAIR_MAX_DATA_SIZE || value.get_size() < 1) {
         return TAIR_RETURN_ITEMSIZE_ERROR;
      }

      if (area < 0 || area >= TAIR_MAX_AREA_COUNT) {
         return TAIR_RETURN_INVALID_ARGUMENT;
      }

      data_entry mkey = key; // key merged with area
      mkey.merge_area(area);

      int bucket_number = get_bucket_number(key);
      log_debug("put request will server in bucket: %d key =%s ", bucket_number, key.get_data());
      key.data_meta.log_self();
      int op_flag = get_op_flag(bucket_number, key.server_flag);

      int rc = TAIR_RETURN_SERVER_CAN_NOT_WORK;
      PROFILER_BEGIN("should write local?");
      if (should_write_local(bucket_number, key.server_flag, op_flag, rc) == false) {
         PROFILER_END();
         return rc;
      }
      PROFILER_END();

      // save into the storage engine
      bool version_care =  op_flag & TAIR_OPERATION_VERSION;
      PROFILER_BEGIN("put into storage");
      rc = storage_mgr->put(bucket_number, mkey, value, version_care, expire_time);
      PROFILER_END();

      if (rc == TAIR_RETURN_SUCCESS ) {
         key.data_meta = mkey.data_meta;
         if (op_flag & TAIR_OPERATION_DUPLICATE) {
            vector<uint64_t> slaves;
            get_slaves(key.server_flag, bucket_number, slaves);
            if (slaves.empty() == false) {
               PROFILER_BEGIN("do duplicate");
               duplicator->duplicate_data(area, &key, &value, bucket_number, slaves);
               PROFILER_END();
            }
         }

         if (migrate_log != NULL && need_do_migrate_log(bucket_number)) {
            PROFILER_BEGIN("do migrate log");
            migrate_log->log(SN_PUT, mkey, value, bucket_number);
            PROFILER_END();
         }

      }
      TAIR_STAT.stat_put(area);

      return rc;
   }
   bool tair_manager::should_write_local(int bucket_number, int server_flag, int op_flag, int &rc)
   {
      if (status != STATUS_CAN_WORK) {
         log_debug("server can not work now...");
         rc = TAIR_RETURN_SERVER_CAN_NOT_WORK;
         return false;
      }

      if (migrate_mgr->is_bucket_available(bucket_number) == false) {
         log_debug("bucket is migrating, request reject");
         rc = TAIR_RETURN_MIGRATE_BUSY;
         return false;
      }

      PROFILER_BEGIN("migrate is done?");
      if ((server_flag == TAIR_SERVERFLAG_CLIENT || server_flag == TAIR_SERVERFLAG_PROXY)
          && migrate_done_set.test(bucket_number)
          && table_mgr->is_master(bucket_number, TAIR_SERVERFLAG_PROXY) == false) {
         rc = TAIR_RETURN_MIGRATE_BUSY;
         PROFILER_END();
         return false;
      }
      PROFILER_END();

      log_debug("bucket number: %d, serverFlag: %d, client const: %d", bucket_number, server_flag, TAIR_SERVERFLAG_CLIENT);
      if ((server_flag == TAIR_SERVERFLAG_CLIENT || server_flag == TAIR_SERVERFLAG_PROXY)
          && table_mgr->is_master(bucket_number, server_flag) == false) {
         log_debug("request rejected...");
         rc = TAIR_RETURN_WRITE_NOT_ON_MASTER;
         return false;
      }

      if (op_flag & TAIR_OPERATION_DUPLICATE) {
         bool is_available = false;
         for (int i=0; i<TAIR_DUPLICATE_BUSY_RETRY_COUNT; ++i) {
            is_available = duplicator->is_bucket_available(bucket_number);
            if (is_available)
               break;

            usleep(1000);
         }

         if (is_available == false) {
            log_debug("bucket is not avaliable, reject request");
            rc = TAIR_RETURN_DUPLICATE_BUSY;
            return false;
         }
      }

      return true;
   }
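All of the Tair routines above follow the same discipline: a profiler scope opened with PROFILER_BEGIN must be closed on every exit path, including the early returns in should_write_local and put. Below is a minimal stand-alone sketch of that pairing, using hypothetical stand-in macros rather than the real tb profiler.

// Stand-in macros for illustration only -- they merely log, while the real
// profiler records timing for the named scope.
#include <cstdio>

#define PROFILER_BEGIN(name) std::printf("enter scope: %s\n", name)
#define PROFILER_END()       std::printf("leave scope\n")

// Mirrors the shape of should_write_local: every early return inside an
// open scope calls PROFILER_END() first so begin/end stay balanced.
static bool guarded_check(bool available)
{
   PROFILER_BEGIN("bucket available?");
   if (!available) {
      PROFILER_END();
      return false;
   }
   PROFILER_END();
   return true;
}

int main()
{
   std::printf("result: %d\n", guarded_check(true));
   return 0;
}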
   int tair_manager::remove(int area, data_entry &key)
   {
      if (key.get_size() >= TAIR_MAX_KEY_SIZE || key.get_size() < 1) {
         return TAIR_RETURN_ITEMSIZE_ERROR;
      }

      if (area < 0 || area >= TAIR_MAX_AREA_COUNT) {
         return TAIR_RETURN_INVALID_ARGUMENT;
      }

      data_entry mkey = key;
      mkey.merge_area(area);

      int bucket_number = get_bucket_number(key);

      int op_flag = get_op_flag(bucket_number, key.server_flag);
      int rc = TAIR_RETURN_SERVER_CAN_NOT_WORK;
      PROFILER_BEGIN("should write local?");
      if (should_write_local(bucket_number, key.server_flag, op_flag, rc) == false) {
         PROFILER_END();
         return rc;
      }
      PROFILER_END();
      bool version_care =  op_flag & TAIR_OPERATION_VERSION;

      PROFILER_BEGIN("remove from storage engine");
      rc = storage_mgr->remove(bucket_number, mkey, version_care);
      PROFILER_END();

      if (rc == TAIR_RETURN_SUCCESS || rc == TAIR_RETURN_DATA_NOT_EXIST) {
         if (op_flag & TAIR_OPERATION_DUPLICATE) {
            vector<uint64_t> slaves;
            get_slaves(key.server_flag, bucket_number, slaves);
            if (slaves.empty() == false) {
               PROFILER_BEGIN("do duplicate");
               duplicator->duplicate_data(area, &key, NULL, bucket_number, slaves);
               PROFILER_END();
            }
         }

         if (migrate_log != NULL && need_do_migrate_log(bucket_number)) {
            PROFILER_BEGIN("do migrate log");
            migrate_log->log(SN_REMOVE, mkey, mkey, bucket_number);
            PROFILER_END();
         }

      }
      TAIR_STAT.stat_remove(area);

      return rc;
   }
   int tair_manager::add_count(int area, data_entry &key, int count, int init_value, int *result_value, int expire_time)
   {
      if (status != STATUS_CAN_WORK) {
         return TAIR_RETURN_SERVER_CAN_NOT_WORK;
      }

      if (key.get_size() >= TAIR_MAX_KEY_SIZE || key.get_size() < 1) {
         return TAIR_RETURN_ITEMSIZE_ERROR;
      }

      if (area < 0 || area >= TAIR_MAX_AREA_COUNT) {
         return TAIR_RETURN_INVALID_ARGUMENT;
      }

      tbsys::CThreadGuard guard(&counter_mutex[get_mutex_index(key)]);
      // get from storage engine
      data_entry old_value;
      PROFILER_BEGIN("get from storage");
      int rc = get(area, key, old_value);
      PROFILER_END();
      log_debug("get result: %d, flag: %d", rc, key.data_meta.flag);
      key.data_meta.log_self();
      if (rc == TAIR_RETURN_SUCCESS && IS_ADDCOUNT_TYPE(key.data_meta.flag)) {
         // old value exist
         int32_t *v = (int32_t *)(old_value.get_data() + ITEM_HEAD_LENGTH);
         log_debug("old count: %d, new count: %d, init value: %d", (*v), count, init_value);
         *v += count;
         *result_value = *v;
      } else if (rc == TAIR_RETURN_SUCCESS) {
         // value exists but is not an add_count value, so return an error
         log_debug("can't override old value");
         return TAIR_RETURN_CANNOT_OVERRIDE;
      } else {
         // old value not exist
         char fv[6]; // 2 + sizeof(int)
         *((short *)fv) = 0x1600; // for java header
         *result_value = init_value + count;
         *((int32_t *)(fv + 2)) = *result_value;
         old_value.set_data(fv, 6);
      }

      old_value.data_meta.flag |= TAIR_ITEM_FLAG_ADDCOUNT;
      log_debug("before put flag: %d", old_value.data_meta.flag);
      PROFILER_BEGIN("save count into storage");
      int result = put(area, key, old_value, expire_time);
      PROFILER_END();
      return result;
   }
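The "old value not exist" branch above packs the counter into a 6-byte value: a 2-byte header (0x1600, apparently kept for the Java client) followed by the 32-bit count. Below is a small stand-alone sketch of that layout, with hypothetical encode/decode helpers that are not part of Tair.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Pack the counter the same way add_count does: 2-byte header, then int32.
static void encode_counter(char buf[6], int32_t count)
{
   const uint16_t java_header = 0x1600;
   std::memcpy(buf, &java_header, sizeof(java_header)); // bytes 0..1: header
   std::memcpy(buf + 2, &count, sizeof(count));         // bytes 2..5: count
}

// Read the count back from byte offset 2.
static int32_t decode_counter(const char buf[6])
{
   int32_t count = 0;
   std::memcpy(&count, buf + 2, sizeof(count));
   return count;
}

int main()
{
   char value[6];
   encode_counter(value, 42);
   std::printf("count = %d\n", decode_counter(value)); // prints: count = 42
   return 0;
}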
 bool bucket_waiting_queue::send(int64_t now)
 {
    bool empty = true;
    tbsys::CThreadGuard guard(&mutex);
    map<uint64_t, packets_queue_type>::iterator it = packets_queue.begin();
    for (it = packets_queue.begin(); it != packets_queue.end(); ++it) {
       packets_queue_type queue = it->second;
       if (queue.empty()) continue;
       empty = false;
       int64_t& last_time = last_send_time[it->first];
       if (now - last_time < MISECONDS_BEFOR_SEND_RETRY) {
          continue;
       }
       request_duplicate* packet = new request_duplicate(*queue.front());
       log_debug("will send packet pid = %d", packet->packet_id);
       PROFILER_START("do duplicate");
       PROFILER_BEGIN("send duplicate packet");
       if (psm->conn_mgr->sendPacket(it->first, packet, NULL, NULL, true) == false) {
          log_debug("send duplicate packet failed: %s", tbsys::CNetUtil::addrToString(it->first).c_str());
          delete packet;
       } else {
          log_debug("duplicate packet sent: %s", tbsys::CNetUtil::addrToString(it->first).c_str());
          last_time = now;
       }
       PROFILER_END();
       PROFILER_DUMP();
       PROFILER_STOP();
    }
    return !empty;
 }
   int tair_manager::get(int area, data_entry &key, data_entry &value)
   {
      if (status != STATUS_CAN_WORK) {
         return TAIR_RETURN_SERVER_CAN_NOT_WORK;
      }

      if (key.get_size() >= TAIR_MAX_KEY_SIZE || key.get_size() < 1) {
         return TAIR_RETURN_ITEMSIZE_ERROR;
      }

      if (area < 0 || area >= TAIR_MAX_AREA_COUNT) {
         return TAIR_RETURN_INVALID_ARGUMENT;
      }

      data_entry mkey = key;
      mkey.merge_area(area);

      int bucket_number = get_bucket_number(key);
      log_debug("get request will server in bucket: %d", bucket_number);
      PROFILER_BEGIN("get from storage engine");
      int rc = storage_mgr->get(bucket_number, mkey, value);
      PROFILER_END();
      key.data_meta = mkey.data_meta;
      TAIR_STAT.stat_get(area, rc);
      return rc;
   }
Example #8
/* only hash / btree / filterhash / fulltext indexes support this query */
struct rowid_list* index_field_equal_query(struct index_field_manager* index_field,struct low_data_struct* data,MEM_POOL* mem_pool)
{
	struct rowid_list* ret = NULL;
	if(index_field == NULL)
	{
		log_warn("此列未创建存储实体");
		return NULL;
	}
	
	if(index_field->index_type != HI_KEY_ALG_HASH && index_field->index_type != HI_KEY_ALG_FULLTEXT)
	{
		log_warn("只有hash btree filterhash  fulltext列才能根据value查询");
		return NULL;
	}

	/* hash */
	PROFILER_BEGIN("hash index query");

	//hash index
	if(index_field->index_type  == HI_KEY_ALG_HASH){

		if(Mile_AtomicGetPtr(index_field->flag) & INDEX_FIELD_COMPRESS)
		{
			PROFILER_BEGIN("compress");
			ret = hash_compress_query(index_field->hash_compress,data,mem_pool);
			PROFILER_END();
		}
		else
		{
			PROFILER_BEGIN("no compress");
			ret = hash_index_query(index_field->hash_index,data,mem_pool);
			PROFILER_END();
		}
	}

	//dyhash index
	if(index_field->index_type == HI_KEY_ALG_FULLTEXT){
		PROFILER_BEGIN("no compress");

		ret = dyhash_index_query(index_field->dyhash_index, data, mem_pool);
		PROFILER_END();
	}


	PROFILER_END();
	return ret;
}
void MoveTestShit(void*args)
{
    PROFILER_BEGIN();
    TestStruct * test = reinterpret_cast<TestStruct*>(args);

    for (int i = 0; i < 100; ++i)
    {
        test->x++;
        test->y++;
    }
    PROFILER_END();
}
void TestJob5(void * args)
{
    PROFILER_BEGIN();
    int a = 0;
    for (int i = 0; i < 1800; ++i)
    {
        a = i + 1;
    }

    a += 20;
    PROFILER_END();
}
Example #11
      int fdb_manager::get(int bucket_number, data_entry & key,
                           data_entry & value, bool with_stat)
      {

        fdb_bucket *bucket = get_bucket(bucket_number);

        if(bucket == NULL) {
          return TAIR_RETURN_FAILED;
        }

        int rc = TAIR_RETURN_SUCCESS;
        PROFILER_BEGIN("get from cache");
        if(memory_cache->get(bucket_number, key, value) == EXIT_SUCCESS) {
          log_debug("value get from mdb, size: %d", value.get_size());
          value.decode_meta(true);
          key.data_meta = value.data_meta;
          log_debug("memcache ing");
          key.data_meta.log_self();
          log_debug("cache hit...");
          PROFILER_END();
          return rc;
        }
        PROFILER_END();

        rc = bucket->get(key, value);
        log_debug("fdb getting");
        key.data_meta.log_self();
        PROFILER_BEGIN("put into cache");
        if(rc == TAIR_RETURN_SUCCESS) {
          data_entry temp_value = value;
          temp_value.merge_meta();
          log_debug("value put into mdb, size: %d", temp_value.get_size());
          memory_cache->put(bucket_number, key, temp_value, false,
                            key.data_meta.edate);
        }
        PROFILER_END();

        return rc;
      }
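fdb_manager::get above is a read-through cache: try the in-memory cache (mdb) first, fall back to the persistent bucket on a miss, then refill the cache so the next lookup is served from memory. Below is a generic sketch of that pattern; the real mdb/fdb interfaces and the meta handling are omitted.

#include <cstdio>
#include <map>
#include <optional>
#include <string>
#include <unordered_map>

struct read_through_store
{
   std::unordered_map<std::string, std::string> cache;   // stands in for the memory cache
   std::map<std::string, std::string> backing;           // stands in for the fdb bucket

   std::optional<std::string> get(const std::string& key)
   {
      auto hit = cache.find(key);
      if (hit != cache.end())
         return hit->second;                 // cache hit
      auto it = backing.find(key);
      if (it == backing.end())
         return std::nullopt;                // not found anywhere
      cache[key] = it->second;               // refill the cache on a backing-store hit
      return it->second;
   }
};

int main()
{
   read_through_store store;
   store.backing["key"] = "value";
   auto v = store.get("key");                // first get fills the cache
   std::printf("%s\n", v ? v->c_str() : "miss");
   return 0;
}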
Example #12
      int fdb_manager::remove(int bucket_number, data_entry & key,
                              bool version_care)
      {

        fdb_bucket *bucket = get_bucket(bucket_number);

        if(bucket == NULL) {
          return TAIR_RETURN_FAILED;
        }

        int rc = TAIR_RETURN_FAILED;
        PROFILER_BEGIN("remove from cache");
        if(memory_cache->remove(bucket_number, key, false) == EXIT_FAILURE) {
          log_error("clear cache failed...");
          PROFILER_END();
          return rc;
        }
        PROFILER_END();

        rc = bucket->remove(key, version_care);
        return rc;
      }
   bool duplicate_sender_manager::is_bucket_available(uint32_t bucket_number)
   {
      PROFILER_BEGIN("acquire lock");
      while(packages_mgr_mutex.rdlock()) {
         usleep(10);
      }
      PROFILER_END();

      map<uint32_t, bucket_waiting_queue>::iterator it = packets_mgr.find(bucket_number);
      bool res = (it == packets_mgr.end() || it->second.size() <= max_queue_size);
      packages_mgr_mutex.unlock();
      return res;

   }
Example #14
struct low_data_struct* index_field_value_query(struct index_field_manager* index_field, uint32_t docid, MEM_POOL* mem_pool)
{
	struct low_data_struct* ret = NULL;
	if(index_field == NULL)
	{
		struct low_data_struct* data = (struct low_data_struct*)mem_pool_malloc(mem_pool,sizeof(struct low_data_struct));
		memset(data,0,sizeof(struct low_data_struct));
		log_warn("此列未创建存储实体");
		return data;
	}

	if(index_field->index_type != HI_KEY_ALG_FILTER )
	{
		log_warn("只有hash btree filterhash列才能根据value查询");
		return NULL;
	}

	PROFILER_BEGIN("filter index query");

	if(Mile_AtomicGetPtr(index_field->flag) & INDEX_FIELD_COMPRESS)
	{
		PROFILER_BEGIN("compress");
		ret = filter_compress_query(index_field->filter_compress,docid,mem_pool);
		PROFILER_END();
	}
	else
	{
		PROFILER_BEGIN("no compress");
		ret = filter_index_query(index_field->filter_index,docid,mem_pool);
		PROFILER_END();
	}

	PROFILER_END();

	return ret;
}
Example #15
void ZenFWRenderer::Run()
{
	PROFILER_START(ZenFWRenderer::Run());
	// VRAM upload
	VRAMService->ProcessDatas();

	//mDefaultSkyColor
	if (mRenderProfile == RENDER_PROFILE_NETBOOK)
		GDD->Clear(	CLEAR_COLOR|CLEAR_Z|CLEAR_STENCIL, gAtmosphere->GetDefaultSkyColor()->ConvToRGBA());
	else
		GDD->Clear(	/*CLEAR_COLOR|*/CLEAR_Z|CLEAR_STENCIL);

	ZCamera *pCam = GetActiveCamera();
	GDD->SetProjectionMatrix(pCam->GetProjectionMatrix());
	GDD->SetViewMatrix(pCam->GetTransform()->GetWorldMatrix());
	if (GDD->BeginScene())
	{

		
		GDD->GetQueueToRasterize()->Rasterize(mRenderProfile);

		


		static bool bPreviousMouseButDown = false;

		int X, Y;
		GetInputDevice()->MouseXY(&X, &Y);
		bool bCurrentMouseButDown = GetInputDevice()->MouseButDown(1);
		InjectMouseMove(X, Y);
		if ( bCurrentMouseButDown && (!bPreviousMouseButDown))
			InjectMouseButtons(0, true);
		if ( (! bCurrentMouseButDown) && bPreviousMouseButDown)
			InjectMouseButtons(0, false);

		bPreviousMouseButDown = bCurrentMouseButDown;

		if (mProtoGui)
			mProtoGui->Tick();

		TickCEGui(gTimer.GetEllapsed());
	}
	if (mSoundRenderer)
		mSoundRenderer->Tick();

	PROFILER_END();
}
void MainGame::tick( float dTime )
{
    he::ge::Game::tick(dTime);

    PROFILER_BEGIN("Fps graph");
    m_FPSGraph->tick(dTime);
    PROFILER_END();

    if (m_RestartTimer > 0.0f)
    {
        m_RestartTimer -= dTime;
        if (m_RestartTimer <= 0.0f)
        {
            restart(false);
        }
    }
}
Example #17
void TickCEGui(float Ellapsed)
{
	if (!GBCEGUIInited)
		return;

	PROFILER_START(CEGuiTick);
	extern int GGUIStackCount;
	if (GGUIStackCount)
	{
		CEGUI::System::getSingleton().injectTimePulse(Ellapsed);
		CEGUI::System::getSingleton().renderGUI();
	}



	//
	PROFILER_END();
}
void NestedJob(void * args)
{
    PROFILER_BEGIN();
    int a = 0;
    for (int i = 0; i < 800; ++i)
    {
        a = i + 1;
    }

    a += 20;

    DECLARE_JOBS(TestJob5, nullptr, 10);
    JobCounter temp = ADD_JOB_ARRAY(TestJob5, 10);
    WAIT_FOR_COUNTER(temp, 10);



    PROFILER_END();


}
Example #19
void ZenFWGame::Run()
{
	PROFILER_START(ZenFWGame::Run);

	if (GetInputDevice())
		GetInputDevice()->Tick();

	if (mServerGame.ptr())
	{
		mServerGame->Update(gTimer.GetEllapsed());
	}

	if (mClientGame.ptr())
	{
		mClientGame->Update(gTimer.GetEllapsed());
	}

	gCameraMotion.Tick();


	PROFILER_END();
}
Example #20
      int fdb_manager::put(int bucket_number, data_entry & key,
                           data_entry & value, bool version_care,
                           int expire_time)
      {
        fdb_bucket *bucket = get_bucket(bucket_number);
        if(bucket == NULL) {
          // bucket not exist
          log_debug("fdbBucket[%d] not exist", bucket_number);
          return TAIR_RETURN_FAILED;
        }

        int rc = TAIR_RETURN_FAILED;
        PROFILER_BEGIN("remove from cache");
        if(memory_cache->remove(bucket_number, key, false) == EXIT_FAILURE) {
          log_error("clear cache failed...");
          PROFILER_END();
          return rc;
        }
        PROFILER_END();
        log_debug("fdb put");
        key.data_meta.log_self();
        rc = bucket->put(key, value, version_care, expire_time);
        return rc;
      }
   bool tair_server::handlePacketQueue(tbnet::Packet *apacket, void *args)
   {
      base_packet *packet = (base_packet*)apacket;
      int pcode = packet->getPCode();

      bool send_return = true;
      int ret = TAIR_RETURN_SUCCESS;
      const char *msg = "";
      char buf[100];
      sprintf(buf, "pcode is %d, ip is %u", pcode, (uint32_t)(packet->get_connection()->getServerId() & 0xffffffff));
      PROFILER_START("process request");
      PROFILER_BEGIN(buf);
      switch (pcode) {
		  case TAIR_REQ_TTL_PACKET:
		  {
		      request_ttl *npacket = (request_ttl *)packet;
			  ret = req_processor->process(npacket, send_return);
			  break;
		  }
		  case TAIR_REQ_TYPE_PACKET:
		  {
			  request_type *npacket = (request_type *)packet;
			  ret = req_processor->process(npacket, send_return);
			  break;
		  }
		  case TAIR_REQ_EXISTS_PACKET:
		  {
			  request_exists *npacket = (request_exists *)packet;
			  ret = req_processor->process(npacket, send_return);
			  break;
		  }
		  case TAIR_REQ_PUT_PACKET:
		  {
			  request_put *npacket = (request_put *)packet;
			  ret = req_processor->process (npacket, send_return);
			  break;
		  }
          case TAIR_REQ_PUTNX_PACKET:
          {
              request_putnx *npacket = (request_putnx *)packet;
              ret = req_processor->process (npacket, send_return);
              break;
          }
		  case TAIR_REQ_GET_PACKET:
		  {
			  request_get *npacket = (request_get *) packet;
			  ret = req_processor->process (npacket, send_return);
			  send_return = false;
			  break;
		  }
		  case TAIR_REQ_GETSET_PACKET:
		  {
			  request_getset *npacket = (request_getset *)packet;
			  ret = req_processor->process(npacket, send_return);
			  break;
		  }
		  case TAIR_REQ_REMOVE_PACKET:
		  {
			  request_remove *npacket = (request_remove *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end remove,prepare to send return packet");
			  break;
		  }
		  case TAIR_REQ_LINDEX_PACKET:
		  {
			  request_lindex *npacket = (request_lindex *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end lindex, prepare to send return packet");
			  break;
		  }
		  case TAIR_REQ_LPOP_PACKET:
		  case TAIR_REQ_RPOP_PACKET:
		  {
			  request_lrpop *npacket = (request_lrpop *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end lrpop, prepare to send return packet");
			  break;
		  }
		  case TAIR_REQ_LPUSH_PACKET:
		  case TAIR_REQ_RPUSH_PACKET:
		  case TAIR_REQ_LPUSHX_PACKET:
		  case TAIR_REQ_RPUSHX_PACKET:
		  {
			  request_lrpush *npacket = (request_lrpush *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end lrpush, prepare to send return packet");
			  break;
		  }
		  case TAIR_REQ_LPUSH_LIMIT_PACKET:
		  case TAIR_REQ_RPUSH_LIMIT_PACKET:
		  case TAIR_REQ_LPUSHX_LIMIT_PACKET:
		  case TAIR_REQ_RPUSHX_LIMIT_PACKET:
		  {
			  request_lrpush_limit *npacket = (request_lrpush_limit *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end lrpush limit, prepare to send return packet");
			  break;
		  }
          case TAIR_REQ_HEXISTS_PACKET:
          {
              request_hexists *npacket = (request_hexists *) packet;
              ret = req_processor->process (npacket, send_return);
			  log_debug ("end hexists, prepare to send return packet");

			  break;
          }
		  case TAIR_REQ_HGETALL_PACKET:
		  {
			  request_hgetall *npacket = (request_hgetall *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end hgetall, prepare to send return packet");

			  break;
		  }
          case TAIR_REQ_HKEYS_PACKET:
          {
              request_hkeys *npacket = (request_hkeys *) packet;
              ret = req_processor->process (npacket, send_return);
              log_debug ("end hkeys, prepare to send return packet");

              break;
          }
		  case TAIR_REQ_HINCRBY_PACKET:
		  {
			  request_hincrby *npacket = (request_hincrby *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end hincrby, prepare to send return packet");

			  break;
		  }
		  case TAIR_REQ_HMSET_PACKET:
		  {
			  request_hmset *npacket = (request_hmset *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end hmset, prepare to send return packet");

			  break;
		  }
		  case TAIR_REQ_HSET_PACKET:
		  {
			  request_hset *npacket = (request_hset *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end hset, prepare to send return packet");
			  break;
		  }
	  	  case TAIR_REQ_HSETNX_PACKET:
		  {
			  request_hsetnx *npacket = (request_hsetnx *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end hsetnx, prepare to send return packet");
			  break;
		  }
	      case TAIR_REQ_HGET_PACKET:
		  {
			  request_hget *npacket = (request_hget *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end hget, prepare to send return packet");

			  break;
		  }
		  case TAIR_REQ_HMGET_PACKET:
		  {
			  request_hmget *npacket = (request_hmget *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end hmget, prepare to send return packet");

			  break;
		  }
		  case TAIR_REQ_HVALS_PACKET:
		  {
			  request_hvals *npacket = (request_hvals *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end hvals, prepare to send return packet");

			  break;
		  }
		  case TAIR_REQ_HDEL_PACKET:
		  {
			  request_hdel *npacket = (request_hdel *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end hdel, prepare to send return packet");

			  break;
		  }
		  case TAIR_REQ_HLEN_PACKET:
		  {
			  request_hlen *npacket = (request_hlen *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end llen, prepare to sen return packet");

			  break;
		  }
		  case TAIR_REQ_LTRIM_PACKET:
		  {
			  request_ltrim *npacket = (request_ltrim *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end ltrim, prepare to send return packet");

			  break;
		  }
		  case TAIR_REQ_LREM_PACKET:
		  {
			  request_lrem *npacket = (request_lrem *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end lrem, prepare to send return packet");

			  break;
		  }
		  case TAIR_REQ_LLEN_PACKET:
		  {
			  request_llen *npacket = (request_llen *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end llen, prepare to sen return packet");

			  break;
		  }
		  case TAIR_REQ_LRANGE_PACKET:
		  {
			  request_lrange *npacket = (request_lrange *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end lrange, prepare to send return packet");

			  break;
		  }
		  case TAIR_REQ_SCARD_PACKET:
		  {
			  request_scard *npacket = (request_scard *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end scard, prepare to send return packet");

			  break;
		  }
		  case TAIR_REQ_SMEMBERS_PACKET:
		  {
			  request_smembers *npacket = (request_smembers *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end smembers, prepare to send return packet");

			  break;
		  }
		  case TAIR_REQ_SADD_PACKET:
		  {
			  request_sadd *npacket = (request_sadd *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end sadd, prepare to send return packet");

			  break;
		  }
		  case TAIR_REQ_SREM_PACKET:
		  {
			  request_srem *npacket = (request_srem *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end srem, prepare to send return packet");

			  break;
		  }
		  case TAIR_REQ_SPOP_PACKET:
		  {
			  request_spop *npacket = (request_spop *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end lrpush, prepare to send return packet");

			  break;
		  }
          case TAIR_REQ_SADDMULTI_PACKET:
          {
              request_sadd_multi *npacket = (request_sadd_multi *) packet;
              ret = req_processor->process (npacket, send_return);
              log_debug ("end request sadd multi, prepare to send return packet");

              break;
          }
          case TAIR_REQ_SREMMULTI_PACKET:
          {
              request_srem_multi *npacket = (request_srem_multi *) packet;
              ret = req_processor->process (npacket, send_return);
              log_debug ("end request srem multi, prepare to send return packet");

              break;
          }
          case TAIR_REQ_SMEMBERSMULTI_PACKET:
          {
              request_smembers_multi *npacket = (request_smembers_multi *) packet;
              ret = req_processor->process (npacket, send_return);
              log_debug ("end request smembers multi, prepare to send retrun packet");

              break;
          }
		  case TAIR_REQ_ZSCORE_PACKET:
		  {
			  request_zscore *npacket = (request_zscore *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end zscore, prepare to send return packet");

			  break;
		  }
		  case TAIR_REQ_ZRANGE_PACKET:
		  {
			  request_zrange *npacket = (request_zrange *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end zrange, prepare to send return packet");

			  break;
		  }
		  case TAIR_REQ_ZREVRANGE_PACKET:
		  {
			  request_zrevrange *npacket = (request_zrevrange *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end zrevrange, prepare to send return packet");

			  break;
		  }
		  case TAIR_REQ_ZRANGEBYSCORE_PACKET:
		  {
			  request_zrangebyscore *npacket = (request_zrangebyscore *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end zrangebyscore, prepare to send return packet");

			  break;
		  }
          case TAIR_REQ_GENERIC_ZRANGEBYSCORE_PACKET:
          {
              request_generic_zrangebyscore *npacket = (request_generic_zrangebyscore *)packet;
              ret = req_processor->process (npacket, send_return);
			  log_debug ("end generic zrangebyscore, prepare to send return packet, send_return=%d",send_return);

			  break;
          }
		  case TAIR_REQ_ZADD_PACKET:
		  {
			  request_zadd *npacket = (request_zadd *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end zadd, prepare to send return packet, send_return=%d", send_return);

			  break;
		  }
		  case TAIR_REQ_ZRANK_PACKET:
		  {
			  request_zrank *npacket = (request_zrank *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end zrank, prepare to send return packet");
			  break;
		  }
		  case TAIR_REQ_ZREVRANK_PACKET:
		  {
			  request_zrevrank *npacket = (request_zrevrank *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end zrevrank, prepare to send return packet, send_return=%d",send_return);
			  break;
		  }
		  case TAIR_REQ_ZCOUNT_PACKET:
		  {
			  request_zcount *npacket = (request_zcount *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end zcount, prepare to send return packet");
			  break;
		  }
		  case TAIR_REQ_ZINCRBY_PACKET:
		  {
			  request_zincrby *npacket = (request_zincrby *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end zincrby, prepare to send return packet");
			  break;
		  }
		  case TAIR_REQ_ZCARD_PACKET:
		  {
			  request_zcard *npacket = (request_zcard *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end zcard, prepare to send return packet");

			  break;
		  }
		  case TAIR_REQ_ZREM_PACKET:
		  {
			  request_zrem *npacket = (request_zrem *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end zrem, prepare to send return packet");

			  break;
		  }
		  case TAIR_REQ_ZREMRANGEBYRANK_PACKET:
		  {
			  request_zremrangebyrank *npacket = (request_zremrangebyrank *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end zremrangebyrank, prepare to send return packet");

			  break;
		  }
		  case TAIR_REQ_ZREMRANGEBYSCORE_PACKET:
		  {
			  request_zremrangebyscore *npacket = (request_zremrangebyscore *) packet;
			  ret = req_processor->process (npacket, send_return);
		      log_debug ("end zremrangebyscore, prepare to send return packet");

			  break;
		  }
		  case TAIR_REQ_EXPIRE_PACKET:
		  {
			  request_expire *npacket = (request_expire *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end expire, prepare to send return packet");

			  break;
		  }
		  case TAIR_REQ_EXPIREAT_PACKET:
		  {
			  request_expireat *npacket = (request_expireat *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end expireat, prepare to send return packet");

			  break;
		  }
		  case TAIR_REQ_PERSIST_PACKET:
		  {
			  request_persist *npacket = (request_persist *) packet;
			  ret = req_processor->process (npacket, send_return);
			  log_debug ("end persist, prepare to send return packet");

			  break;
		  }
          case TAIR_REQ_INFO_PACKET:
          {
              request_info *npacket = (request_info *) packet;
              ret = req_processor->process (npacket, send_return);
              log_debug ("end info, prepare to send return packet");

              break;
          }
          case TAIR_REQ_LAZY_REMOVE_AREA_PACKET:
          {
              request_lazy_remove_area *npacket = (request_lazy_remove_area *)packet;
              ret = req_processor->process (npacket, send_return);
			  log_debug ("end lazy remove area, prepare to send return packet");

			  break;
          }
          case TAIR_REQ_DUMP_AREA_PACKET:
          {
              request_dump_area *npacket = (request_dump_area *)packet;
              ret = req_processor->process (npacket, send_return);
			  log_debug ("end dump area, prepare to send return packet");

			  break;
          }
          case TAIR_REQ_LOAD_AREA_PACKET:
          {
              request_load_area *npacket = (request_load_area *)packet;
              ret = req_processor->process (npacket, send_return);
			  log_debug ("end dump area, prepare to send return packet");

			  break;
          }
          case TAIR_REQ_ADD_FILTER_PACKET:
          {
              request_addfilter *npacket = (request_addfilter *)packet;
              ret = req_processor->process (npacket, send_return);
              log_debug ("end add filter area, prepare to send return packet");

              break;
          }
          case TAIR_REQ_REMOVE_FILTER_PACKET:
          {
              request_removefilter *npacket = (request_removefilter *)packet;
              ret = req_processor->process (npacket, send_return);
              log_debug ("end add filter area, prepare to send return packet");

              break;
          }
		 case TAIR_REQ_REMOVE_AREA_PACKET:
         {
            request_remove_area *npacket = (request_remove_area*)packet;
            if (npacket->get_direction() == DIRECTION_RECEIVE) {
               async_task_queue_thread.push(new request_remove_area(*npacket));
            } else {
               if (tair_mgr->clear(npacket->area) == false) {
                  ret = EXIT_FAILURE;
               }
            }
            break;
         }
         case TAIR_REQ_PING_PACKET:
         {
            ret = ((request_ping*)packet)->value;
            break;
         }
         case TAIR_REQ_DUMP_PACKET:
         {
            request_dump *npacket = (request_dump*)packet;
            if (npacket->get_direction() == DIRECTION_RECEIVE) {
               async_task_queue_thread.push(new request_dump(*npacket));
            } else {
               tair_mgr->do_dump(npacket->info_set);
            }
            break;
         }
         case TAIR_REQ_DUMP_BUCKET_PACKET:
         {
            ret = EXIT_FAILURE;
            break;
         }
         case TAIR_REQ_INCDEC_PACKET:
         {
            request_inc_dec *npacket = (request_inc_dec*)packet;
            ret = req_processor->process(npacket, send_return);
            break;
         }
         case TAIR_REQ_DUPLICATE_PACKET:
         {
            request_duplicate *dpacket = (request_duplicate *)packet;
            ret = req_processor->process(dpacket, send_return);
            if (ret == TAIR_RETURN_SUCCESS) send_return = false;
            break;
         }
         case TAIR_REQ_MUPDATE_PACKET:
         {
            request_mupdate *mpacket = (request_mupdate *)(packet);
            ret = req_processor->process(mpacket, send_return);
            break;
         }
         default:
         {
            ret = EXIT_FAILURE;
            log_warn("unknow packet, pcode: %d", pcode);
         }
      }
      PROFILER_END();
      PROFILER_DUMP();
      PROFILER_STOP();

      if (ret == TAIR_RETURN_PROXYED) {
         // request is proxyed
         return false;
      }

      if (send_return && packet->get_direction() == DIRECTION_RECEIVE) {
         log_debug("send return packet, return code: %d", ret);
         tair_packet_factory::set_return_packet(packet, ret, msg, heartbeat.get_client_version());
      }

      if ((TBSYS_LOG_LEVEL_DEBUG<=TBSYS_LOGGER._level)) {
         int64_t now = tbsys::CTimeUtil::getTime();
         if (packet->get_direction() == DIRECTION_RECEIVE && now-packet->request_time>100000LL) {
            log_warn("Slow, pcode: %d, %ld us", pcode, now-packet->request_time);
         }
      }

      return true;
   }
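Every case in the switch above repeats one pattern: downcast the generic base_packet to its concrete request type according to the pcode, then hand it to the request processor. Below is a hypothetical miniature of that dispatch, unrelated to the real Tair packet types.

#include <cstdio>

struct base_packet { int pcode; };
struct request_put : base_packet { int dummy; };
struct request_get : base_packet { int dummy; };

static int process(request_put*) { std::printf("put processed\n"); return 0; }
static int process(request_get*) { std::printf("get processed\n"); return 0; }

// The pcode tells us which concrete type the packet really is, so each case
// downcasts accordingly before forwarding to the matching handler.
static int dispatch(base_packet* packet)
{
   switch (packet->pcode) {
      case 1:  return process(static_cast<request_put*>(packet));
      case 2:  return process(static_cast<request_get*>(packet));
      default: std::printf("unknown packet, pcode: %d\n", packet->pcode);
               return -1;
   }
}

int main()
{
   request_get g;
   g.pcode = 2;
   return dispatch(&g);
}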
Example #22
void profiler_end()
{
  PROFILER_END();
}
Example #23
int PreprocessOccluders(const tmatrix &invCameraMat)
{
	if (!GetInstancesCount(ZOccluderBox))
		return 0;

	PROFILER_START(PreprocessOccluders);

	tvector3 campos = invCameraMat.V4.position;
	tvector3 camdir = invCameraMat.V4.dir;

	FActiveOccluder* pActiveOccluder = &gActiveOccluders[0]; 
    tvector4 viewPoint = vector4(campos.x, campos.y, campos.z, 0);
    tvector4 viewDir = vector4(camdir.x, camdir.y, camdir.z, 0);
	float sqrFar = 1000.0f * 1000.0f;
	float sqrDist;
	int i;

	gNbActiveOccluders = 0;
	gOccluderBoxes.clear();

	ZOccluderBox *pocc = (ZOccluderBox*)FirstInstanceOf(ZOccluderBox);
	while (pocc)
	{
		addDynamicOccluder(pocc->GetTransform());

		pocc = (ZOccluderBox*)NI(pocc);
	}

    
    for (unsigned int ju = 0;ju<gOccluderBoxes.size(); ju++)
	{
		// todo : frustum culling of the  occluder (except far plane)
		// todo : compute solid angle to reorder occluder accordingly
		FOccluderBox *obox = &gOccluderBoxes[ju];

         //= oboxiter.Get();
		// check for far plane
		const tvector3 &oboxcenter = obox->mCenter;//pocc->GetTransform()->GetWorldMatrix().position;
		sqrDist= SquaredDistance(viewPoint, oboxcenter);
		if (sqrDist > sqrFar)
		{
			continue;
		}

		// check for near plane
		if (DotProduct(tvector3(viewDir), tvector3(oboxcenter - viewPoint)) < 0.0f)
		{
			continue;
		}

		// select planes of the occluder box that lies on the viewing direction
		// todo : reduce to 3 planes instead of 6 (due to box symmetry)
		float invSqrDist = 1.0f/sqrDist;//Rcp(sqrDist);

		pActiveOccluder->mSolidAngle = 0.0f;

		BoxSilhouette	silhouette;

		silhouette.vertices = &obox->mVertex[0];

		for (i=0; i<6; i++)
		{
			tvector4 dir= obox->mVertex[FaceVertexIndex[i][0]];
			dir -= viewPoint;
			float vdotp = silhouette.dots[i] = DotProduct(obox->mPlanes[i], dir);

			// compute the maximum solidAngle of the box : -area * v.p/d.d
			pActiveOccluder->mSolidAngle = Max(-obox->mVertex[i].w * vdotp * invSqrDist, pActiveOccluder->mSolidAngle);

		}

		// exit if the occluder is not relevant enough
		if (pActiveOccluder->mSolidAngle < gMinSolidAngle)
			continue;


		int	  nPlanes = 0;
		tvector4*	pPlanes = &pActiveOccluder->mPlanes[0];

		// find silhouette
		tvector4		vertices[12];
        int			nVertices = silhouette.findSilhouette(vertices);

		// create a plane with a edge of the occluder and the viewpoint
        
		for (i=0; i<nVertices; i+=2)
		{
            //tplane plan(campos, vertices[i], vertices[i+1]);
            
			tvector3	v1 = vertices[i];
			v1 -= viewPoint;
			tvector3	v2 = vertices[i+1];
			v2 -= viewPoint;

            v1.Normalize();
            v2.Normalize();
			*pPlanes = CrossProduct(v1, v2);
			pPlanes->Normalize();
			
			pPlanes->w = - DotProduct(*pPlanes, vertices[i]);

			pPlanes++;
			nPlanes ++;

		}
    
		if (gAddNearPlane)
		{
			for (int i=0; i<6; i++)
			{
				if (silhouette.dots[i] < 0.0f)
				{
					pActiveOccluder->mPlanes[nPlanes] = obox->mPlanes[i];
					nPlanes++;
				}
			}
		}
        
		pActiveOccluder->mNbPlanes = nPlanes;

		pActiveOccluder++;
		gNbActiveOccluders++;

		if (gNbActiveOccluders >= gMaxCandidateOccluders)
			break;
		
	}
	

	if (gNbActiveOccluders)
	{
		qsort(gActiveOccluders, gNbActiveOccluders, sizeof(FActiveOccluder), compareOccluder);
		if (gNbActiveOccluders > gMaxActiveOccluders)
			gNbActiveOccluders = gMaxActiveOccluders;
	}

	PROFILER_END();
	return gNbActiveOccluders;
}
Example #24
int32_t index_field_insert(struct index_field_manager* index_field,struct low_data_struct* data,uint32_t docid)
{
	int32_t ret;

	//conditions under which the insert is rejected
	if(index_field == NULL) 
	{
		log_warn("此列未初始化%s",index_field->field_name);
		return ERROR_FIELD_NOT_WORK;
	}
	
	if(index_field->flag != NULL && (Mile_AtomicGetPtr(index_field->flag) & INDEX_FIELD_COMPRESS))
    	return ERROR_INDEX_FIELD_COMPRESSED;

	switch(index_field->index_type)
	{
		case HI_KEY_ALG_FULLTEXT:
			{
				/*fulltext column: insert into the dyhash index*/
				PROFILER_BEGIN("dyhash index insert");
				if((ret = dyhash_index_insert(index_field->dyhash_index,data,docid)) < 0)
				{
					PROFILER_END();
					return ret;
				}
				PROFILER_END();

				return MILE_RETURN_SUCCESS;

			}
		case HI_KEY_ALG_HASH:
			{
				/*hash column: insert into the hash index*/
				PROFILER_BEGIN("hash index insert");
				if((ret = hash_index_insert(index_field->hash_index,data,docid)) < 0)
				{
					PROFILER_END();
					return ret;
				}
				PROFILER_END();

				return MILE_RETURN_SUCCESS;
			}
		case HI_KEY_ALG_BTREE:
			{
				return MILE_RETURN_SUCCESS;
			}
		case HI_KEY_ALG_FILTER:
			{
				//strings need to be hashed before insertion
				if(data->type == HI_TYPE_STRING)
				{
					struct low_data_struct hash_data;
					
					PROFILER_BEGIN("get hash value");
					uint64_t hash_value = get_hash_value(data);
					PROFILER_END();
					
					hash_data.data = &hash_value;
					hash_data.len = get_unit_size(HI_TYPE_LONGLONG);
					hash_data.type = HI_TYPE_LONGLONG;
					hash_data.field_name = data->field_name;

					if(*index_field->max_len < get_unit_size(HI_TYPE_LONGLONG))
					{
						*index_field->max_len = get_unit_size(HI_TYPE_LONGLONG);
						msync(index_field->max_len,sizeof(uint32_t),MS_SYNC);
					}

					PROFILER_BEGIN("filter index insert");
					if((ret = filter_index_insert(index_field->filter_index,&hash_data,docid)) < 0)
					{
						PROFILER_END();
						return ret;
					}
					PROFILER_END();
					
				}
				else
				{
					if(data->len > get_unit_size(HI_TYPE_LONGLONG))
					{
						log_error("数据长度超过8个字节,len:%u",data->len);
						return ERROR_INSERT_FAILDED;
			
					}
					if(*index_field->max_len < data->len)
					{
						*index_field->max_len = data->len;
						msync(index_field->max_len,sizeof(uint32_t),MS_SYNC);
					}
					
					PROFILER_BEGIN("filter index insert");
					if((ret = filter_index_insert(index_field->filter_index,data,docid)) < 0)
					{
						PROFILER_END();
						return ret;
					}
					PROFILER_END();
				}

				return MILE_RETURN_SUCCESS;
			}
		default:
			log_error("该列的索引类型不正确,%d",index_field->index_type);
			return ERROR_NOT_SUPPORT_INDEX;
	}
}
Example #25
//only filter columns can be updated
int32_t index_field_update(struct index_field_manager* index_field,
						 struct low_data_struct* new_data,
						 struct low_data_struct** old_data,
						 uint32_t docid,
						 MEM_POOL* mem_pool)
{
	int32_t ret;
	
	//conditions under which the update is rejected
	if(index_field == NULL)
	{
		log_warn("此列未初始化%s",index_field->field_name);
		return ERROR_FIELD_NOT_WORK;
	}

	if(index_field->flag != NULL && Mile_AtomicGetPtr(index_field->flag) & INDEX_FIELD_COMPRESS)
		return ERROR_INDEX_FIELD_COMPRESSED;

	switch(index_field->index_type)
	{

		case HI_KEY_ALG_FILTER:
		{
			//strings need to be hashed before updating
			if(new_data->type == HI_TYPE_STRING)
			{
				struct low_data_struct hash_data;

				PROFILER_BEGIN("get_hash_value");
				uint64_t hash_value = get_hash_value(new_data);
				PROFILER_END();
				
				hash_data.data = &hash_value;
				hash_data.len = get_unit_size(HI_TYPE_LONGLONG);

				PROFILER_BEGIN("filter index update");
				if((ret = filter_index_update(index_field->filter_index,&hash_data,old_data,docid,mem_pool)) < 0)
				{
					PROFILER_END();
					return ret;
				}
				PROFILER_END();
			}
			else
			{
				PROFILER_BEGIN("filter index update");
				if((ret = filter_index_update(index_field->filter_index,new_data,old_data,docid,mem_pool)) < 0)
				{
					PROFILER_END();
					return ret;
				}
				PROFILER_END();
			}

			return MILE_RETURN_SUCCESS;
		}	
		default:
			log_warn("只有filter列才能update");
			return ERROR_ONLY_FILTER_SUPPORT;
	}
}
Example #26
//Two cases:
//1. Empty value:
//   go straight to bucket number hashmod.
//2. Non-empty value:
//   hash the inserted value and take it modulo hashmod to locate the target bucket. If that
//   bucket already holds the same hash value, insert there directly; otherwise there is a
//   collision and we keep probing the following buckets until we find our own.
int32_t hash_index_insert(struct hash_index_manager* hash_index,struct low_data_struct* data,uint32_t docid)
{
	struct hash_bucket* bucket;
	uint64_t hash_value;
	uint32_t loc;
	uint32_t i;
	uint32_t offset;

	//hash the value once
	PROFILER_BEGIN("get hash value");
	hash_value = get_hash_value(data);
	PROFILER_END();
	
	//check for an empty value
	if(data->len == 0)
	{
		bucket = hash_index->mem_mmaped+hash_index->hashmod;

		//nothing stored at this slot yet, insert through the doclist interface
		if(bucket->hash_value == 0)
		{
			if((offset = doclist_insert(hash_index->doclist,docid,0, hash_index->hashmod)) == 0)
				return ERROR_INSERT_REPEAT;
			
			//note: hash_value must be written after offset, because concurrent queries read the hash value first to check whether it is 0
			Mile_AtomicSetPtr(&bucket->offset, offset);
			Mile_AtomicSetPtr(&bucket->hash_value, hash_value);
		}
		else
		{
			offset = doclist_insert(hash_index->doclist,docid,bucket->offset, hash_index->hashmod);
			Mile_AtomicSetPtr(&bucket->offset, offset);
		}
		return MILE_RETURN_SUCCESS;
	}
	
	
	//take the modulus
	loc = hash_value%hash_index->hashmod;
	i = loc;

	do
	{
		bucket = hash_index->mem_mmaped+i;

		//nothing stored at this slot yet, insert through the doclist interface
		if(bucket->hash_value == 0)
		{
			if((offset = doclist_insert(hash_index->doclist,docid,0, i)) == 0 )
				return ERROR_INSERT_REPEAT;
			//note: hash_value must be written after offset, because concurrent queries read the hash value first to check whether it is 0
			Mile_AtomicSetPtr(&bucket->offset, offset);
			Mile_AtomicSetPtr(&bucket->hash_value, hash_value);
			return MILE_RETURN_SUCCESS;
		}

		//this slot already holds the same hash value
		if(bucket->hash_value == hash_value)
		{
			if((offset = doclist_insert(hash_index->doclist,docid,bucket->offset, i)) == 0)
				return ERROR_INSERT_REPEAT;
			Mile_AtomicSetPtr(&bucket->offset, offset);
			return MILE_RETURN_SUCCESS;
		}
		
		i = (i+1)%hash_index->hashmod;
	}
	while(i!=loc);

	log_error("hash 恶性冲突");
	return ERROR_HASH_CONFLICT;
}
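The do/while loop above is plain open addressing with linear probing: start at hash % hashmod, walk forward on collisions, wrap around, and give up only after visiting every bucket. Below is a stripped-down model of the same loop that stores bare hash values instead of doclists and ignores the atomic-write ordering.

#include <cstdint>
#include <cstdio>

enum { HASHMOD = 8 };
static uint64_t table[HASHMOD];   // 0 means "empty", as in the bucket array above

static int probe_insert(uint64_t hash_value)
{
	uint32_t loc = (uint32_t)(hash_value % HASHMOD);
	uint32_t i = loc;
	do
	{
		if (table[i] == 0 || table[i] == hash_value)
		{
			table[i] = hash_value;    // empty slot or matching value: use this bucket
			return 0;
		}
		i = (i + 1) % HASHMOD;        // collision: probe the next bucket
	}
	while (i != loc);

	return -1;                        // every bucket visited: pathological collision
}

int main()
{
	std::printf("insert: %d\n", probe_insert(42));   // prints: insert: 0
	return 0;
}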
Example #27
//Again two cases:
//1. Empty value: return the whole doclist stored in bucket number hashmod.
//2. Non-empty value: hash the data to locate its bucket. If that bucket is empty the value does
//   not exist; if it holds an equal hash value, return the doclist under it; otherwise keep
//   probing the following buckets. As soon as an empty bucket is found the value cannot exist,
//   so there is no need to probe any further.
struct rowid_list* hash_index_query(struct hash_index_manager* hash_index,struct low_data_struct* data,MEM_POOL* mem_pool)
{
	struct hash_bucket* bucket;
	struct rowid_list* ret;
	uint64_t hash_value;
	uint64_t hash_value_in_hash_info;
	uint32_t loc;
	uint32_t offset;
	uint32_t i;

	//for an empty value, return the contents of bucket number hashmod to the caller
	if(data->len == 0)
	{
		bucket = hash_index->mem_mmaped+hash_index->hashmod;
		
		hash_value_in_hash_info = Mile_AtomicGetPtr(&bucket->hash_value);
		/*if the slot is empty, the value definitely does not exist*/
		if(hash_value_in_hash_info == 0)
		{
			return NULL;
		}

		offset = Mile_AtomicGetPtr(&bucket->offset);
		ret = get_rowid_list(hash_index,NEXT_DOC_ROW_STRUCT(hash_index->doclist, offset),mem_pool);
		return ret;
	}
	
	
	//hash the value once
	PROFILER_BEGIN("get hash value");
	hash_value = get_hash_value(data);
	PROFILER_END();

	//take the modulus
	loc = hash_value%hash_index->hashmod;

	//if the located slot is occupied, keep probing forward
	i=loc;
	do
	{
		bucket = hash_index->mem_mmaped+i;

		hash_value_in_hash_info = Mile_AtomicGetPtr(&bucket->hash_value);
		/*if any probed slot is empty, the value definitely does not exist*/
		if(hash_value_in_hash_info == 0)
		{
			return NULL;
		}
		
		//found the slot with an equal hash value
		if(hash_value_in_hash_info == hash_value)
		{
		   offset = Mile_AtomicGetPtr(&bucket->offset);
		   ret = get_rowid_list(hash_index,NEXT_DOC_ROW_STRUCT(hash_index->doclist, offset),mem_pool);
		   return ret;
		}
		
		i = (i+1)%hash_index->hashmod;
	}
	while(i!=loc);

	log_debug("查询不到这个值");
	return NULL;
}