コード例 #1
0
ファイル: ldb_rsync.cpp プロジェクト: 0x3FFFFFFF/tair
// Scan every record of the given buckets in a leveldb instance (opened
// against a specific manifest) and replicate each record to the remote
// cluster via remote_handler.
//
// @param db_path        path of the leveldb database directory
// @param manifest_file  manifest to open the db with (snapshot view)
// @param buckets        bucket ids to iterate, in order
// @param local_handler  if non-NULL, re-read each key from the local cluster
//                       instead of trusting the on-disk value
// @param remote_handler destination cluster; records are put() there
// @param mtime_care     when true, set the mtime-care flag so the remote side
//                       rejects older data (TAIR_RETURN_MTIME_EARLY is then
//                       treated as success)
// @param filter         area filter; records failing filter.ok() are skipped
// @param stat           per-bucket/area statistics accumulator
// @param fail_logger    records that fail to sync are appended here for retry
// @return TAIR_RETURN_FAILED on db-open/iterator failure, otherwise the
//         result of the last processed record (TAIR_RETURN_SUCCESS normally)
int do_rsync(const char* db_path, const char* manifest_file, std::vector<int32_t>& buckets, 
             ClusterHandler* local_handler, ClusterHandler* remote_handler, bool mtime_care,
             DataFilter& filter, DataStat& stat, RecordLogger* fail_logger)
{
  // open db with specified manifest(read only)
  leveldb::DB* db = NULL;
  leveldb::Options open_options;
  open_options.error_if_exists = false; // exist is ok
  open_options.create_if_missing = true; // create if not exist
  open_options.comparator = LdbComparator(NULL); // self-defined comparator
  open_options.env = leveldb::Env::Instance();
  leveldb::Status s = leveldb::DB::Open(open_options, db_path, manifest_file, &db);

  if (!s.ok())
  {
    // fixed typo: "mainfest" -> "manifest"
    log_error("open db with manifest fail: %s", s.ToString().c_str());
    delete open_options.comparator;
    delete open_options.env;
    return TAIR_RETURN_FAILED;
  }

  // get db iterator
  leveldb::ReadOptions scan_options;
  scan_options.verify_checksums = false;
  scan_options.fill_cache = false;
  leveldb::Iterator* db_it = db->NewIterator(scan_options);
  char scan_key[LDB_KEY_META_SIZE];

  bool skip_in_bucket = false;
  bool skip_in_area = false;
  uint32_t start_time = 0;
  int32_t bucket = -1;
  int32_t area = -1;

  LdbKey ldb_key;
  LdbItem ldb_item;
  data_entry* key = NULL;
  data_entry* value = NULL;

  int32_t mtime_care_flag = mtime_care ? TAIR_CLIENT_DATA_MTIME_CARE : 0;
  int ret = TAIR_RETURN_SUCCESS;

  if (db_it == NULL)
  {
    log_error("new db iterator fail.");
    ret = TAIR_RETURN_FAILED;
  }
  else
  {
    for (size_t i = 0; !g_stop && i < buckets.size(); ++i)
    {
      start_time = time(NULL);
      area = -1;
      bucket = buckets[i];

      // seek to bucket
      LdbKey::build_key_meta(scan_key, bucket);

      for (db_it->Seek(leveldb::Slice(scan_key, sizeof(scan_key))); !g_stop && db_it->Valid(); db_it->Next())
      {
        ret = TAIR_RETURN_SUCCESS;
        skip_in_bucket = false;
        skip_in_area = false;

        ldb_key.assign(const_cast<char*>(db_it->key().data()), db_it->key().size());
        ldb_item.assign(const_cast<char*>(db_it->value().data()), db_it->value().size());
        area = LdbKey::decode_area(ldb_key.key());

        // current bucket iterate over
        if (ldb_key.get_bucket_number() != bucket)
        {
          break;
        }

        // skip this data
        if (!filter.ok(area))
        {
          skip_in_bucket = true;
        }
        else
        {
          key = new data_entry(ldb_key.key(), ldb_key.key_size(), false);
          value = NULL;
          key->has_merged = true;
          key->set_prefix_size(ldb_item.prefix_size());

          // re-get from local
          if (local_handler != NULL)
          {
            ret = get_from_local_cluster(*local_handler, *key, value, skip_in_area);
          }
          else
          {
            // trust the on-disk record: copy its value and all meta fields
            value = new data_entry(ldb_item.value(), ldb_item.value_size(), false);
            key->data_meta.cdate = value->data_meta.cdate = ldb_item.cdate();
            key->data_meta.edate = value->data_meta.edate = ldb_item.edate();
            key->data_meta.mdate = value->data_meta.mdate = ldb_item.mdate();
            key->data_meta.version = value->data_meta.version = ldb_item.version();
            key->data_meta.keysize = value->data_meta.keysize = key->get_size();
            key->data_meta.valsize = value->data_meta.valsize = ldb_item.value_size();
            value->data_meta.flag = ldb_item.flag();
          }

          // fix: also require value != NULL — get_from_local_cluster() may
          // report success with no value (e.g. key skipped in area), and the
          // debug log / put() below would dereference NULL
          if (ret == TAIR_RETURN_SUCCESS && value != NULL)
          {
            log_debug("@@ k:%d %s %d %d %u %u %u.v:%s %d %d %u %u %u", key->get_area(), key->get_size() > 6 ? key->get_data()+6 : "", key->get_size(), key->get_prefix_size(),key->data_meta.cdate,key->data_meta.mdate,key->data_meta.edate, value->get_size() > 4 ? value->get_data()+4 : "", value->get_size(), value->data_meta.flag, value->data_meta.cdate, value->data_meta.mdate, value->data_meta.edate);
            // mtime care / skip cache
            key->data_meta.flag = mtime_care_flag | TAIR_CLIENT_PUT_SKIP_CACHE_FLAG;
            key->server_flag = TAIR_SERVERFLAG_RSYNC;
            // sync to remote cluster
            ret = remote_handler->client()->put(key->get_area(), *key, *value, 0, 0, false/* not fill cache */);
            if (ret == TAIR_RETURN_MTIME_EARLY)
            {
              // remote copy is newer — not an error under mtime-care
              ret = TAIR_RETURN_SUCCESS;
            }
          }

          // log failed key
          if (ret != TAIR_RETURN_SUCCESS)
          {
            log_error("fail one: %d", ret);
            FailRecord record(key, remote_handler->info(), ret);
            data_entry entry;
            FailRecord::record_to_entry(record, entry);
            int tmp_ret = fail_logger->add_record(0, TAIR_REMOTE_SYNC_TYPE_PUT, &entry, NULL);
            if (tmp_ret != TAIR_RETURN_SUCCESS)
            {
              log_error("add fail record fail, ret: %d", tmp_ret);
            }
          }
        }

        // update stat
        stat.update(bucket, skip_in_bucket ? -1 : area, // skip in bucket, then no area to update
                    ldb_key.key_size() + ldb_item.value_size(), (skip_in_bucket || skip_in_area), ret == TAIR_RETURN_SUCCESS);

        // cleanup
        if (key != NULL)
        {
          delete key;
          key = NULL;
        }
        if (value != NULL)
        {
          delete value;
          value = NULL;
        }
      }

      // fix: cast the time_t difference to int to match the %d specifier
      // (passing a 64-bit time_t through varargs for %d is undefined behavior)
      log_warn("sync bucket %d over, cost: %d(s), stat:\n",
               bucket, (int)(time(NULL) - start_time));
      // only dump bucket stat
      stat.dump(bucket, -1);
    }
  }

  if (db_it != NULL)
  {
    delete db_it;
  }
  if (db != NULL)
  {
    delete db;
  }
  // the db does not own the comparator/env we supplied; release them here
  delete open_options.comparator;
  delete open_options.env;

  return ret;
}
コード例 #2
0
ファイル: ldb_dump.cpp プロジェクト: solitaire2015/Nair
// Scan every record of the given buckets in a leveldb instance (opened
// read-only against a specific manifest) and append the encoded records to a
// series of dump files, rolling to a new file whenever the current one
// reaches dump_file_max_size. Output is buffered through a 2MB buffer;
// records larger than the buffer are written directly.
//
// @param db_path            path of the leveldb database directory
// @param manifest           manifest to open the db with (snapshot view)
// @param cmp_desc           comparator description for open_db_readonly()
// @param buckets            bucket ids to iterate, in order
// @param filter             area filter; records failing filter.ok() are skipped
// @param stat               per-bucket/area statistics accumulator
// @param dump_file          output file name prefix ("<prefix>.<index>")
// @param dump_file_max_size roll to a new file once this size is exceeded
// @return 0 on success, 1 on any open/write failure
int do_dump(const char* db_path, const char* manifest, const char* cmp_desc,
            const std::vector<int32_t>& buckets, DataFilter& filter, DataStat& stat,
            const char* dump_file, int64_t dump_file_max_size)
{
  // open db
  leveldb::Options open_options;
  leveldb::DB* db = NULL;
  leveldb::Status s = open_db_readonly(db_path, manifest, cmp_desc, open_options, db);
  if (!s.ok())
  {
    fprintf(stderr, "open db fail: %s\n", s.ToString().c_str());
    return 1;
  }

  // get db iterator
  leveldb::ReadOptions scan_options;
  scan_options.verify_checksums = false;
  scan_options.fill_cache = false;
  leveldb::Iterator* db_it = db->NewIterator(scan_options);
  char scan_key[LDB_KEY_META_SIZE];

  int32_t bucket = 0;
  int32_t area = 0;
  LdbKey ldb_key;
  LdbItem ldb_item;
  int32_t size = 0;

  bool skip_in_bucket = false;
  bool skip_in_area = false;

  int dump_fd = -1;
  int32_t dump_file_index = 1;
  int64_t dump_file_size = 0;

  static const int32_t BUF_SIZE = 2 << 20; // 2M
  char* buf = new char[BUF_SIZE];
  int32_t buf_remain = BUF_SIZE;

  int ret = 0;

  for (size_t i = 0; !g_stop && i < buckets.size(); ++i)
  {
    area = -1;
    bucket = buckets[i];
    // seek to bucket
    LdbKey::build_key_meta(scan_key, bucket);

    for (db_it->Seek(leveldb::Slice(scan_key, sizeof(scan_key))); !g_stop && db_it->Valid() && ret == 0; db_it->Next())
    {
      skip_in_bucket = false;
      skip_in_area = false;

      ldb_key.assign(const_cast<char*>(db_it->key().data()), db_it->key().size());
      ldb_item.assign(const_cast<char*>(db_it->value().data()), db_it->value().size());
      area = LdbKey::decode_area(ldb_key.key());

      // current bucket iterate over
      if (ldb_key.get_bucket_number() != bucket)
      {
        break;
      }

      // skip this data
      if (!filter.ok(area))
      {
        skip_in_bucket = true;
      }
      else
      {
        // open new dump file when none is open yet or the current one is full
        if (dump_file_size >= dump_file_max_size || dump_fd < 0)
        {
          // fix: fd 0 is a valid descriptor — use >= 0 (was > 0)
          if (dump_fd >= 0)
          {
            close(dump_fd);
          }

          char name[TAIR_MAX_PATH_LEN];
          snprintf(name, sizeof(name), "%s.%d", dump_file, dump_file_index);
          // open dump file
          dump_fd = open(name, O_RDWR|O_CREAT|O_TRUNC, 0444);
          // fix: open() returns -1 on failure; 0 is a valid fd (was <= 0)
          if (dump_fd < 0)
          {
            fprintf(stderr, "open dump file fail, file: %s, error: %s\n", name, strerror(errno));
            ret = 1;
            break;
          }
          dump_file_size = 0;
          dump_file_index++;
        }

        // appropriate size: key + value + 3 length headers
        size = ldb_key.key_size() + ldb_item.value_size() + 3*sizeof(int32_t);
        if (size < BUF_SIZE)
        {
          // flush the buffer first if the record does not fit
          if (size > buf_remain)
          {
            if (write(dump_fd, buf, BUF_SIZE - buf_remain) != (BUF_SIZE - buf_remain))
            {
              fprintf(stderr, "write file fail: %s\n", strerror(errno));
              ret = 1;
            }
            dump_file_size += (BUF_SIZE - buf_remain);
            buf_remain = BUF_SIZE;
          }

          size = encode_ldb_kv(buf + (BUF_SIZE - buf_remain), ldb_key, ldb_item);
          buf_remain -= size;
        }
        else                    // big data: larger than the buffer, write directly
        {
          char* tmp_buf = new char[size];
          size = encode_ldb_kv(tmp_buf, ldb_key, ldb_item);
          if (write(dump_fd, tmp_buf, size) != size)
          {
            fprintf(stderr, "write file fail: %s\n", strerror(errno));
            ret = 1;
          }
          delete [] tmp_buf;
          dump_file_size += size;
        }
      }

      // update stat
      stat.update(bucket, skip_in_bucket ? -1 : area, // skip in bucket, then no area to update
                  ldb_key.key_size() + ldb_item.value_size(), (skip_in_bucket || skip_in_area), ret == 0);
    }

    if (ret != 0)
    {
      break;
    }

    // only dump bucket stat
    stat.dump(bucket, -1);
  }

  // last data: flush whatever is still buffered
  if (ret == 0 && buf_remain != BUF_SIZE)
  {
    if (write(dump_fd, buf, BUF_SIZE - buf_remain) != (BUF_SIZE - buf_remain))
    {
      fprintf(stderr, "write file fail: %s\n", strerror(errno));
      ret = 1;
    }
  }
  // fix: fd 0 is a valid descriptor — use >= 0 (was > 0)
  if (dump_fd >= 0)
  {
    close(dump_fd);
  }

  // cleanup
  delete [] buf;

  if (db_it != NULL)
  {
    delete db_it;
  }
  if (db != NULL)
  {
    delete db;
    // the db does not own these option objects; release them ourselves
    delete open_options.comparator;
    delete open_options.env;
    delete open_options.info_log;
  }

  stat.dump_all();

  return ret;
}