Code example #1
File: ldb_rsync.cpp  Project: 0x3FFFFFFF/tair
int main(int argc, char* argv[])
{
  int ret = TAIR_RETURN_SUCCESS;
  char* db_path = NULL;
  char* local_cluster_addr = NULL;
  char* remote_cluster_addr = NULL;
  char* manifest_file = NULL;
  char* fail_logger_file = NULL;
  char* buckets = NULL;
  char* yes_areas = NULL;
  char* no_areas = NULL;
  bool mtime_care = true;
  int i = 0;

  while ((i = getopt(argc, argv, "p:f:l:r:e:b:a:A:n")) != EOF)
  {
    switch (i)
    {
    case 'p':
      db_path = optarg;
      break;
    case 'f':
      manifest_file = optarg;
      break;
    case 'l':
      local_cluster_addr = optarg;
      break;
    case 'r':
      remote_cluster_addr = optarg;
      break;
    case 'e':
      fail_logger_file = optarg;
      break;
    case 'b':
      buckets = optarg;
      break;
    case 'a':
      yes_areas = optarg;
      break;
    case 'A':
      no_areas = optarg;
      break;
    case 'n':
      mtime_care = false;
      break;
    default:
      print_help(argv[0]);
      return 1;
    }
  }

  if (db_path == NULL || manifest_file == NULL || remote_cluster_addr == NULL || fail_logger_file == NULL || buckets == NULL)
  {
    print_help(argv[0]);
    return 1;
  }

  // init signals
  signal(SIGINT, sign_handler);
  signal(SIGTERM, sign_handler);

  TBSYS_LOGGER.setLogLevel("warn");

  // init local cluster handler(optional)
  ClusterHandler* local_handler = NULL;
  if (local_cluster_addr != NULL)
  {
    local_handler = new ClusterHandler();
    ret = init_cluster_handler(local_cluster_addr, *local_handler);
    if (ret != TAIR_RETURN_SUCCESS)
    {
      log_error("init local client fail, addr: %d, ret: %d", local_cluster_addr, ret);
      delete local_handler;
      return 1;
    }
  }

  // init remote cluster handler(must)
  ClusterHandler* remote_handler = new ClusterHandler();
  ret = init_cluster_handler(remote_cluster_addr, *remote_handler);
  if (ret != TAIR_RETURN_SUCCESS)
  {
    log_error("init remote client fail, addr: %s, ret: %d", remote_cluster_addr, ret);
    delete remote_handler;
    return 1;
  }

  // init buckets
  std::vector<int32_t> bucket_container;
  std::vector<std::string> bucket_strs;
  tair::util::string_util::split_str(buckets, ", ", bucket_strs);
  for (size_t i = 0; i < bucket_strs.size(); ++i)
  {
    bucket_container.push_back(atoi(bucket_strs[i].c_str()));
  }

  // init fail logger
  RecordLogger* fail_logger = new SequentialFileRecordLogger(fail_logger_file, 30<<20/*30M*/, true/*rotate*/);
  if ((ret = fail_logger->init()) != TAIR_RETURN_SUCCESS)
  {
    log_error("init fail logger fail, ret: %d", ret);
  }
  else
  {
    // init data filter
    DataFilter filter(yes_areas, no_areas);
    // init data stat
    DataStat stat;

    // do data rsync
    uint32_t start_time = time(NULL);
    ret = do_rsync(db_path, manifest_file, bucket_container, local_handler, remote_handler, mtime_care, filter, stat, fail_logger);

    log_warn("rsync data over, stopped: %s, cost: %u(s), stat:", g_stop ? "yes" : "no", time(NULL) - start_time);
    stat.dump_all();
  }

  // cleanup
  delete fail_logger;
  if (local_handler != NULL)
  {
    delete local_handler;
  }
  if (remote_handler != NULL)
  {
    delete remote_handler;
  }

  return ret == TAIR_RETURN_SUCCESS ? 0 : 1;
}
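
The main() above is mostly plumbing: parse command-line options with getopt, split the comma-separated -b bucket list into int32_t values, set up the optional local and mandatory remote ClusterHandler, create the fail logger, and hand everything to do_rsync. For readers unfamiliar with the getopt/split combination, the following stand-alone sketch reproduces just that part of the flow; it substitutes std::stringstream for tair::util::string_util::split_str (whose implementation is not shown here), so treat it as an illustration rather than the project's actual parsing code.

// Minimal sketch: parse "-p <db_path> -b <bucket list>" the way ldb_rsync does.
// std::stringstream stands in for tair::util::string_util::split_str (illustrative only).
#include <unistd.h>
#include <stdint.h>
#include <cstdio>
#include <cstdlib>
#include <string>
#include <sstream>
#include <vector>

int main(int argc, char* argv[])
{
  char* db_path = NULL;
  char* buckets = NULL;

  int i = 0;
  while ((i = getopt(argc, argv, "p:b:")) != -1)
  {
    switch (i)
    {
    case 'p': db_path = optarg; break;
    case 'b': buckets = optarg; break;
    default:
      fprintf(stderr, "usage: %s -p db_path -b bucket,bucket,...\n", argv[0]);
      return 1;
    }
  }
  if (db_path == NULL || buckets == NULL)
  {
    fprintf(stderr, "usage: %s -p db_path -b bucket,bucket,...\n", argv[0]);
    return 1;
  }

  // "1,2,3" -> {1, 2, 3}
  std::vector<int32_t> bucket_container;
  std::stringstream ss(buckets);
  std::string token;
  while (std::getline(ss, token, ','))
  {
    if (!token.empty())
    {
      bucket_container.push_back(atoi(token.c_str()));
    }
  }

  printf("db_path: %s, %zu buckets\n", db_path, bucket_container.size());
  return 0;
}

Built as, say, g++ -o parse_sketch parse_sketch.cpp, running ./parse_sketch -p /data/ldb -b 1,2,3 prints the parsed path and bucket count, mirroring what the real tool feeds into do_rsync.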
Code example #2
File: ldb_dump.cpp  Project: solitaire2015/Nair
int do_dump(const char* db_path, const char* manifest, const char* cmp_desc,
            const std::vector<int32_t>& buckets, DataFilter& filter, DataStat& stat,
            const char* dump_file, int64_t dump_file_max_size)
{
  // open db
  leveldb::Options open_options;
  leveldb::DB* db = NULL;
  leveldb::Status s = open_db_readonly(db_path, manifest, cmp_desc, open_options, db);
  if (!s.ok())
  {
    fprintf(stderr, "open db fail: %s\n", s.ToString().c_str());
    return 1;
  }

  // get db iterator
  leveldb::ReadOptions scan_options;
  scan_options.verify_checksums = false;
  scan_options.fill_cache = false;
  leveldb::Iterator* db_it = db->NewIterator(scan_options);
  char scan_key[LDB_KEY_META_SIZE];

  int32_t bucket = 0;
  int32_t area = 0;
  LdbKey ldb_key;
  LdbItem ldb_item;
  int32_t size = 0;

  bool skip_in_bucket = false;
  bool skip_in_area = false;

  int dump_fd = -1;
  int32_t dump_file_index = 1;
  int64_t dump_file_size = 0;

  static const int32_t BUF_SIZE = 2 << 20; // 2M
  char* buf = new char[BUF_SIZE];
  int32_t buf_remain = BUF_SIZE;

  int ret = 0;

  for (size_t i = 0; !g_stop && i < buckets.size(); ++i)
  {
    area = -1;
    bucket = buckets[i];
    // seek to bucket
    LdbKey::build_key_meta(scan_key, bucket);

    for (db_it->Seek(leveldb::Slice(scan_key, sizeof(scan_key))); !g_stop && db_it->Valid() && ret == 0; db_it->Next())
    {
      skip_in_bucket = false;
      skip_in_area = false;

      ldb_key.assign(const_cast<char*>(db_it->key().data()), db_it->key().size());
      ldb_item.assign(const_cast<char*>(db_it->value().data()), db_it->value().size());
      area = LdbKey::decode_area(ldb_key.key());

      // current bucket iterate over
      if (ldb_key.get_bucket_number() != bucket)
      {
        break;
      }

      // skip this data
      if (!filter.ok(area))
      {
        skip_in_bucket = true;
      }
      else
      {
        // open new dump file
        if (dump_file_size >= dump_file_max_size || dump_fd < 0)
        {
          if (dump_fd > 0)
          {
            close(dump_fd);
          }

          char name[TAIR_MAX_PATH_LEN];
          snprintf(name, sizeof(name), "%s.%d", dump_file, dump_file_index);
          // open dump file
          dump_fd = open(name, O_RDWR|O_CREAT|O_TRUNC, 0444);
          if (dump_fd <= 0)
          {
            fprintf(stderr, "open dump file fail, file: %s, error: %s\n", name, strerror(errno));
            ret = 1;
            break;
          }
          dump_file_size = 0;
          dump_file_index++;
        }

        // appropriate size
        size = ldb_key.key_size() + ldb_item.value_size() + 3*sizeof(int32_t);
        if (size < BUF_SIZE)
        {
          if (size > buf_remain)
          {
            if (write(dump_fd, buf, BUF_SIZE - buf_remain) != (BUF_SIZE - buf_remain))
            {
              fprintf(stderr, "write file fail: %s\n", strerror(errno));
              ret = 1;
            }
            dump_file_size += (BUF_SIZE - buf_remain);
            buf_remain = BUF_SIZE;
          }

          size = encode_ldb_kv(buf + (BUF_SIZE - buf_remain), ldb_key, ldb_item);
          buf_remain -= size;
        }
        else                    // big data
        {
          char* tmp_buf = new char[size];
          size = encode_ldb_kv(tmp_buf, ldb_key, ldb_item);
          if (write(dump_fd, tmp_buf, size) != size)
          {
            fprintf(stderr, "write file fail: %s\n", strerror(errno));
            ret = 1;
          }
          delete [] tmp_buf;
          dump_file_size += size;
        }
      }

      // update stat
      stat.update(bucket, skip_in_bucket ? -1 : area, // skip in bucket, then no area to update
                  ldb_key.key_size() + ldb_item.value_size(), (skip_in_bucket || skip_in_area), ret == 0);
    }

    if (ret != 0)
    {
      break;
    }

    // only dump bucket stat
    stat.dump(bucket, -1);
  }

  // last data
  if (ret == 0 && buf_remain != BUF_SIZE)
  {
    if (write(dump_fd, buf, BUF_SIZE - buf_remain) != (BUF_SIZE - buf_remain))
    {
      fprintf(stderr, "write file fail: %s\n", strerror(errno));
      ret = 1;
    }
  }
  if (dump_fd > 0)
  {
    close(dump_fd);
  }

  // cleanup
  delete [] buf;

  if (db_it != NULL)
  {
    delete db_it;
  }
  if (db != NULL)
  {
    delete db;
    delete open_options.comparator;
    delete open_options.env;
    delete open_options.info_log;
  }

  stat.dump_all();

  return ret;
}
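
do_dump's write path follows a flush-on-overflow buffering pattern: encoded records are staged in a 2 MB buffer, the buffer is written out whenever the next record would not fit, and records larger than the buffer bypass staging via a temporary allocation. The sketch below isolates that pattern; the [int32 length][payload] framing is an illustrative assumption and is not encode_ldb_kv's real layout, and unlike do_dump the sketch flushes staged bytes before an oversized write so records stay in file order.

// Sketch of the flush-on-overflow buffering used by do_dump.
// The [int32 length][payload] framing here is an illustrative assumption.
#include <unistd.h>
#include <fcntl.h>
#include <stdint.h>
#include <cstdio>
#include <cstring>

static const int32_t BUF_SIZE = 2 << 20; // 2M staging buffer, same size as do_dump's
static char g_buf[BUF_SIZE];
static int32_t g_buf_remain = BUF_SIZE;

// write out whatever has been staged so far
static int flush_buf(int fd)
{
  int32_t used = BUF_SIZE - g_buf_remain;
  if (used > 0 && write(fd, g_buf, used) != used)
  {
    fprintf(stderr, "write file fail\n");
    return 1;
  }
  g_buf_remain = BUF_SIZE;
  return 0;
}

// stage one [length][payload] record; oversized records bypass the staging buffer
static int append_record(int fd, const char* data, int32_t data_size)
{
  int32_t size = data_size + (int32_t)sizeof(int32_t);
  if (size >= BUF_SIZE)
  {
    // big data: flush pending bytes first (do_dump writes big records out directly instead)
    if (flush_buf(fd) != 0)
      return 1;
    if (write(fd, &data_size, sizeof(data_size)) != (ssize_t)sizeof(data_size) ||
        write(fd, data, data_size) != data_size)
    {
      fprintf(stderr, "write file fail\n");
      return 1;
    }
    return 0;
  }
  // flush when the record would not fit in the remaining buffer space
  if (size > g_buf_remain && flush_buf(fd) != 0)
  {
    return 1;
  }
  char* pos = g_buf + (BUF_SIZE - g_buf_remain);
  memcpy(pos, &data_size, sizeof(data_size));
  memcpy(pos + sizeof(data_size), data, data_size);
  g_buf_remain -= size;
  return 0;
}

int main()
{
  // hypothetical output file, for illustration only
  int fd = open("dump_sketch.out", O_RDWR | O_CREAT | O_TRUNC, 0644);
  if (fd < 0)
  {
    fprintf(stderr, "open dump file fail\n");
    return 1;
  }
  const char* records[] = { "first record", "second record" };
  for (int i = 0; i < 2; ++i)
  {
    if (append_record(fd, records[i], (int32_t)strlen(records[i])) != 0)
      return 1;
  }
  flush_buf(fd); // last data, as at the end of do_dump
  close(fd);
  return 0;
}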