void StoreQueue::configureInline(pStoreConf configuration) {
    // Constructor defaults are fine if these don't exist
    configuration->getUnsigned("target_write_size", (unsigned long&) targetWriteSize);
    configuration->getUnsigned("max_write_interval", (unsigned long&) maxWriteInterval);

    store->configure(configuration);
}
Example #2
// Configures the store specified by the store configuration. Returns false on failure.
bool scribeHandler::configureStore(pStoreConf store_conf, int *numstores) {
  string category;
  shared_ptr<StoreQueue> pstore;
  vector<string> category_list;
  shared_ptr<StoreQueue> model;
  bool single_category = true;


  // Check if a single category is specified
  if (store_conf->getString("category", category)) {
    category_list.push_back(category);
  }

  // Check if multiple categories are specified
  string categories;
  if (store_conf->getString("categories", categories)) {
    // We want to set up to configure multiple categories, even if there is
    // only one category specified here so that configuration is consistent
    // for the 'categories' keyword.
    single_category = false;

    // Parse category names, separated by whitespace
    stringstream ss(categories);

    while (ss >> category) {
      category_list.push_back(category);
    }
  }
Example #3
void StoreQueue::configureInline(pStoreConf configuration) {
  // Constructor defaults are fine if these don't exist
  configuration->getUnsignedLongLong("target_write_size", targetWriteSize);
  configuration->getUnsigned("max_write_interval",
                            (unsigned long&) maxWriteInterval);
  if (maxWriteInterval == 0) {
    maxWriteInterval = 1;
  }

  string tmp;
  if (configuration->getString("must_succeed", tmp) && tmp == "no") {
    mustSucceed = false;
  }

  store->configure(configuration, pStoreConf());
}
void HdfsSync::configure(pStoreConf configuration) {

  FileStore::configure(configuration);

  // Hdfs Sync doesn't use chunk file
  rollPeriod = ROLL_NEVER;
  chunkSize = 0;

  unsigned long inttemp = 0;
  configuration->getUnsigned("add_newlines", inttemp);
  addNewlines = inttemp ? true : false;

  lastSyncTime = time(NULL);

  configuration->getUnsigned("period_length", periodLength);

  configuration->getString("hdfs_dir", hdfsDir);
  if (configuration->getString("hdfs_base_filename", hdfs_base_filename)) {
    hdfs_base_filename.append("_");
  }
  if (configuration->getString("hdfs_base_directory", hdfs_base_directory)) {
    hdfs_base_directory.append("/");
  }

  // Parse HDFS connection information out of hdfs_dir, which is
  // expected to look like hdfs://host:port/path
  char* hostport = (char*) malloc(hdfsDir.length() + 1);
  char* buf = NULL;
  char* portStr = NULL;
  char* host = NULL;

  if (sscanf(hdfsDir.c_str(), "hdfs://%s", hostport) == 1) {
    host = strtok_r(hostport, ":", &buf);
    portStr = strtok_r(NULL, "/", &buf);
  }

  // Guard against a malformed hdfs_dir before using the tokens
  if (host != NULL && portStr != NULL && buf != NULL &&
      sscanf(portStr, "%d", &hdfsPort) == 1) {
    hdfsHost.append(host);
    hdfsPath.append(buf);
  } else {
    LOG_OPER("[%s] Bad config - hdfs_dir should look like hdfs://host:port/path",
             categoryHandled.c_str());
  }
  free(hostport);
}
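
To make the hdfs_dir parsing above concrete, here is a minimal standalone sketch (not scribe code; the URI is a hypothetical value) that runs the same sscanf/strtok_r sequence and prints the host, port, and path the store would keep:

// Illustration only; "namenode" and the port/path below are made up.
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <string>

int main() {
  std::string hdfsDir = "hdfs://namenode:9000/scribe/logs";

  char* hostport = (char*) malloc(hdfsDir.length() + 1);
  char* buf = NULL;

  sscanf(hdfsDir.c_str(), "hdfs://%s", hostport);   // "namenode:9000/scribe/logs"
  char* host = strtok_r(hostport, ":", &buf);       // "namenode"
  char* portStr = strtok_r(NULL, "/", &buf);        // "9000", buf -> "scribe/logs"

  int port = 0;
  if (portStr != NULL) {
    sscanf(portStr, "%d", &port);
  }

  printf("host=%s port=%d path=%s\n", host, port, buf);
  free(hostport);
  return 0;
}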
Example #5
void CassandraStore::configure(pStoreConf configuration, pStoreConf parent) {
    Store::configure(configuration, parent);
    // Error checking is done on open()
    if (!configuration->getString("remote_host", remoteHost)) {
        LOG_OPER("[%s] Bad Config - remote_host not set", categoryHandled.c_str());
    }

    if (!configuration->getInt("remote_port", remotePort)) {
        remotePort = DEFAULT_CASSANDRA_PORT;
    }

    if (!configuration->getInt("timeout", timeout)) {
        timeout = DEFAULT_SOCKET_TIMEOUT_MS;
    }

    configuration->getBool("category_as_cf_name", categoryAsCfName);

    if (!configuration->getString("keyspace", keyspace)) {
        LOG_OPER("[%s] Bad Config - Keyspace not set", categoryHandled.c_str());
    }

    if (configuration->getString("column_family", columnFamily)
            && categoryAsCfName) {
        LOG_OPER("[%s] Bad Config - category_is_cf_name = 'yes' and column_family set", categoryHandled.c_str());
    }

    string consistencyLevel_;
    if (configuration->getString("consistency_level", consistencyLevel_)) {
        if (0 == consistencyLevel_.compare("one")) {
            consistencyLevel = org::apache::cassandra::ConsistencyLevel::ONE;
        } else if (0 == consistencyLevel_.compare("quorum")) {
            consistencyLevel = org::apache::cassandra::ConsistencyLevel::QUORUM;
        } else if (0 == consistencyLevel_.compare("local_quorum")) {
            consistencyLevel = org::apache::cassandra::ConsistencyLevel::LOCAL_QUORUM;
        } else if (0 == consistencyLevel_.compare("each_quorum")) {
            consistencyLevel = org::apache::cassandra::ConsistencyLevel::EACH_QUORUM;
        } else if (0 == consistencyLevel_.compare("all")) {
            consistencyLevel = org::apache::cassandra::ConsistencyLevel::ALL;
        } else if (0 == consistencyLevel_.compare("any")) {
            consistencyLevel = org::apache::cassandra::ConsistencyLevel::ANY;
        } else {
            LOG_OPER("[%s] [cassandra] unknown Consistency Level <%s> assuming QUORUM", categoryHandled.c_str(), consistencyLevel_.c_str());
            consistencyLevel = org::apache::cassandra::ConsistencyLevel::QUORUM;
        }
    } else {
        LOG_OPER("[%s] [cassandra] consistency_level not set, assuming QUORUM", categoryHandled.c_str());
        consistencyLevel = org::apache::cassandra::ConsistencyLevel::QUORUM;
    }
}
}
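
For reference, a hypothetical store definition exercising the options parsed above might look like the following; the type name and all values are purely illustrative, and column_family is set so category_as_cf_name is left at its default:

<store>
  category=default
  type=cassandra
  remote_host=localhost
  remote_port=9160
  timeout=5000
  keyspace=Scribe
  column_family=LogLine
  consistency_level=quorum
</store>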
Example #6
// Given a single bucket definition, create multiple buckets
void BucketStore::createBucketsFromBucket(pStoreConf configuration,
                                          pStoreConf bucket_conf) {
  string error_msg, bucket_subdir, type, path, failure_bucket;
  bool needs_bucket_subdir = false;
  unsigned long bucket_offset = 0;
  pStoreConf tmp;

  // check for extra bucket definitions
  if (configuration->getStore("bucket0", tmp) ||
      configuration->getStore("bucket1", tmp)) {
    error_msg = "bucket store has too many buckets defined";
    goto handle_error;
  }

  bucket_conf->getString("type", type);
  if (type != "file" && type != "thriftfile") {
    error_msg = "store contained in a bucket store must have a type of ";
    error_msg += "either file or thriftfile if not defined explicitely";
    goto handle_error;
  }

  needs_bucket_subdir = true;
  if (!configuration->getString("bucket_subdir", bucket_subdir)) {
    error_msg =
      "bucketizer containing file stores must have a bucket_subdir";
    goto handle_error;
  }
  if (!bucket_conf->getString("file_path", path)) {
    error_msg =
      "file store contained by bucketizer must have a file_path";
    goto handle_error;
  }

  // set starting bucket number if specified
  configuration->getUnsigned("bucket_offset", bucket_offset);

  // check if failure bucket was given a different name
  configuration->getString("failure_bucket", failure_bucket);

  // We actually create numBuckets + 1 stores. Messages are normally
  // hashed into buckets 1 through numBuckets, and messages that can't
  // be hashed are put in bucket 0.

  for (unsigned int i = 0; i <= numBuckets; ++i) {

    shared_ptr<Store> newstore =
      createStore(type, categoryHandled, false, multiCategory);

    if (!newstore) {
      error_msg = "can't create store of type: ";
      error_msg += type;
      goto handle_error;
    }

    // For file/thrift file buckets, create unique filepath for each bucket
    if (needs_bucket_subdir) {
      if (i == 0 && !failure_bucket.empty()) {
        bucket_conf->setString("file_path", path + '/' + failure_bucket);
      } else {
        // the bucket number is appended to the file path
        unsigned int bucket_id = i + bucket_offset;

        ostringstream oss;
        oss << path << '/' << bucket_subdir << setw(3) << setfill('0')
            << bucket_id;
        bucket_conf->setString("file_path", oss.str());
      }
    }

    buckets.push_back(newstore);
    newstore->configure(bucket_conf);
  }

  return;

handle_error:
  setStatus(error_msg);
  LOG_OPER("[%s] Bad config - %s", categoryHandled.c_str(),
           error_msg.c_str());
  numBuckets = 0;
  buckets.clear();
}
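
The per-bucket file_path naming above can be previewed with a small standalone sketch (illustrative values only) that reproduces the ostringstream formatting for a hypothetical path, subdir, offset, and bucket count:

#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>

int main() {
  std::string path = "/tmp/scribe";      // hypothetical file_path
  std::string bucket_subdir = "bucket";  // hypothetical bucket_subdir
  unsigned long bucket_offset = 0;
  unsigned int numBuckets = 5;

  // Bucket 0 is the failure bucket (unless failure_bucket renames it);
  // buckets 1..numBuckets hold the hashed messages.
  for (unsigned int i = 0; i <= numBuckets; ++i) {
    std::ostringstream oss;
    oss << path << '/' << bucket_subdir << std::setw(3) << std::setfill('0')
        << (i + bucket_offset);
    std::cout << oss.str() << std::endl;  // /tmp/scribe/bucket000 ... bucket005
  }
  return 0;
}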
Example #7
/**
   * Buckets in a bucket store can be defined explicitly or implicitly:
   *
   * #Explicitly
   * <store>
   *   type=bucket
   *   num_buckets=2
   *   bucket_type=key_hash
   *
   *   <bucket0>
   *     ...
   *   </bucket0>
   *
   *   <bucket1>
   *     ...
   *   </bucket1>
   *
   *   <bucket2>
   *     ...
   *   </bucket2>
   * </store>
   *
   * #Implicitly
   * <store>
   *   type=bucket
   *   num_buckets=2
   *   bucket_type=key_hash
   *
   *   <bucket>
   *     ...
   *   </bucket>
   * </store>
   */
void BucketStore::configure(pStoreConf configuration) {

  string error_msg, bucketizer_str, remove_key_str;
  unsigned long delim_long = 0;
  pStoreConf bucket_conf;
  //set this to true for bucket types that have a delimiter
  bool need_delimiter = false;

  configuration->getString("bucket_type", bucketizer_str);

  // Figure out the bucket type from the bucketizer string
  if (0 == bucketizer_str.compare("context_log")) {
    bucketType = context_log;
  } else if (0 == bucketizer_str.compare("random")) {
    bucketType = random;
  } else if (0 == bucketizer_str.compare("key_hash")) {
    bucketType = key_hash;
    need_delimiter = true;
  } else if (0 == bucketizer_str.compare("key_modulo")) {
    bucketType = key_modulo;
    need_delimiter = true;
  } else if (0 == bucketizer_str.compare("key_range")) {
    bucketType = key_range;
    need_delimiter = true;
    configuration->getUnsigned("bucket_range", bucketRange);

    if (bucketRange == 0) {
      LOG_OPER("[%s] config warning - bucket_range is 0",
               categoryHandled.c_str());
    }
  }

  // For bucket types that use a message key (key_hash, key_modulo, key_range),
  // figure out the delimiter and store it
  if (need_delimiter) {
    configuration->getUnsigned("delimiter", delim_long);
    if (delim_long > 255) {
      LOG_OPER("[%s] config warning - delimiter is too large to fit in a char, using default", categoryHandled.c_str());
      delimiter = DEFAULT_BUCKETSTORE_DELIMITER;
    } else if (delim_long == 0) {
      LOG_OPER("[%s] config warning - delimiter is zero, using default", categoryHandled.c_str());
      delimiter = DEFAULT_BUCKETSTORE_DELIMITER;
    } else {
      delimiter = (char)delim_long;
    }
  }

  // Optionally remove the key and delimiter of each message before bucketizing
  configuration->getString("remove_key", remove_key_str);
  if (remove_key_str == "yes") {
    removeKey = true;

    if (bucketType == context_log) {
      error_msg =
        "Bad config - bucketizer store of type context_log does not support remove_key";
      goto handle_error;
    }
  }

  if (!configuration->getUnsigned("num_buckets", numBuckets)) {
    error_msg = "Bad config - bucket store must have num_buckets";
    goto handle_error;
  }

  // Buckets can be defined explicitly or by specifying a single "bucket"
  if (configuration->getStore("bucket", bucket_conf)) {
    createBucketsFromBucket(configuration, bucket_conf);
  } else {
    createBuckets(configuration);
  }

  return;

handle_error:
  setStatus(error_msg);
  LOG_OPER("[%s] %s", categoryHandled.c_str(), error_msg.c_str());
  numBuckets = 0;
  buckets.clear();
}
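
Tying the options together, a hypothetical implicit-bucket configuration covering the keys read above might look like this (values are illustrative; delimiter is the numeric code of the separator character, e.g. 58 for ':'):

<store>
  category=default
  type=bucket
  num_buckets=4
  bucket_type=key_hash
  delimiter=58
  bucket_subdir=bucket

  <bucket>
    type=file
    file_path=/tmp/scribe
  </bucket>
</store>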
Example #8
// Checks for a bucket definition for every bucket from 0 to numBuckets
// and configures each bucket
void BucketStore::createBuckets(pStoreConf configuration) {
  string error_msg, tmp_string;
  pStoreConf tmp;
  unsigned long i;

  if (configuration->getString("bucket_subdir", tmp_string)) {
    error_msg =
      "cannot have bucket_subdir when defining multiple buckets";
      goto handle_error;
  }

  if (configuration->getString("bucket_offset", tmp_string)) {
    error_msg =
      "cannot have bucket_offset when defining multiple buckets";
      goto handle_error;
  }

  if (configuration->getString("failure_bucket", tmp_string)) {
    error_msg =
      "cannot have failure_bucket when defining multiple buckets";
      goto handle_error;
  }

  // Configure stores named 'bucket0', 'bucket1', ... 'bucket{numBuckets}'
  for (i = 0; i <= numBuckets; i++) {
    pStoreConf   bucket_conf;
    string       type, bucket_name;
    stringstream ss;

    ss << "bucket" << i;
    bucket_name = ss.str();

    if (!configuration->getStore(bucket_name, bucket_conf)) {
      error_msg = "could not find bucket definition for " +
	bucket_name;
      goto handle_error;
    }

    if (!bucket_conf->getString("type", type)) {
      error_msg =
        "store contained in a bucket store must have a type";
      goto handle_error;
    }

    shared_ptr<Store> bucket =
      createStore(type, categoryHandled, false, multiCategory);

    buckets.push_back(bucket);
    bucket->configure(bucket_conf);
  }

  // Check if an extra bucket is defined beyond bucket{numBuckets}
  {
    stringstream extra_name;
    extra_name << "bucket" << (numBuckets + 1);
    if (configuration->getStore(extra_name.str(), tmp)) {
      error_msg = "bucket store has too many buckets defined";
      goto handle_error;
    }
  }

  return;

handle_error:
  setStatus(error_msg);
  LOG_OPER("[%s] Bad config - %s", categoryHandled.c_str(),
           error_msg.c_str());
  numBuckets = 0;
  buckets.clear();
}
Example #9
void RedisStore::configure(pStoreConf configuration) {
  // Redis connection settings
  configuration->getString("redis_host", redisHost);
  configuration->getUnsigned("redis_port", redisPort);
}
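
A hypothetical store definition for this store would only need the two keys read above; the type name is assumed for illustration:

<store>
  category=default
  type=redis
  redis_host=127.0.0.1
  redis_port=6379
</store>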