Example #1
0
// Assumed includes and typedef for this snippet (Caffe + LevelDB); adjust to
// your build.
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <string>

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/math_functions.hpp"
#include "leveldb/db.h"
#include "leveldb/write_batch.h"

using namespace caffe;  // Blob, Datum, caffe_rng_gaussian
using std::cout;
using std::endl;
using std::string;

typedef float Dtype;

int main(int argc, char** argv) {
    if (argc != 7) {
        // str0/str1 are the standard deviations of the two Gaussians
        cout << "usage: " << argv[0] << " db_path nData mean0 str0 mean1 str1" << endl;
        return 0;
    }
    int dim = 2;
    const char* path = argv[1];
    int nData = atoi(argv[2]);
    Dtype mean_0 = atof(argv[3]);
    Dtype str_0 = atof(argv[4]);
    Dtype mean_1 = atof(argv[5]);
    Dtype str_1 = atof(argv[6]);

    // generate data from two 2-d gaussians
    Blob<Dtype>* const blob_data = new Blob<Dtype>(nData, dim, 1, 1);
    caffe_rng_gaussian<Dtype>(nData/2*dim, mean_0, str_0, blob_data->mutable_cpu_data());
    caffe_rng_gaussian<Dtype>(nData/2*dim, mean_1, str_1, blob_data->mutable_cpu_data()+blob_data->offset(nData/2,0,0,0));

    // open leveldb
    leveldb::DB* db;
    leveldb::Options options;
    options.error_if_exists = true;
    options.create_if_missing = true;
    options.write_buffer_size = 268435456;
    leveldb::WriteBatch* batch = NULL;
    leveldb::Status status = leveldb::DB::Open(options, path, &db);
    CHECK(status.ok()) << "Failed to open leveldb " << path
                       << ". Is it already existing?";
    batch = new leveldb::WriteBatch();

    // save to db
    const int kMaxKeyLength = 10;
    char key_cstr[kMaxKeyLength];

    for (int item_id = 0; item_id < nData; ++item_id) {
        Datum datum;
        datum.set_channels(dim);
        datum.set_height(1);
        datum.set_width(1);

        datum.mutable_float_data()->Reserve(dim);
        // copy this sample's dim values into the Datum
        for (int k = 0; k < dim; ++k) {
            datum.add_float_data(blob_data->cpu_data()[blob_data->offset(item_id, k, 0, 0)]);
        }

        if(item_id < nData/2)  datum.set_label(0);
        else datum.set_label(1);

        snprintf(key_cstr, kMaxKeyLength, "%08d", item_id);
        string keystr(key_cstr);
        batch->Put(keystr, datum.SerializeAsString());
    }

    db->Write(leveldb::WriteOptions(), batch);
    delete batch;
    delete db;

    return 0;
}
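
For reference, a minimal read-back sketch under the same assumed headers as above: it iterates the leveldb written by Example #1 and parses each stored Datum (illustrative only, with no extra error handling).

// Sketch: dump the entries written above; assumes the same includes/typedef.
void dump_db(const char* path) {
    leveldb::DB* db;
    leveldb::Options options;
    options.create_if_missing = false;
    leveldb::Status status = leveldb::DB::Open(options, path, &db);
    CHECK(status.ok()) << "Failed to open leveldb " << path;

    leveldb::Iterator* it = db->NewIterator(leveldb::ReadOptions());
    for (it->SeekToFirst(); it->Valid(); it->Next()) {
        Datum datum;
        CHECK(datum.ParseFromString(it->value().ToString()));
        cout << it->key().ToString() << " label=" << datum.label();
        for (int k = 0; k < datum.float_data_size(); ++k) {
            cout << " " << datum.float_data(k);
        }
        cout << endl;
    }
    CHECK(it->status().ok());
    delete it;
    delete db;
}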
Example #2
0
// Note: this snippet assumes the usual Caffe/LevelDB/glog headers plus the
// project constants maximum_length, unknown_symbol and zero_symbol declared
// earlier (a hypothetical set of values is sketched after the function).
void convert_dataset(string phrase) {
  leveldb::DB* db;
  leveldb::Options options;
  options.error_if_exists = true;
  options.create_if_missing = true;
  options.write_buffer_size = 268435456;
  leveldb::WriteBatch* batch = NULL;
  string db_path = "./examples/language_model/lm_" + phrase + "_leveldb";
  leveldb::Status status = leveldb::DB::Open(
      options, db_path, &db);
  CHECK(status.ok()) << "Failed to open leveldb " << db_path;

  batch = new leveldb::WriteBatch();
  const int kMaxKeyLength = 10;
  char key_cstr[kMaxKeyLength];
  string value;
  Datum datum;
  datum.set_channels(2*maximum_length);
  datum.set_height(1);
  datum.set_width(1);

  for (int i=0;i<2*maximum_length;i++)
    datum.add_float_data(0.0);


  string tmp = "./data/language_model/" + phrase + "_indices.txt";
  std::ifstream infile(tmp);
  string s;
  int item_id = 0;
  while (getline(infile, s)) {
    std::vector<float> dt, real_dt;
    std::istringstream iss(s);
    int num;
    while (iss >> num) {
      if (num >= unknown_symbol)
        num = unknown_symbol;
      dt.push_back(num);
    }
    // pad short sentences with the padding symbol up to maximum_length
    if (dt.size() < maximum_length) {
      int l = maximum_length - dt.size();
      for (int i = 0; i < l; i++)
        dt.push_back(zero_symbol);
    }
    // first maximum_length slots: zero_symbol followed by all but the last
    // token (the shifted input); next maximum_length slots: the full sequence
    // (the prediction targets)
    real_dt.push_back(zero_symbol);
    for (int i = 0; i < (int)dt.size() - 1; i++)
      real_dt.push_back(dt[i]);
    for (int i = 0; i < (int)dt.size(); i++)
      real_dt.push_back(dt[i]);
    snprintf(key_cstr, kMaxKeyLength, "%08d", item_id);
    string keystr(key_cstr);
    for (int i=0;i<2*maximum_length;i++)
      datum.set_float_data(i,real_dt[i]);
    datum.SerializeToString(&value);
    batch->Put(keystr, value);
    item_id++;
  }
  db->Write(leveldb::WriteOptions(), batch);
  delete batch;
  delete db;
}
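
The function above relies on constants the snippet never defines. Below is a hypothetical set of values and a driver, only to make the example self-contained; in a real file the constants would be declared before convert_dataset, and the actual values and phrase names depend on the corpus and vocabulary.

// Hypothetical constants (declare before convert_dataset) and driver.
const int maximum_length = 30;     // pad shorter sentences up to this length
const int unknown_symbol = 10000;  // word ids >= this are mapped to <unk>
const int zero_symbol = 0;         // padding / sentence-start id

int main() {
  // each call reads ./data/language_model/<phrase>_indices.txt and writes
  // ./examples/language_model/lm_<phrase>_leveldb
  convert_dataset("train");
  convert_dataset("valid");
  convert_dataset("test");
  return 0;
}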
Example #3
0
// Assumed includes/usings for this snippet (it follows Caffe's
// tools/extract_features.cpp); adjust to your tree.
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <string>
#include <vector>

#include "boost/algorithm/string.hpp"
#include "caffe/caffe.hpp"
#include "caffe/util/db.hpp"

using boost::shared_ptr;
using caffe::Blob;
using caffe::Caffe;
using caffe::Datum;
using caffe::Net;
using std::string;
namespace db = caffe::db;

template<typename Dtype>
int feature_extraction_pipeline(int argc, char** argv) {
  ::google::InitGoogleLogging(argv[0]);
  const int num_required_args = 7;
  if (argc < num_required_args) {
    LOG(ERROR)<<
    "This program takes in a trained network and an input data layer, and then"
    " extract features of the input data produced by the net.\n"
    "Usage: extract_features  pretrained_net_param"
    "  feature_extraction_proto_file  extract_feature_blob_name1[,name2,...]"
    "  save_feature_dataset_name1[,name2,...]  num_mini_batches  db_type"
    "  [CPU/GPU] [DEVICE_ID=0]\n"
    "Note: you can extract multiple features in one pass by specifying"
    " multiple feature blob names and dataset names seperated by ','."
    " The names cannot contain white space characters and the number of blobs"
    " and datasets must be equal.";
    return 1;
  }
  int arg_pos = num_required_args;
  if (argc > arg_pos && strcmp(argv[arg_pos], "GPU") == 0) {
    LOG(ERROR)<< "Using GPU";
    int device_id = 0;
    if (argc > arg_pos + 1) {
      device_id = atoi(argv[arg_pos + 1]);
      CHECK_GE(device_id, 0);
    }
    LOG(ERROR) << "Using Device_id=" << device_id;
    Caffe::SetDevice(device_id);
    Caffe::set_mode(Caffe::GPU);
  } else {
    LOG(ERROR) << "Using CPU";
    Caffe::set_mode(Caffe::CPU);
  }

  arg_pos = 0;  // the name of the executable
  std::string pretrained_binary_proto(argv[++arg_pos]);

  // Expected prototxt contains at least one data layer such as
  //  the layer data_layer_name and one feature blob such as the
  //  fc7 top blob to extract features.
  /*
   layers {
     name: "data_layer_name"
     type: DATA
     data_param {
       source: "/path/to/your/images/to/extract/feature/images_leveldb"
       mean_file: "/path/to/your/image_mean.binaryproto"
       batch_size: 128
       crop_size: 227
       mirror: false
     }
     top: "data_blob_name"
     top: "label_blob_name"
   }
   layers {
     name: "drop7"
     type: DROPOUT
     dropout_param {
       dropout_ratio: 0.5
     }
     bottom: "fc7"
     top: "fc7"
   }
   */
  std::string feature_extraction_proto(argv[++arg_pos]);
  shared_ptr<Net<Dtype> > feature_extraction_net(
      new Net<Dtype>(feature_extraction_proto, caffe::TEST));
  feature_extraction_net->CopyTrainedLayersFrom(pretrained_binary_proto);

  std::string extract_feature_blob_names(argv[++arg_pos]);
  std::vector<std::string> blob_names;
  boost::split(blob_names, extract_feature_blob_names, boost::is_any_of(","));

  std::string save_feature_dataset_names(argv[++arg_pos]);
  std::vector<std::string> dataset_names;
  boost::split(dataset_names, save_feature_dataset_names,
               boost::is_any_of(","));
  CHECK_EQ(blob_names.size(), dataset_names.size()) <<
      " the number of blob names and dataset names must be equal";
  size_t num_features = blob_names.size();

  for (size_t i = 0; i < num_features; i++) {
    CHECK(feature_extraction_net->has_blob(blob_names[i]))
        << "Unknown feature blob name " << blob_names[i]
        << " in the network " << feature_extraction_proto;
  }

  int num_mini_batches = atoi(argv[++arg_pos]);

  std::vector<shared_ptr<db::DB> > feature_dbs;
  std::vector<shared_ptr<db::Transaction> > txns;
  const char* db_type = argv[++arg_pos];
  for (size_t i = 0; i < num_features; ++i) {
    LOG(INFO)<< "Opening dataset " << dataset_names[i];
    shared_ptr<db::DB> db(db::GetDB(db_type));
    db->Open(dataset_names.at(i), db::NEW);
    feature_dbs.push_back(db);
    shared_ptr<db::Transaction> txn(db->NewTransaction());
    txns.push_back(txn);
  }

  LOG(ERROR)<< "Extacting Features";

  Datum datum;
  const int kMaxKeyStrLength = 100;
  char key_str[kMaxKeyStrLength];
  std::vector<Blob<float>*> input_vec;
  std::vector<int> image_indices(num_features, 0);
  for (int batch_index = 0; batch_index < num_mini_batches; ++batch_index) {
    feature_extraction_net->Forward(input_vec);
    for (int i = 0; i < num_features; ++i) {
      const shared_ptr<Blob<Dtype> > feature_blob = feature_extraction_net
          ->blob_by_name(blob_names[i]);
      int batch_size = feature_blob->num();
      int dim_features = feature_blob->count() / batch_size;
      const Dtype* feature_blob_data;
      for (int n = 0; n < batch_size; ++n) {
        datum.set_height(feature_blob->height());
        datum.set_width(feature_blob->width());
        datum.set_channels(feature_blob->channels());
        datum.clear_data();
        datum.clear_float_data();
        feature_blob_data = feature_blob->cpu_data() +
            feature_blob->offset(n);
        for (int d = 0; d < dim_features; ++d) {
          datum.add_float_data(feature_blob_data[d]);
        }
        int length = snprintf(key_str, kMaxKeyStrLength, "%010d",
            image_indices[i]);
        string out;
        CHECK(datum.SerializeToString(&out));
        txns.at(i)->Put(std::string(key_str, length), out);
        ++image_indices[i];
        if (image_indices[i] % 1000 == 0) {
          txns.at(i)->Commit();
          txns.at(i).reset(feature_dbs.at(i)->NewTransaction());
          LOG(ERROR)<< "Extracted features of " << image_indices[i] <<
              " query images for feature blob " << blob_names[i];
        }
      }  // for (int n = 0; n < batch_size; ++n)
    }  // for (int i = 0; i < num_features; ++i)
  }  // for (int batch_index = 0; batch_index < num_mini_batches; ++batch_index)
  // write the last batch
  for (int i = 0; i < num_features; ++i) {
    if (image_indices[i] % 1000 != 0) {
      txns.at(i)->Commit();
    }
    LOG(ERROR)<< "Extracted features of " << image_indices[i] <<
        " query images for feature blob " << blob_names[i];
    feature_dbs.at(i)->Close();
  }

  LOG(ERROR)<< "Successfully extracted the features!";
  return 0;
}
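
Example #3 is the body of Caffe's extract_features tool; upstream it is a function template over Dtype and is instantiated from main, roughly:

int main(int argc, char** argv) {
  return feature_extraction_pipeline<float>(argc, argv);
}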
Example #4
0
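Example #4 comes from a Petuum/Poseidon-style distributed Caffe fork, and the method below is a member of a FeatureExtractor class whose declaration is not shown. A hypothetical sketch of the members it relies on follows (the real header may declare more and differ in detail):

// Hypothetical enclosing class, inferred from the member accesses below
// (thread_id_, layer_blobs_global_idx_ptr_); illustrative only.
template <typename Dtype>
class FeatureExtractor {
 public:
  FeatureExtractor(int thread_id,
                   const std::map<std::string, std::vector<int> >* idx_ptr)
      : thread_id_(thread_id), layer_blobs_global_idx_ptr_(idx_ptr) {}
  void ExtractFeatures(const caffe::NetParameter& net_param);

 private:
  int thread_id_;
  // layer name -> global parameter-server table indices of that layer's blobs
  const std::map<std::string, std::vector<int> >* layer_blobs_global_idx_ptr_;
};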
template <typename Dtype>
void FeatureExtractor<Dtype>::ExtractFeatures(const NetParameter& net_param) {
  util::Context& context = util::Context::get_instance();
  int client_id = context.get_int32("client_id");
  string weights_path = context.get_string("weights");
  string extract_feature_blob_names 
      = context.get_string("extract_feature_blob_names");

  shared_ptr<Net<Dtype> > feature_extraction_net(
      new Net<Dtype>(net_param, thread_id_, 0));
  map<string, vector<int> >::const_iterator it 
      = layer_blobs_global_idx_ptr_->begin();
  for (; it != layer_blobs_global_idx_ptr_->end(); ++it) {
    const shared_ptr<Layer<Dtype> > layer 
        = feature_extraction_net->layer_by_name(it->first);
    layer->SetUpBlobGlobalTable(it->second, false, false);
  }
  if (client_id == 0 && thread_id_ == 0) {
    LOG(INFO) << "Extracting features by " << weights_path;
    feature_extraction_net->CopyTrainedLayersFrom(weights_path, true);
  } 
  petuum::PSTableGroup::GlobalBarrier();

  feature_extraction_net->SyncWithPS(0);

  vector<string> blob_names;
  boost::split(blob_names, extract_feature_blob_names, boost::is_any_of(","));

  string save_feature_leveldb_names  
      = context.get_string("save_feature_leveldb_names");
  vector<string> leveldb_names;
  boost::split(leveldb_names, save_feature_leveldb_names,
               boost::is_any_of(","));
  CHECK_EQ(blob_names.size(), leveldb_names.size()) <<
      " the number of blob names and leveldb names must be equal";
  size_t num_features = blob_names.size();

  for (size_t i = 0; i < num_features; i++) {
    CHECK(feature_extraction_net->has_blob(blob_names[i]))
        << "Unknown feature blob name " << blob_names[i]
        << " in the network ";
  } 
  CHECK(feature_extraction_net->has_blob("label"))
      << "Fail to find label blob in the network ";

  // Differentiate leveldb names
  std::ostringstream suffix;
  suffix  << "_" << client_id << "_" << thread_id_;
  for (size_t i = 0; i < num_features; i++) {
      leveldb_names[i] = leveldb_names[i] + suffix.str();
  }
  
  leveldb::Options options;
  options.error_if_exists = true;
  options.create_if_missing = true;
  options.write_buffer_size = 268435456;
  vector<shared_ptr<leveldb::DB> > feature_dbs;
  for (size_t i = 0; i < num_features; ++i) {
    leveldb::DB* db;
    leveldb::Status status = leveldb::DB::Open(options,
                                               leveldb_names[i].c_str(),
                                               &db);
    CHECK(status.ok()) << "Failed to open leveldb " << leveldb_names[i];
    feature_dbs.push_back(shared_ptr<leveldb::DB>(db));
  }

  int num_mini_batches = context.get_int32("num_mini_batches");
 
  Datum datum;
  // Give each feature its own WriteBatch. Filling the vector from a single
  // shared_ptr would make every element alias the same batch.
  vector<shared_ptr<leveldb::WriteBatch> > feature_batches(num_features);
  for (size_t i = 0; i < num_features; ++i) {
    feature_batches[i].reset(new leveldb::WriteBatch());
  }
  const int kMaxKeyStrLength = 100;
  char key_str[kMaxKeyStrLength];
  vector<Blob<float>*> input_vec;
  vector<int> image_indices(num_features, 0);
  for (int batch_index = 0; batch_index < num_mini_batches; ++batch_index) {
    feature_extraction_net->Forward(input_vec);
    for (int i = 0; i < num_features; ++i) {
      const shared_ptr<Blob<Dtype> > feature_blob 
          = feature_extraction_net->blob_by_name(blob_names[i]);
      const shared_ptr<Blob<Dtype> > label_blob
          = feature_extraction_net->blob_by_name("label");
      const Dtype* labels = label_blob->cpu_data(); 
      int batch_size = feature_blob->num();
      int dim_features = feature_blob->count() / batch_size;
      const Dtype* feature_blob_data;
      for (int n = 0; n < batch_size; ++n) {
        datum.set_height(dim_features);
        datum.set_width(1);
        datum.set_channels(1);
        datum.clear_data();
        datum.clear_float_data();
        // read-only access: cpu_data() avoids marking the blob as modified
        feature_blob_data = feature_blob->cpu_data() +
            feature_blob->offset(n);
        for (int d = 0; d < dim_features; ++d) {
          datum.add_float_data(feature_blob_data[d]);
        }
        datum.set_label(static_cast<int>(labels[n]));

        string value;
        datum.SerializeToString(&value);
        snprintf(key_str, kMaxKeyStrLength, "%d", image_indices[i]);
        feature_batches[i]->Put(string(key_str), value);
        ++image_indices[i];
        if (image_indices[i] % 1000 == 0) {
          feature_dbs[i]->Write(leveldb::WriteOptions(),
                                feature_batches[i].get());
          feature_batches[i].reset(new leveldb::WriteBatch());
        }
      }  // for (int n = 0; n < batch_size; ++n)
    }  // for (int i = 0; i < num_features; ++i)
  }  // for (int batch_index = 0; batch_index < num_mini_batches; ++batch_index)
  // write the last batch
  for (int i = 0; i < num_features; ++i) {
    if (image_indices[i] % 1000 != 0) {
      feature_dbs[i]->Write(leveldb::WriteOptions(), feature_batches[i].get());
    }
  }
}
template <typename Dtype>
int feature_extraction_pipeline(int argc, char** argv) {
  ::google::InitGoogleLogging(argv[0]);
  const int num_required_args = 6;
  if (argc < num_required_args) {
    LOG(ERROR)<<
    "This program takes in a trained network and an input data layer, and then"
    " extract features of the input data produced by the net.\n"
    "Usage: extract_features  pretrained_net_param"
    "  feature_extraction_proto_file  extract_feature_blob_name1[,name2,...]"
    "  save_feature_leveldb_name1[,name2,...]  num_mini_batches  [CPU/GPU]"
    "  [DEVICE_ID=0]\n"
    "Note: you can extract multiple features in one pass by specifying"
    " multiple feature blob names and leveldb names seperated by ','."
    " The names cannot contain white space characters and the number of blobs"
    " and leveldbs must be equal.";
    return 1;
  }
  int arg_pos = num_required_args;
  if (argc > arg_pos && strcmp(argv[arg_pos], "GPU") == 0) {
    LOG(ERROR)<< "Using GPU";
    int device_id = 0;
    if (argc > arg_pos + 1) {
      device_id = atoi(argv[arg_pos + 1]);
      CHECK_GE(device_id, 0);
    }
    LOG(ERROR) << "Using Device_id=" << device_id;
    Caffe::SetDevice(device_id);
    Caffe::set_mode(Caffe::GPU);
  } else {
    LOG(ERROR) << "Using CPU";
    Caffe::set_mode(Caffe::CPU);
  }
  Caffe::set_phase(Caffe::TEST);

  arg_pos = 0;  // the name of the executable
  string pretrained_binary_proto(argv[++arg_pos]);

  // Expected prototxt contains at least one data layer such as
  //  the layer data_layer_name and one feature blob such as the
  //  fc7 top blob to extract features.
  /*
   layers {
     name: "data_layer_name"
     type: DATA
     data_param {
       source: "/path/to/your/images/to/extract/feature/images_leveldb"
       mean_file: "/path/to/your/image_mean.binaryproto"
       batch_size: 128
       crop_size: 227
       mirror: false
     }
     top: "data_blob_name"
     top: "label_blob_name"
   }
   layers {
     name: "drop7"
     type: DROPOUT
     dropout_param {
       dropout_ratio: 0.5
     }
     bottom: "fc7"
     top: "fc7"
   }
   */
  string feature_extraction_proto(argv[++arg_pos]);
  shared_ptr<Net<Dtype> > feature_extraction_net(
      new Net<Dtype>(feature_extraction_proto));
  feature_extraction_net->CopyTrainedLayersFrom(pretrained_binary_proto);

  string extract_feature_blob_names(argv[++arg_pos]);
  vector<string> blob_names;
  boost::split(blob_names, extract_feature_blob_names, boost::is_any_of(","));

  string save_feature_leveldb_names(argv[++arg_pos]);
  vector<string> leveldb_names;
  boost::split(leveldb_names, save_feature_leveldb_names,
               boost::is_any_of(","));
  CHECK_EQ(blob_names.size(), leveldb_names.size()) <<
      " the number of blob names and leveldb names must be equal";
  size_t num_features = blob_names.size();

  for (size_t i = 0; i < num_features; i++) {
    CHECK(feature_extraction_net->has_blob(blob_names[i]))
        << "Unknown feature blob name " << blob_names[i]
        << " in the network " << feature_extraction_proto;
  }

  leveldb::Options options;
  options.error_if_exists = true;
  options.create_if_missing = true;
  options.write_buffer_size = 268435456;
  vector<shared_ptr<leveldb::DB> > feature_dbs;
  for (size_t i = 0; i < num_features; ++i) {
    LOG(INFO)<< "Opening leveldb " << leveldb_names[i];
    leveldb::DB* db;
    leveldb::Status status = leveldb::DB::Open(options,
                                               leveldb_names[i].c_str(),
                                               &db);
    CHECK(status.ok()) << "Failed to open leveldb " << leveldb_names[i];
    feature_dbs.push_back(shared_ptr<leveldb::DB>(db));
  }

  int num_mini_batches = atoi(argv[++arg_pos]);

  LOG(ERROR)<< "Extracting Features";

  Datum datum;
  // Give each feature its own WriteBatch. Filling the vector from a single
  // shared_ptr would make every element alias the same batch.
  vector<shared_ptr<leveldb::WriteBatch> > feature_batches(num_features);
  for (size_t i = 0; i < num_features; ++i) {
    feature_batches[i].reset(new leveldb::WriteBatch());
  }
  const int kMaxKeyStrLength = 100;
  char key_str[kMaxKeyStrLength];
  vector<Blob<float>*> input_vec;
  vector<int> image_indices(num_features, 0);
  for (int batch_index = 0; batch_index < num_mini_batches; ++batch_index) {
    feature_extraction_net->Forward(input_vec);
    for (int i = 0; i < num_features; ++i) {
      const shared_ptr<Blob<Dtype> > feature_blob = feature_extraction_net
          ->blob_by_name(blob_names[i]);
      int batch_size = feature_blob->num();
      int dim_features = feature_blob->count() / batch_size;
      const Dtype* feature_blob_data;
      for (int n = 0; n < batch_size; ++n) {
        datum.set_height(dim_features);
        datum.set_width(1);
        datum.set_channels(1);
        datum.clear_data();
        datum.clear_float_data();
        // read-only access: cpu_data() avoids marking the blob as modified
        feature_blob_data = feature_blob->cpu_data() +
            feature_blob->offset(n);
        for (int d = 0; d < dim_features; ++d) {
          datum.add_float_data(feature_blob_data[d]);
        }
        string value;
        datum.SerializeToString(&value);
        snprintf(key_str, kMaxKeyStrLength, "%d", image_indices[i]);
        feature_batches[i]->Put(string(key_str), value);
        ++image_indices[i];
        if (image_indices[i] % 1000 == 0) {
          feature_dbs[i]->Write(leveldb::WriteOptions(),
                                feature_batches[i].get());
          LOG(ERROR)<< "Extracted features of " << image_indices[i] <<
              " query images for feature blob " << blob_names[i];
          feature_batches[i].reset(new leveldb::WriteBatch());
        }
      }  // for (int n = 0; n < batch_size; ++n)
    }  // for (int i = 0; i < num_features; ++i)
  }  // for (int batch_index = 0; batch_index < num_mini_batches; ++batch_index)
  // write the last batch
  for (int i = 0; i < num_features; ++i) {
    if (image_indices[i] % 1000 != 0) {
      feature_dbs[i]->Write(leveldb::WriteOptions(), feature_batches[i].get());
    }
    LOG(ERROR)<< "Extracted features of " << image_indices[i] <<
        " query images for feature blob " << blob_names[i];
  }

  LOG(ERROR)<< "Successfully extracted the features!";
  return 0;
}