int main(int argc, char** argv) {
  ::google::InitGoogleLogging(argv[0]);
  if (argc != 3) {
    LOG(ERROR) << "Usage: compute_image_mean input_leveldb output_file";
    return 1;
  }

  leveldb::DB* db;
  leveldb::Options options;
  options.create_if_missing = false;
  LOG(INFO) << "Opening leveldb " << argv[1];
  leveldb::Status status = leveldb::DB::Open(options, argv[1], &db);
  CHECK(status.ok()) << "Failed to open leveldb " << argv[1];

  leveldb::ReadOptions read_options;
  read_options.fill_cache = false;
  leveldb::Iterator* it = db->NewIterator(read_options);
  it->SeekToFirst();

  VolumeDatum datum;
  BlobProto sum_blob;
  int count = 0;
  datum.ParseFromString(it->value().ToString());
  sum_blob.set_num(1);
  sum_blob.set_channels(datum.channels());
  sum_blob.set_length(datum.length());
  sum_blob.set_height(datum.height());
  sum_blob.set_width(datum.width());
  const int data_size = datum.channels() * datum.length() * datum.height()
      * datum.width();
  int size_in_datum = std::max<int>(datum.data().size(),
                                    datum.float_data_size());
  for (int i = 0; i < size_in_datum; ++i) {
    sum_blob.add_data(0.);
  }

  LOG(INFO) << "Starting Iteration";
  for (it->SeekToFirst(); it->Valid(); it->Next()) {
    // just a dummy operation
    datum.ParseFromString(it->value().ToString());
    const string& data = datum.data();
    size_in_datum = std::max<int>(datum.data().size(),
                                  datum.float_data_size());
    CHECK_EQ(size_in_datum, data_size) << "Incorrect data field size "
        << size_in_datum;
    if (data.size() != 0) {
      for (int i = 0; i < size_in_datum; ++i) {
        sum_blob.set_data(i, sum_blob.data(i) + (uint8_t)data[i]);
      }
    } else {
      for (int i = 0; i < size_in_datum; ++i) {
        sum_blob.set_data(i, sum_blob.data(i) +
            static_cast<float>(datum.float_data(i)));
      }
    }
    ++count;
    if (count % 10000 == 0) {
      LOG(ERROR) << "Processed " << count << " files.";
    }
  }
  if (count % 10000 != 0) {
    LOG(ERROR) << "Processed " << count << " files.";
  }
  for (int i = 0; i < sum_blob.data_size(); ++i) {
    sum_blob.set_data(i, sum_blob.data(i) / count);
  }

  // Write to disk
  LOG(INFO) << "Write to " << argv[2];
  WriteProtoToBinaryFile(sum_blob, argv[2]);

  delete db;
  return 0;
}
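// Example invocation, following the usage message above (both paths are
// placeholders):
//
//   compute_image_mean input_leveldb output_file
//
// The output file is a serialized BlobProto holding the per-element mean,
// which VolumeDataLayer can consume through data_param's mean_file
// (see SetUp below).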
void VolumeDataLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
  CHECK_EQ(bottom.size(), 0) << "Data Layer takes no input blobs.";
  CHECK_GE(top->size(), 1) << "Data Layer takes at least one blob as output.";
  CHECK_LE(top->size(), 2) << "Data Layer takes at most two blobs as output.";
  if (top->size() == 1) {
    output_labels_ = false;
  } else {
    output_labels_ = true;
  }
  // Initialize the leveldb
  leveldb::DB* db_temp;
  leveldb::Options options;
  options.create_if_missing = false;
  options.max_open_files = 100;
  LOG(INFO) << "Opening leveldb " << this->layer_param_.data_param().source();
  leveldb::Status status = leveldb::DB::Open(
      options, this->layer_param_.data_param().source(), &db_temp);
  CHECK(status.ok()) << "Failed to open leveldb "
      << this->layer_param_.data_param().source() << std::endl
      << status.ToString();
  db_.reset(db_temp);
  iter_.reset(db_->NewIterator(leveldb::ReadOptions()));
  iter_->SeekToFirst();
  // Check if we would need to randomly skip a few data points
  if (this->layer_param_.data_param().rand_skip()) {
    unsigned int skip = caffe_rng_rand() %
        this->layer_param_.data_param().rand_skip();
    LOG(INFO) << "Skipping first " << skip << " data points.";
    while (skip-- > 0) {
      iter_->Next();
      if (!iter_->Valid()) {
        iter_->SeekToFirst();
      }
    }
  }
  // Read a data point, and use it to initialize the top blob.
  VolumeDatum datum;
  datum.ParseFromString(iter_->value().ToString());
  // image
  int crop_size = this->layer_param_.data_param().crop_size();
  if (crop_size > 0) {
    (*top)[0]->Reshape(this->layer_param_.data_param().batch_size(),
        datum.channels(), datum.length(), crop_size, crop_size);
    prefetch_data_.reset(new Blob<Dtype>(
        this->layer_param_.data_param().batch_size(), datum.channels(),
        datum.length(), crop_size, crop_size));
  } else {
    (*top)[0]->Reshape(
        this->layer_param_.data_param().batch_size(), datum.channels(),
        datum.length(), datum.height(), datum.width());
    prefetch_data_.reset(new Blob<Dtype>(
        this->layer_param_.data_param().batch_size(), datum.channels(),
        datum.length(), datum.height(), datum.width()));
  }
  LOG(INFO) << "output data size: " << (*top)[0]->num() << ","
      << (*top)[0]->channels() << "," << (*top)[0]->length() << ","
      << (*top)[0]->height() << "," << (*top)[0]->width();
  // label
  if (output_labels_) {
    (*top)[1]->Reshape(this->layer_param_.data_param().batch_size(),
        1, 1, 1, 1);
    prefetch_label_.reset(
        new Blob<Dtype>(this->layer_param_.data_param().batch_size(),
            1, 1, 1, 1));
  }
  // datum size
  datum_channels_ = datum.channels();
  datum_length_ = datum.length();
  datum_height_ = datum.height();
  datum_width_ = datum.width();
  datum_size_ = datum.channels() * datum.length() * datum.height() *
      datum.width();
  CHECK_GT(datum_height_, crop_size);
  CHECK_GT(datum_width_, crop_size);
  // Check if we want to subtract a mean.
  if (this->layer_param_.data_param().has_mean_file()) {
    const string& mean_file = this->layer_param_.data_param().mean_file();
    LOG(INFO) << "Loading mean file from " << mean_file;
    BlobProto blob_proto;
    ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto);
    data_mean_.FromProto(blob_proto);
    CHECK_EQ(data_mean_.num(), 1);
    CHECK_EQ(data_mean_.channels(), datum_channels_);
    CHECK_EQ(data_mean_.length(), datum_length_);
    CHECK_EQ(data_mean_.height(), datum_height_);
    CHECK_EQ(data_mean_.width(), datum_width_);
  } else {
    // Simply initialize an all-empty mean.
    data_mean_.Reshape(1, datum_channels_, datum_length_, datum_height_,
        datum_width_);
  }
  // Now, start the prefetch thread. Before calling prefetch, we make two
  // cpu_data calls so that the prefetch thread does not accidentally make
  // simultaneous cudaMalloc calls when the main thread is running. In some
  // GPUs this seems to cause failures if we do not do so.
  prefetch_data_->mutable_cpu_data();
  if (output_labels_) {
    prefetch_label_->mutable_cpu_data();
  }
  data_mean_.cpu_data();
  DLOG(INFO) << "Initializing prefetch";
  CreatePrefetchThread();
  DLOG(INFO) << "Prefetch initialized.";
}
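// For reference, a minimal sketch of the data_param fields that SetUp reads.
// The field names mirror the accessors used above; the surrounding prototxt
// layer syntax and all values are illustrative placeholders, not taken from
// this file:
//
//   data_param {
//     source: "path/to/input_leveldb"        # leveldb opened in SetUp
//     batch_size: 30                         # num of the prefetch/top blobs
//     crop_size: 0                           # if > 0, emit crop_size x crop_size crops
//     mean_file: "path/to/mean.binaryproto"  # BlobProto from the mean tool
//     rand_skip: 0                           # randomly skip up to this many entries
//   }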