Example #1
template <typename Dtype>
void DataLayer<Dtype>::InternalThreadEntry() {
  Datum datum;
  CHECK(this->prefetch_data_.count());
  Dtype* top_data = this->prefetch_data_.mutable_cpu_data();
  Dtype* top_label = NULL;  // suppress warnings about uninitialized variables
  if (this->output_labels_) {
    top_label = this->prefetch_label_.mutable_cpu_data();
  }
  const int batch_size = this->layer_param_.data_param().batch_size();

  for (int item_id = 0; item_id < batch_size; ++item_id) {
    // get a blob
    switch (this->layer_param_.data_param().backend()) {
    case DataParameter_DB_LEVELDB:
      CHECK(iter_);
      CHECK(iter_->Valid());
      datum.ParseFromString(iter_->value().ToString());
      break;
    case DataParameter_DB_LMDB:
      CHECK_EQ(mdb_cursor_get(mdb_cursor_, &mdb_key_,
              &mdb_value_, MDB_GET_CURRENT), MDB_SUCCESS);
      datum.ParseFromArray(mdb_value_.mv_data,
          mdb_value_.mv_size);
      break;
    default:
      LOG(FATAL) << "Unknown database backend";
    }

    // Apply data transformations (mirror, scale, crop...)
    this->data_transformer_.Transform(item_id, datum, this->mean_, top_data);

    if (this->output_labels_) {
      // Multi-label modification: stock Caffe copies a single scalar
      // (top_label[item_id] = datum.label()); here every entry of the
      // repeated label field is copied instead.
      for (int label_i = 0; label_i < datum.label_size(); ++label_i) {
        top_label[item_id * datum.label_size() + label_i] = datum.label(label_i);
      }
    }

    // go to the next iter
    switch (this->layer_param_.data_param().backend()) {
    case DataParameter_DB_LEVELDB:
      iter_->Next();
      if (!iter_->Valid()) {
        // We have reached the end. Restart from the first.
        DLOG(INFO) << "Restarting data prefetching from start.";
        iter_->SeekToFirst();
      }
      break;
    case DataParameter_DB_LMDB:
      if (mdb_cursor_get(mdb_cursor_, &mdb_key_,
              &mdb_value_, MDB_NEXT) != MDB_SUCCESS) {
        // We have reached the end. Restart from the first.
        DLOG(INFO) << "Restarting data prefetching from start.";
        CHECK_EQ(mdb_cursor_get(mdb_cursor_, &mdb_key_,
                &mdb_value_, MDB_FIRST), MDB_SUCCESS);
      }
      break;
    default:
      LOG(FATAL) << "Unknown database backend";
    }
  }
}
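The loop above writes all of an item's labels into one row of the label blob, indexed as item_id * datum.label_size() + label_i. That layout only works when every Datum in the database carries the same number of labels and prefetch_label_ was reshaped to (batch_size, label_size, 1, 1) at setup, which Example #2 below does. For context, here is a minimal writer-side sketch. It assumes caffe.proto has been modified from optional int32 label to repeated int32 label; that repeated field is implied by the datum.label_size()/datum.label(i) calls above and is not part of upstream Caffe.

#include <string>
#include <vector>
#include "caffe/proto/caffe.pb.h"  // assumes the modified proto is compiled in

// Serialize one image plus its labels in the layout the prefetch loop
// expects: a fixed number of labels per item, stored in the Datum itself.
std::string MakeMultiLabelDatum(const std::string& pixels,
                                int channels, int height, int width,
                                const std::vector<int>& labels) {
  caffe::Datum datum;
  datum.set_channels(channels);
  datum.set_height(height);
  datum.set_width(width);
  datum.set_data(pixels);        // uint8 pixel bytes in CHW order
  for (size_t i = 0; i < labels.size(); ++i) {
    datum.add_label(labels[i]);  // repeated int32 label (modified proto)
  }
  std::string value;
  datum.SerializeToString(&value);  // stored as the DB value for some key
  return value;
}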
Example #2
template <typename Dtype>
void DataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
  // Initialize DB
  switch (this->layer_param_.data_param().backend()) {
  case DataParameter_DB_LEVELDB:
    {
    leveldb::DB* db_temp;
    leveldb::Options options = GetLevelDBOptions();
    options.create_if_missing = false;
    LOG(INFO) << "Opening leveldb " << this->layer_param_.data_param().source();
    leveldb::Status status = leveldb::DB::Open(
        options, this->layer_param_.data_param().source(), &db_temp);
    CHECK(status.ok()) << "Failed to open leveldb "
                       << this->layer_param_.data_param().source() << std::endl
                       << status.ToString();
    db_.reset(db_temp);
    iter_.reset(db_->NewIterator(leveldb::ReadOptions()));
    iter_->SeekToFirst();
    idx_ = 0;
    }
    break;
  case DataParameter_DB_LMDB:
    CHECK_EQ(mdb_env_create(&mdb_env_), MDB_SUCCESS) << "mdb_env_create failed";
    CHECK_EQ(mdb_env_set_mapsize(mdb_env_, 1099511627776), MDB_SUCCESS);  // 1TB
    CHECK_EQ(mdb_env_open(mdb_env_,
             this->layer_param_.data_param().source().c_str(),
             MDB_RDONLY|MDB_NOTLS, 0664), MDB_SUCCESS) << "mdb_env_open failed";
    CHECK_EQ(mdb_txn_begin(mdb_env_, NULL, MDB_RDONLY, &mdb_txn_), MDB_SUCCESS)
        << "mdb_txn_begin failed";
    CHECK_EQ(mdb_open(mdb_txn_, NULL, 0, &mdb_dbi_), MDB_SUCCESS)
        << "mdb_open failed";
    CHECK_EQ(mdb_cursor_open(mdb_txn_, mdb_dbi_, &mdb_cursor_), MDB_SUCCESS)
        << "mdb_cursor_open failed";
    LOG(INFO) << "Opening lmdb " << this->layer_param_.data_param().source();
    CHECK_EQ(mdb_cursor_get(mdb_cursor_, &mdb_key_, &mdb_value_, MDB_FIRST),
        MDB_SUCCESS) << "mdb_cursor_get failed";
    break;
  default:
    LOG(FATAL) << "Unknown database backend";
  }

  // Check if we would need to randomly skip a few data points
  if (this->layer_param_.data_param().rand_skip()) {
    unsigned int skip = caffe_rng_rand() %
                        this->layer_param_.data_param().rand_skip();
    LOG(INFO) << "Skipping first " << skip << " data points.";
    while (skip-- > 0) {
      switch (this->layer_param_.data_param().backend()) {
      case DataParameter_DB_LEVELDB:
        iter_->Next();
        idx_++;
        if (!iter_->Valid()) {
          iter_->SeekToFirst();
          idx_ = 0;
        }
        break;
      case DataParameter_DB_LMDB:
        if (mdb_cursor_get(mdb_cursor_, &mdb_key_, &mdb_value_, MDB_NEXT)
            != MDB_SUCCESS) {
          CHECK_EQ(mdb_cursor_get(mdb_cursor_, &mdb_key_, &mdb_value_,
                   MDB_FIRST), MDB_SUCCESS);
        }
        break;
      default:
        LOG(FATAL) << "Unknown database backend";
      }
    }
  }
  // Read a data point, and use it to initialize the top blob.
  Datum datum;
  switch (this->layer_param_.data_param().backend()) {
  case DataParameter_DB_LEVELDB:
    datum.ParseFromString(iter_->value().ToString());
    break;
  case DataParameter_DB_LMDB:
    datum.ParseFromArray(mdb_value_.mv_data, mdb_value_.mv_size);
    break;
  default:
    LOG(FATAL) << "Unknown database backend";
  }

  // image
  int crop_size = this->layer_param_.transform_param().crop_size();
  if (crop_size > 0) {
    (*top)[0]->Reshape(this->layer_param_.data_param().batch_size(),
                       datum.channels(), crop_size, crop_size);
    this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(),
        datum.channels(), crop_size, crop_size);
  } else {
    (*top)[0]->Reshape(
        this->layer_param_.data_param().batch_size(), datum.channels(),
        datum.height(), datum.width());
    this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(),
        datum.channels(), datum.height(), datum.width());
  }
  LOG(INFO) << "output data size: " << (*top)[0]->num() << ","
      << (*top)[0]->channels() << "," << (*top)[0]->height() << ","
      << (*top)[0]->width();
  // label
  if (this->output_labels_) {
    (*top)[1]->Reshape(this->layer_param_.data_param().batch_size(),
        datum.label_size(), 1, 1);
    this->prefetch_label_.Reshape(this->layer_param_.data_param().batch_size(),
        datum.label_size(), 1, 1);
  }
  // datum size
  this->datum_channels_ = datum.channels();
  this->datum_height_ = datum.height();
  this->datum_width_ = datum.width();
  this->datum_size_ = datum.channels() * datum.height() * datum.width();
}
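The LMDB branch above chains several C API calls before the first datum can be parsed. A condensed standalone sketch of the same read-only cursor pattern, with the CHECKs reduced to asserts and path standing in for data_param().source():

#include <cassert>
#include <lmdb.h>

// Open an LMDB environment read-only and walk every record, mirroring the
// env -> txn -> dbi -> cursor sequence used in DataLayerSetUp.
void WalkLmdb(const char* path) {
  MDB_env* env;  MDB_txn* txn;  MDB_dbi dbi;  MDB_cursor* cursor;
  MDB_val key, value;
  assert(mdb_env_create(&env) == MDB_SUCCESS);
  assert(mdb_env_set_mapsize(env, 1099511627776) == MDB_SUCCESS);  // 1TB
  assert(mdb_env_open(env, path, MDB_RDONLY | MDB_NOTLS, 0664) == MDB_SUCCESS);
  assert(mdb_txn_begin(env, NULL, MDB_RDONLY, &txn) == MDB_SUCCESS);
  assert(mdb_open(txn, NULL, 0, &dbi) == MDB_SUCCESS);
  assert(mdb_cursor_open(txn, dbi, &cursor) == MDB_SUCCESS);
  for (int rc = mdb_cursor_get(cursor, &key, &value, MDB_FIRST);
       rc == MDB_SUCCESS;
       rc = mdb_cursor_get(cursor, &key, &value, MDB_NEXT)) {
    // value.mv_data / value.mv_size hold one serialized Datum.
  }
  mdb_cursor_close(cursor);
  mdb_txn_abort(txn);  // for a read-only txn, abort simply releases it
  mdb_env_close(env);
}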
template <typename Dtype>
void* PoseImageDataLayerPrefetch(void* layer_pointer)
{
	CHECK(layer_pointer);
	PoseImageDataLayer<Dtype>* layer =
			reinterpret_cast<PoseImageDataLayer<Dtype>*>(layer_pointer);
	CHECK(layer);
	Datum datum;
	CHECK(layer->prefetch_data_);
	Dtype* top_data = layer->prefetch_data_->mutable_cpu_data();
	Dtype* top_label = layer->prefetch_label_->mutable_cpu_data();
	PoseImageDataParameter pose_image_data_param = layer->layer_param_.pose_image_data_param();
	const Dtype scale = pose_image_data_param.scale();
	const int batch_size = pose_image_data_param.batch_size();
	const int crop_size = pose_image_data_param.crop_size();
	const bool mirror = pose_image_data_param.mirror();
	const int new_height = pose_image_data_param.new_height();
	const int new_width = pose_image_data_param.new_width();
	const int out_height = pose_image_data_param.out_height();
	const int out_width  = pose_image_data_param.out_width();
	const int key_point_range = pose_image_data_param.key_point_range();
	const float scale_lower_bound = pose_image_data_param.scale_lower_bound();
	const float scale_upper_bound = pose_image_data_param.scale_upper_bound();
	const int key_point_num  = pose_image_data_param.key_point_num();
	const int randmargin  = pose_image_data_param.randmargin();
	const int use_mode = pose_image_data_param.use_mode();

	const float torso_ratio = pose_image_data_param.torso_ratio();
	const int mx1 = pose_image_data_param.mx1();
	const int mx2 = pose_image_data_param.mx2();
	const int my1 = pose_image_data_param.my1();
	const int my2 = pose_image_data_param.my2();

	const bool color_aug = pose_image_data_param.color_aug();


	if (mirror && crop_size == 0)
	{
		LOG(FATAL)
				<< "Current implementation requires mirror and crop_size to be "
				<< "set at the same time.";
	}
	// datum scales
	const int channels = layer->datum_channels_;
	const int height = layer->datum_height_;
	const int width = layer->datum_width_;
	const int size = layer->datum_size_;
	const int lines_size = layer->lines_.size();
	const Dtype* mean = layer->data_mean_.cpu_data();

	int* was = new int[out_height * out_width];  // scratch buffer consumed by PoseReadLabel below

	for (int item_id = 0; item_id < batch_size; ++item_id)
	{
		// Debug-only output paths (only referenced by the commented-out
		// visualization code below).
		char ss1[1010], ss2[1010];
		sprintf(ss1, "/home/dragon123/cnncode/showimg/%d.jpg", item_id);
		//sprintf(ss2,"/home/dragon123/cnncode/showimg/%d_gt.jpg",item_id);
		// get a blob
		float nowscale = 1;
		if (layer->phase_ == Caffe::TRAIN)
			nowscale = random(scale_lower_bound, scale_upper_bound);
		CHECK_GT(1.55, nowscale);  // sanity bounds: 0.95 < nowscale < 1.55
		CHECK_GT(nowscale, 0.95);

		CHECK_GT(lines_size, layer->lines_id_);
		if (use_mode == 1)
		{

			bool temp = PoseReadImageToDatum_mode1(layer->lines_[layer->lines_id_].first,
					layer->lines_[layer->lines_id_].second, new_height, new_width, &datum, nowscale,
					torso_ratio, mx1, mx2, my1, my2, randmargin);
			if (temp == false) continue;
		}
		else
		{
			bool temp = PoseReadImageToDatum_mode2(layer->lines_[layer->lines_id_].first,
								layer->lines_[layer->lines_id_].second, new_height, new_width, &datum, nowscale,
								torso_ratio, mx1, mx2, my1, my2, randmargin);
			if (temp == false) continue;
		}


		const string& data = datum.data();

		if (new_height > 0 && new_width > 0)
		{
			CHECK(data.size()) << "Image cropping only supports uint8 data";
			int h_off, w_off;
			// We only do random crop when we do training.
			h_off = 0;
			w_off = 0;

			if (mirror && layer->PrefetchRand() % 2)
			{
				// Copy mirrored version
				for (int c = 0; c < channels; ++c)
				{
					float thisRand = 1;
					if(color_aug)
					{
						thisRand = random(0.8,1.2);
					}

					for (int h = 0; h < new_height; ++h)
					{
						for (int w = 0; w < new_width; ++w)
						{
							int top_index = ((item_id * channels + c)
									* new_height + h) * new_width
									+ (new_width - 1 - w);
							int data_index = (c * height + h + h_off) * width
									+ w + w_off;
							Dtype datum_element =
									static_cast<Dtype>(static_cast<uint8_t>(data[data_index]));
							top_data[top_index] = (datum_element
									- mean[data_index]) * scale;
							top_data[top_index] = min(top_data[top_index] * thisRand, (Dtype)(255.0));
						}
					}
				}
			}
			else
			{
				// Normal copy
				//Mat img(Size(new_width,new_height), CV_8UC3);
				for (int c = 0; c < channels; ++c)
				{
					float thisRand = 1;
					if(color_aug)
					{
						thisRand = random(0.8,1.2);
					}

					for (int h = 0; h < new_height; ++h)
					{
						for (int w = 0; w < new_width; ++w)
						{
							int top_index = ((item_id * channels + c)
									* new_height + h) * new_width + w;
							int data_index = (c * height + h + h_off) * width
									+ w + w_off;
							Dtype datum_element =
									static_cast<Dtype>(static_cast<uint8_t>(data[data_index]));
							top_data[top_index] = (datum_element
									- mean[data_index]) * scale;

							//img.at<cv::Vec3b>(h, w)[c] = (uchar)(datum_element * scale) * thisRand;

							top_data[top_index] = min(top_data[top_index] * thisRand, (Dtype)(255.0));
						}
					}
				}
				//imwrite(ss1, img);
			}
		}
		else
		{
			// Just copy the whole data
			if (data.size())
			{
				for (int j = 0; j < size; ++j)
				{
					Dtype datum_element =
							static_cast<Dtype>(static_cast<uint8_t>(data[j]));
					top_data[item_id * size + j] = (datum_element - mean[j])
							* scale;
				}
			}
			else
			{
				for (int j = 0; j < size; ++j)
				{
					top_data[item_id * size + j] = (datum.float_data(j)
							- mean[j]) * scale;
				}
			}
		}

		// Cast before dividing: plain integer division would truncate the ratio.
		float lblratio = static_cast<float>(new_height) / out_height;
		vector<int> pts;
		for (int label_i = 0; label_i < datum.label_size(); label_i++)
		{
			pts.push_back( datum.label(label_i) / lblratio );
		}

		int lblLen = key_point_num * out_height * out_width;
		PoseReadLabel(pts, was, top_label + item_id * lblLen, out_height, out_width);

		/*for(int ci = 0; ci < key_point_num; ci ++)
		{
			Mat img(Size(out_height, out_width), CV_8UC3);
			sprintf(ss2,"/home/dragon123/cnncode/showimg/%d_%d_gt.jpg",item_id, ci);
			for(int h = 0; h < out_height; h ++)
				for(int w = 0; w < out_width; w ++)
				{
					int clr = top_label[item_id * lblLen + ci * out_height * out_width + h * out_width + w];
					if(clr <= 0)
					{
						if(clr == 0) for(int c = 0; c < 3; c ++) img.at<cv::Vec3b>(h, w)[c] = 0;
						if(clr < 0) for(int c = 0; c < 3; c ++) img.at<cv::Vec3b>(h, w)[c] = 128;
					}
					else
					{
						for(int c = 0; c < 3; c ++) img.at<cv::Vec3b>(h, w)[c] = 255;
					}
 				}
			imwrite(ss2, img);
		}*/


		// go to the next iter
		layer->lines_id_++;
		if (layer->lines_id_ >= lines_size)
		{
			// We have reached the end. Restart from the first.
			DLOG(INFO) << "Restarting data prefetching from start.";
			layer->lines_id_ = 0;
			if (layer->layer_param_.pose_image_data_param().shuffle())
			{
				layer->ShuffleImages();
			}
		}
	}

	delete[] was;

	return reinterpret_cast<void*>(NULL);
}
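PoseImageDataLayerPrefetch calls a two-argument random(lower, upper) helper whose definition is not shown; it is not part of Caffe or the C standard library. A plausible minimal definition consistent with its uses above (random(scale_lower_bound, scale_upper_bound) and random(0.8, 1.2)); a reproducible variant would draw from layer->PrefetchRand() instead of rand():

#include <cstdlib>

// Hypothetical helper: uniform float in [lo, hi]. Not thread-safe and not
// tied to Caffe's seeded RNG; shown only to make the snippet self-contained.
static float random(float lo, float hi) {
  return lo + (hi - lo) * (static_cast<float>(rand()) / RAND_MAX);
}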
template <typename Dtype>
void* ImageDataLayerPrefetch(void* layer_pointer)
{
	CHECK(layer_pointer);
	ImageDataLayer<Dtype>* layer =
			reinterpret_cast<ImageDataLayer<Dtype>*>(layer_pointer);
	CHECK(layer);
	Datum datum;
	CHECK(layer->prefetch_data_);
	Dtype* top_data = layer->prefetch_data_->mutable_cpu_data();
	Dtype* top_label = layer->prefetch_label_->mutable_cpu_data();
	ImageDataParameter image_data_param =
			layer->layer_param_.image_data_param();
	const Dtype scale = image_data_param.scale();
	const int batch_size = image_data_param.batch_size();
	const int crop_size = image_data_param.crop_size();
	const bool mirror = image_data_param.mirror();
	const int new_height = image_data_param.new_height();
	const int new_width = image_data_param.new_width();

	if (mirror && crop_size == 0)
	{
		LOG(FATAL)
				<< "Current implementation requires mirror and crop_size to be "
				<< "set at the same time.";
	}
	// datum scales
	const int channels = layer->datum_channels_;
	const int height = layer->datum_height_;
	const int width = layer->datum_width_;
	const int size = layer->datum_size_;
	const int lines_size = layer->lines_.size();
	const Dtype* mean = layer->data_mean_.cpu_data();
	for (int item_id = 0; item_id < batch_size; ++item_id)
	{
		// get a blob
		CHECK_GT(lines_size, layer->lines_id_);
		if (!ReadImageToDatum(layer->lines_[layer->lines_id_].first,
				layer->lines_[layer->lines_id_].second, new_height, new_width,
				&datum))
		{
			continue;
		}
		const string& data = datum.data();
		if (crop_size)
		{
			CHECK(data.size()) << "Image cropping only supports uint8 data";
			int h_off, w_off;
			// Random cropping is disabled here ("false &&"): both TRAIN and
			// TEST fall through to the deterministic center crop below.
			if (false && layer->phase_ == Caffe::TRAIN)
			{
				h_off = layer->PrefetchRand() % (height - crop_size);
				w_off = layer->PrefetchRand() % (width - crop_size);
			}
			else
			{
				h_off = (height - crop_size) / 2;
				w_off = (width - crop_size) / 2;
			}
			if (mirror && layer->PrefetchRand() % 2)
			{
				// Copy mirrored version
				for (int c = 0; c < channels; ++c)
				{
					for (int h = 0; h < crop_size; ++h)
					{
						for (int w = 0; w < crop_size; ++w)
						{
							int top_index = ((item_id * channels + c)
									* crop_size + h) * crop_size
									+ (crop_size - 1 - w);
							int data_index = (c * height + h + h_off) * width
									+ w + w_off;
							Dtype datum_element =
									static_cast<Dtype>(static_cast<uint8_t>(data[data_index]));
							top_data[top_index] = (datum_element
									- mean[data_index]) * scale;
						}
					}
				}
			}
			else
			{
				// Normal copy
				for (int c = 0; c < channels; ++c)
				{
					for (int h = 0; h < crop_size; ++h)
					{
						for (int w = 0; w < crop_size; ++w)
						{
							int top_index = ((item_id * channels + c)
									* crop_size + h) * crop_size + w;
							int data_index = (c * height + h + h_off) * width
									+ w + w_off;
							Dtype datum_element =
									static_cast<Dtype>(static_cast<uint8_t>(data[data_index]));
							top_data[top_index] = (datum_element
									- mean[data_index]) * scale;
						}
					}
				}
			}
		}
		else
		{
			// Just copy the whole data
			if (data.size())
			{
				for (int j = 0; j < size; ++j)
				{
					Dtype datum_element =
							static_cast<Dtype>(static_cast<uint8_t>(data[j]));
					top_data[item_id * size + j] = (datum_element - mean[j])
							* scale;
				}
			}
			else
			{
				for (int j = 0; j < size; ++j)
				{
					top_data[item_id * size + j] = (datum.float_data(j)
							- mean[j]) * scale;
				}
			}
		}

		//top_label[item_id] = datum.label();
		for (int label_i = 0; label_i < datum.label_size(); label_i++)
		{
			top_label[item_id * datum.label_size() + label_i] = datum.label(label_i);
		}
		// go to the next iter
		layer->lines_id_++;
		if (layer->lines_id_ >= lines_size)
		{
			// We have reached the end. Restart from the first.
			DLOG(INFO) << "Restarting data prefetching from start.";
			layer->lines_id_ = 0;
			if (layer->layer_param_.image_data_param().shuffle())
			{
				layer->ShuffleImages();
			}
		}
	}

	return reinterpret_cast<void*>(NULL);
}
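Both prefetch routines have the void* (*)(void*) signature of a pthread entry point. In Caffe of this vintage they are started from CreatePrefetchThread and stopped with a join; a sketch of that pairing, assuming thread_ is a pthread_t member of the layer (not the verbatim implementation):

#include <pthread.h>
#include "glog/logging.h"

template <typename Dtype>
void ImageDataLayer<Dtype>::CreatePrefetchThread() {
  // Hand the layer itself to the prefetch function as the void* argument.
  CHECK(!pthread_create(&thread_, NULL, ImageDataLayerPrefetch<Dtype>,
                        static_cast<void*>(this)))
      << "Pthread execution failed.";
}

template <typename Dtype>
void ImageDataLayer<Dtype>::JoinPrefetchThread() {
  // Block until the current batch is fully written into prefetch_data_.
  CHECK(!pthread_join(thread_, NULL)) << "Pthread joining failed.";
}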
	template <typename Dtype>
	void* DataLayerPrefetch(void* layer_pointer) {
		CHECK(layer_pointer);
		DataLayer<Dtype>* layer = static_cast<DataLayer<Dtype>*>(layer_pointer);
		CHECK(layer);
		Datum datum;
		CHECK(layer->prefetch_data_);
		Dtype* top_data = layer->prefetch_data_->mutable_cpu_data();  // image data
		Dtype* top_label = NULL;  // labels; only set when output_labels_
		if (layer->output_labels_) {
			top_label = layer->prefetch_label_->mutable_cpu_data();
		}
		const Dtype scale = layer->layer_param_.data_param().scale();
		const int batch_size = layer->layer_param_.data_param().batch_size();
		const int crop_size = layer->layer_param_.data_param().crop_size();
		const bool mirror = layer->layer_param_.data_param().mirror();

		if (mirror && crop_size == 0) {  // mirror requires crop_size to be set as well
			LOG(FATAL) << "Current implementation requires mirror and crop_size to be "
				<< "set at the same time.";
		}
		// datum scales
		const int channels = layer->datum_channels_;
		const int height = layer->datum_height_;
		const int width = layer->datum_width_;
		const int size = layer->datum_size_;
		const Dtype* mean = layer->data_mean_.cpu_data();
		
		for (int item_id = 0; item_id < batch_size; ++item_id) {
			// the batch holds batch_size items; each loop iteration fetches one

			// get a blob
			CHECK(layer->iter_);
			CHECK(layer->iter_->Valid());
			datum.ParseFromString(layer->iter_->value().ToString());  // parse the next datum via the leveldb iterator
			const string& data = datum.data();

			if (layer->output_labels_) {
				CHECK_EQ(layer->prefetch_label_->channels(), datum.label_size())
					<< "label size is NOT the same.";
			}
			
			if (crop_size) {  // cropping requested
				CHECK(data.size()) << "Image cropping only support uint8 data";
				int h_off, w_off;
				// We only do random crop when we do training.
				if (layer->phase_ == Caffe::TRAIN) {
					h_off = layer->PrefetchRand() % (height - crop_size);
					w_off = layer->PrefetchRand() % (width - crop_size);
				} else {  // deterministic center crop at test time
					h_off = (height - crop_size) / 2;
					w_off = (width - crop_size) / 2;
				}
				// The two branches below are identical except that the mirrored
				// copy reverses the width index: (crop_size - 1 - w) instead of w.
				if (mirror && layer->PrefetchRand() % 2) {
					// Copy mirrored version
					for (int c = 0; c < channels; ++c) {
						for (int h = 0; h < crop_size; ++h) {
							for (int w = 0; w < crop_size; ++w) {
								int top_index = ((item_id * channels + c) * crop_size + h)
									* crop_size + (crop_size - 1 - w);
								int data_index = (c * height + h + h_off) * width + w + w_off;
								Dtype datum_element =
									static_cast<Dtype>(static_cast<uint8_t>(data[data_index]));
								top_data[top_index] = (datum_element - mean[data_index]) * scale;
							}
						}
					}
				} else {  // no mirroring
					// Normal copy
					for (int c = 0; c < channels; ++c) {
						for (int h = 0; h < crop_size; ++h) {
							for (int w = 0; w < crop_size; ++w) {
								int top_index = ((item_id * channels + c) * crop_size + h)
									* crop_size + w;
								int data_index = (c * height + h + h_off) * width + w + w_off;
								Dtype datum_element =
									static_cast<Dtype>(static_cast<uint8_t>(data[data_index]));
								top_data[top_index] = (datum_element - mean[data_index]) * scale;
							}
						}
					}
				}
			} else {
				// we will prefer to use data() first, and then try float_data()
				if (data.size()) {
					for (int j = 0; j < size; ++j) {
						Dtype datum_element =
							static_cast<Dtype>(static_cast<uint8_t>(data[j]));
						top_data[item_id * size + j] = (datum_element - mean[j]) * scale;
					}
				} else {
					for (int j = 0; j < size; ++j) {
						top_data[item_id * size + j] =
							(datum.float_data(j) - mean[j]) * scale;
					}
				}
			}

		
			// Debug aid: dump the raw pixels and labels of the first five
			// items to hex text files (g_item_id: a counter defined elsewhere).
			if (g_item_id++ < 5)
			{
				int label_size = datum.label_size();	
				int image_label = 0;
				for (int j = 0; j < label_size; ++j) {
					if (datum.label(j) == 1)
					{
						image_label = j;
						break;
					}
				}	
				
				char strImgRawDataFile[255] = "";
				sprintf(strImgRawDataFile, "caffe_%s_%05d_%d%s", "train", item_id, image_label, ".txt");
				ofstream fout_image_raw_data(strImgRawDataFile);

				for (int h = 0; h < height; ++h) {
					for (int w = 0; w < width; ++w) {
						int pixel_index = h * width + w;  // row-major: the row stride is width
						Dtype datum_element = static_cast<Dtype>(static_cast<uint8_t>(data[pixel_index]));

						char strHexByte[3] = "";
						sprintf(strHexByte, "%02X", (unsigned char)datum_element);
						fout_image_raw_data<<" "<<strHexByte;
					}
					
					fout_image_raw_data<<endl;
				}
				
				fout_image_raw_data<<endl;
				for (int j = 0; j < label_size; ++j) {
					fout_image_raw_data<<datum.label(j);
				}	

				fout_image_raw_data.close();
			}
		
			if (layer->output_labels_) {
				int label_size = datum.label_size();				
				for (int j = 0; j < label_size; ++j) {
					top_label[item_id * label_size + j] = datum.label(j);
				}				
				//top_label[item_id] = datum.label();
			}
			
			// go to the next iter
			layer->iter_->Next();
			if (!layer->iter_->Valid()) {
				// We have reached the end. Restart from the first.
				DLOG(INFO) << "Restarting data prefetching from start.";
				layer->iter_->SeekToFirst();
			}
		}

		return static_cast<void*>(NULL);
	}
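The mirrored and normal copies above differ only in the width index: (crop_size - 1 - w) versus w. A tiny self-contained check of that arithmetic (the sizes are arbitrary): writing column w through the mirrored index lands exactly where the normal copy would write column crop_size - 1 - w, so each row comes out flipped left to right.

#include <cassert>

int main() {
  const int channels = 3, crop_size = 4, item_id = 2;
  for (int c = 0; c < channels; ++c) {
    for (int h = 0; h < crop_size; ++h) {
      int base = ((item_id * channels + c) * crop_size + h) * crop_size;
      for (int w = 0; w < crop_size; ++w) {
        int normal_at = base + w;                      // normal copy target
        int mirrored_at = base + (crop_size - 1 - w);  // mirrored copy target
        // The two targets always sit symmetrically within the same row.
        assert(normal_at + mirrored_at == 2 * base + crop_size - 1);
      }
    }
  }
  return 0;
}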
	template <typename Dtype>
	void DataLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
		vector<Blob<Dtype>*>* top) {
			CHECK_EQ(bottom.size(), 0) << "Data Layer takes no input blobs.";
			CHECK_GE(top->size(), 1) << "Data Layer takes at least one blob as output.";
			CHECK_LE(top->size(), 2) << "Data Layer takes at most two blobs as output.";
			if (top->size() == 1) {
				output_labels_ = false;
			} else {
				output_labels_ = true;
			}
			
			// Initialize the leveldb
			leveldb::DB* db_temp;
			leveldb::Options options;
			options.create_if_missing = false;
			options.max_open_files = 100;
			
			LOG(INFO) << "Opening leveldb " << this->layer_param_.data_param().source();
			
			leveldb::Status status = leveldb::DB::Open(
				options, this->layer_param_.data_param().source(), &db_temp);
			
			CHECK(status.ok()) << "Failed to open leveldb "
				<< this->layer_param_.data_param().source() << std::endl
				<< status.ToString();
			
			db_.reset(db_temp);
			iter_.reset(db_->NewIterator(leveldb::ReadOptions()));  // all leveldb access goes through this iterator
			iter_->SeekToFirst();
			
			// Check if we would need to randomly skip a few data points
			if (this->layer_param_.data_param().rand_skip()) {
				unsigned int skip = caffe_rng_rand() %
					this->layer_param_.data_param().rand_skip();
				LOG(INFO) << "Skipping first " << skip << " data points.";
				while (skip-- > 0) {
					iter_->Next();
					if (!iter_->Valid()) {
						iter_->SeekToFirst();
					}
				}
			}
			
			// Read a data point, and use it to initialize the top blob.
			// "Initialize" essentially means reshape: the iterator is not
			// advanced afterwards, so this read only extracts the shape
			// (channels etc.); the prefetch thread will read this datum again.
			Datum datum;
			datum.ParseFromString(iter_->value().ToString());  // parse the first datum
			
			// image
			int crop_size = this->layer_param_.data_param().crop_size();
			if (crop_size > 0) {  // cropping requested
				(*top)[0]->Reshape(this->layer_param_.data_param().batch_size(),
					datum.channels(), crop_size, crop_size);
				prefetch_data_.reset(new Blob<Dtype>(
					this->layer_param_.data_param().batch_size(), datum.channels(),
					crop_size, crop_size));
			} else {  // no cropping
				(*top)[0]->Reshape(
					this->layer_param_.data_param().batch_size(), datum.channels(),
					datum.height(), datum.width());
				prefetch_data_.reset(new Blob<Dtype>(
					this->layer_param_.data_param().batch_size(), datum.channels(),
					datum.height(), datum.width()));
			}
			
			LOG(INFO) << "output data size: " << (*top)[0]->num() << ","
				<< (*top)[0]->channels() << "," << (*top)[0]->height() << ","
				<< (*top)[0]->width();

			/*
			// label (stock single-label version, kept for reference)
			if (output_labels_) {
				(*top)[1]->Reshape(this->layer_param_.data_param().batch_size(), 1, 1, 1);
				prefetch_label_.reset(
					new Blob<Dtype>(this->layer_param_.data_param().batch_size(), 1, 1, 1));
			}
			*/
			
			// label (multi-label version: one blob channel per label entry)
			if (output_labels_) {
				(*top)[1]->Reshape(this->layer_param_.data_param().batch_size(),
					datum.label_size(), 1, 1);
				prefetch_label_.reset(
					new Blob<Dtype>(this->layer_param_.data_param().batch_size(), datum.label_size(), 1, 1));
			}
			
			// datum size
			datum_channels_ = datum.channels();
			datum_height_ = datum.height();
			datum_width_ = datum.width();
			datum_size_ = datum.channels() * datum.height() * datum.width();
			CHECK_GT(datum_height_, crop_size);
			CHECK_GT(datum_width_, crop_size);
			
			// check if we want to have mean (i.e. whether to subtract a mean file)
			if (this->layer_param_.data_param().has_mean_file()) {
				const string& mean_file = this->layer_param_.data_param().mean_file();
				LOG(INFO) << "Loading mean file from" << mean_file;
				BlobProto blob_proto;
				ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto);
				data_mean_.FromProto(blob_proto);
				CHECK_EQ(data_mean_.num(), 1);
				CHECK_EQ(data_mean_.channels(), datum_channels_);
				CHECK_EQ(data_mean_.height(), datum_height_);
				CHECK_EQ(data_mean_.width(), datum_width_);
			} else {
				// Simply initialize an all-empty mean.
				data_mean_.Reshape(1, datum_channels_, datum_height_, datum_width_);
			}
			
			// Now, start the prefetch thread. Before calling prefetch, we make two
			// cpu_data calls so that the prefetch thread does not accidentally make
			// simultaneous cudaMalloc calls when the main thread is running. On some
			// GPUs this seems to cause failures if we do not do so.
			prefetch_data_->mutable_cpu_data();
			if (output_labels_) {
				prefetch_label_->mutable_cpu_data();
			}
			
			data_mean_.cpu_data();
			
			DLOG(INFO) << "Initializing prefetch";
			CreatePrefetchThread();
			DLOG(INFO) << "Prefetch initialized.";
	}
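SetUp ends by launching the prefetch thread; the consuming side in this generation of Caffe follows a join/copy/relaunch pattern in Forward_cpu. A sketch under that assumption, using caffe_copy, Caffe's element-wise copy helper:

#include <vector>
using std::vector;

template <typename Dtype>
Dtype DataLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  // Wait until the prefetch thread has finished filling the buffers.
  JoinPrefetchThread();
  caffe_copy(prefetch_data_->count(), prefetch_data_->cpu_data(),
             (*top)[0]->mutable_cpu_data());
  if (output_labels_) {
    caffe_copy(prefetch_label_->count(), prefetch_label_->cpu_data(),
               (*top)[1]->mutable_cpu_data());
  }
  // Start fetching the next batch while the network computes on this one.
  CreatePrefetchThread();
  return Dtype(0.);
}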