Example #1
void MemoryDataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
     vector<Blob<Dtype>*>* top) {
  batch_size_ = this->layer_param_.memory_data_param().batch_size();
  this->datum_channels_ = this->layer_param_.memory_data_param().channels();
  this->datum_height_ = this->layer_param_.memory_data_param().height();
  this->datum_width_ = this->layer_param_.memory_data_param().width();
  this->datum_size_ = this->datum_channels_ * this->datum_height_ *
      this->datum_width_;
  CHECK_GT(batch_size_ * this->datum_size_, 0) <<
      "batch_size, channels, height, and width must be specified and"
      " positive in memory_data_param";
  (*top)[0]->Reshape(batch_size_, this->datum_channels_, this->datum_height_,
                     this->datum_width_);
  (*top)[1]->Reshape(batch_size_, 1, 1, 1);
  added_data_.Reshape(batch_size_, this->datum_channels_, this->datum_height_,
                      this->datum_width_);
  added_label_.Reshape(batch_size_, 1, 1, 1);
  data_ = NULL;
  labels_ = NULL;
  added_data_.cpu_data();
  added_label_.cpu_data();
}
void MemoryDataLayer<Dtype>::AddMatVector(const vector<cv::Mat>& mat_vector,
    const vector<int>& labels) {
  size_t num = mat_vector.size();
  CHECK(!has_new_data_) <<
      "Can't add mat until current data has been consumed.";
  CHECK_GT(num, 0) << "There is no mat to add";
  CHECK_EQ(num % batch_size_, 0) <<
      "The added data must be a multiple of the batch size.";
  added_data_.Reshape(num, channels_, height_, width_);
  added_label_.Reshape(num, 1, 1, 1);
  // Apply data transformations (mirror, scale, crop...)
  this->data_transformer_->Transform(mat_vector, &added_data_);
  // Copy Labels
  Dtype* top_label = added_label_.mutable_cpu_data();
  for (int item_id = 0; item_id < num; ++item_id) {
    top_label[item_id] = labels[item_id];
  }
  // num is a positive multiple of batch_size_ (checked above).
  Dtype* top_data = added_data_.mutable_cpu_data();
  Reset(top_data, top_label, num);
  has_new_data_ = true;
}
Example #3
void LogLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  NeuronLayer<Dtype>::LayerSetUp(bottom, top);
  const Dtype base = this->layer_param_.log_param().base();
  if (base != Dtype(-1)) {
    CHECK_GT(base, 0) << "base must be strictly positive.";
  }
  // If base == -1, interpret the base as e and set log_base = 1 exactly.
  // Otherwise, calculate its log explicitly.
  const Dtype log_base = (base == Dtype(-1)) ? Dtype(1) : log(base);
  CHECK(!isnan(log_base))
      << "NaN result: log(base) = log(" << base << ") = " << log_base;
  CHECK(!isinf(log_base))
      << "Inf result: log(base) = log(" << base << ") = " << log_base;
  base_scale_ = Dtype(1) / log_base;
  CHECK(!isnan(base_scale_))
      << "NaN result: 1/log(base) = 1/log(" << base << ") = " << base_scale_;
  CHECK(!isinf(base_scale_))
      << "Inf result: 1/log(base) = 1/log(" << base << ") = " << base_scale_;
  input_scale_ = this->layer_param_.log_param().scale();
  input_shift_ = this->layer_param_.log_param().shift();
  backward_num_scale_ = input_scale_ / log_base;
}
void GradientChecker<Dtype>::CheckGradientEltwise(
    Layer<Dtype>* layer,
    const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
    layer->SetUp(
        bottom,
        top);
    CHECK_GT(top.size(), 0) << "Eltwise mode requires at least one top blob.";
    const int check_bottom = -1;
    const bool element_wise = true;
    for (int i = 0; i < top.size(); ++i) {
        for (int j = 0; j < top[i]->count(); ++j) {
            CheckGradientSingle(
                layer,
                bottom,
                top,
                check_bottom,
                i,
                j,
                element_wise);
        }
    }
}
// H.264 bitstream without start codes.
sp<MetaData> setAVCFormat(AVCodecContext *avctx)
{
    ALOGV("AVC");

    CHECK_EQ(avctx->codec_id, AV_CODEC_ID_H264);
    CHECK_GT(avctx->extradata_size, 0);
    CHECK_EQ(avctx->extradata[0], 1);  // configurationVersion

    if (avctx->width == 0 || avctx->height == 0) {
        int32_t width, height;
        sp<ABuffer> seqParamSet = new ABuffer(avctx->extradata_size - 8);
        memcpy(seqParamSet->data(), avctx->extradata + 8,
               avctx->extradata_size - 8);
        FindAVCDimensions(seqParamSet, &width, &height);
        avctx->width  = width;
        avctx->height = height;
    }

    sp<MetaData> meta = new MetaData;
    meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
    meta->setData(kKeyAVCC, kTypeAVCC, avctx->extradata, avctx->extradata_size);

    return meta;
}
void ExpectArraysCloseUptoScale(int n,
                                const double* p,
                                const double* q,
                                double tol) {
  CHECK_GT(n, 0);
  CHECK(p);
  CHECK(q);

  double p_max = 0;
  double q_max = 0;
  int p_i = 0;
  int q_i = 0;

  for (int i = 0; i < n; ++i) {
    if (std::abs(p[i]) > p_max) {
      p_max = std::abs(p[i]);
      p_i = i;
    }
    if (std::abs(q[i]) > q_max) {
      q_max = std::abs(q[i]);
      q_i = i;
    }
  }

  // If both arrays are all zeros, they are equal up to scale, but
  // for testing purposes, that's more likely to be an error than
  // a desired result.
  CHECK_NE(p_max, 0.0);
  CHECK_NE(q_max, 0.0);

  for (int i = 0; i < n; ++i) {
    double p_norm = p[i] / p[p_i];
    double q_norm = q[i] / q[q_i];

    EXPECT_NEAR(p_norm, q_norm, tol) << "i=" << i;
  }
}
Example #7
void MemoryDataLayer<Dtype>::AddDatumVector(const vector<Datum>& datum_vector) {
  CHECK(!has_new_data_) <<
      "Can't add Datum when earlier ones haven't been consumed"
      << " by the upper layers";
  size_t num = datum_vector.size();
  CHECK_GT(num, 0) << "There is no datum to add";
  CHECK_LE(num, batch_size_) <<
      "The number of added datum must be no greater than the batch size";

  Dtype* top_data = added_data_.mutable_cpu_data();
  Dtype* top_label = added_label_.mutable_cpu_data();
  for (int batch_item_id = 0; batch_item_id < num; ++batch_item_id) {
    // Apply data transformations (mirror, scale, crop...)
    this->data_transformer_.Transform(
        batch_item_id, datum_vector[batch_item_id], this->mean_, top_data);
    // top_label[batch_item_id] = datum_vector[batch_item_id].label();
    for (int i = 0; i < datum_vector[batch_item_id].label().size(); ++i) {
      top_label[batch_item_id * datum_vector[batch_item_id].label().size() + i] =
          datum_vector[batch_item_id].label(i);
    }
  }
  // Reset always hands over batch_size_ items; num may be smaller (see CHECK_LE above).
  Reset(top_data, top_label, batch_size_);
  has_new_data_ = true;
}
vector<int> DataTransformer<Dtype>::InferBlobShape(const Datum& datum) {

  #ifndef CAFFE_HEADLESS

  if (datum.encoded()) {
    CHECK(!(param_.force_color() && param_.force_gray()))
        << "cannot set both force_color and force_gray";
    cv::Mat cv_img;
    if (param_.force_color() || param_.force_gray()) {
    // If force_color then decode in color otherwise decode in gray.
      cv_img = DecodeDatumToCVMat(datum, param_.force_color());
    } else {
      cv_img = DecodeDatumToCVMatNative(datum);
    }
    // InferBlobShape using the cv::image.
    return InferBlobShape(cv_img);
  }

  #endif

  const int crop_size = param_.crop_size();
  const int datum_channels = datum.channels();
  const int datum_height = datum.height();
  const int datum_width = datum.width();
  // Check dimensions.
  CHECK_GT(datum_channels, 0);
  CHECK_GE(datum_height, crop_size);
  CHECK_GE(datum_width, crop_size);
  // Build BlobShape.
  vector<int> shape(4);
  shape[0] = 1;
  shape[1] = datum_channels;
  shape[2] = (crop_size)? crop_size: datum_height;
  shape[3] = (crop_size)? crop_size: datum_width;
  return shape;
}
void SoftmaxWithLossLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // The forward pass computes the softmax prob values.
  softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
  const Dtype* prob_data = prob_.cpu_data();
  const Dtype* label = bottom[1]->cpu_data();
  int num = prob_.num();
  int dim = prob_.count() / num;
  int spatial_dim = prob_.height() * prob_.width();
  Dtype loss = 0;
  for (int i = 0; i < num; ++i) {
    for (int j = 0; j < spatial_dim; j++) {
      const int label_value = static_cast<int>(label[i * spatial_dim + j]);
      CHECK_GT(dim, label_value * spatial_dim);
      loss -= log(std::max(prob_data[i * dim +
          label_value * spatial_dim + j],
                           Dtype(FLT_MIN)));
    }
  }
  top[0]->mutable_cpu_data()[0] = loss / num / spatial_dim;
  if (top.size() == 2) {
    top[1]->ShareData(prob_);
  }
}
Example #10
void SfDelegate::OnReadCompleted(net::URLRequest *request, int bytes_read) {
    if (bytes_read == -1) {
        MY_LOGI(StringPrintf(
                    "OnReadCompleted, read failed, status %d",
                    request->status().status()).c_str());

        mOwner->onReadCompleted(ERROR_IO);
        return;
    }

    MY_LOGV(StringPrintf("OnReadCompleted, read %d bytes", bytes_read).c_str());

    if (bytes_read < 0) {
        MY_LOGI(StringPrintf(
                    "Read failed w/ status %d\n",
                    request->status().status()).c_str());

        mOwner->onReadCompleted(ERROR_IO);
        return;
    } else if (bytes_read == 0) {
        mAtEOS = true;
        mOwner->onReadCompleted(mNumBytesRead);
        return;
    }

    CHECK_GT(bytes_read, 0);
    CHECK_LE(mNumBytesRead + bytes_read, mNumBytesTotal);

    memcpy((uint8_t *)mDataDestination + mNumBytesRead,
           mReadBuffer->data(),
           bytes_read);

    mNumBytesRead += bytes_read;

    readMore(request);
}
status_t CameraSource::start(MetaData *meta) {
    ALOGV("start");
    CHECK(!mStarted);
    if (mInitCheck != OK) {
        ALOGE("CameraSource is not initialized yet");
        return mInitCheck;
    }

    char value[PROPERTY_VALUE_MAX];
    if (property_get("media.stagefright.record-stats", value, NULL)
        && (!strcmp(value, "1") || !strcasecmp(value, "true"))) {
        mCollectStats = true;
    }

    mStartTimeUs = 0;
    mNumInputBuffers = 0;
    if (meta) {
        int64_t startTimeUs;
        if (meta->findInt64(kKeyTime, &startTimeUs)) {
            mStartTimeUs = startTimeUs;
        }

        int32_t nBuffers;
        if (meta->findInt32(kKeyNumBuffers, &nBuffers)) {
            CHECK_GT(nBuffers, 0);
            mNumInputBuffers = nBuffers;
        }
    }

    status_t err;
    if ((err = startCameraRecording()) == OK) {
        mStarted = true;
    }

    return err;
}
Example #12
    TEST(Logging, CheckOpFail)
    {
        int i1 = 1;
        int i2 = 2;
        unsigned u1 = 3;
        unsigned u2 = 4;
        float f1 = 5.5f;
        float f2 = 6.6f;
        int* p1 = &i1;
        int* p2 = &i2;
        char const * message = "message";

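        // Equal operands must make CHECK_NE throw.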
        EXPECT_THROW(CHECK_NE(i1, i1) << message, Logging::CheckException);
        EXPECT_THROW(CHECK_NE(u1, u1) << message, Logging::CheckException);
        EXPECT_THROW(CHECK_NE(f1, f1) << message, Logging::CheckException);
        EXPECT_THROW(CHECK_NE(p1, p1) << message, Logging::CheckException);

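        // Unequal operands must make CHECK_EQ throw.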
        EXPECT_THROW(CHECK_EQ(i1, i2) << message, Logging::CheckException);
        EXPECT_THROW(CHECK_EQ(u1, u2) << message, Logging::CheckException);
        EXPECT_THROW(CHECK_EQ(f1, f2) << message, Logging::CheckException);
        EXPECT_THROW(CHECK_EQ(p1, p2) << message, Logging::CheckException);

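        // A smaller first operand must make CHECK_GT throw.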
        EXPECT_THROW(CHECK_GT(i1, i2) << message, Logging::CheckException);
        EXPECT_THROW(CHECK_GT(u1, u2) << message, Logging::CheckException);
        EXPECT_THROW(CHECK_GT(f1, f2) << message, Logging::CheckException);

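        // CHECK_GT must also throw when the first operand is not strictly greater (e.g. equal values).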
        EXPECT_THROW(CHECK_GT(i1, i1) << message, Logging::CheckException);
        EXPECT_THROW(CHECK_GT(u1, u2) << message, Logging::CheckException);
        EXPECT_THROW(CHECK_GT(f1, f1) << message, Logging::CheckException);

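        // A larger first operand must make CHECK_LT throw.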
        EXPECT_THROW(CHECK_LT(i2, i1) << message, Logging::CheckException);
        EXPECT_THROW(CHECK_LT(u2, u1) << message, Logging::CheckException);
        EXPECT_THROW(CHECK_LT(f2, f1) << message, Logging::CheckException);

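        // CHECK_LT must also throw when the first operand is not strictly less (e.g. equal values).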
        EXPECT_THROW(CHECK_LT(i1, i1) << message, Logging::CheckException);
        EXPECT_THROW(CHECK_LT(u2, u1) << message, Logging::CheckException);
        EXPECT_THROW(CHECK_LT(f1, f1) << message, Logging::CheckException);
    }
Example #13
void SliceLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
  Layer<Dtype>::SetUp(bottom, top);
  const SliceParameter& slice_param = this->layer_param_.slice_param();
  slice_dim_ = slice_param.slice_dim();
  CHECK_GE(slice_dim_, 0);
  CHECK_LE(slice_dim_, 1) << "Can only slice num and channels";
  slice_point_.clear();
  std::copy(slice_param.slice_point().begin(),
      slice_param.slice_point().end(),
      std::back_inserter(slice_point_));
  count_ = 0;
  num_ = bottom[0]->num();
  channels_ = bottom[0]->channels();
  height_ = bottom[0]->height();
  width_ = bottom[0]->width();
  if (slice_point_.size() != 0) {
    CHECK_EQ(slice_point_.size(), top->size() - 1);
    if (slice_dim_ == 0) {
      CHECK_LE(top->size(), num_);
    } else {
      CHECK_LE(top->size(), channels_);
    }
    int prev = 0;
    vector<int> slices;
    for (int i = 0; i < slice_point_.size(); ++i) {
      CHECK_GT(slice_point_[i], prev);
      slices.push_back(slice_point_[i] - prev);
      prev = slice_point_[i];
    }
    if (slice_dim_ == 0) {
      slices.push_back(num_ - prev);
      for (int i = 0; i < top->size(); ++i) {
        (*top)[i]->Reshape(slices[i], channels_, height_, width_);
        count_ += (*top)[i]->count();
      }
    } else {
      slices.push_back(channels_ - prev);
      for (int i = 0; i < top->size(); ++i) {
        (*top)[i]->Reshape(num_, slices[i], height_, width_);
        count_ += (*top)[i]->count();
      }
    }

  } else {
    if (slice_dim_ == 0) {
      CHECK_EQ(num_ % top->size(), 0)
          << "Number of top blobs (" << top->size() << ") "
          << "should evenly divide input num ( " << num_ << ")";
      num_ = num_ / top->size();
    } else {
      CHECK_EQ(channels_ % top->size(), 0)
          << "Number of top blobs (" << top->size() << ") "
          << "should evenly divide input channels ( " << channels_ << ")";
      channels_ = channels_ / top->size();
    }
    for (int i = 0; i < top->size(); ++i) {
      (*top)[i]->Reshape(num_, channels_, height_, width_);
      count_ += (*top)[i]->count();
    }
  }
  CHECK_EQ(count_, bottom[0]->count());
}
Example #14
void ImageDataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
        const vector<Blob<Dtype>*>& top) {
    const int new_height = this->layer_param_.image_data_param().new_height();
    const int new_width  = this->layer_param_.image_data_param().new_width();
    const bool is_color  = this->layer_param_.image_data_param().is_color();
    string root_folder = this->layer_param_.image_data_param().root_folder();

    CHECK((new_height == 0 && new_width == 0) ||
          (new_height > 0 && new_width > 0)) << "Current implementation requires "
                  "new_height and new_width to be set at the same time.";
    // Read the file with filenames and labels
    const string& source = this->layer_param_.image_data_param().source();
    LOG(INFO) << "Opening file " << source;
    std::ifstream infile(source.c_str());
    string filename;
    int label;
    while (infile >> filename >> label) {
        lines_.push_back(std::make_pair(filename, label));
    }

    if (this->layer_param_.image_data_param().shuffle()) {
        // randomly shuffle data
        LOG(INFO) << "Shuffling data";
        const unsigned int prefetch_rng_seed = caffe_rng_rand();
        prefetch_rng_.reset(new Caffe::RNG(prefetch_rng_seed));
        ShuffleImages();
    }
    LOG(INFO) << "A total of " << lines_.size() << " images.";

    lines_id_ = 0;
    // Check if we would need to randomly skip a few data points
    if (this->layer_param_.image_data_param().rand_skip()) {
        unsigned int skip = caffe_rng_rand() %
                            this->layer_param_.image_data_param().rand_skip();
        LOG(INFO) << "Skipping first " << skip << " data points.";
        CHECK_GT(lines_.size(), skip) << "Not enough points to skip";
        lines_id_ = skip;
    }
    // Read an image, and use it to initialize the top blob.
    cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
                                      new_height, new_width, is_color);
    const int channels = cv_img.channels();
    const int height = cv_img.rows;
    const int width = cv_img.cols;
    // image
    const int crop_size = this->layer_param_.transform_param().crop_size();
    const int batch_size = this->layer_param_.image_data_param().batch_size();
    if (crop_size > 0) {
        top[0]->Reshape(batch_size, channels, crop_size, crop_size);
        this->prefetch_data_.Reshape(batch_size, channels, crop_size, crop_size);
        this->transformed_data_.Reshape(1, channels, crop_size, crop_size);
    } else {
        top[0]->Reshape(batch_size, channels, height, width);
        this->prefetch_data_.Reshape(batch_size, channels, height, width);
        this->transformed_data_.Reshape(1, channels, height, width);
    }
    LOG(INFO) << "output data size: " << top[0]->num() << ","
              << top[0]->channels() << "," << top[0]->height() << ","
              << top[0]->width();
    // label
    top[1]->Reshape(batch_size, 1, 1, 1);
    this->prefetch_label_.Reshape(batch_size, 1, 1, 1);
}
Example #15
void Solver<Dtype>::InitTestNets() {
  CHECK(Caffe::root_solver());
  const bool has_net_param = param_.has_net_param();
  const bool has_net_file = param_.has_net();
  const int num_generic_nets = has_net_param + has_net_file;
  CHECK_LE(num_generic_nets, 1)
      << "Both net_param and net_file may not be specified.";
  const int num_test_net_params = param_.test_net_param_size();
  const int num_test_net_files = param_.test_net_size();
  const int num_test_nets = num_test_net_params + num_test_net_files;
  if (num_generic_nets) {
      CHECK_GE(param_.test_iter_size(), num_test_nets)
          << "test_iter must be specified for each test network.";
  } else {
      CHECK_EQ(param_.test_iter_size(), num_test_nets)
          << "test_iter must be specified for each test network.";
  }
  // If we have a generic net (specified by net or net_param, rather than
  // test_net or test_net_param), we may have an unlimited number of actual
  // test networks -- the actual number is given by the number of remaining
  // test_iters after any test nets specified by test_net_param and/or test_net
  // are evaluated.
  const int num_generic_net_instances = param_.test_iter_size() - num_test_nets;
  const int num_test_net_instances = num_test_nets + num_generic_net_instances;
  if (param_.test_state_size()) {
    CHECK_EQ(param_.test_state_size(), num_test_net_instances)
        << "test_state must be unspecified or specified once per test net.";
  }
  if (num_test_net_instances) {
    CHECK_GT(param_.test_interval(), 0);
  }
  int test_net_id = 0;
  vector<string> sources(num_test_net_instances);
  vector<NetParameter> net_params(num_test_net_instances);
  for (int i = 0; i < num_test_net_params; ++i, ++test_net_id) {
      sources[test_net_id] = "test_net_param";
      net_params[test_net_id].CopyFrom(param_.test_net_param(i));
  }
  for (int i = 0; i < num_test_net_files; ++i, ++test_net_id) {
      sources[test_net_id] = "test_net file: " + param_.test_net(i);
      ReadNetParamsFromTextFileOrDie(param_.test_net(i),
          &net_params[test_net_id]);
  }
  const int remaining_test_nets = param_.test_iter_size() - test_net_id;
  if (has_net_param) {
    for (int i = 0; i < remaining_test_nets; ++i, ++test_net_id) {
      sources[test_net_id] = "net_param";
      net_params[test_net_id].CopyFrom(param_.net_param());
    }
  }
  if (has_net_file) {
    for (int i = 0; i < remaining_test_nets; ++i, ++test_net_id) {
      sources[test_net_id] = "net file: " + param_.net();
      ReadNetParamsFromTextFileOrDie(param_.net(), &net_params[test_net_id]);
    }
  }
  test_nets_.resize(num_test_net_instances);
  for (int i = 0; i < num_test_net_instances; ++i) {
    // Set the correct NetState.  We start with the solver defaults (lowest
    // precedence); then, merge in any NetState specified by the net_param
    // itself; finally, merge in any NetState specified by the test_state
    // (highest precedence).
    NetState net_state;
    net_state.set_phase(TEST);
    net_state.MergeFrom(net_params[i].state());
    if (param_.test_state_size()) {
      net_state.MergeFrom(param_.test_state(i));
    }
    net_params[i].mutable_state()->CopyFrom(net_state);
    LOG(INFO)
        << "Creating test net (#" << i << ") specified by " << sources[i];
    if (Caffe::root_solver()) {
      test_nets_[i].reset(new Net<Dtype>(net_params[i]));
    } else {
      test_nets_[i].reset(new Net<Dtype>(net_params[i],
          root_solver_->test_nets_[i].get()));
    }
    test_nets_[i]->set_debug_info(param_.debug_info());
  }
}
Example #16
void MKLPoolingLayer<Dtype>::Init(
      const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  PoolingParameter pool_param = this->layer_param_.pooling_param();

  channels_ = bottom[0]->channels();
  height_ = bottom[0]->height();
  width_ = bottom[0]->width();
  num_ = bottom[0]->num();

  if (pool_param.global_pooling()) {
    CHECK(!(pool_param.has_kernel_size() ||
      pool_param.has_kernel_h() || pool_param.has_kernel_w()))
      << "With Global_pooling: true Filter size cannot specified";
  } else {
    CHECK(!pool_param.has_kernel_size() !=
      !(pool_param.has_kernel_h() && pool_param.has_kernel_w()))
      << "Filter size is kernel_size OR kernel_h and kernel_w; not both";
    CHECK(pool_param.has_kernel_size() ||
      (pool_param.has_kernel_h() && pool_param.has_kernel_w()))
      << "For non-square filters both kernel_h and kernel_w are required.";
  }
  CHECK((!pool_param.has_pad() && pool_param.has_pad_h()
      && pool_param.has_pad_w())
      || (!pool_param.has_pad_h() && !pool_param.has_pad_w()))
      << "pad is pad OR pad_h and pad_w are required.";
  CHECK((!pool_param.has_stride() && pool_param.has_stride_h()
      && pool_param.has_stride_w())
      || (!pool_param.has_stride_h() && !pool_param.has_stride_w()))
      << "Stride is stride OR stride_h and stride_w are required.";
  global_pooling_ = pool_param.global_pooling();
  if (global_pooling_) {
    kernel_h_ = bottom[0]->height();
    kernel_w_ = bottom[0]->width();
  } else {
    if (pool_param.has_kernel_size()) {
      kernel_h_ = kernel_w_ = pool_param.kernel_size();
    } else {
      kernel_h_ = pool_param.kernel_h();
      kernel_w_ = pool_param.kernel_w();
    }
  }
  CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero.";
  CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero.";
  if (!pool_param.has_pad_h()) {
    pad_h_ = pad_w_ = pool_param.pad();
  } else {
    pad_h_ = pool_param.pad_h();
    pad_w_ = pool_param.pad_w();
  }
  if (!pool_param.has_stride_h()) {
    stride_h_ = stride_w_ = pool_param.stride();
  } else {
    stride_h_ = pool_param.stride_h();
    stride_w_ = pool_param.stride_w();
  }
  if (global_pooling_) {
    CHECK(pad_h_ == 0 && pad_w_ == 0 && stride_h_ == 1 && stride_w_ == 1)
      << "With Global_pooling: true; only pad = 0 and stride = 1";
  }
  if (pad_h_ != 0 || pad_w_ != 0) {
    CHECK(this->layer_param_.pooling_param().pool()
        == PoolingParameter_PoolMethod_AVE
        || this->layer_param_.pooling_param().pool()
        == PoolingParameter_PoolMethod_MAX)
        << "Padding implemented only for average and max pooling.";
    CHECK_LT(pad_h_, kernel_h_);
    CHECK_LT(pad_w_, kernel_w_);
  }

  pooled_height_ = static_cast<int>(ceil(static_cast<float>(
      bottom[0]->height() + 2 * pad_h_ - kernel_h_) / stride_h_)) + 1;
  pooled_width_ = static_cast<int>(ceil(static_cast<float>(
      bottom[0]->width() + 2 * pad_w_ - kernel_w_) / stride_w_)) + 1;
  if (pad_h_ || pad_w_) {
    // If we have padding, ensure that the last pooling starts strictly
    // inside the image (instead of at the padding); otherwise clip the last.
    if ((pooled_height_ - 1) * stride_h_ >= bottom[0]->height() + pad_h_) {
      --pooled_height_;
    }
    if ((pooled_width_ - 1) * stride_w_ >= bottom[0]->width() + pad_w_) {
      --pooled_width_;
    }
    CHECK_LT((pooled_height_ - 1) * stride_h_, bottom[0]->height() + pad_h_);
    CHECK_LT((pooled_width_ - 1) * stride_w_, bottom[0]->width() + pad_w_);
  }

  top[0]->Reshape(bottom[0]->num(), channels_, pooled_height_,
      pooled_width_);
  if (top.size() > 1) {
    (reinterpret_cast<Blob<size_t>*>(top[1]))->Reshape(bottom[0]->num(),
            channels_, pooled_height_, pooled_width_);
  }
  // If max/min/avg pooling, we will initialize the vector index part.
  if (top.size() == 1) {
    max_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_,
            pooled_width_);
  }
  // If stochastic pooling, we will initialize the random index part.
  if (this->layer_param_.pooling_param().pool() ==
      PoolingParameter_PoolMethod_STOCHASTIC) {
    rand_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_,
      pooled_width_);
  }

  size_t dim = 4;
  size_t src_sizes[4], src_strides[4];
  size_t dst_sizes[4], dst_strides[4];

  src_sizes[0] = bottom[0]->width();
  src_sizes[1] = bottom[0]->height();
  src_sizes[2] = bottom[0]->channels();
  src_sizes[3] = bottom[0]->num();

  src_strides[0] = 1;
  src_strides[1] = src_sizes[0];
  src_strides[2] = src_sizes[0]*src_sizes[1];
  src_strides[3] = src_sizes[0]*src_sizes[1]*src_sizes[2];

  dst_sizes[0] = pooled_width_;
  dst_sizes[1] = pooled_height_;
  dst_sizes[2] = src_sizes[2];
  dst_sizes[3] = src_sizes[3];

  dst_strides[0] = 1;
  dst_strides[1] = dst_sizes[0];
  dst_strides[2] = dst_sizes[0]*dst_sizes[1];
  dst_strides[3] = dst_sizes[0]*dst_sizes[1]*dst_sizes[2];

  src_offset[0] = -pad_w_;
  src_offset[1] = -pad_h_;

  kernel_stride[0] = stride_w_;
  kernel_stride[1] = stride_h_;

  kernel_size[0] = kernel_w_;
  kernel_size[1] = kernel_h_;

  // Names are for debugging only
  fwd_bottom_data->name = "fwd_bottom_data   @ " + this->layer_param_.name();
  fwd_top_data->name =    "fwd_top_data      @ " + this->layer_param_.name();
  bwd_top_diff->name =    "bwd_top_diff      @ " + this->layer_param_.name();
  bwd_bottom_diff->name = "bwd_bottom_diff   @ " + this->layer_param_.name();

  fwd_bottom_data->create_user_layout(dim, src_sizes, src_strides, false);
  fwd_top_data   ->create_user_layout(dim, dst_sizes, dst_strides, false);
  bwd_bottom_diff->create_user_layout(dim, src_sizes, src_strides, false);
  bwd_top_diff   ->create_user_layout(dim, dst_sizes, dst_strides, false);
  // Primitives will be allocated during the first fwd pass
  dnnDelete<Dtype>(poolingFwd);
  dnnDelete<Dtype>(poolingBwd);

#ifdef USE_MLSL

  DataType dt = (sizeof(Dtype) == 4)? DT_FLOAT : DT_DOUBLE;
  ComputeOpRegInfo *myRegInfo;
  myRegInfo = new ComputeOpRegInfo(COMP_OP_TYPE_POOL);
  myRegInfo->SetName(this->layer_param_.name().c_str());
  int channels_ = bottom[0]->channels();
  for(int i=0; i<bottom.size(); i++)
  {
      int ic = bottom[i]->channels();
      int iw = bottom[i]->width();
      int ih = bottom[i]->height();
      myRegInfo->AddInputFeatureMap(ic, iw*ih, dt);
  }

  for(int i=0; i<top.size(); i++)
  {
      int oc = channels_;
      int ow = pooled_width_;
      int oh = pooled_height_;
      myRegInfo->AddOutputFeatureMap(oc, ow*oh, dt);
  }

  myRegInfo->Validate();
  this->layerOp = new ComputeOp(myRegInfo, caffe::internode::data_parallelism);
  delete myRegInfo;

#endif

}
Example #17
void GraphicBufferSource::codecBufferEmptied(OMX_BUFFERHEADERTYPE* header, int fenceFd) {
    Mutex::Autolock autoLock(mMutex);
    if (!mExecuting) {
        return;
    }

    int cbi = findMatchingCodecBuffer_l(header);
    if (cbi < 0) {
        // This should never happen.
        ALOGE("codecBufferEmptied: buffer not recognized (h=%p)", header);
        if (fenceFd >= 0) {
            ::close(fenceFd);
        }
        return;
    }

    ALOGV("codecBufferEmptied h=%p size=%" PRIu32 " filled=%" PRIu32 " p=%p",
            header, header->nAllocLen, header->nFilledLen,
            header->pBuffer);
    CodecBuffer& codecBuffer(mCodecBuffers.editItemAt(cbi));

    // header->nFilledLen may not be the original value, so we can't compare
    // that to zero to see if this was the EOS buffer.  Instead we just
    // see if the GraphicBuffer reference was null, which should only ever
    // happen for EOS.
    if (codecBuffer.mGraphicBuffer == NULL) {
        if (!(mEndOfStream && mEndOfStreamSent)) {
            // This can happen when broken code sends us the same buffer
            // twice in a row.
            ALOGE("ERROR: codecBufferEmptied on non-EOS null buffer "
                    "(buffer emptied twice?)");
        }
        // No GraphicBuffer to deal with, no additional input or output is
        // expected, so just return.
        if (fenceFd >= 0) {
            ::close(fenceFd);
        }
        return;
    }

    if (EXTRA_CHECK && header->nAllocLen >= sizeof(MetadataBufferType)) {
        // Pull the graphic buffer handle back out of the buffer, and confirm
        // that it matches expectations.
        OMX_U8* data = header->pBuffer;
        MetadataBufferType type = *(MetadataBufferType *)data;
        if (type == kMetadataBufferTypeGrallocSource
                && header->nAllocLen >= sizeof(VideoGrallocMetadata)) {
            VideoGrallocMetadata &grallocMeta = *(VideoGrallocMetadata *)data;
            if (grallocMeta.pHandle != codecBuffer.mGraphicBuffer->handle) {
                // should never happen
                ALOGE("codecBufferEmptied: buffer's handle is %p, expected %p",
                        grallocMeta.pHandle, codecBuffer.mGraphicBuffer->handle);
                CHECK(!"codecBufferEmptied: mismatched buffer");
            }
        } else if (type == kMetadataBufferTypeANWBuffer
                && header->nAllocLen >= sizeof(VideoNativeMetadata)) {
            VideoNativeMetadata &nativeMeta = *(VideoNativeMetadata *)data;
            if (nativeMeta.pBuffer != codecBuffer.mGraphicBuffer->getNativeBuffer()) {
                // should never happen
                ALOGE("codecBufferEmptied: buffer is %p, expected %p",
                        nativeMeta.pBuffer, codecBuffer.mGraphicBuffer->getNativeBuffer());
                CHECK(!"codecBufferEmptied: mismatched buffer");
            }
        }
    }

    // Find matching entry in our cached copy of the BufferQueue slots.
    // If we find a match, release that slot.  If we don't, the BufferQueue
    // has dropped that GraphicBuffer, and there's nothing for us to release.
    int id = codecBuffer.mBuf;
    sp<Fence> fence = new Fence(fenceFd);
    if (mBufferSlot[id] != NULL &&
        mBufferSlot[id]->handle == codecBuffer.mGraphicBuffer->handle) {
        ALOGV("cbi %d matches bq slot %d, handle=%p",
                cbi, id, mBufferSlot[id]->handle);

        if (id == mLatestBufferId) {
            CHECK_GT(mLatestBufferUseCount--, 0);
        } else {
            releaseBuffer(id, codecBuffer.mFrameNumber, mBufferSlot[id], fence);
        }
    } else {
        ALOGV("codecBufferEmptied: no match for emptied buffer in cbi %d",
                cbi);
        // we will not reuse codec buffer, so there is no need to wait for fence
    }

    // Mark the codec buffer as available by clearing the GraphicBuffer ref.
    codecBuffer.mGraphicBuffer = NULL;

    if (mNumFramesAvailable) {
        // Fill this codec buffer.
        CHECK(!mEndOfStreamSent);
        ALOGV("buffer freed, %zu frames avail (eos=%d)",
                mNumFramesAvailable, mEndOfStream);
        fillCodecBuffer_l();
    } else if (mEndOfStream) {
        // No frames available, but EOS is pending, so use this buffer to
        // send that.
        ALOGV("buffer freed, EOS pending");
        submitEndOfInputStream_l();
    } else if (mRepeatBufferDeferred) {
        bool success = repeatLatestBuffer_l();
        if (success) {
            ALOGV("deferred repeatLatestBuffer_l SUCCESS");
        } else {
            ALOGV("deferred repeatLatestBuffer_l FAILURE");
        }
        mRepeatBufferDeferred = false;
    }

    return;
}
Example #18
	void NonLocalLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
		const vector<Blob<Dtype>*>& top)
	{
		CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, "
			<< "corresponding to (num, channels, height, width)";
		// Configure the kernel size, padding, stride, and inputs.
		ConvolutionParameter conv_param = this->layer_param_.convolution_param();
		/*CHECK(!conv_param.has_kernel_size() !=
			!(conv_param.has_kernel_h() && conv_param.has_kernel_w()))
			<< "Filter size is kernel_size OR kernel_h and kernel_w; not both";
		CHECK(conv_param.has_kernel_size() ||
			(conv_param.has_kernel_h() && conv_param.has_kernel_w()))
			<< "For non-square filters both kernel_h and kernel_w are required.";
		CHECK((!conv_param.has_pad() && conv_param.has_pad_h()
			&& conv_param.has_pad_w())
			|| (!conv_param.has_pad_h() && !conv_param.has_pad_w()))
			<< "pad is pad OR pad_h and pad_w are required.";
		CHECK((!conv_param.has_stride() && conv_param.has_stride_h()
			&& conv_param.has_stride_w())
			|| (!conv_param.has_stride_h() && !conv_param.has_stride_w()))
			<< "Stride is stride OR stride_h and stride_w are required.";
		if (conv_param.has_kernel_size()) {
			kernel_h_ = kernel_w_ = conv_param.kernel_size();
		}
		else {
			kernel_h_ = conv_param.kernel_h();
			kernel_w_ = conv_param.kernel_w();
		}
		CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero.";
		CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero.";
		if (!conv_param.has_pad_h()) {
			pad_h_ = pad_w_ = conv_param.pad();
		}
		else {
			pad_h_ = conv_param.pad_h();
			pad_w_ = conv_param.pad_w();
		}
		if (!conv_param.has_stride_h()) {
			stride_h_ = stride_w_ = conv_param.stride();
		}
		else {
			stride_h_ = conv_param.stride_h();
			stride_w_ = conv_param.stride_w();
		}*/
		//kernel
		if (conv_param.has_kernel_h() || conv_param.has_kernel_w()) {
			CHECK_EQ(0, conv_param.kernel_size_size())
				<< "Either kernel_size or kernel_h/w should be specified; not both.";
			kernel_h_ = conv_param.kernel_h();
			kernel_w_ = conv_param.kernel_w();
		}
		else {
			const int num_kernel_dims = conv_param.kernel_size_size();
			CHECK(num_kernel_dims == 1)
				<< "kernel_size must be specified once, or once per spatial dimension "
				<< "(kernel_size specified " << num_kernel_dims << " times; ";

			kernel_h_ = kernel_w_ = conv_param.kernel_size(0);
		}
		CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero.";
		CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero.";
		//stride
		if (conv_param.has_stride_h() || conv_param.has_stride_w()) {
			CHECK_EQ(0, conv_param.stride_size())
				<< "Either stride or stride_h/w should be specified; not both.";
			stride_h_ = conv_param.stride_h();
			stride_w_ = conv_param.stride_w();
		}
		else {
			const int num_stride_dims = conv_param.stride_size();
			CHECK(num_stride_dims == 0 || num_stride_dims == 1)
				<< "stride must be specified once, or once per spatial dimension "
				<< "(stride specified " << num_stride_dims << " times; ";
			const int kDefaultStride = 1;
			stride_h_ = stride_w_ = (num_stride_dims == 0) ? kDefaultStride : conv_param.stride(0);
		}
		//pad
		if (conv_param.has_pad_h() || conv_param.has_pad_w()) {
			CHECK_EQ(0, conv_param.pad_size())
				<< "Either pad or pad_h/w should be specified; not both.";
			pad_h_ = conv_param.pad_h();
			pad_w_ = conv_param.pad_w();
		}
		else {
			const int num_pad_dims = conv_param.pad_size();
			CHECK(num_pad_dims == 0 || num_pad_dims == 1)
				<< "pad must be specified once, or once per spatial dimension "
				<< "(pad specified " << num_pad_dims << " times; ";
			const int kDefaultPad = 0;
			pad_h_ = pad_w_ = (num_pad_dims == 0) ? kDefaultPad : conv_param.pad(0);
		}

		// Special case: im2col is the identity for 1x1 convolution with stride 1
		// and no padding, so flag for skipping the buffer and transformation.
		is_1x1_ = kernel_w_ == 1 && kernel_h_ == 1
			&& stride_h_ == 1 && stride_w_ == 1 && pad_h_ == 0 && pad_w_ == 0;
		// Configure output channels and groups.
		channels_ = bottom[0]->channels();

		num_output_ = channels_ * kernel_h_ * kernel_w_;

		num_ = bottom[0]->num();
		height_ = bottom[0]->height();
		width_ = bottom[0]->width();

		height_out_ = (height_ + 2 * pad_h_ - kernel_h_)
			/ stride_h_ + 1;
		width_out_ = (width_ + 2 * pad_w_ - kernel_w_)
			/ stride_w_ + 1;

		LayerParameter split_param;
		split_layer_0.reset(new SplitLayer<Dtype>(split_param));
		split_0_top_vec.clear();
		split_0_top_vec.push_back(&split_0_top_0);
		split_0_top_vec.push_back(&split_0_top_1);
		split_layer_0->SetUp(bottom, split_0_top_vec);

		img2col_0_top.Reshape(num_, channels_*kernel_h_*kernel_w_, height_out_, width_out_);
		img2col_1_top.Reshape(num_, channels_*kernel_h_*kernel_w_, height_out_, width_out_);

		split_layer_1.reset(new SplitLayer<Dtype>(split_param));
		split_1_bottom_vec.clear();
		split_1_top_vec.clear();
		split_1_bottom_vec.push_back(&img2col_0_top);
		split_1_top_vec.push_back(&split_1_top_0);
		split_1_top_vec.push_back(&split_1_top_1);
		split_layer_1->SetUp(split_1_bottom_vec, split_1_top_vec);

		euclidean_bottom_0.Reshape(num_, channels_, kernel_h_*kernel_w_, height_out_*width_out_);
		euclidean_bottom_1.Reshape(num_, channels_, kernel_h_*kernel_w_, height_out_*width_out_);
		euclidean_bottom_0.ShareData(*split_1_top_vec[1]);
		euclidean_bottom_0.ShareDiff(*split_1_top_vec[1]);
		euclidean_bottom_1.ShareData(img2col_1_top);
		euclidean_bottom_1.ShareDiff(img2col_1_top);

		LayerParameter euclidean_param;
		euclidean_layer.reset(new EuclideanLayer<Dtype>(euclidean_param));
		euclidean_bottom_vec.clear();
		euclidean_top_vec.clear();
		euclidean_bottom_vec.push_back(&euclidean_bottom_0);
		euclidean_bottom_vec.push_back(&euclidean_bottom_1);
		euclidean_top_vec.push_back(&euclidean_top);
		euclidean_layer->SetUp(euclidean_bottom_vec, euclidean_top_vec);

		this->blobs_.resize(1);
		this->blobs_[0].reset(new Blob<Dtype>(1, 1, 1, 1));
		smooth_threshold_layer.reset(new SmoothThresholdLayer<Dtype>(this->layer_param()));
		smooth_bottom_vec.clear();
		smooth_top_vec.clear();
		smooth_bottom_vec.push_back(euclidean_top_vec[0]);
		smooth_top_vec.push_back(&smooth_top);
		smooth_threshold_layer->SetUp(smooth_bottom_vec, smooth_top_vec);
		this->blobs_[0]->ShareData(*smooth_threshold_layer->blobs()[0]);
		this->blobs_[0]->ShareDiff(*smooth_threshold_layer->blobs()[0]);

		split_layer_3.reset(new SplitLayer<Dtype>(split_param));
		split_3_bottom_vec.clear();
		split_3_top_vec.clear();
		split_3_bottom_vec.push_back(smooth_top_vec[0]);
		split_3_top_vec.push_back(&split_3_top_0);
		split_3_top_vec.push_back(&split_3_top_1);
		split_layer_3->SetUp(split_3_bottom_vec, split_3_top_vec);

		LayerParameter normalize_param;
		normalize_layer.reset(new NormalizeLayer<Dtype>(normalize_param));
		normalize_bottom_vec.clear();
		normalize_top_vec.clear();
		normalize_bottom.Reshape(split_3_top_vec[1]->num()*split_3_top_vec[1]->channels(),
			split_3_top_vec[1]->height(), 1, split_3_top_vec[1]->width());
		//normalize_bottom_vec.push_back(split_3_top_vec[1]);
		normalize_bottom_vec.push_back(&normalize_bottom);
		normalize_top_vec.push_back(&normalize_top);
		normalize_layer->SetUp(normalize_bottom_vec, normalize_top_vec);

		split_layer_2.reset(new SplitLayer<Dtype>(split_param));
		split_2_bottom_vec.clear();
		split_2_top_vec.clear();
		split_2_bottom_vec.push_back(&split_2_bottom);
		split_2_bottom_vec[0]->ReshapeLike(*split_1_top_vec[0]);
		split_2_top_vec.push_back(&split_2_top_0);
		split_2_top_vec.push_back(top[0]);
		split_layer_2->SetUp(split_2_bottom_vec, split_2_top_vec);

		LayerParameter eltwise_param;
		eltwise_param.mutable_eltwise_param()->set_operation(EltwiseParameter_EltwiseOp_PROD);
		eltwise_layer.reset(new EltwiseLayer<Dtype>(eltwise_param));
		eltwise_bottom_vec.clear();
		eltwise_top_vec.clear();
		eltwise_bottom_vec.push_back(split_1_top_vec[0]);
		eltwise_bottom_vec.push_back(split_2_top_vec[0]);
		if (top.size() == 3)
		{
			eltwise_top_vec.push_back(top[2]);
			eltwise_layer->SetUp(eltwise_bottom_vec, eltwise_top_vec);
		}
		else
			eltwise_top_vec.push_back(&mask_top);

	}
Example #19
void Im2colLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  ConvolutionParameter conv_param = this->layer_param_.convolution_param();
  force_nd_im2col_ = conv_param.force_nd_im2col();
  const int input_num_dims = bottom[0]->shape().size();
  channel_axis_ = bottom[0]->CanonicalAxisIndex(conv_param.axis());
  const int first_spatial_dim = channel_axis_ + 1;
  num_spatial_axes_ = input_num_dims - first_spatial_dim;
  CHECK_GE(num_spatial_axes_, 1);
  vector<int> dim_blob_shape(1, num_spatial_axes_);
  // Setup filter kernel dimensions (kernel_shape_).
  kernel_shape_.Reshape(dim_blob_shape);
  int* kernel_shape_data = kernel_shape_.mutable_cpu_data();
  if (conv_param.has_kernel_h() || conv_param.has_kernel_w()) {
    CHECK_EQ(num_spatial_axes_, 2)
        << "kernel_h & kernel_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.kernel_size_size())
        << "Either kernel_size or kernel_h/w should be specified; not both.";
    kernel_shape_data[0] = conv_param.kernel_h();
    kernel_shape_data[1] = conv_param.kernel_w();
  } else {
    const int num_kernel_dims = conv_param.kernel_size_size();
    CHECK(num_kernel_dims == 1 || num_kernel_dims == num_spatial_axes_)
        << "kernel_size must be specified once, or once per spatial dimension "
        << "(kernel_size specified " << num_kernel_dims << " times; "
        << num_spatial_axes_ << " spatial dims);";
    for (int i = 0; i < num_spatial_axes_; ++i) {
      kernel_shape_data[i] =
          conv_param.kernel_size((num_kernel_dims == 1) ? 0 : i);
    }
  }
  for (int i = 0; i < num_spatial_axes_; ++i) {
    CHECK_GT(kernel_shape_data[i], 0) << "Filter dimensions must be nonzero.";
  }
  // Setup stride dimensions (stride_).
  stride_.Reshape(dim_blob_shape);
  int* stride_data = stride_.mutable_cpu_data();
  if (conv_param.has_stride_h() || conv_param.has_stride_w()) {
    CHECK_EQ(num_spatial_axes_, 2)
        << "stride_h & stride_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.stride_size())
        << "Either stride or stride_h/w should be specified; not both.";
    stride_data[0] = conv_param.stride_h();
    stride_data[1] = conv_param.stride_w();
  } else {
    const int num_stride_dims = conv_param.stride_size();
    CHECK(num_stride_dims == 0 || num_stride_dims == 1 ||
          num_stride_dims == num_spatial_axes_)
        << "stride must be specified once, or once per spatial dimension "
        << "(stride specified " << num_stride_dims << " times; "
        << num_spatial_axes_ << " spatial dims);";
    const int kDefaultStride = 1;
    for (int i = 0; i < num_spatial_axes_; ++i) {
      stride_data[i] = (num_stride_dims == 0) ? kDefaultStride :
          conv_param.stride((num_stride_dims == 1) ? 0 : i);
      CHECK_GT(stride_data[i], 0) << "Stride dimensions must be nonzero.";
    }
  }
  // Setup pad dimensions (pad_).
  pad_.Reshape(dim_blob_shape);
  int* pad_data = pad_.mutable_cpu_data();
  if (conv_param.has_pad_h() || conv_param.has_pad_w()) {
    CHECK_EQ(num_spatial_axes_, 2)
        << "pad_h & pad_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.pad_size())
        << "Either pad or pad_h/w should be specified; not both.";
    pad_data[0] = conv_param.pad_h();
    pad_data[1] = conv_param.pad_w();
  } else {
    const int num_pad_dims = conv_param.pad_size();
    CHECK(num_pad_dims == 0 || num_pad_dims == 1 ||
          num_pad_dims == num_spatial_axes_)
        << "pad must be specified once, or once per spatial dimension "
        << "(pad specified " << num_pad_dims << " times; "
        << num_spatial_axes_ << " spatial dims);";
    const int kDefaultPad = 0;
    for (int i = 0; i < num_spatial_axes_; ++i) {
      pad_data[i] = (num_pad_dims == 0) ? kDefaultPad :
          conv_param.pad((num_pad_dims == 1) ? 0 : i);
    }
  }
}
Example #20
void DataTransformer<Dtype>::Transform(const cv::Mat& cv_img,
                                       Blob<Dtype>* transformed_blob, bool fixed_trans) {
  const int crop_size = param_.crop_size();
  const int img_channels = cv_img.channels();
  const int img_height = cv_img.rows;
  const int img_width = cv_img.cols;

  // Check dimensions.
  const int channels = transformed_blob->channels();
  const int height = transformed_blob->height();
  const int width = transformed_blob->width();
  const int num = transformed_blob->num();

  CHECK_EQ(channels, img_channels);
  CHECK_LE(height, img_height);
  CHECK_LE(width, img_width);
  CHECK_GE(num, 1);

  CHECK(cv_img.depth() == CV_8U) << "Image data type must be unsigned byte";

  const Dtype scale = param_.scale();
  const bool do_mirror = ( param_.mirror() && Rand(2) ) || (fixed_trans && apply_mirror_);
  if ( !fixed_trans ) apply_mirror_ = do_mirror;
  const bool has_mean_file = param_.has_mean_file();
  const bool has_mean_values = mean_values_.size() > 0;

  CHECK_GT(img_channels, 0);
  CHECK_GE(img_height, crop_size);
  CHECK_GE(img_width, crop_size);

  Dtype* mean = NULL;
  if (has_mean_file) {
    CHECK_EQ(img_channels, data_mean_.channels());
    CHECK_EQ(img_height, data_mean_.height());
    CHECK_EQ(img_width, data_mean_.width());
    mean = data_mean_.mutable_cpu_data();
  }
  if (has_mean_values) {
    CHECK(mean_values_.size() == 1 || mean_values_.size() == img_channels) <<
     "Specify either 1 mean_value or as many as channels: " << img_channels;
    if (img_channels > 1 && mean_values_.size() == 1) {
      // Replicate the mean_value for simplicity
      for (int c = 1; c < img_channels; ++c) {
        mean_values_.push_back(mean_values_[0]);
      }
    }
  }

  int h_off = 0;
  int w_off = 0;
  cv::Mat cv_cropped_img = cv_img;
  if (crop_size) {
    CHECK_EQ(crop_size, height);
    CHECK_EQ(crop_size, width);
    // We only do random crop when we do training.
    if (phase_ == TRAIN) {
      h_off = Rand(img_height - crop_size + 1);
      w_off = Rand(img_width - crop_size + 1);
    } else {
      h_off = (img_height - crop_size) / 2;
      w_off = (img_width - crop_size) / 2;
    }
    if ( fixed_trans ) {
        h_off = offset_h_;
        w_off = offset_w_;
    } else {
        offset_h_ = h_off;
        offset_w_ = w_off;
    }
    cv::Rect roi(w_off, h_off, crop_size, crop_size);
    cv_cropped_img = cv_img(roi);
  } else {
    CHECK_EQ(img_height, height);
    CHECK_EQ(img_width, width);
  }

  CHECK(cv_cropped_img.data);

  Dtype* transformed_data = transformed_blob->mutable_cpu_data();
  int top_index;
  for (int h = 0; h < height; ++h) {
    const uchar* ptr = cv_cropped_img.ptr<uchar>(h);
    int img_index = 0;
    for (int w = 0; w < width; ++w) {
      for (int c = 0; c < img_channels; ++c) {
        if (do_mirror) {
          top_index = (c * height + h) * width + (width - 1 - w);
        } else {
          top_index = (c * height + h) * width + w;
        }
        // int top_index = (c * height + h) * width + w;
        Dtype pixel = static_cast<Dtype>(ptr[img_index++]);
        if (has_mean_file) {
          int mean_index = (c * img_height + h_off + h) * img_width + w_off + w;
          transformed_data[top_index] =
            (pixel - mean[mean_index]) * scale;
        } else {
          if (has_mean_values) {
            transformed_data[top_index] =
              (pixel - mean_values_[c]) * scale;
          } else {
            transformed_data[top_index] = pixel * scale;
          }
        }
      }
    }
  }
}
Example #21
  std::string evaluate(const Model& model, 
                       const Data& validation_data,
                       const Data& train_data = Data()) const {

    CHECK_GT(validation_data.size(), 0);
    auto validation_user_itemset = validation_data.get_feature_to_set_hashtable(0, 1);

    std::unordered_map<size_t, std::unordered_set<size_t>> train_user_itemset;
    if (train_data.size() != 0) {
      train_user_itemset = train_data.get_feature_to_set_hashtable(0, 1);
    }
    
    size_t num_users = train_data.feature_group_total_dimension(0);
    CHECK_EQ(num_users, train_user_itemset.size());

    std::vector<std::vector<double>> user_rets(num_users);
    parallel_for(0, num_users, [&](size_t uid) {
                  user_rets[uid] = std::vector<double>(8, 0.);
                });
    dynamic_parallel_for(0, num_users, [&](size_t uid) {
    //for (size_t uid = 0; uid < num_users; ++uid) {
      auto iter = validation_user_itemset.find(uid);
      if (iter == validation_user_itemset.end()) return;
      auto train_it = train_user_itemset.find(iter->first);
      CHECK(train_it != train_user_itemset.end());
      std::unordered_set<size_t>& validation_set = iter->second;
      // Models are required to have this function
      auto rec_list = model.recommend(iter->first, 10, train_it->second);
      
      for (auto& rec_iid : rec_list) {
        CHECK_LT(rec_iid, train_data.feature_group_total_dimension(1));
      }
      for (auto& iid : validation_set){
        CHECK_LT(iid, train_data.feature_group_total_dimension(1));
      }
      auto eval_rets = evaluate_rec_list(rec_list, validation_set);
      //std::transform(rets.begin(), rets.end(), eval_rets.begin(), rets.begin(),
      //               std::plus<double>());
      user_rets[uid].assign(eval_rets.begin(), eval_rets.end()); 
    });
    //}
    double num_users_for_test = static_cast<double>(validation_user_itemset.size());
    std::vector<double> rets(8, 0.);
    parallel_for(0, 8, [&](size_t colid) {
              for (size_t uid = 0; uid < num_users; ++uid) {
                rets[colid] += user_rets[uid][colid] / num_users_for_test;
              }
    });

    std::stringstream ss;
    ss << std::setw(8) << std::setprecision(5) << rets[0] << "|"
        << std::setw(8) << std::setprecision(5) << rets[1]  << "|"
        << std::setw(8) << std::setprecision(5) << rets[2] << "|"
        << std::setw(8) << std::setprecision(5) << rets[3] << "|"
        << std::setw(8) << std::setprecision(5) << rets[4] << "|"
        << std::setw(8) << std::setprecision(5) << rets[5] << "|"
        << std::setw(8) << std::setprecision(5) << rets[6] << "|"
        << std::setw(8) << std::setprecision(5) << rets[7];// << "|"
        //<< std::setw(8) << std::setprecision(5) << rets[8] << "|"
        //<< std::setw(8) << std::setprecision(5) << rets[9]; 
    return ss.str(); 
  } 
  void MultiImageDataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
                                             const vector<Blob<Dtype>*>& top) {
    const int new_height = this->layer_param_.multi_image_data_param().new_height();
    const int new_width  = this->layer_param_.multi_image_data_param().new_width();
    const bool is_color  = this->layer_param_.multi_image_data_param().is_color();
    string root_folder = this->layer_param_.multi_image_data_param().root_folder();
    const int num_images = this->layer_param_.multi_image_data_param().num_images();
    
    CHECK((new_height == 0 && new_width == 0) ||
          (new_height > 0 && new_width > 0)) << "Current implementation requires "
      "new_height and new_width to be set at the same time.";
    CHECK_GT(num_images, 0) << "The number of images should be positive.";
    // Read the file with filenames and labels
    const string& source = this->layer_param_.multi_image_data_param().source();
    LOG(INFO) << "Opening file " << source;
    std::ifstream infile(source.c_str());
    std::string line;
    while (std::getline(infile, line)) {
      std::istringstream iss(line);
      std::vector<string> filenames(num_images);
      for (int image_index = 0; image_index < num_images; image_index++)
        iss >> filenames[image_index];
      int label;
      iss >> label;
      lines_.push_back(std::make_pair(filenames, label));
    }

    if (this->layer_param_.multi_image_data_param().shuffle()) {
      // randomly shuffle data
      LOG(INFO) << "Shuffling data";
      const unsigned int prefetch_rng_seed = caffe_rng_rand();
      prefetch_rng_.reset(new Caffe::RNG(prefetch_rng_seed));
      ShuffleImages();
    }
    LOG(INFO) << "A total of " << lines_.size() << " x " << num_images << " images.";

    lines_id_ = 0;
    // Check if we would need to randomly skip a few data points
    if (this->layer_param_.multi_image_data_param().rand_skip()) {
      unsigned int skip = caffe_rng_rand() %
        this->layer_param_.multi_image_data_param().rand_skip();
      LOG(INFO) << "Skipping first " << skip << " data points.";
      CHECK_GT(lines_.size(), skip) << "Not enough points to skip";
      lines_id_ = skip;
    }
    // Read an image, and use it to initialize the top blob.
    CHECK(lines_[lines_id_].first.size()) << "There is no image in the first line.";
    cv::Mat cv_img = ReadImageToCVMat(root_folder + *lines_[lines_id_].first.begin(),
                                      new_height, new_width, is_color);
    CHECK(cv_img.data) << "Could not load " << *lines_[lines_id_].first.begin();
    // Use data_transformer to infer the expected blob shape from a cv_image.
    vector<int> top_shape = this->data_transformer_->InferBlobShape(cv_img);
    this->transformed_data_.Reshape(top_shape);
    top_shape[1] *= num_images;
    // Reshape prefetch_data and top[0] according to the batch_size.
    const int batch_size = this->layer_param_.multi_image_data_param().batch_size();
    CHECK_GT(batch_size, 0) << "Positive batch size required";
    top_shape[0] = batch_size;
    for (int i = 0; i < this->PREFETCH_COUNT; ++i) {
      this->prefetch_[i].data_.Reshape(top_shape);
    }
    top[0]->Reshape(top_shape);

    LOG(INFO) << "output data size: " << top[0]->num() << ","
              << top[0]->channels() << "," << top[0]->height() << ","
              << top[0]->width();
    // label
    vector<int> label_shape(1, batch_size);
    top[1]->Reshape(label_shape);
    for (int i = 0; i < this->PREFETCH_COUNT; ++i) {
      this->prefetch_[i].label_.Reshape(label_shape);
    }
  }
  void MultiImageDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
    CPUTimer batch_timer;
    batch_timer.Start();
    double read_time = 0;
    double trans_time = 0;
    CPUTimer timer;
    CHECK(batch->data_.count());
    CHECK(this->transformed_data_.count());
    MultiImageDataParameter multi_image_data_param = this->layer_param_.multi_image_data_param();
    const int batch_size = multi_image_data_param.batch_size();
    const int new_height = multi_image_data_param.new_height();
    const int new_width = multi_image_data_param.new_width();
    const bool is_color = multi_image_data_param.is_color();
    string root_folder = multi_image_data_param.root_folder();
    const int num_images = this->layer_param_.multi_image_data_param().num_images();
    
    // Reshape according to the first image of each batch;
    // on single-input batches this allows inputs of varying dimension.
    cv::Mat cv_img = ReadImageToCVMat(root_folder + *lines_[lines_id_].first.begin(),
                                      new_height, new_width, is_color);
    CHECK(cv_img.data) << "Could not load " << *lines_[lines_id_].first.begin();
    // Use data_transformer to infer the expected blob shape from a cv_img.
    vector<int> top_shape = this->data_transformer_->InferBlobShape(cv_img);
    this->transformed_data_.Reshape(top_shape);
    top_shape[1] *= num_images;
    // Reshape batch according to the batch_size.
    top_shape[0] = batch_size;
    batch->data_.Reshape(top_shape);

    Dtype* prefetch_data = batch->data_.mutable_cpu_data();
    Dtype* prefetch_label = batch->label_.mutable_cpu_data();

    // datum scales
    const int lines_size = lines_.size();
    for (int item_id = 0; item_id < batch_size; ++item_id) {
      // get a blob
      timer.Start();
      CHECK_GT(lines_size, lines_id_);
      
      if (this->layer_param_.multi_image_data_param().shuffle_images()) {
        caffe::rng_t* prefetch_rng =
            static_cast<caffe::rng_t*>(prefetch_rng_->generator());
        shuffle(lines_[lines_id_].first.begin(), lines_[lines_id_].first.end(),
                prefetch_rng);
      }
      read_time += timer.MicroSeconds();
      timer.Start();
      for (int image_index = 0; image_index < num_images; ++image_index) {
        cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first[image_index],
                                          new_height, new_width, is_color);
        CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first[image_index];
        // Apply transformations (mirror, crop...) to the image.
        int offset = batch->data_.offset(item_id, image_index * cv_img.channels());
        this->transformed_data_.set_cpu_data(prefetch_data + offset);
        this->data_transformer_->Transform(cv_img, &(this->transformed_data_));
      }

      trans_time += timer.MicroSeconds();

      prefetch_label[item_id] = lines_[lines_id_].second;
      // go to the next iter
      lines_id_++;
      if (lines_id_ >= lines_size) {
        // We have reached the end. Restart from the first.
        DLOG(INFO) << "Restarting data prefetching from start.";
        lines_id_ = 0;
        if (this->layer_param_.multi_image_data_param().shuffle()) {
          ShuffleImages();
        }
      }
    }
    batch_timer.Stop();
    DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
    DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
    DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
  }
Example #24
void CropLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Construct a map from top blobs to layer inds, skipping over in-place
  // connections.
  map<Blob<Dtype>*, int> down_map;
  for (int layer_ind = 0; layer_ind < this->net_->top_vecs().size();
       ++layer_ind) {
    vector<Blob<Dtype>*> tops = this->net_->top_vecs()[layer_ind];
    for (int top_ind = 0; top_ind < tops.size(); ++top_ind) {
      if (down_map.find(tops[top_ind]) == down_map.end()) {
        down_map[tops[top_ind]] = layer_ind;
      }
    }
  }
  // Walk back from the first bottom, keeping track of all the blobs we pass.
  set<Blob<Dtype>*> path_blobs;
  Blob<Dtype>* blob = bottom[0];
  int layer_ind;
  // TODO this logic can be simplified if all blobs are tops
  path_blobs.insert(blob);
  while (down_map.find(blob) != down_map.end()) {
    layer_ind = down_map[blob];
    if (this->net_->bottom_vecs()[layer_ind].size() == 0) {
      break;
    }
    blob = this->net_->bottom_vecs()[layer_ind][0];
    path_blobs.insert(blob);
  }
  // Now walk back from the second bottom, until we find a blob of intersection.
  Blob<Dtype>* inter_blob = bottom[1];
  while (path_blobs.find(inter_blob) == path_blobs.end()) {
    CHECK(down_map.find(inter_blob) != down_map.end())
        << "Cannot align apparently disconnected blobs.";
    layer_ind = down_map[inter_blob];
    CHECK_GT(this->net_->bottom_vecs()[layer_ind].size(), 0)
        << "Cannot align apparently disconnected blobs.";
    inter_blob = this->net_->bottom_vecs()[layer_ind][0];
  }
  // Compute the coord map from the blob of intersection to each bottom.
  vector<DiagonalAffineMap<Dtype> > coord_maps(2,
      DiagonalAffineMap<Dtype>::identity(2));
  for (int i = 0; i < 2; ++i) {
    for (Blob<Dtype>* blob = bottom[i]; blob != inter_blob;
         blob = this->net_->bottom_vecs()[down_map[blob]][0]) {
      shared_ptr<Layer<Dtype> > layer = this->net_->layers()[down_map[blob]];
      coord_maps[i] = coord_maps[i].compose(layer->coord_map());
    }
  }
  // Compute the mapping from first bottom coordinates to second.
  DiagonalAffineMap<Dtype> crop_map =
      coord_maps[1].compose(coord_maps[0].inv());
  for (int i = 0; i < 2; ++i) {
    // Check for scale mismatch (unfortunately, CHECK_DOUBLE_EQ does not
    // support a message like the other CHECKs).
    CHECK_DOUBLE_EQ(crop_map.coefs()[i].first, 1);
    CHECK_LE(crop_map.coefs()[i].second, 0) << "Negative crop width.";
    // Check that the crop width is an integer.
    CHECK_DOUBLE_EQ(crop_map.coefs()[i].second,
        round(crop_map.coefs()[i].second));
  }
  crop_h_ = - round(crop_map.coefs()[0].second);
  crop_w_ = - round(crop_map.coefs()[1].second);
}
void BaseConvolutionLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // Configure the kernel size, padding, stride, and inputs.
  ConvolutionParameter conv_param = this->layer_param_.convolution_param();
  force_nd_im2col_ = conv_param.force_nd_im2col();
  channel_axis_ = bottom[0]->CanonicalAxisIndex(conv_param.axis());
  const int num_axes = bottom[0]->num_axes();
  forced_3d_ = (num_axes == 5 && channel_axis_ == 1 && bottom[0]->shape(2) == 1);
  const int first_spatial_axis = channel_axis_ + 1 + forced_3d_;
  num_spatial_axes_ = num_axes - first_spatial_axis;
  CHECK_GE(num_spatial_axes_, 0);
  vector<int> spatial_dim_blob_shape(1, std::max(num_spatial_axes_, 1));
  // Setup filter kernel dimensions (kernel_shape_).
  kernel_shape_.Reshape(spatial_dim_blob_shape);
  int* kernel_shape_data = kernel_shape_.mutable_cpu_data();
  if (conv_param.has_kernel_h() || conv_param.has_kernel_w()) {
    CHECK_EQ(num_spatial_axes_, 2)
        << "kernel_h & kernel_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.kernel_size_size())
        << "Either kernel_size or kernel_h/w should be specified; not both.";
    kernel_shape_data[0] = conv_param.kernel_h();
    kernel_shape_data[1] = conv_param.kernel_w();
  } else {
    const int num_kernel_dims = conv_param.kernel_size_size();
    CHECK(num_kernel_dims == 1 || num_kernel_dims == num_spatial_axes_)
        << "kernel_size must be specified once, or once per spatial dimension "
        << "(kernel_size specified " << num_kernel_dims << " times; "
        << num_spatial_axes_ << " spatial dims).";
    for (int i = 0; i < num_spatial_axes_; ++i) {
      kernel_shape_data[i] =
          conv_param.kernel_size((num_kernel_dims == 1) ? 0 : i);
    }
  }
  for (int i = 0; i < num_spatial_axes_; ++i) {
    CHECK_GT(kernel_shape_data[i], 0) << "Filter dimensions must be nonzero.";
  }
  // Setup stride dimensions (stride_).
  stride_.Reshape(spatial_dim_blob_shape);
  int* stride_data = stride_.mutable_cpu_data();
  if (conv_param.has_stride_h() || conv_param.has_stride_w()) {
    CHECK_EQ(num_spatial_axes_, 2)
        << "stride_h & stride_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.stride_size())
        << "Either stride or stride_h/w should be specified; not both.";
    stride_data[0] = conv_param.stride_h();
    stride_data[1] = conv_param.stride_w();
  } else {
    const int num_stride_dims = conv_param.stride_size();
    CHECK(num_stride_dims == 0 || num_stride_dims == 1 ||
          num_stride_dims == num_spatial_axes_)
        << "stride must be specified once, or once per spatial dimension "
        << "(stride specified " << num_stride_dims << " times; "
        << num_spatial_axes_ << " spatial dims).";
    const int kDefaultStride = 1;
    for (int i = 0; i < num_spatial_axes_; ++i) {
      stride_data[i] = (num_stride_dims == 0) ? kDefaultStride :
          conv_param.stride((num_stride_dims == 1) ? 0 : i);
      CHECK_GT(stride_data[i], 0) << "Stride dimensions must be nonzero.";
    }
  }
  // Setup pad dimensions (pad_).
  pad_.Reshape(spatial_dim_blob_shape);
  int* pad_data = pad_.mutable_cpu_data();
  if (conv_param.has_pad_h() || conv_param.has_pad_w()) {
    CHECK_EQ(num_spatial_axes_, 2)
        << "pad_h & pad_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.pad_size())
        << "Either pad or pad_h/w should be specified; not both.";
    pad_data[0] = conv_param.pad_h();
    pad_data[1] = conv_param.pad_w();
  } else {
    const int num_pad_dims = conv_param.pad_size();
    CHECK(num_pad_dims == 0 || num_pad_dims == 1 ||
          num_pad_dims == num_spatial_axes_)
        << "pad must be specified once, or once per spatial dimension "
        << "(pad specified " << num_pad_dims << " times; "
        << num_spatial_axes_ << " spatial dims).";
    const int kDefaultPad = 0;
    for (int i = 0; i < num_spatial_axes_; ++i) {
      pad_data[i] = (num_pad_dims == 0) ? kDefaultPad :
          conv_param.pad((num_pad_dims == 1) ? 0 : i);
    }
  }
  // Setup dilation dimensions (dilation_).
  dilation_.Reshape(spatial_dim_blob_shape);
  int* dilation_data = dilation_.mutable_cpu_data();
  const int num_dilation_dims = conv_param.dilation_size();
  CHECK(num_dilation_dims == 0 || num_dilation_dims == 1 ||
        num_dilation_dims == num_spatial_axes_)
      << "dilation must be specified once, or once per spatial dimension "
      << "(dilation specified " << num_dilation_dims << " times; "
      << num_spatial_axes_ << " spatial dims).";
  const int kDefaultDilation = 1;
  for (int i = 0; i < num_spatial_axes_; ++i) {
    dilation_data[i] = (num_dilation_dims == 0) ? kDefaultDilation :
                       conv_param.dilation((num_dilation_dims == 1) ? 0 : i);
  }
  // Special case: im2col is the identity for 1x1 convolution with stride 1
  // and no padding, so flag for skipping the buffer and transformation.
  is_1x1_ = true;
  for (int i = 0; i < num_spatial_axes_; ++i) {
    is_1x1_ &=
        kernel_shape_data[i] == 1 && stride_data[i] == 1 && pad_data[i] == 0;
    if (!is_1x1_) { break; }
  }
  // Configure output channels and groups.
  channels_ = bottom[0]->shape(channel_axis_);
  num_output_ = this->layer_param_.convolution_param().num_output();
  CHECK_GT(num_output_, 0);
  group_ = this->layer_param_.convolution_param().group();
  CHECK_EQ(channels_ % group_, 0);
  CHECK_EQ(num_output_ % group_, 0)
      << "Number of output should be multiples of group.";
  if (reverse_dimensions()) {
    conv_out_channels_ = channels_;
    conv_in_channels_ = num_output_;
  } else {
    conv_out_channels_ = num_output_;
    conv_in_channels_ = channels_;
  }
  // Handle the parameters: weights and biases.
  // - blobs_[0] holds the filter weights
  // - blobs_[1] holds the biases (optional)
  vector<int> weight_shape(2);
  weight_shape[0] = conv_out_channels_;
  weight_shape[1] = conv_in_channels_ / group_;
  for (int i = 0; i < num_spatial_axes_; ++i) {
    weight_shape.push_back(kernel_shape_data[i]);
  }
  bias_term_ = this->layer_param_.convolution_param().bias_term();
  vector<int> bias_shape(bias_term_, num_output_);
  if (this->blobs_.size() > 0) {
    CHECK_EQ(1 + bias_term_, this->blobs_.size())
        << "Incorrect number of weight blobs.";
    // true_blob_shape is the original blob shape (n,c,h,w); with forced_3d_
    // the stored blob shape is expanded to (n,c,1,h,w), so the singleton axis
    // is dropped before comparing against weight_shape.
    vector<int> true_blob_shape = this->blobs_[0]->shape();
    if (forced_3d_) true_blob_shape.erase(true_blob_shape.begin()+2);
    if (weight_shape != true_blob_shape) {
      Blob<Dtype> weight_shaped_blob(weight_shape);
      LOG(FATAL) << "Incorrect weight shape: expected shape "
          << weight_shaped_blob.shape_string() << "; instead, shape was "
          << this->blobs_[0]->shape_string();
    }
    if (bias_term_ && bias_shape != this->blobs_[1]->shape()) {
      Blob<Dtype> bias_shaped_blob(bias_shape);
      LOG(FATAL) << "Incorrect bias shape: expected shape "
          << bias_shaped_blob.shape_string() << "; instead, shape was "
          << this->blobs_[1]->shape_string();
    }
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    if (bias_term_) {
      this->blobs_.resize(2);
    } else {
      this->blobs_.resize(1);
    }
    // Initialize and fill the weights:
    // output channels x input channels per-group x kernel height x kernel width
    this->blobs_[0].reset(new Blob<Dtype>(weight_shape));
    shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
        this->layer_param_.convolution_param().weight_filler()));
    weight_filler->Fill(this->blobs_[0].get());
    // If necessary, initialize and fill the biases.
    if (bias_term_) {
      this->blobs_[1].reset(new Blob<Dtype>(bias_shape));
      shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
          this->layer_param_.convolution_param().bias_filler()));
      bias_filler->Fill(this->blobs_[1].get());
    }
  }
  kernel_dim_ = this->blobs_[0]->count(1);
  weight_offset_ = conv_out_channels_ * kernel_dim_ / group_;
  // Propagate gradients to the parameters (as directed by backward pass).
  this->param_propagate_down_.resize(this->blobs_.size(), true);
}
void ImageLabelmapDataLayer<Dtype>::load_batch(LabelmapBatch<Dtype>* batch) {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(batch->data_.count());
  CHECK(batch->labelmap_.count());
  CHECK(this->transformed_data_.count());
  CHECK(this->transformed_labelmap_.count());
  ImageDataParameter image_data_param = this->layer_param_.image_data_param();
  const int batch_size = image_data_param.batch_size();
  const int new_height = image_data_param.new_height();
  const int new_width = image_data_param.new_width();
  const bool is_color = image_data_param.is_color();
  string root_folder = image_data_param.root_folder();

  // Reshape according to the first image of each batch; reshaping on
  // single-image batches allows inputs of varying dimension.
  cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
      new_height, new_width, is_color);
  cv::Mat cv_gt = ReadImageToCVMat(root_folder + lines_[lines_id_].second,
      new_height, new_width, 0);
  CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;
  // Use data_transformer to infer the expected blob shape from a cv_img.
  vector<int> top_shape = this->data_transformer_->InferBlobShape(cv_img);
  vector<int> top_shape_labelmap = this->data_transformer_->InferBlobShape(cv_gt);
  
  this->transformed_data_.Reshape(top_shape);
  this->transformed_labelmap_.Reshape(top_shape_labelmap);
  // Reshape prefetch_data and top[0] according to the batch_size.
  top_shape[0] = batch_size;
  top_shape_labelmap[0] = batch_size;
  
  batch->data_.Reshape(top_shape);
  batch->labelmap_.Reshape(top_shape_labelmap);

  Dtype* prefetch_data = batch->data_.mutable_cpu_data();
  Dtype* prefetch_labelmap = batch->labelmap_.mutable_cpu_data();

  // datum scales
  const int lines_size = lines_.size();
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    // get a blob
    timer.Start();
    CHECK_GT(lines_size, lines_id_);
    cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
                                    0, 0, is_color);
    cv::Mat cv_gt = ReadImageToCVMat(root_folder + lines_[lines_id_].second,
                                    0, 0, 0);

    CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;

    const int height = cv_img.rows;
    const int width = cv_img.cols;
    const int gt_channels = cv_gt.channels();
    const int gt_height = cv_gt.rows;
    const int gt_width = cv_gt.cols;

    CHECK((height == gt_height) && (width == gt_width)) << "GT image size should equal the input image size";
    CHECK(gt_channels == 1) << "GT image channel number should be 1";
 
    if (new_height > 0 && new_width > 0) {
        cv::resize(cv_img, cv_img, cv::Size(new_width, new_height));
        cv::resize(cv_gt, cv_gt, cv::Size(new_width, new_height), 0, 0, cv::INTER_LINEAR);
    }

    if (!cv_img.data || !cv_gt.data) {
      continue;
    }


    read_time += timer.MicroSeconds();
    timer.Start();
    // Apply transformations (mirror, crop...) to the image
    int offset = batch->data_.offset(item_id);
    int offset_gt = batch->labelmap_.offset(item_id);
    //CHECK(offset == offset_gt) << "fetching should be synchronized";
    this->transformed_data_.set_cpu_data(prefetch_data + offset);
    this->transformed_labelmap_.set_cpu_data(prefetch_labelmap + offset_gt);
    std::pair<int, int> hw_off = this->data_transformer_->LocTransform(cv_img, &(this->transformed_data_));
    
    // [*** Caution ***]
    // Dividing the 8-bit ground truth by 255 leverages OpenCV's rounding
    // behaviour for the "consensus sampling" used in the Holistically-Nested
    // Edge Detection paper. For ordinary binary edge maps this is harmless;
    // for 5-subject aggregated edge maps (BSDS) it discards weak edge points
    // labeled by two or fewer annotators.
    cv::Mat encoded_gt = cv_gt / 255;

    this->data_transformer_->LabelmapTransform(encoded_gt, &(this->transformed_labelmap_), hw_off);
    
    trans_time += timer.MicroSeconds();

    // go to the next iter
    lines_id_++;
    if (lines_id_ >= lines_size) {
      // We have reached the end. Restart from the first.
      DLOG(INFO) << "Restarting data prefetching from start.";
      lines_id_ = 0;
      if (this->layer_param_.image_data_param().shuffle()) {
        ShuffleImages();
      }
    }
  }
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
Example #27
void FloDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(batch->data_.count());
  CHECK(this->transformed_data_.count());
  ImageDataParameter image_data_param = this->layer_param_.image_data_param();
  const int batch_size = image_data_param.batch_size();
  string root_folder = image_data_param.root_folder();

  // Reshape according to the first flow file of each batch; reshaping on
  // single-item batches allows inputs of varying dimension.
  int xSize, ySize;
  CHECK(readFloFile(root_folder + lines_[lines_id_].first, NULL, xSize, ySize))
      << "Could not load " << lines_[lines_id_].first;
  
  // Build the blob shape by hand: two flow channels (u, v) of size ySize x xSize.
  vector<int> top_shape = vector<int>(4);
  top_shape[0] = 1;
  top_shape[1] = 2;
  top_shape[2] = ySize;
  top_shape[3] = xSize;
  
  //this->transformed_data_.Reshape(top_shape);
  
  // Reshape batch according to the batch_size.
  top_shape[0] = batch_size;
  batch->data_.Reshape(top_shape);

  Dtype* prefetch_data = batch->data_.mutable_cpu_data();

  // datum scales
  const int lines_size = lines_.size();
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    // get a blob
    timer.Start();
    CHECK_GT(lines_size, lines_id_);
    
    read_time += timer.MicroSeconds();
    timer.Start();
    // Compute the destination offset and read the flow file directly into the batch.
    int offset = batch->data_.offset(item_id);
    //this->transformed_data_.set_cpu_data(prefetch_data + offset);
    
    CHECK(readFloFile(root_folder + lines_[lines_id_].first, prefetch_data + offset, xSize, ySize))
        << "Could not load " << lines_[lines_id_].first;
    
    //this->data_transformer_->Transform(cv_img, &(this->transformed_data_));
    
    trans_time += timer.MicroSeconds();

    // go to the next iter
    lines_id_++;
    if (lines_id_ >= lines_size) {
      // We have reached the end. Restart from the first.
      DLOG(INFO) << "Restarting data prefetching from start.";
      lines_id_ = 0;
      if (this->layer_param_.image_data_param().shuffle()) {
        ShuffleImages();
      }
    }
  }
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
void ImageLabelmapDataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  const int new_height = this->layer_param_.image_data_param().new_height();
  const int new_width  = this->layer_param_.image_data_param().new_width();
  const bool is_color  = this->layer_param_.image_data_param().is_color();
  string root_folder = this->layer_param_.image_data_param().root_folder();

  CHECK((new_height == 0 && new_width == 0) ||
      (new_height > 0 && new_width > 0)) << "Current implementation requires "
      "new_height and new_width to be set at the same time.";
  // Read the file with filenames and labels
  const string& source = this->layer_param_.image_data_param().source();
  LOG(INFO) << "Opening file " << source;
  std::ifstream infile(source.c_str());
  string img_filename;
  string gt_filename;
  while (infile >> img_filename >> gt_filename) {
    lines_.push_back(std::make_pair(img_filename, gt_filename));
  }

  if (this->layer_param_.image_data_param().shuffle()) {
    // randomly shuffle data
    LOG(INFO) << "Shuffling data";
    const unsigned int prefetch_rng_seed = caffe_rng_rand();
    prefetch_rng_.reset(new Caffe::RNG(prefetch_rng_seed));
    ShuffleImages();
  }
  LOG(INFO) << "A total of " << lines_.size() << " images.";

  lines_id_ = 0;
  // Check if we would need to randomly skip a few data points
  if (this->layer_param_.image_data_param().rand_skip()) {
    unsigned int skip = caffe_rng_rand() %
        this->layer_param_.image_data_param().rand_skip();
    LOG(INFO) << "Skipping first " << skip << " data points.";
    CHECK_GT(lines_.size(), skip) << "Not enough points to skip";
    lines_id_ = skip;
  }
  // Read an image, and use it to initialize the top blob.
  cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
                                    new_height, new_width, is_color);

  cv::Mat cv_gt = ReadImageToCVMat(root_folder + lines_[lines_id_].second,
                                    new_height, new_width, 0);

  CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;
  CHECK(cv_gt.data) << "Could not load " << lines_[lines_id_].second;

  const int height = cv_img.rows;
  const int width = cv_img.cols;

  const int gt_channels = cv_gt.channels();
  const int gt_height = cv_gt.rows;
  const int gt_width = cv_gt.cols;

  CHECK((height == gt_height) && (width == gt_width))
      << "Ground-truth size must equal the image size";
  CHECK(gt_channels == 1) << "GT image channel number should be 1";
  
  if (new_height > 0 && new_width > 0) {
    cv::resize(cv_img, cv_img, cv::Size(new_width, new_height));
    cv::resize(cv_gt, cv_gt, cv::Size(new_width, new_height));
  }

  // Use data_transformer to infer the expected blob shape from a cv_image.
  vector<int> top_shape = this->data_transformer_->InferBlobShape(cv_img);
  vector<int> top_shape_labelmap = this->data_transformer_->InferBlobShape(cv_gt);
  
  this->transformed_data_.Reshape(top_shape);
  this->transformed_labelmap_.Reshape(top_shape_labelmap);
  // Reshape prefetch_data and top[0] according to the batch_size.
  const int batch_size = this->layer_param_.image_data_param().batch_size();
  CHECK_GT(batch_size, 0) << "Positive batch size required";
  top_shape[0] = batch_size;
  top_shape_labelmap[0] = batch_size;
  for (int i = 0; i < this->PREFETCH_COUNT; ++i) {
    this->prefetch_[i].data_.Reshape(top_shape);
    this->prefetch_[i].labelmap_.Reshape(top_shape_labelmap);
  }
  top[0]->Reshape(top_shape);
  top[1]->Reshape(top_shape_labelmap);

  LOG(INFO) << "output data size: " << top[0]->num() << ","
      << top[0]->channels() << "," << top[0]->height() << ","
      << top[0]->width();
  LOG(INFO) << "output label size: " << top[1]->num() << ","
      << top[1]->channels() << "," << top[1]->height() << ","
      << top[1]->width();
}
Example #29
void DataTransformer<Dtype>::Transform(const Datum& datum,
                                       Dtype* transformed_data) {
  const string& data = datum.data();
  const int datum_channels = datum.channels();
  const int datum_height = datum.height();
  const int datum_width = datum.width();

  const int crop_size = param_.crop_size();
  const Dtype scale = param_.scale();
  const bool do_mirror = param_.mirror() && Rand(2);
  const bool has_mean_file = param_.has_mean_file();
  const bool has_uint8 = data.size() > 0;
  const bool has_mean_values = mean_values_.size() > 0;

  CHECK_GT(datum_channels, 0);
  CHECK_GE(datum_height, crop_size);
  CHECK_GE(datum_width, crop_size);

  Dtype* mean = NULL;
  if (has_mean_file) {
    CHECK_EQ(datum_channels, data_mean_.channels());
    CHECK_EQ(datum_height, data_mean_.height());
    CHECK_EQ(datum_width, data_mean_.width());
    mean = data_mean_.mutable_cpu_data();
  }
  if (has_mean_values) {
    CHECK(mean_values_.size() == 1 || mean_values_.size() == datum_channels) <<
     "Specify either 1 mean_value or as many as channels: " << datum_channels;
    if (datum_channels > 1 && mean_values_.size() == 1) {
      // Replicate the mean_value for simplicity
      for (int c = 1; c < datum_channels; ++c) {
        mean_values_.push_back(mean_values_[0]);
      }
    }
  }

  int height = datum_height;
  int width = datum_width;

  int h_off = 0;
  int w_off = 0;
  if (crop_size) {
    height = crop_size;
    width = crop_size;
    // We only do random crop when we do training.
    if (phase_ == TRAIN) {
      h_off = Rand(datum_height - crop_size + 1);
      w_off = Rand(datum_width - crop_size + 1);
    } else {
      h_off = (datum_height - crop_size) / 2;
      w_off = (datum_width - crop_size) / 2;
    }
  }

  Dtype datum_element;
  int top_index, data_index;
  for (int c = 0; c < datum_channels; ++c) {
    for (int h = 0; h < height; ++h) {
      for (int w = 0; w < width; ++w) {
        data_index = (c * datum_height + h_off + h) * datum_width + w_off + w;
        if (do_mirror) {
          top_index = (c * height + h) * width + (width - 1 - w);
        } else {
          top_index = (c * height + h) * width + w;
        }
        if (has_uint8) {
          datum_element =
            static_cast<Dtype>(static_cast<uint8_t>(data[data_index]));
        } else {
          datum_element = datum.float_data(data_index);
        }
        if (has_mean_file) {
          transformed_data[top_index] =
            (datum_element - mean[data_index]) * scale;
        } else {
          if (has_mean_values) {
            transformed_data[top_index] =
              (datum_element - mean_values_[c]) * scale;
          } else {
            transformed_data[top_index] = datum_element * scale;
          }
        }
      }
    }
  }
}
void MultiStageMeanfieldLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {

  init_cpu = false;
  init_gpu = false;
  const caffe::MultiStageMeanfieldParameter meanfield_param = this->layer_param_.multi_stage_meanfield_param();

  num_iterations_ = meanfield_param.num_iterations();

  CHECK_GT(num_iterations_, 1) << "Number of iterations must be greater than 1.";

  theta_alpha_ = meanfield_param.theta_alpha();
  theta_beta_ = meanfield_param.theta_beta();
  theta_gamma_ = meanfield_param.theta_gamma();

  count_ = bottom[0]->count();
  num_ = bottom[0]->num();
  channels_ = bottom[0]->channels();
  height_ = bottom[0]->height();
  width_ = bottom[0]->width();
  num_pixels_ = height_ * width_;

  LOG(INFO) << "This implementation has not been tested batch size > 1.";

  top[0]->Reshape(num_, channels_, height_, width_);

  // Initialize the parameters that will be updated by backpropagation.
  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Multimeanfield layer skipping parameter initialization.";
  } else {

    // blobs_[0] - spatial kernel weights, blobs_[1] - bilateral kernel weights,
    // blobs_[2] - compatibility matrix.
    this->blobs_.resize(3);

    // Allocate space for kernel weights.
    this->blobs_[0].reset(new Blob<Dtype>(1, 1, channels_, channels_));
    this->blobs_[1].reset(new Blob<Dtype>(1, 1, channels_, channels_));

    caffe_set(channels_ * channels_, Dtype(0.), this->blobs_[0]->mutable_cpu_data());
    caffe_set(channels_ * channels_, Dtype(0.), this->blobs_[1]->mutable_cpu_data());

    // Initialize the kernel weights. The two files spatial.par and bilateral.par
    // must be available in the working directory.
    FILE* pFile = fopen("spatial.par", "r");
    CHECK(pFile) << "The file 'spatial.par' was not found. Please create it with the initial spatial kernel weights.";
    for (int i = 0; i < channels_; i++) {
      double weight = 0;
      CHECK_EQ(fscanf(pFile, "%lf", &weight), 1) << "Failed to read spatial kernel weight " << i;
      this->blobs_[0]->mutable_cpu_data()[i * channels_ + i] = Dtype(weight);
    }
    fclose(pFile);

    pFile = fopen("bilateral.par", "r");
    CHECK(pFile) << "The file 'bilateral.par' is not found. Please create it with initial bilateral kernel weights.";
    for (int i = 0; i < channels_; i++) {
      fscanf(pFile, "%lf", &this->blobs_[1]->mutable_cpu_data()[i * channels_ + i]);
    }
    fclose(pFile);

    // Initialize the compatibility matrix.
    this->blobs_[2].reset(new Blob<Dtype>(1, 1, channels_, channels_));
    caffe_set(channels_ * channels_, Dtype(0.), this->blobs_[2]->mutable_cpu_data());

    // Initialize it to have the Potts model.
    for (int c = 0; c < channels_; ++c) {
      (this->blobs_[2]->mutable_cpu_data())[c * channels_ + c] = Dtype(-1.);
    }
  }

  // Use a heap-allocated buffer instead of a variable-length array, which is
  // not standard C++ and can overflow the stack for large images.
  std::vector<float> spatial_kernel(2 * num_pixels_);
  float *spatial_kernel_gpu_;
  compute_spatial_kernel(spatial_kernel.data());
  spatial_lattice_.reset(new ModifiedPermutohedral());
  spatial_norm_.Reshape(1, 1, height_, width_);
  Dtype* norm_data_gpu;
  Dtype* norm_data;
  // Initialize the spatial lattice. This does not need to be computed for every image because we use a fixed size.
  switch (Caffe::mode()) {
    case Caffe::CPU:
      norm_data = spatial_norm_.mutable_cpu_data();
      spatial_lattice_->init(spatial_kernel.data(), 2, width_, height_);
      // Calculate spatial filter normalization factors.
      norm_feed_= new Dtype[num_pixels_];
      caffe_set(num_pixels_, Dtype(1.0), norm_feed_);
      // pass norm_feed and norm_data to gpu
      spatial_lattice_->compute(norm_data, norm_feed_, 1);
      bilateral_kernel_buffer_ = new float[5 * num_pixels_];
      init_cpu = true;
      break;
    #ifndef CPU_ONLY
    case Caffe::GPU:
      CUDA_CHECK(cudaMalloc((void**)&spatial_kernel_gpu_, 2 * num_pixels_ * sizeof(float)));
      CUDA_CHECK(cudaMemcpy(spatial_kernel_gpu_, spatial_kernel.data(), 2 * num_pixels_ * sizeof(float), cudaMemcpyHostToDevice));
      spatial_lattice_->init(spatial_kernel_gpu_, 2, width_, height_);
      CUDA_CHECK(cudaMalloc((void**)&norm_feed_, num_pixels_ * sizeof(Dtype)));
      caffe_gpu_set(num_pixels_, Dtype(1.0), norm_feed_);
      norm_data_gpu = spatial_norm_.mutable_gpu_data();
      spatial_lattice_->compute(norm_data_gpu, norm_feed_, 1);
      norm_data = spatial_norm_.mutable_cpu_data();
      CUDA_CHECK(cudaMalloc((void**)&bilateral_kernel_buffer_, 5 * num_pixels_ * sizeof(float)));
      CUDA_CHECK(cudaFree(spatial_kernel_gpu_));
      init_gpu = true;
      break;
    #endif
    default:
      LOG(FATAL) << "Unknown caffe mode.";
  }
  
  for (int i = 0; i < num_pixels_; ++i) {
    norm_data[i] = 1.0f / (norm_data[i] + 1e-20f);
  }
  bilateral_norms_.Reshape(num_, 1, height_, width_);  

  // Configure the split layer that is used to make copies of the unary term. One copy for each iteration.
  // It may be possible to optimize this calculation later.
  split_layer_bottom_vec_.clear();
  split_layer_bottom_vec_.push_back(bottom[0]);

  split_layer_top_vec_.clear();

  split_layer_out_blobs_.resize(num_iterations_);
  for (int i = 0; i < num_iterations_; i++) {
    split_layer_out_blobs_[i].reset(new Blob<Dtype>());
    split_layer_top_vec_.push_back(split_layer_out_blobs_[i].get());
  }

  LayerParameter split_layer_param;
  split_layer_.reset(new SplitLayer<Dtype>(split_layer_param));
  split_layer_->SetUp(split_layer_bottom_vec_, split_layer_top_vec_);

  // Make blobs to store outputs of each meanfield iteration. Output of the last iteration is stored in top[0].
  // So we need only (num_iterations_ - 1) blobs.
  iteration_output_blobs_.resize(num_iterations_ - 1);
  for (int i = 0; i < num_iterations_ - 1; ++i) {
    iteration_output_blobs_[i].reset(new Blob<Dtype>(num_, channels_, height_, width_));
  }
  // Make instances of MeanfieldIteration and initialize them.
  meanfield_iterations_.resize(num_iterations_);
  for (int i = 0; i < num_iterations_; ++i) {
    meanfield_iterations_[i].reset(new MeanfieldIteration<Dtype>());
    meanfield_iterations_[i]->OneTimeSetUp(
        split_layer_out_blobs_[i].get(), // unary terms
        (i == 0) ? bottom[1] : iteration_output_blobs_[i - 1].get(), // softmax input
        (i == num_iterations_ - 1) ? top[0] : iteration_output_blobs_[i].get(), // output blob
        spatial_lattice_, // spatial lattice
        &spatial_norm_); // spatial normalization factors.
  }
  this->param_propagate_down_.resize(this->blobs_.size(), true);
  LOG(INFO) << ("MultiStageMeanfieldLayer initialized.");
}