Example 1
TEST_F(CommonTest, TestRandSeedCPU) {
  const size_t bytes = 10 * sizeof(int);
  SyncedMemory data_a(bytes, Caffe::GetDefaultDevice());
  SyncedMemory data_b(bytes, Caffe::GetDefaultDevice());

  // Draw the same Bernoulli sequence twice from an identical seed.
  Caffe::set_random_seed(1701, Caffe::GetDefaultDevice());
  caffe_rng_bernoulli(10, 0.5, static_cast<int*>(data_a.mutable_cpu_data()));

  Caffe::set_random_seed(1701, Caffe::GetDefaultDevice());
  caffe_rng_bernoulli(10, 0.5, static_cast<int*>(data_b.mutable_cpu_data()));

  // Re-seeding with the same value must reproduce the stream exactly.
  const int* a = static_cast<const int*>(data_a.cpu_data());
  const int* b = static_cast<const int*>(data_b.cpu_data());
  for (int i = 0; i < 10; ++i) {
    EXPECT_EQ(a[i], b[i]);
  }
}
Example 2
void DropoutLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* in = bottom[0]->cpu_data();
  Dtype* out = top[0]->mutable_cpu_data();
  unsigned int* mask = rand_vec_.mutable_cpu_data();
  const int count = bottom[0]->count();
  if (this->phase_ != TRAIN) {
    // Inference: pass the data through. If training skipped the rescale,
    // compensate here by 1/scale_ so the expected activation matches.
    caffe_copy(bottom[0]->count(), in, out);
    if (!scale_train_) {
      caffe_scal<Dtype>(count, 1. / scale_, out);
    }
    return;
  }
  // Training: sample a keep/drop mask with keep probability 1 - threshold_.
  caffe_rng_bernoulli(count, 1. - threshold_, mask);
  if (scale_train_) {
    // Inverted dropout: rescale kept activations during training.
    for (int i = 0; i < count; ++i) {
      out[i] = in[i] * mask[i] * scale_;
    }
  } else {
    for (int i = 0; i < count; ++i) {
      out[i] = in[i] * mask[i];
    }
  }
}
Example 3
 virtual void Fill(Blob<Dtype>* blob) {
   // Fill the blob with Gaussian noise N(mean, std); if 'sparse' >= 0,
   // additionally zero out entries so each output keeps, on average,
   // 'sparse' non-zero input weights.
   Dtype* data = blob->mutable_cpu_data();
   CHECK(blob->count());
   // Write the noise through 'data' directly instead of fetching the
   // mutable pointer a second time.
   caffe_rng_gaussian<Dtype>(blob->count(), Dtype(this->filler_param_.mean()),
                             Dtype(this->filler_param_.std()), data);
   const int sparse = this->filler_param_.sparse();
   CHECK_GE(sparse, -1);
   if (sparse >= 0) {
     // Sparse initialization is implemented for "weight" blobs; i.e. matrices.
     // These have num == channels == 1; width is number of inputs; height is
     // number of outputs.  The 'sparse' variable specifies the mean number
     // of non-zero input weights for a given output.
     CHECK_GE(blob->num_axes(), 1);
     const int num_outputs = blob->shape(0);
     const Dtype non_zero_probability = Dtype(sparse) / Dtype(num_outputs);
     rand_vec_.reset(
         new SyncedMemory(blob->count() * sizeof(int),
                          blob->get_device()));
     // mutable_cpu_data() returns void*; static_cast is the correct
     // (and cheaper to audit) conversion, not reinterpret_cast.
     int* mask = static_cast<int*>(rand_vec_->mutable_cpu_data());
     caffe_rng_bernoulli(blob->count(), non_zero_probability, mask);
     for (int i = 0; i < blob->count(); ++i) {
       data[i] *= mask[i];
     }
   }
 }
Example 4
void DropoutLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Sample a dropout mask when training, or when configured to also
  // sample weights at test time.
  const Dtype* src = bottom[0]->cpu_data();
  Dtype* dst = top[0]->mutable_cpu_data();
  unsigned int* mask = rand_vec_.mutable_cpu_data();
  const int count = bottom[0]->count();
  const bool sample_mask = this->phase_ == TRAIN ||
      this->layer_param_.dropout_param().sample_weights_test();
  if (!sample_mask) {
    // Plain pass-through at test time.
    caffe_copy(bottom[0]->count(), src, dst);
    return;
  }
  caffe_rng_bernoulli(count, 1. - threshold_, mask);
  for (int i = 0; i < count; ++i) {
    dst[i] = src[i] * mask[i] * scale_;
  }
}
Example 5
void DropoutLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  Dtype* mask = rand_vec_->mutable_cpu_data();
  const int count = rand_vec_->count();
  if (this->phase_ != TRAIN) {
    // Inference: dropout is the identity.
    caffe_copy(bottom[0]->count(), bottom_data, top_data);
    return;
  }
  // Draw multiplicative noise according to the configured distribution.
  switch (drop_type_) {
    case DropoutParameter_DropType_BERNOULLI:
      caffe_rng_bernoulli(count, Dtype(1. - threshold_), mask);
      break;
    case DropoutParameter_DropType_GAUSSIAN:
      caffe_rng_gaussian(count, Dtype(mu_), Dtype(sigma_), mask);
      // Clip each sample into [0, 1].
      for (int i = 0; i < rand_vec_->count(); ++i) {
        const Dtype m = mask[i];
        mask[i] = m > 1 ? 1 : (m < 0 ? 0 : m);
      }
      break;
    case DropoutParameter_DropType_UNIFORM:
      caffe_rng_uniform(count, Dtype(a_), Dtype(b_), mask);
      break;
  }
  if (drop_batch_) {
    // A single noise value gates the entire batch.
    const Dtype drop = mask[0];
    caffe_copy(top[0]->count(), bottom_data, top_data);
    caffe_scal(top[0]->count(), Dtype(scale_ * drop), top_data);
  } else {
    // Element-wise: the scale layer multiplies bottom by the mask blob,
    // then the result is rescaled by scale_.
    vector<Blob<Dtype>*> scale_bottom(2, NULL);
    scale_bottom[0] = bottom[0];
    scale_bottom[1] = rand_vec_;
    const vector<Blob<Dtype>*> scale_top(1, top[0]);
    scale_layer_->Forward(scale_bottom, scale_top);
    caffe_scal(top[0]->count(), scale_, top_data);
  }
}
Example 6
Dtype DropoutLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  // Legacy-API dropout forward pass: mask-and-scale in TRAIN phase,
  // straight copy otherwise.
  const Dtype* src = bottom[0]->cpu_data();
  Dtype* dst = (*top)[0]->mutable_cpu_data();
  unsigned int* mask = rand_vec_.mutable_cpu_data();
  const int count = bottom[0]->count();
  if (Caffe::phase() == Caffe::TRAIN) {
    // Sample keep/drop decisions with keep probability 1 - threshold_.
    caffe_rng_bernoulli(count, 1. - threshold_, mask);
    for (int i = 0; i < count; ++i) {
      dst[i] = src[i] * mask[i] * scale_;
    }
  } else {
    caffe_copy(bottom[0]->count(), src, dst);
  }
  return Dtype(0);
}
void SpatialDropoutLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Spatial dropout: one Bernoulli draw per (sample, channel) keeps or
  // zeroes an entire height*width feature map, rather than per element.
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();

  if (this->phase_ == TRAIN) {
    // One mask entry per channel of every sample.
    const int mask_count = rand_vec_.count();
    unsigned int* mask = rand_vec_.mutable_cpu_data();
    caffe_rng_bernoulli(mask_count, 1. - threshold_, mask);

    const int num = bottom[0]->num();
    const int channels = bottom[0]->channels();
    const int height = bottom[0]->height();
    const int width = bottom[0]->width();
    const int sub_count = height * width;
    const int count = bottom[0]->count();
    CHECK_EQ(count, mask_count * sub_count);

    int d_offset = 0;
    int m_offset = 0;
    for (int n = 0; n < num; ++n) {
      for (int c = 0; c < channels; ++c) {
        // alpha is 0 (dropped) or scale_ (kept) for the whole feature map.
        const Dtype alpha = Dtype(mask[m_offset++] * this->scale_);
        // BUGFIX: the inner loop previously re-declared 'c', shadowing the
        // channel index; use a distinct spatial index instead.
        for (int s = 0; s < sub_count; ++s) {
          top_data[d_offset] = bottom_data[d_offset] * alpha;
          ++d_offset;
        }
      }
    }
    CHECK_EQ(d_offset, count);
    CHECK_EQ(m_offset, mask_count);
  } else if (this->phase_ == TEST) {
    caffe_copy(bottom[0]->count(), bottom_data, top_data);
  } else {
    NOT_IMPLEMENTED;
  }
}
Example 8
 void RngBernoulliFill(const Dtype p, void* cpu_data) {
   // Draw sample_size_ Bernoulli(p) samples into the caller's int buffer.
   caffe_rng_bernoulli(sample_size_, p, static_cast<int*>(cpu_data));
 }
Example 9
void Device::rng_bernoulli_half(const uint_tp n, const half_fp p,
                                    vptr<uint16_t> r) {
  // Sample n Bernoulli(p) values into a host staging buffer, then copy
  // them to device memory at r.
  if (n == 0) {
    return;  // nothing to sample or copy
  }
  vector<uint16_t> random(n);  // NOLINT
  // vector::data() is well-defined even for empty vectors, unlike
  // &random[0], which is UB when the vector is empty.
  caffe_rng_bernoulli(n, p, random.data());
  this->memcpy(sizeof(uint16_t) * n, random.data(), vptr<void>(r));
}
Example 10
void Device::rng_bernoulli_double(const uint_tp n, const double p,
                                    vptr<int64_t> r) {
  // Sample n Bernoulli(p) values into a host staging buffer, then copy
  // them to device memory at r.
  if (n == 0) {
    return;  // nothing to sample or copy
  }
  vector<int64_t> random(n);  // NOLINT
  // vector::data() is well-defined even for empty vectors, unlike
  // &random[0], which is UB when the vector is empty.
  caffe_rng_bernoulli(n, p, random.data());
  this->memcpy(sizeof(int64_t) * n, random.data(), vptr<void>(r));
}