virtual void SetUp() { Caffe::set_random_seed(1601); vector<int> shape1, shape2; shape1.push_back(NUM); shape1.push_back(CHENNAL); shape1.push_back(HEIGHT); shape1.push_back(WIDTH); shape2.push_back(NUM); shape2.push_back(1); shape2.push_back(1); shape2.push_back(1); blob_bottom1_->Reshape(shape1); blob_bottom2_->Reshape(shape2); // fill the values FillerParameter filler_param; GaussianFiller<Dtype> filler1(filler_param); GaussianFiller<Dtype> filler2(filler_param); filler1.Fill(this->blob_bottom1_); for (int i = 0; i < NUM; ++i){ blob_bottom2_->mutable_cpu_data()[i] = caffe_rng_rand() % 2; } for (int i = 0; i < NUM; ++i){ for (int j = 0; j < CHENNAL * HEIGHT * WIDTH; ++j){ int idx = i * CHENNAL * HEIGHT * WIDTH + j; blob_bottom1_->mutable_cpu_data()[idx] = i * 10; } } blob_bottom_vec_.push_back(blob_bottom1_); blob_bottom_vec_.push_back(blob_bottom2_); blob_top_vec_.push_back(blob_top_); }
// Forward-pass test for EntropyTLossLayer:
//  1) a layer with no loss_weight must behave like loss_weight == 1;
//  2) a layer with loss_weight k must return k * loss;
//  3) the layer's internal distance blob must match a reference computation.
void TestForward() {
  // Get the loss without a specified objective weight -- should be
  // equivalent to explicitly specifying a weight of 1.
  LayerParameter layer_param;
  layer_param.mutable_multi_t_loss_param()->set_num_center(N_);
  EntropyTLossLayer<Dtype> layer_weight_1(layer_param);
  // Initial SetUp/Forward so the layer allocates its blobs (centers, sigmas)
  // before we overwrite them below.
  layer_weight_1.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
  layer_weight_1.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
  // Randomize blobs[0] (centers, Gaussian) and blobs[1] (sigmas, uniform in
  // [0.9, 1.1] so they stay strictly positive).
  FillerParameter filler_param;
  GaussianFiller<Dtype> filler2(filler_param);
  filler2.Fill(layer_weight_1.blobs()[0].get());
  caffe_rng_uniform(layer_weight_1.blobs()[1]->count(), Dtype(0.9), Dtype(1.1),
                    layer_weight_1.blobs()[1]->mutable_cpu_data());
  // Re-run SetUp after mutating the blobs — presumably refreshes internal
  // buffers derived from them (TODO confirm against the layer implementation).
  layer_weight_1.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
  const Dtype loss_weight_1 =
      layer_weight_1.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
  // Get the loss again with a different objective weight; check that it is
  // scaled appropriately.
  const Dtype kLossWeight = 3.7;
  LayerParameter layer_param2;
  layer_param2.mutable_multi_t_loss_param()->set_num_center(N_);
  layer_param2.add_loss_weight(kLossWeight);
  EntropyTLossLayer<Dtype> layer_weight_2(layer_param2);
  layer_weight_2.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
  layer_weight_2.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
  // Copy the first layer's (randomized) centers and sigmas into the second
  // layer so both compute the loss from identical parameters.
  caffe_copy(layer_weight_2.blobs()[0]->count(),
             layer_weight_1.blobs()[0]->cpu_data(),
             layer_weight_2.blobs()[0]->mutable_cpu_data());
  caffe_copy(layer_weight_2.blobs()[1]->count(),
             layer_weight_1.blobs()[1]->cpu_data(),
             layer_weight_2.blobs()[1]->mutable_cpu_data());
  layer_weight_2.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
  const Dtype loss_weight_2 =
      layer_weight_2.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
  const Dtype kErrorMargin = 1e-3;
  // Weighted loss must equal unweighted loss scaled by kLossWeight.
  EXPECT_NEAR(loss_weight_1 * kLossWeight, loss_weight_2, kErrorMargin);
  // Make sure the loss is non-trivial.
  const Dtype kNonTrivialAbsThresh = 1e-1;
  EXPECT_GE(fabs(loss_weight_1), kNonTrivialAbsThresh);
  // Cross-check the layer's distance blob against a reference computation:
  // dist[i][j] = sum_k (x[i][k] - c[k][j])^2 * sigma[k][j]
  // i.e. a per-dimension-weighted squared distance from sample i to center j.
  // m samples, n centers, p feature dimensions.
  int m = M_, n = layer_param.multi_t_loss_param().num_center(), p = K_;
  Blob<Dtype> *distance = layer_weight_1.distance();
  // NOTE(review): reads blob_bottom_data_, while SetUp fed blob_bottom1_ to
  // the layer — presumably the same blob under another member name; confirm.
  const Dtype *cpu_data = blob_bottom_data_->cpu_data();
  const Dtype *cpu_dist = distance->cpu_data();
  const Dtype *cpu_center = layer_weight_1.blobs()[0]->cpu_data();
  const Dtype *cpu_sigma = layer_weight_1.blobs()[1]->cpu_data();
  for (int i = 0; i < m; ++i) {
    for (int j = 0; j < n; ++j) {
      Dtype acc = Dtype(0);
      for (int k = 0; k < p; ++k) {
        // data is row-major [m x p]; centers/sigmas are [p x n].
        acc += (cpu_data[i*p + k] - cpu_center[k*n + j])
             * (cpu_data[i*p + k] - cpu_center[k*n + j])
             * cpu_sigma[k*n+j];
      }
      EXPECT_NEAR(acc, cpu_dist[i*n + j], kErrorMargin) << i << " " << j;
    }
  }
}
bool Validator::ValidateBoards(const std::vector<Pos> & board1, const std::vector<Pos> & board2, const std::pair<int, int> & dimensions) { Filler filler1(board1, dimensions.first, dimensions.second); Filler filler2(board2, dimensions.first, dimensions.second); for (std::size_t i = 0, len = board1.size(); i < len; ++i) { std::unordered_set<int> cluster_board1; filler1.GetCluster(board1[i], cluster_board1); std::unordered_set<int> cluster_board2; filler2.GetCluster(board2[i], cluster_board2); if (!ValidateClusters(cluster_board1, cluster_board2)) { return false; } } return true; }