Example #1
// C-style export: rebuilds the Blob pointer vectors from flat arrays so the
// layer can be driven across an FFI boundary, then runs one forward pass and
// returns the resulting loss.
EXPORT float caffe_layer_Forward(void *layerAnon, void *bottomAnon, int bottomLength, void *topAnon, int topLength)
{
	Layer<float> *layer = (Layer<float> *)layerAnon;
	Blob<float>** bottomArray = (Blob<float>**)bottomAnon;
	Blob<float>** topArray = (Blob<float>**)topAnon;
	vector<Blob<float>*> bottom (bottomArray, bottomArray + bottomLength);
	vector<Blob<float>*> top (topArray, topArray + topLength);
	return layer->Forward(bottom, top);
}
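For context, a call site for this wrapper might look like the minimal sketch below. The names myLayer, inputBlob, and outputBlob are hypothetical placeholders for a Layer<float>* and Blob<float>* objects that the host application would already have created and shaped; only the wrapper's signature above is assumed.

// Hypothetical caller passing flat pointer arrays across the FFI boundary.
Blob<float>* bottomArr[] = { inputBlob };   // one input blob
Blob<float>* topArr[] = { outputBlob };     // one output blob
float loss = caffe_layer_Forward(myLayer, bottomArr, 1, topArr, 1);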
Example #2
template <typename Dtype>
void GradientChecker<Dtype>::CheckGradientSingle(Layer<Dtype>& layer,
		vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>& top,
		int check_bottom, int top_id, int top_data_id) {
	// First, figure out what blobs we need to check against.
	vector<Blob<Dtype>*> blobs_to_check;
	for (int i = 0; i < layer.blobs().size(); ++i) {
		blobs_to_check.push_back(layer.blobs()[i].get());
	}
	if (check_bottom < 0) {
		for (int i = 0; i < bottom.size(); ++i) {
			blobs_to_check.push_back(bottom[i]);
		}
	} else {
		CHECK(check_bottom < bottom.size());
		blobs_to_check.push_back(bottom[check_bottom]);
	}
	// go through the bottom and parameter blobs
	// LOG(ERROR) << "Checking " << blobs_to_check.size() << " blobs.";
	for (int blobid = 0; blobid < blobs_to_check.size(); ++blobid) {
		Blob<Dtype>* current_blob = blobs_to_check[blobid];
		// LOG(ERROR) << "Blob " << blobid << ": checking " << current_blob->count()
		//     << " parameters.";
		// go through the values
		for (int feat_id = 0; feat_id < current_blob->count(); ++feat_id) {
			// First, obtain the original data
			Caffe::set_random_seed(seed_);
			layer.Forward(bottom, &top);
			Dtype computed_objective = GetObjAndGradient(top, top_id,
					top_data_id);
			// Get any additional loss from the layer
			computed_objective += layer.Backward(top, true, &bottom);
			Dtype computed_gradient = current_blob->cpu_diff()[feat_id];
			// compute score by adding stepsize
			current_blob->mutable_cpu_data()[feat_id] += stepsize_;
			Caffe::set_random_seed(seed_);
			layer.Forward(bottom, &top);
			Dtype positive_objective = GetObjAndGradient(top, top_id,
					top_data_id);
			positive_objective += layer.Backward(top, true, &bottom);
			// compute score by subtracting stepsize
			current_blob->mutable_cpu_data()[feat_id] -= stepsize_ * 2;
			Caffe::set_random_seed(seed_);
			layer.Forward(bottom, &top);
			Dtype negative_objective = GetObjAndGradient(top, top_id,
					top_data_id);
			negative_objective += layer.Backward(top, true, &bottom);
			// Recover stepsize
			current_blob->mutable_cpu_data()[feat_id] += stepsize_;
			Dtype estimated_gradient = (positive_objective - negative_objective)
					/ stepsize_ / 2.;
			Dtype feature = current_blob->cpu_data()[feat_id];
			// LOG(ERROR) << "debug: " << current_blob->cpu_data()[feat_id] << " "
			//     << current_blob->cpu_diff()[feat_id];
			if (kink_ - kink_range_ > feature
					|| feature > kink_ + kink_range_) {
				// We check relative accuracy, but for too small values, we threshold
				// the scale factor by 1.
				Dtype scale = max(
						max(fabs(computed_gradient), fabs(estimated_gradient)),
						1.);
				EXPECT_GT(computed_gradient, estimated_gradient - threshold_ * scale)
						<< "debug: (top_id, top_data_id, blob_id, feat_id)="
						<< top_id << "," << top_data_id << "," << blobid << ","
						<< feat_id;
				EXPECT_LT(computed_gradient, estimated_gradient + threshold_ * scale)
						<< "debug: (top_id, top_data_id, blob_id, feat_id)="
						<< top_id << "," << top_data_id << "," << blobid << ","
						<< feat_id;
			}
			// LOG(ERROR) << "Feature: " << current_blob->cpu_data()[feat_id];
			// LOG(ERROR) << "computed gradient: " << computed_gradient
			//    << " estimated_gradient: " << estimated_gradient;
		}
	}
}
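At its core, CheckGradientSingle is a central-difference check: each value is perturbed by ±stepsize_, the forward and backward passes are re-run, and the numeric estimate (positive_objective - negative_objective) / (2 * stepsize_) must agree with the analytic diff within threshold_ times a scale factor that is clamped to at least 1. The self-contained sketch below illustrates the same comparison on a toy function f(x) = x^3 instead of a Caffe layer; the step size and threshold here are illustrative values, not taken from the example above.

#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
	double x = 0.7;
	double h = 1e-2;          // plays the role of stepsize_
	double threshold = 1e-3;  // plays the role of threshold_

	// Toy "layer": f(x) = x^3, with analytic gradient 3x^2.
	double computed_gradient = 3.0 * x * x;
	double positive = (x + h) * (x + h) * (x + h);
	double negative = (x - h) * (x - h) * (x - h);
	double estimated_gradient = (positive - negative) / h / 2.0;

	// Relative comparison with the scale factor thresholded at 1,
	// mirroring the EXPECT_GT/EXPECT_LT bounds in the checker.
	double scale = std::max(std::max(std::fabs(computed_gradient),
			std::fabs(estimated_gradient)), 1.0);
	bool ok = std::fabs(computed_gradient - estimated_gradient) < threshold * scale;
	std::printf("analytic=%g numeric=%g %s\n",
			computed_gradient, estimated_gradient, ok ? "OK" : "MISMATCH");
	return ok ? 0 : 1;
}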