Example #1
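A reference CPU implementation of the LRN (local response normalization) forward pass used in Caffe's layer tests: for each element it accumulates scale = 1 + (alpha / size) * sum of squared inputs over a window of neighboring channels, then divides the input by scale^beta: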
template <typename Dtype>
void LRNLayerTest<Dtype>::ReferenceLRNForward(
    const Blob<Dtype>& blob_bottom, const LayerParameter& layer_param,
    Blob<Dtype>* blob_top) {
  blob_top->Reshape(blob_bottom.num(), blob_bottom.channels(),
      blob_bottom.height(), blob_bottom.width());
  const Dtype* bottom_data = blob_bottom.cpu_data();
  Dtype* top_data = blob_top->mutable_cpu_data();
  Dtype alpha = layer_param.alpha();
  Dtype beta = layer_param.beta();
  int size = layer_param.local_size();
  for (int n = 0; n < blob_bottom.num(); ++n) {
    for (int c = 0; c < blob_bottom.channels(); ++c) {
      for (int h = 0; h < blob_bottom.height(); ++h) {
        for (int w = 0; w < blob_bottom.width(); ++w) {
          int c_start = c - (size - 1) / 2;
          int c_end = std::min(c_start + size, blob_bottom.channels());
          c_start = std::max(c_start, 0);
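          // scale accumulates 1 + (alpha / size) * sum of squared inputs over the channel window.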
          Dtype scale = 1.;
          for (int i = c_start; i < c_end; ++i) {
            Dtype value = blob_bottom.data_at(n, i, h, w);
            scale += value * value * alpha / size;
          }
          *(top_data + blob_top->offset(n, c, h, w)) =
            blob_bottom.data_at(n, c, h, w) / std::pow(scale, beta);
        }
      }
    }
  }
}
Example #2
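The same reference implementation after the LRN settings moved into their own LRNParameter message. It additionally handles the WITHIN_CHANNEL region, where the sum runs over a size x size spatial window inside a single channel and is normalized by size * size rather than size: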
template <typename Dtype>
void LRNLayerTest<Dtype>::ReferenceLRNForward(
    const Blob<Dtype>& blob_bottom, const LayerParameter& layer_param,
    Blob<Dtype>* blob_top) {
  blob_top->Reshape(blob_bottom.num(), blob_bottom.channels(),
      blob_bottom.height(), blob_bottom.width());
  Dtype* top_data = blob_top->mutable_cpu_data();
  LRNParameter lrn_param = layer_param.lrn_param();
  Dtype alpha = lrn_param.alpha();
  Dtype beta = lrn_param.beta();
  int size = lrn_param.local_size();
  switch (lrn_param.norm_region()) {
  case LRNParameter_NormRegion_ACROSS_CHANNELS:
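    // ACROSS_CHANNELS: sum squares over a window of neighboring channels at each (h, w).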
    for (int n = 0; n < blob_bottom.num(); ++n) {
      for (int c = 0; c < blob_bottom.channels(); ++c) {
        for (int h = 0; h < blob_bottom.height(); ++h) {
          for (int w = 0; w < blob_bottom.width(); ++w) {
            int c_start = c - (size - 1) / 2;
            int c_end = std::min(c_start + size, blob_bottom.channels());
            c_start = std::max(c_start, 0);
            Dtype scale = 1.;
            for (int i = c_start; i < c_end; ++i) {
              Dtype value = blob_bottom.data_at(n, i, h, w);
              scale += value * value * alpha / size;
            }
            *(top_data + blob_top->offset(n, c, h, w)) =
              blob_bottom.data_at(n, c, h, w) / std::pow(scale, beta);
          }
        }
      }
    }
    break;
  case LRNParameter_NormRegion_WITHIN_CHANNEL:
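    // WITHIN_CHANNEL: sum squares over a size x size spatial window within one channel.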
    for (int n = 0; n < blob_bottom.num(); ++n) {
      for (int c = 0; c < blob_bottom.channels(); ++c) {
        for (int h = 0; h < blob_bottom.height(); ++h) {
          int h_start = h - (size - 1) / 2;
          int h_end = std::min(h_start + size, blob_bottom.height());
          h_start = std::max(h_start, 0);
          for (int w = 0; w < blob_bottom.width(); ++w) {
            Dtype scale = 1.;
            int w_start = w - (size - 1) / 2;
            int w_end = std::min(w_start + size, blob_bottom.width());
            w_start = std::max(w_start, 0);
            for (int nh = h_start; nh < h_end; ++nh) {
              for (int nw = w_start; nw < w_end; ++nw) {
                Dtype value = blob_bottom.data_at(n, c, nh, nw);
                scale += value * value * alpha / (size * size);
              }
            }
            *(top_data + blob_top->offset(n, c, h, w)) =
              blob_bottom.data_at(n, c, h, w) / std::pow(scale, beta);
          }
        }
      }
    }
    break;
  default:
    LOG(FATAL) << "Unknown normalization region.";
  }
}
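For context, here is how such a reference implementation is typically exercised in a Caffe layer test; a minimal sketch assuming the usual test-fixture members (blob_bottom_, blob_top_, their *_vec_ wrappers, and epsilon_), none of which appear on this page, and the vector-based SetUp/Forward signatures of later Caffe versions:

TYPED_TEST(LRNLayerTest, TestForwardAcrossChannels) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter layer_param;
  LRNLayer<Dtype> layer(layer_param);
  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
  // Compare the layer's output against the reference, element by element.
  Blob<Dtype> top_reference;
  this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, &top_reference);
  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
    EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i],
                this->epsilon_);
  }
}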
Example #3
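An Android (JNI) helper that copies the first output blob of a network back into a bitmap's pixel buffer, clamping each of three channels to [0, 255] and packing them into one 32-bit pixel per position: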
void CaffeMobile::putImage(AndroidBitmapInfo* info, void* pixels, const vector<Blob<float>*>& resImage) {
	Blob<float>* srcBlob = resImage[0];  // first output blob of the network

	LOG(DEBUG) << "srcBlob received";

	vector<int> shape = {1, 3, (int) info->height, (int) info->width};  // NCHW; height before width, matching data_at(0, c, iy, ix) below

	LOG(DEBUG) << "shape configured";

	Blob<float>* imgBlob = new Blob<float>();
	LOG(DEBUG) << "Blob created";

	imgBlob->Reshape(shape);
	LOG(DEBUG) << "imgBlob reshaped";

	imgBlob->CopyFrom(*srcBlob, false, true);  // copy_diff = false, reshape = true
	LOG(DEBUG) << "imgBlob copied";

	int size = imgBlob->count();
	LOG(DEBUG) << "imgBlob size is: " << size;

	/*Partially from https://github.com/ruckus/android-image-filter-ndk*/

	uint32_t* pixelRow;
	int ix, iy, red, green, blue;

	for (iy = 0; iy < (int) info->height; iy++) {

		pixelRow = (uint32_t*) pixels;

		for (ix = 0; ix < (int) info->width; ix++) {
			// Clamp each channel of the blob to the displayable [0, 255] range.
			red = (int) clip(imgBlob->data_at(0, 0, iy, ix), 0, 255);
			green = (int) clip(imgBlob->data_at(0, 1, iy, ix), 0, 255);
			blue = (int) clip(imgBlob->data_at(0, 2, iy, ix), 0, 255);

			// Pack the three channels into one 32-bit pixel.
			pixelRow[ix] =
					((red << 16) & 0x00FF0000) |
					((green << 8) & 0x0000FF00) |
					(blue & 0x000000FF);
		}

		// Advance one row; stride may include padding beyond width * 4 bytes.
		pixels = (char*) pixels + info->stride;
	}

	LOG(DEBUG) << "before return putImage " << size;

	return;
}
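The clip helper used above is not defined on this page; a minimal sketch of what it presumably does, clamping a channel value to the displayable [0, 255] range (the name and signature are assumptions inferred from the call sites):

// Assumed helper, not part of Caffe: clamp v into [lo, hi].
static inline float clip(float v, float lo, float hi) {
	return v < lo ? lo : (v > hi ? hi : v);
}

Example #4

A Caffe test helper that asserts two blobs have the same shape and element-wise identical contents: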
template <typename TypeParam>
void HDF5OutputLayerTest<TypeParam>::CheckBlobEqual(const Blob<Dtype>& b1,
                                                    const Blob<Dtype>& b2) {
  EXPECT_EQ(b1.num(), b2.num());
  EXPECT_EQ(b1.channels(), b2.channels());
  EXPECT_EQ(b1.height(), b2.height());
  EXPECT_EQ(b1.width(), b2.width());
  for (int n = 0; n < b1.num(); ++n) {
    for (int c = 0; c < b1.channels(); ++c) {
      for (int h = 0; h < b1.height(); ++h) {
        for (int w = 0; w < b1.width(); ++w) {
          EXPECT_EQ(b1.data_at(n, c, h, w), b2.data_at(n, c, h, w));
        }
      }
    }
  }
}
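Example #5

A standalone evaluation driver written against an early Caffe API (note Caffe::set_phase and Forward taking a dummy input vector). It loads a net definition and trained weights, counts the lines of a label file to determine the number of test examples, runs the net forward batch by batch, and dumps the raw values of the last layer's first bottom blob to 3dNormalResult.txt: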
int main(int argc, char** argv)
{

	cudaSetDevice(0);
	Caffe::set_phase(Caffe::TEST);
	Caffe::SetDevice(3);
	//Caffe::set_mode(Caffe::CPU);

	if (argc == 8 && strcmp(argv[7], "CPU") == 0) {
		LOG(ERROR) << "Using CPU";
		Caffe::set_mode(Caffe::CPU);
	} else {
		LOG(ERROR) << "Using GPU";
		Caffe::set_mode(Caffe::GPU);
	}

//	Caffe::set_mode(Caffe::CPU);
	NetParameter test_net_param;
	ReadProtoFromTextFile(argv[1], &test_net_param);
	Net<float> caffe_test_net(test_net_param);
	NetParameter trained_net_param;
	ReadProtoFromBinaryFile(argv[2], &trained_net_param);
	caffe_test_net.CopyTrainedLayersFrom(trained_net_param);

	vector<shared_ptr<Layer<float> > > layers = caffe_test_net.layers();
	const DataLayer<float> *datalayer = dynamic_cast<const DataLayer<float>* >(layers[0].get());
	CHECK(datalayer);

	string labelFile(argv[3]);
	int data_counts = 0;
	char buf[100000];
	FILE* file = fopen(labelFile.c_str(), "r");
	while (fgets(buf, sizeof(buf), file) != NULL)
	{
		data_counts++;
	}
	fclose(file);

	vector<Blob<float>*> dummy_blob_input_vec;
	string rootfolder(argv[4]);
	rootfolder.append("/");
	CreateDir(rootfolder.c_str(), rootfolder.size() - 1);
	string folder;
	string fName;

	float output;
	int counts = 0;

	file = fopen(labelFile.c_str(), "r");

	// First bottom blob of the last layer; its num() gives the batch size.
	Blob<float>* bottom_blob = (*(caffe_test_net.bottom_vecs().rbegin()))[0];
	int batch_size = bottom_blob->num();
	int batchCount = (int) std::ceil(data_counts / (double) batch_size);

	string resulttxt = rootfolder + "3dNormalResult.txt";
	FILE * resultfile = fopen(resulttxt.c_str(), "w");

	for (int batch_id = 0; batch_id < batchCount; ++batch_id)
	{
		LOG(INFO)<< "processing batch :" << batch_id+1 << "/" << batchCount <<"...";

		const vector<Blob<float>*>& result = caffe_test_net.Forward(dummy_blob_input_vec);
		Blob<float>* bboxs = (*(caffe_test_net.bottom_vecs().rbegin()))[0];
		int bsize = bboxs->num();

		const Blob<float>* labels = (*(caffe_test_net.bottom_vecs().rbegin()))[1];
		for (int i = 0; i < bsize && counts < data_counts; i++, counts++)
		{
			char fname[1010];
			fscanf(file, "%1009s", fname);
			/*for(int w = 0; w < LABEL_SIZE; w ++)
				for(int h = 0; h < LABEL_SIZE; h ++)
				{
					int lbl;
					fscanf(file,"%d",&lbl);
				}*/
			fprintf(resultfile, "%s ", fname);
			int len = LABEL_SIZE * LABEL_SIZE * LABEL_LEN;
			for(int j = 0; j < len; j ++)
			{
				fprintf(resultfile, "%f ", (float)(bboxs->data_at(i, j, 0, 0)) );
			}
			fprintf(resultfile, "\n");
		}
	}

	fclose(resultfile);
	fclose(file);


	return 0;
}
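Example #6

A variant of the previous driver: for each image it also consumes the per-pixel label values from the label file, L2-normalizes the three predicted channels at every (h, w) position into a scratch buffer, and writes the normalized values (apparently surface normals, given the 3dNormalResult naming) to the result file: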
int main(int argc, char** argv)
{

	Caffe::set_phase(Caffe::TEST);
	Caffe::SetDevice(0);
	//Caffe::set_mode(Caffe::CPU);

	if (argc == 8 && strcmp(argv[7], "CPU") == 0) {
		LOG(ERROR) << "Using CPU";
		Caffe::set_mode(Caffe::CPU);
	} else {
		LOG(ERROR) << "Using GPU";
		Caffe::set_mode(Caffe::GPU);
	}


	//Caffe::set_mode(Caffe::CPU);

	NetParameter test_net_param;
	ReadProtoFromTextFile(argv[1], &test_net_param);
	Net<float> caffe_test_net(test_net_param);
	NetParameter trained_net_param;
	ReadProtoFromBinaryFile(argv[2], &trained_net_param);
	caffe_test_net.CopyTrainedLayersFrom(trained_net_param);

	vector<shared_ptr<Layer<float> > > layers = caffe_test_net.layers();
	const DataLayer<float> *datalayer = dynamic_cast<const DataLayer<float>* >(layers[0].get());
	CHECK(datalayer);

	string labelFile(argv[3]);
	int data_counts = 0;
	char buf[100000];
	FILE* file = fopen(labelFile.c_str(), "r");
	while (fgets(buf, sizeof(buf), file) != NULL)
	{
		data_counts++;
	}
	fclose(file);

	vector<Blob<float>*> dummy_blob_input_vec;
	string rootfolder(argv[4]);
	rootfolder.append("/");
	CreateDir(rootfolder.c_str(), rootfolder.size() - 1);
	string folder;
	string fName;

	float output;
	int counts = 0;

	file = fopen(labelFile.c_str(), "r");

	// First bottom blob of the last layer; its num() gives the batch size.
	Blob<float>* bottom_blob = (*(caffe_test_net.bottom_vecs().rbegin()))[0];
	int batch_size = bottom_blob->num();
	int batchCount = (int) std::ceil(data_counts / (double) batch_size);

	string resulttxt = rootfolder + "3dNormalResult.txt";
	FILE * resultfile = fopen(resulttxt.c_str(), "w");

	// Scratch buffer for one image's normalized LABEL_LEN x LABEL_HEIGHT x LABEL_WIDTH output.
	float* st = new float[LABEL_LEN * LABEL_HEIGHT * LABEL_WIDTH];

	for (int batch_id = 0; batch_id < batchCount; ++batch_id)
	{
		LOG(INFO)<< "processing batch :" << batch_id+1 << "/" << batchCount <<"...";

		const vector<Blob<float>*>& result = caffe_test_net.Forward(dummy_blob_input_vec);
		Blob<float>* bboxs = (*(caffe_test_net.bottom_vecs().rbegin()))[0];
		int bsize = bboxs->num();

		const Blob<float>* labels = (*(caffe_test_net.bottom_vecs().rbegin()))[1];
		for (int i = 0; i < bsize && counts < data_counts; i++, counts++)
		{
			char fname[1010];
			fscanf(file, "%1009s", fname);
			// Skip this line's label values (LABEL_LEN x LABEL_WIDTH x LABEL_HEIGHT floats).
			for (int c = 0; c < LABEL_LEN; c++)
				for (int w = 0; w < LABEL_WIDTH; w++)
					for (int h = 0; h < LABEL_HEIGHT; h++)
					{
						float lbl;
						fscanf(file, "%f", &lbl);
					}
			fprintf(resultfile, "%s ", fname);
			//char ss2[1010];
			//sprintf(ss2,"/home/dragon123/cnncode/3ddata/testReg/%d_fine_not.jpg",counts);
			//Mat imgOut(Size(LABEL_SIZE,LABEL_SIZE),CV_8UC3);
			//for(int c = 0; c < LABEL_LEN; c++)
			{
				for(int w = 0; w < LABEL_WIDTH; w ++)
					for(int h = 0; h < LABEL_HEIGHT; h ++)
					{
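						// Read the 3-channel prediction at (h, w) and L2-normalize it into a unit vector.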
						float tnum1 = (float)(bboxs->data_at(i, 0, h, w));
						float tnum2 = (float)(bboxs->data_at(i, 1, h, w));
						float tnum3 = (float)(bboxs->data_at(i, 2, h, w));
						float z = tnum1 * tnum1 + tnum2 * tnum2 + tnum3 * tnum3;
						z = sqrt(z);
						st[w * LABEL_HEIGHT + h] = tnum1 / z;
						st[LABEL_HEIGHT * LABEL_WIDTH + w * LABEL_HEIGHT + h] = tnum2 / z;
						st[2 * LABEL_HEIGHT * LABEL_WIDTH + w * LABEL_HEIGHT + h] = tnum3 / z;
						//fprintf(resultfile, "%f ", tnum);
						//imgOut.at<cv::Vec3b>(h, w)[2 - c] = (uchar)(tnum) * 128 + 128);
					}
			}

			for(int c = 0; c < LABEL_LEN; c++)
			{
				for(int w = 0; w < LABEL_WIDTH; w ++)
					for(int h = 0; h < LABEL_HEIGHT; h ++)
					{
						float tnum = st[c * LABEL_HEIGHT * LABEL_WIDTH + w * LABEL_HEIGHT + h] ;
						fprintf(resultfile, "%f ", tnum);
			//			imgOut.at<cv::Vec3b>(h, w)[2 - c] = (uchar)((tnum) * 128 + 128);
					}
			}

			//imwrite(ss2,imgOut);
			fprintf(resultfile, "\n");
		}
	}

	delete[] st;

	fclose(resultfile);
	fclose(file);


	return 0;
}