Code example #1
TEST_F(for_each_channel_accumulate_test, should_throw_when_image_dimensions_dont_match)
{
    rgb16_image_t img1(1, 2), img2(1, 3), img3(2, 2);

    auto zero = [](channel16_t, channel16_t) { return 0; };
    ASSERT_THROW(for_each_channel_accumulate(const_view(img1), const_view(img2), 0, zero), std::invalid_argument);
    ASSERT_THROW(for_each_channel_accumulate(const_view(img1), const_view(img3), 0, zero), std::invalid_argument);
}
Code example #2
TEST_F(for_each_channel_accumulate_test, should_return_initial_value_for_empty_images)
{
    rgb16_image_t img1, img2;

    ASSERT_EQ(8, for_each_channel_accumulate(const_view(img1), const_view(img2), 8, [&](channel16_t p1, channel16_t p2)
    {
        return 10;
    }));
}
Code example #3
void FrameAverager::addFrame(const RawImage& frame)
{
    if (view(accumulator).size() == 0)
    {
        accumulator.recreate(frame.dimensions());
        fill_pixels(view(accumulator), Color::black());
    }
    boost::gil::transform_pixels(const_view(frame), const_view(accumulator), view(accumulator), boost::gil::pixel_plus_t<RawPixel, AccumPixel, AccumPixel>());
    ++frameCount;
}
Code example #4
void ImageGetter::resizeJPGImage(std::string const& imageURL, int sizex, int sizey)
{
    boost::gil::rgb8_image_t img;
    jpeg_read_image(imageURL, img);

    boost::gil::rgb8_image_t targetImageSize(sizex, sizey);

    resize_view(const_view(img), view(targetImageSize), boost::gil::bilinear_sampler());
    jpeg_write_view(imageURL, const_view(targetImageSize));
}
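The resize snippet above depends on GIL's JPEG IO and numeric extensions, which are not shown in the excerpt. A minimal sketch of the headers it presumably assumes (paths as in classic Boost.GIL; newer Boost releases expose the IO API through <boost/gil/extension/io/jpeg.hpp> instead):

// Assumed headers for the snippet above (not part of the original excerpt).
#include <boost/gil/extension/io/jpeg_io.hpp>       // jpeg_read_image, jpeg_write_view
#include <boost/gil/extension/numeric/sampler.hpp>  // bilinear_sampler
#include <boost/gil/extension/numeric/resample.hpp> // resize_view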
Code example #5
TEST_F(for_each_channel_accumulate_test, should_include_the_initial_value)
{
    rgb16_image_t img1(1, 1), img2(1, 1);

    view(img1)(0, 0) = { 1, 0, 0 };
    view(img2)(0, 0) = { 1, 0, 0 };

    ASSERT_EQ(2 + 8, for_each_channel_accumulate(const_view(img1), const_view(img2), 8, [&](channel16_t p1, channel16_t p2)
    {
        return p1 + p2;
    }));
}
Code example #6
TEST_F(for_each_channel_accumulate_test, should_call_functor_for_each_channel_in_parallel_in_both_images_and_sum_the_results)
{
    rgb16_image_t img1(1, 1);
    rgb8_image_t img2(1, 1);

    view(img1)(0, 0) = { 1025, 18, 36 };
    view(img2)(0, 0) = { 1, 2, 4 };

    ASSERT_EQ(1024 + 16 + 32, for_each_channel_accumulate(const_view(img1), const_view(img2), 0, [&](channel16_t p1, channel8_t p2)
    {
        return p1 - p2;
    })) << "should sum all results";
}
Code example #7
TEST_F(for_each_channel_accumulate_test, should_call_functor_for_each_pixel_in_parallel_in_both_images)
{
    rgb16_image_t img1(3, 2), img2(3, 2);
    auto v1 = view(img1), v2 = view(img2);

    v1(0, 0) = { 1, 0, 0 }; v1(1, 0) = { 2, 0, 0 };  v1(2, 0) = { 4, 0, 0 };
    v1(0, 1) = { 8, 0, 0 }; v1(1, 1) = { 16, 0, 0 }; v1(2, 1) = { 32, 0, 0 };
    v2(0, 0) = { 64, 0, 0 };  v2(1, 0) = { 128, 0, 0 };  v2(2, 0) = { 256, 0, 0 };
    v2(0, 1) = { 512, 0, 0 }; v2(1, 1) = { 1024, 0, 0 }; v2(2, 1) = { 2048, 0, 0 };

    ASSERT_EQ(4095, for_each_channel_accumulate(const_view(img1), const_view(img2), 0, [&](channel16_t p1, channel16_t p2)
    {
        return p1 + p2;
    })) << "should call functor for each pixel";
}
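Taken together, the tests in examples #1, #2, #5, #6 and #7 pin down the contract of for_each_channel_accumulate: it throws std::invalid_argument when the two views' dimensions differ, visits the channels of both views in lock step, and folds the functor's results onto the initial value. The project's real implementation is not part of this listing; the following is only a minimal sketch, written against classic Boost.GIL, that would satisfy those tests:

// Minimal sketch (assumed, not the project's actual implementation) of a
// for_each_channel_accumulate matching the behaviour exercised by the tests.
#include <stdexcept>
#include <boost/gil.hpp>   // <boost/gil/gil_all.hpp> on older Boost releases

template <typename View1, typename View2, typename T, typename BinaryOp>
T for_each_channel_accumulate(const View1& v1, const View2& v2, T init, BinaryOp op)
{
    if (v1.dimensions() != v2.dimensions())
        throw std::invalid_argument("image dimensions don't match");

    auto it1 = v1.begin();
    auto it2 = v2.begin();
    for (; it1 != v1.end(); ++it1, ++it2)               // every pixel, scanline order
        boost::gil::static_for_each(*it1, *it2,         // corresponding channels of both pixels
            [&](auto c1, auto c2) { init += op(c1, c2); });
    return init;
}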
Code example #8
	void save_image(
		cell cells[GRID_W][GRID_H],
		Vec2 particles[PARTICLES_NUM],
		std::string filename) {
		int w = GRID_W;
		int h = GRID_H;
		
		rgb8_image_t img(w, h);
		
		rgb8_image_t::view_t v = view(img);
		
		for(int i = 0; i < w; i++){
			for(int j = 0; j < h; j++){
				cell * c = &cells[i][j];
				Vec2 * cu = &c->u;
				
				v(i,j) = rgb8_pixel_t(cu->x * 255 + 128, cu->y * 255 + 128, 0);
				//v(i,j) = rgb8_pixel_t(0, 0, 0);
			}
		}

		for(int i = 0; i < PARTICLES_NUM; i++){
			int posi = particles[i].x;
			int posj = particles[i].y;
			
			if(posi < 0 || posi >= w || posj < 0 || posj >= h){
				continue;
			}
			
			v(posi, posj) = rgb8_pixel_t(255, 255, 255);
		}
		
		png_write_view(filename, const_view(img));
	}
Code example #9
File: gaussian_pyramid.cpp (project: JohanAberg/Ramen)
void rgba_gaussian_pyramid_t::build()
{
	float sigma = ( 1.0f / ratio_ - 1.0f);

	buffer_t tmp( const_view().width(), const_view().height(), 4);
	buffer_t tmp2( tmp.height(), tmp.width(), 4);
	
	for( int i = 1; i < num_levels_; ++i)
	{
		int w = const_view( i-1).width();
		int h = const_view( i-1).height();
		
		image::image_view_t tmp_view( boost::gil::subimage_view( tmp.rgba_view(), 0, 0, w, h));
		gaussian_blur_rgba( const_view( i-1), boost::gil::subimage_view( tmp2.rgba_view(), 0, 0, h, w), tmp_view, sigma, sigma);
		scale_view( tmp_view, view( i));
	}
}
Code example #10
inline void PrintTo(const Nebula::RawImage& img, ::std::ostream* os)
{
    *os << "RawImage[ " << img.width() << " x " << img.height() << " :";
    for (auto p : const_view(img))
    {
        *os << " (";
        static_for_each(p, [=](Nebula::RawChannel ch)  { *os << " " << ch; });
        *os << " )";
    }
    *os << " ]";
}
Code example #11
int FeaturesDetectionApplication::main_loop(program_options::variables_map &options)
{
    printf("FeaturesDetectionApplication::main_loop says hello world !\n");


    //init_gui(options);
    //run_gui();

	// initialization ---
    gst_video_input_p.reset(new GstVideoInput(options));
    features_detector_p.reset(new SimpleFAST(options));

    // video output ---
    rgb8_cimg_t current_image(gst_video_input_p->get_image_dimensions());
    gst_video_input_p->get_new_image(current_image.view); // copy the data


    CImgDisplay video_display(current_image.dimx(), current_image.dimy(), get_application_title().c_str());
    video_display.show();
    video_display.display(current_image);

    // intermediary image --
    gray8_image_t gray_image(current_image.view.dimensions());

    // main loop ---

    do
    {
        // get new image --
        gst_video_input_p->get_new_image(current_image.view); // copy the data

        // color to gray_image
        copy_and_convert_pixels(current_image.view, boost::gil::view(gray_image));
        
        // compute features
        const vector<FASTFeature> &features =
            features_detector_p->detect_features((const_view(gray_image)));

        // plot features on output image
        draw_features(features, current_image);

        video_display.display(current_image);

        // add a delay ---	
        wait_some_seconds(0.1); // [seconds]


    }
    while (video_display.is_closed == false);

    return 0;

}
Code example #12
File: io.cpp (project: Hannah1999/Dato-Core)
void boost_parse_image(std::string filename, size_t& width, size_t& height, size_t& channels, Format& format, size_t& image_data_size, std::string format_string) {

  typedef boost::mpl::vector<gray8_image_t, gray16_image_t, rgb8_image_t, rgb16_image_t> my_img_types;

  any_image<my_img_types> src_image;

  if (format_string == "JPG"){
    jpeg_read_image(filename, src_image);
    format = Format::JPG;
  } else if (format_string == "PNG"){
    png_read_image(filename, src_image);
    format = Format::PNG;
  } else{
    if (boost::algorithm::ends_with(filename, "jpg") ||
      boost::algorithm::ends_with(filename, "jpeg")) {
      jpeg_read_image(filename, src_image);
      format = Format::JPG;
    } else if (boost::algorithm::ends_with(filename, "png")){
      png_read_image(filename, src_image);
      format = Format::PNG;
    } else {
      log_and_throw(std::string("Unsupported format."));
    }
  }

  // create a view of the image
  auto src_view = const_view(src_image);

  // extract image information
  width = src_view.width();
  height = src_view.height();
  channels = src_view.num_channels();
  image_data_size = width*height*channels;

  // Debug
  // std::cout << "Read image "<< filename << "\n"
  // << "width: " << width << "\n"
  // << "height: " << height << "\n"
  // << "num_channels " << channels << "\n"
  // << std::endl;
}
Code example #13
///////////////////////////////////////////////////////////////////////////////////
/// Captures the screen contents of the given window and saves them to the given file path in PNG format
void CaptureScreen(CWnd *pWnd, const std::wstring &szFilePath) {

    CRect rectClient;
    pWnd->GetClientRect( &rectClient );

    CClientDC dc(pWnd);

    CDC dc2;
    dc2.CreateCompatibleDC( &dc );

    CBitmap bitmap;
    bitmap.CreateCompatibleBitmap(&dc, rectClient.Width(), rectClient.Height() );

    auto pOldBitmap = dc2.SelectObject( &bitmap );

    dc2.BitBlt(0,0, rectClient.Width(), rectClient.Height(), &dc, 0, 0, SRCCOPY );

    int nWidthAdjust = rectClient.Width();
    if ( nWidthAdjust % 4 ) { nWidthAdjust += 4 - nWidthAdjust % 4;}

    std::vector<BYTE> bitdata( nWidthAdjust * rectClient.Height() * 3 );

    dc2.SelectObject( pOldBitmap );

    BITMAPINFO bitmapinfo;
    ZeroMemory(&bitmapinfo, sizeof(BITMAPINFO) );
    bitmapinfo.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
    bitmapinfo.bmiHeader.biWidth = nWidthAdjust;
    bitmapinfo.bmiHeader.biHeight = rectClient.Height();
    bitmapinfo.bmiHeader.biPlanes = 1;
    bitmapinfo.bmiHeader.biBitCount = 24;
    bitmapinfo.bmiHeader.biCompression = BI_RGB;
    GetDIBits( dc2, bitmap, 0, rectClient.Height(), bitdata.data(), &bitmapinfo, DIB_RGB_COLORS );


    boost::gil::rgb8_image_t img( nWidthAdjust, rectClient.Height() );
    copy_pixels(boost::gil::interleaved_view(nWidthAdjust, rectClient.Height(), (const boost::gil::rgb8_pixel_t*)bitdata.data(), nWidthAdjust*3), view(img));
    png_write_view(CStringA(szFilePath.c_str()), boost::gil::flipped_up_down_view( const_view(img) ) );
}
Code example #14
void TiffImageWriter::writeImage(const std::string& filename, const RawImage& image)
{
    boost::gil::tiff_write_view(filename, const_view(image));
}
Code example #15
File: error_img.cpp (project: hezihertz/fmmtl)
int main(int argc, char **argv)
{
  int N = 1 << 20;

  // Parse custom command line args
  for (int i = 1; i < argc; ++i) {
    if (strcmp(argv[i],"-N") == 0) {
      N = atoi(argv[++i]);
    }
  }
  // Round up to the nearest square number
  int n_side = int(std::ceil(std::sqrt(N)));
  N = n_side * n_side;

  // Init the FMM Kernel and options
  FMMOptions opts = get_options(argc, argv);
  //typedef UnitExpansion kernel_type;
  //typedef ExpExpansion kernel_type;
  typedef LaplaceSpherical kernel_type;
  //typedef YukawaCartesian kernel_type;

  // Init kernel
  kernel_type K;

  typedef kernel_type::point_type  point_type;
  typedef kernel_type::source_type source_type;
  typedef kernel_type::target_type target_type;
  typedef kernel_type::charge_type charge_type;
  typedef kernel_type::result_type result_type;

  std::cout << "Initializing source and N = " << N << " targets..." << std::endl;

  // Init a square targets
  double xmin = -1;
  double xmax = 1;
  double ymin = -1;
  double ymax = 1;

  std::vector<target_type> targets;
  targets.reserve(N);
  for (int n = 0; n < n_side; ++n) {
    for (int m = 0; m < n_side; ++m) {
      targets.push_back(target_type(xmin + n * (xmax-xmin) / (n_side-1),
                                    ymin + m * (ymax-ymin) / (n_side-1)));
    }
  }
  //int middle = n_side/2 * n_side + n_side/2;
  int middle = fmmtl::random<unsigned>::get(0, N);

  // Init charges, only the middle source has a charge
  std::vector<charge_type> charges(N);
  charges[middle] = charge_type(1);

  std::cout << "Building the kernel matrix..." << std::endl;

  // Build the FMM
  fmmtl::kernel_matrix<kernel_type> A = K(targets, targets);
  A.set_options(opts);

  std::cout << "Performing the kernel matrix-vector mult..." << std::endl;

  // Execute the FMM
  std::vector<result_type> result = A * charges;

  // Check the result
  std::cout << "Computing direct kernel matrix-vector mult..." << std::endl;

  // Compute the result with a direct matrix-vector multiplication
  std::vector<result_type> exact(N);
  fmmtl::direct(K,
                 targets.begin()+middle, targets.begin()+middle+1,
                 charges.begin()+middle,
                 targets.begin(), targets.end(),
                 exact.begin());

  std::cout << "Computing the errors..." << std::endl;

  std::vector<double> log_error(result.size());
  double min_error = std::numeric_limits<double>::max();
  double max_error = std::numeric_limits<double>::lowest();
  for (unsigned k = 0; k < result.size(); ++k) {
    double error = norm_2(exact[k] - result[k]) / norm_2(exact[k]);
    if (error > 1e-15)
      log_error[k] = std::log10(error);
    else
      log_error[k] = -16;
    min_error = std::min(min_error, log_error[k]);
    max_error = std::max(max_error, log_error[k]);
  }

  std::cout << "Min log error: " << min_error << std::endl;
  std::cout << "Max log error: " << max_error << std::endl;

  // Fill the image with the errors computed above
  gil::rgb8_image_t img(n_side, n_side);
  auto img_view = gil::view(img);
  for (int n = 0; n < n_side; ++n) {
    for (int m = 0; m < n_side; ++m) {
      img_view(n,m) = make_heat((log_error[n*n_side+m] - min_error) / (max_error - min_error));
    }
  }
  gil::png_write_view("fmmtl_errors.png", const_view(img));
}
Code example #16
File: any_image.hpp (project: fiskercui/testcommon)
template <typename Image> result_type operator()(const Image& img) const { return result_type(const_view(img)); }
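The one-liner above is the visitor that GIL's dynamic_image extension dispatches through apply_operation in order to obtain a read-only view of whatever image type an any_image currently holds. As a hedged illustration of the same pattern, assuming the classic, pre-Boost-1.68 mpl::vector-based dynamic_image API (the visitor name and image set below are made up for this example):

// Illustrative visitor in the same style: returns the width of the image type
// currently held by the any_image (classic Boost.GIL dynamic_image API assumed).
#include <cstddef>
#include <boost/mpl/vector.hpp>
#include <boost/gil/gil_all.hpp>
#include <boost/gil/extension/dynamic_image/dynamic_image_all.hpp>

using images_t = boost::mpl::vector<boost::gil::gray8_image_t, boost::gil::rgb8_image_t>;

struct width_op
{
    using result_type = std::ptrdiff_t;   // apply_operation uses the functor's result_type
    template <typename Image>
    result_type operator()(const Image& img) const { return const_view(img).width(); }
};

int main()
{
    boost::gil::any_image<images_t> img(boost::gil::rgb8_image_t(4, 3));
    // apply_operation selects the operator() instantiation for the runtime image type.
    return boost::gil::apply_operation(img, width_op{}) == 4 ? 0 : 1;
}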
Code example #17
RawImage FrameAverager::getCombinedFrame() const
{
    RawImage output(accumulator.dimensions());
    // Average the accumulated frames: divide every channel by the frame count
    // via transform_pixels (view_divides_scalar is not a Boost.GIL API).
    boost::gil::transform_pixels(const_view(accumulator), view(output), [this](const AccumPixel& p) {
        RawPixel averaged;
        boost::gil::static_transform(p, averaged, [this](auto c) { return c / frameCount; });
        return averaged;
    });
    return output;
}