Example #1
game_system::game_system(HWND p_hwnd, const uint2& viewport_size, const core::input_state& input_state)
	:input_state_(input_state),
	render_system_(p_hwnd, viewport_size),
	viewport_is_visible_(true),
	camera_(float3::unit_z, float3::zero)
{
	p_material_editor_view_ = std::make_unique<material_editor_view>(p_hwnd, render_system_.material_editor_tool());

	frame_.projection_matrix = math::perspective_matrix_directx(
		game_system::projection_fov, aspect_ratio(viewport_size),
		game_system::projection_near, game_system::projection_far);
}
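The aspect_ratio(viewport_size) helper used above is not shown in the snippet. Below is a minimal sketch of a plausible definition; it assumes (these are assumptions, not taken from the source) that uint2 exposes integer x/y members and that the ratio is width over height.

// Hypothetical helper, not part of the source above: width-over-height
// ratio of an integer viewport size.
inline float aspect_ratio(const uint2& size)
{
	return static_cast<float>(size.x) / static_cast<float>(size.y);
}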
Example #2
void gradient_node_t::do_process( const render::context_t& context)
{
    gradient_fun grad( get_absolute_value<Imath::V2f>( param( "start")) / context.subsample,
						get_absolute_value<Imath::V2f>( param( "end")) / context.subsample,
						get_value<Imath::Color4f>( param( "startcol")),
						get_value<Imath::Color4f>( param( "endcol")),
						get_value<float>( param( "gamma")), aspect_ratio());

	generate_pixels( grad);
	
	if( get_value<bool>( param( "premult")))
		image::premultiply( image_view(), image_view());
}
Example #3
void game_system::on_resize_viewport(const uint2& size)
{
	if (size.x == 0 || size.y == 0) {
		viewport_is_visible_ = false;
		return;
	}

	ImGui::GetIO().DisplaySize = ImVec2(float(size.x), float(size.y));

	viewport_is_visible_ = true;
	frame_.projection_matrix = math::perspective_matrix_directx(game_system::projection_fov, 
		aspect_ratio(size), game_system::projection_near, game_system::projection_far);
	render_system_.resize_viewport(size);
}
Example #4
Imath::Box2i lens_distort_node_t::redistort_box( const Imath::Box2i& b) const
{
	switch( get_value<int>( param( "model")))
	{
		case syntheyes_model:
		{
			float k = get_value<float>( param( "synth_k"));
			float k3 = get_value<float>( param( "synth_k3"));			
			Imath::V2f center = format().center();

			if( k3 == 0)
			{
				camera::syntheyes_quadratic_redistort dist( k, format(), aspect_ratio());
				return distort_box( b, center, dist);
			}
			else
			{
				camera::syntheyes_redistort dist( k, k3, format(), aspect_ratio());
				return distort_box( b, center, dist);
			}
		}
		break;
	}

	// Fallback for unhandled distortion models: return the input box unchanged
	// so the function cannot fall off the end without returning a value.
	return b;
}
Example #5
image_t *image_read(FILE *fp) {
    JSAMPARRAY buffer;
    int row_stride;
    struct jpeg_decompress_struct jpg;
    struct jpeg_error_mgr jerr;
    image_t *p;

    global_image_resize_fun = image_resize_interpolation;

    jpg.err = jpeg_std_error(&jerr);

    jpeg_create_decompress(&jpg);
    jpeg_stdio_src(&jpg, fp);
    jpeg_read_header(&jpg, TRUE);
    jpeg_start_decompress(&jpg);

    if (jpg.data_precision != 8) {
        fprintf(stderr, "jp2a: can only handle 8-bit color channels\n");
        exit(1);
    }

    row_stride = jpg.output_width * jpg.output_components;
    buffer = (*jpg.mem->alloc_sarray)((j_common_ptr) &jpg, JPOOL_IMAGE, row_stride, 1);

    aspect_ratio(jpg.output_width, jpg.output_height);
    p = image_new(jpg.output_width, jpg.output_height);

    while (jpg.output_scanline < jpg.output_height) {
        jpeg_read_scanlines(&jpg, buffer, 1);

        if (jpg.output_components == 3) {
            memcpy(&p->pixels[(jpg.output_scanline-1)*p->w], &buffer[0][0], sizeof(rgb_t)*p->w);
        } else {
            rgb_t *pixels = &p->pixels[(jpg.output_scanline-1) * p->w];

            // grayscale
            for (int x = 0; x < (int)jpg.output_width; ++x)
                pixels[x].r = pixels[x].g = pixels[x].b = buffer[0][x];
        }
    }

    jpeg_finish_decompress(&jpg);
    jpeg_destroy_decompress(&jpg);
    return p;
}
Example #6
bool
FracturedMediaRefiner<TGrid, TAPosition>::
mark(Face* f, RefinementMark refMark)
{
//	make sure that the position accessor is valid
	UG_ASSERT(m_aaPos.valid(),
			  "Set a position attachment before refining!");

	bool wasMarked = BaseClass::is_marked(f);
	if(!BaseClass::mark(f, refMark))
		return false;

	if(!wasMarked){
		if(aspect_ratio(f) < m_aspectRatioThreshold)
			m_queDegeneratedFaces.push(f);
	}
	return true;
}
Example #7
/* Separation metrics */
void separationMetrics( std::vector<std::vector<cv::Point>> contours, 
                        float *mean_diameter,
                        float *stddev_diameter,
                        float *mean_aspect_ratio,
                        float *stddev_aspect_ratio,
                        float *mean_error_ratio,
                        float *stddev_error_ratio ) {

    // Compute the normal distribution parameters of cells
    std::vector<cv::Point2f> mc(contours.size());
    std::vector<float> dia(contours.size());
    std::vector<float> aspect_ratio(contours.size());
    std::vector<float> error_ratio(contours.size());

    for (size_t i = 0; i < contours.size(); i++) {
        cv::Moments mu = moments(contours[i], true);
        mc[i] = cv::Point2f(static_cast<float>(mu.m10/mu.m00), 
                                            static_cast<float>(mu.m01/mu.m00));
        cv::RotatedRect min_area_rect = minAreaRect(cv::Mat(contours[i]));
        aspect_ratio[i] = float(min_area_rect.size.width)/min_area_rect.size.height;
        if (aspect_ratio[i] > 1.0) {
            aspect_ratio[i] = 1.0/aspect_ratio[i];
        }
        float actual_area = contourArea(contours[i]);
        dia[i] = 2 * sqrt(actual_area / PI);
        // The ellipse inscribed in the min-area rect has semi-axes of half the
        // rect's width and height, so its area is PI/4 * width * height.
        float ellipse_area =
            (float) (PI * 0.25f * min_area_rect.size.width * min_area_rect.size.height);
        error_ratio[i] = (ellipse_area - actual_area) / ellipse_area;
    }
    cv::Scalar mean_dia, stddev_dia;
    cv::meanStdDev(dia, mean_dia, stddev_dia);
    *mean_diameter = static_cast<float>(mean_dia.val[0]);
    *stddev_diameter = static_cast<float>(stddev_dia.val[0]);

    cv::Scalar mean_ratio, stddev_ratio;
    cv::meanStdDev(aspect_ratio, mean_ratio, stddev_ratio);
    *mean_aspect_ratio = static_cast<float>(mean_ratio.val[0]);
    *stddev_aspect_ratio = static_cast<float>(stddev_ratio.val[0]);

    cv::Scalar mean_err_ratio, stddev_err_ratio;
    cv::meanStdDev(error_ratio, mean_err_ratio, stddev_err_ratio);
    *mean_error_ratio = static_cast<float>(mean_err_ratio.val[0]);
    *stddev_error_ratio = static_cast<float>(stddev_err_ratio.val[0]);
}
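A minimal usage sketch for separationMetrics above, assuming OpenCV 4 and that binary is a hypothetical pre-thresholded 8-bit, single-channel mask:

#include <cstdio>
#include <vector>
#include <opencv2/imgproc.hpp>

// Hypothetical caller: extract external contours from a binary mask and
// print one of the metrics computed by separationMetrics() above.
void report_metrics(const cv::Mat& binary)
{
    std::vector<std::vector<cv::Point>> contours;
    cv::findContours(binary, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

    float mean_dia, std_dia, mean_ar, std_ar, mean_err, std_err;
    separationMetrics(contours, &mean_dia, &std_dia, &mean_ar, &std_ar,
                      &mean_err, &std_err);

    std::printf("mean cell diameter: %.2f px\n", mean_dia);
}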
Example #8
glm::mat4 PerspectiveCamera::getProjectionMatrix() const {
    return glm::perspective(fov_y_, aspect_ratio(), near_clipping_distance(),
            far_clipping_distance());
}
Example #9
move2d_node_t::matrix3_type move2d_node_t::do_calc_transform_matrix_at_frame( float frame, int subsample) const
{
    const transform2_param_t *p = dynamic_cast<const transform2_param_t*>( &param( "xf"));
    RAMEN_ASSERT( p);
    return p->matrix_at_frame( frame, aspect_ratio(), subsample);
}
Example #10
void grid_node_t::do_process( const render::context_t& context)
{
	Imath::Color4f color( get_value<Imath::Color4f>( param( "bgcol")));
	boost::gil::fill_pixels( image_view(), image::pixel_t( color.r, color.g, color.b, color.a));
	
	Imath::V2f size( get_absolute_value<Imath::V2f>( param( "size")));
	Imath::V2f translate( get_absolute_value<Imath::V2f>( param( "translate")));
	Imath::V2f line_width( get_absolute_value<Imath::V2f>( param( "linewidth")));
	color = get_value<Imath::Color4f>( param( "fgcol"));
	
	// adjust params
	size.x = size.x / context.subsample / aspect_ratio();
	size.y /= context.subsample;

	if( size.x == 0 || size.y == 0)
		return;
	
	translate.x = translate.x / context.subsample / aspect_ratio();
	translate.y /= context.subsample;
	
	line_width.x = line_width.x / context.subsample / aspect_ratio();
	line_width.y /= context.subsample;
	
	if( line_width.x == 0 || line_width.y == 0)
		return;
	
	// setup agg
    typedef image::agg_rgba32f_renderer_t ren_base_type;
    typedef ren_base_type::color_type color_type;
    typedef agg::renderer_scanline_aa_solid<ren_base_type> renderer_type;
	
    ren_base_type ren_base( image_view());
    renderer_type ren( ren_base);

    agg::rasterizer_scanline_aa<> ras;
    ras.gamma( agg::gamma_none());

    agg::scanline_u8 sl;
	
	ras.reset();

	agg::path_storage path;
	agg::conv_stroke<agg::path_storage> stroke_conv( path);

	// Vertical
	stroke_conv.width( line_width.x);
	
	int w = image_view().width();
	int h = image_view().height();
	
	Imath::Box2f area( defined().min - translate, 
					   defined().max - translate);
	
	float x = Imath::Math<float>::floor( area.min.x / size.x) * size.x;
	for( ; x < area.max.x + line_width.x; x += size.x)
	{
		path.move_to( x - area.min.x, 0);
		path.line_to( x - area.min.x, h);
	}

	ras.add_path( stroke_conv);
    ren.color( image::pixel_t( color.r, color.g, color.b, color.a));
    agg::render_scanlines( ras, sl, ren);

	// Horizontal
	path.remove_all();
	stroke_conv.width( line_width.y);

	float y = Imath::Math<float>::floor( area.min.y / size.y) * size.y;
	for( ; y < area.max.y + line_width.y; y += size.y)
	{
		path.move_to( 0, y - area.min.y);
		path.line_to( w, y - area.min.y);
	}

	ras.add_path( stroke_conv);
    ren.color( image::pixel_t( color.r, color.g, color.b, color.a));
    agg::render_scanlines( ras, sl, ren);
}
Example #11
void decompress(FILE *fp, FILE *fout) {
	int row_stride;
	struct jpeg_error_mgr jerr;
	struct jpeg_decompress_struct jpg;
	JSAMPARRAY buffer;
	Image image;

	jpg.err = jpeg_std_error(&jerr);
	jpeg_create_decompress(&jpg);
	jpeg_stdio_src(&jpg, fp);
	jpeg_read_header(&jpg, TRUE);
	jpeg_start_decompress(&jpg);

	if ( jpg.data_precision != 8 ) {
		fprintf(stderr,
			"Image has %d-bit color channels; only 8-bit is supported.\n",
			jpg.data_precision);
		exit(1);
	}

	row_stride = jpg.output_width * jpg.output_components;

	buffer = (*jpg.mem->alloc_sarray)((j_common_ptr) &jpg, JPOOL_IMAGE, row_stride, 1);

	aspect_ratio(jpg.output_width, jpg.output_height);

	malloc_image(&image);
	clear(&image);

	if ( verbose ) print_info(&jpg);

	init_image(&image, &jpg);

	while ( jpg.output_scanline < jpg.output_height ) {
		jpeg_read_scanlines(&jpg, buffer, 1);
		process_scanline(&jpg, buffer[0], &image);
		if ( verbose ) print_progress(&jpg);
	}

	if ( verbose ) {
		fprintf(stderr, "\n");
		fflush(stderr);
	}

	normalize(&image);

	if ( clearscr ) {
		fprintf(fout, "%c[2J", 27); // ansi code for clear
		fprintf(fout, "%c[0;0H", 27); // move to upper left
	}

	if ( html && !html_rawoutput ) print_html_start(html_fontsize, fout);
	if ( use_border ) print_border(image.width);

	(!usecolors? print_image : print_image_colors) (&image, (int) strlen(ascii_palette) - 1, fout);

	if ( use_border ) print_border(image.width);
	if ( html && !html_rawoutput ) print_html_end(fout);

	free_image(&image);

	jpeg_finish_decompress(&jpg);
	jpeg_destroy_decompress(&jpg);
}
Example #12
void
FracturedMediaRefiner<TGrid, TAPosition>::
collect_objects_for_refine()
{
//	get the grid on which we'll operate
	if(!BaseClass::get_associated_grid())
		UG_THROW("No grid has been set for the refiner.");

	Grid& grid = *BaseClass::get_associated_grid();

//	make sure that the position accessor is valid
	if(!m_aaPos.valid())
		UG_THROW("A position attachment has to be specified before this method is called.");

//	push all marked degenerated faces to a queue.
//	pop elements from that queue, mark them anisotropic and unmark associated
//	degenerated edges.
//	Furthermore we'll push degenerated faces, which are connected to the current
//	face through a regular edge to the queue (only unprocessed ones).

	typename BaseClass::selector_t& sel = BaseClass::get_refmark_selector();

//	some helpers
	vector<Edge*> edges;
	vector<Face*> faces;

//	we need two while-loops. The outer is required to process changes which
//	stem from the base-class implementation.
//todo:	This is a lot of processing due to repeated calls to collect_objects_for_refine.
	do{
		while(!m_queDegeneratedFaces.empty())
		{
			Face* f = m_queDegeneratedFaces.front();
			m_queDegeneratedFaces.pop();

		//	mark as anisotropic
			if(BaseClass::get_mark(f) != RM_ANISOTROPIC)
				BaseClass::mark(f, RM_ANISOTROPIC);

		//	check edges
			CollectAssociated(edges, grid, f);

		//	get the edge with the maximal length
			number eMax = 0;
			for(size_t i_edge = 0; i_edge < edges.size(); ++i_edge){
				number len = EdgeLength(edges[i_edge], m_aaPos);
				if(len > eMax)
					eMax = len;
			}

			if(eMax <= 0)
				eMax = SMALL;

		//	degenerated neighbors of non-degenerated edges have to be selected.
		//	degenerated edges may not be selected
			size_t numDeg = 0;
			for(size_t i_edge = 0; i_edge< edges.size(); ++i_edge){
				Edge* e = edges[i_edge];
				if(EdgeLength(e, m_aaPos) / eMax >= m_aspectRatioThreshold){
				//	non-degenerated edge
				//	make sure it is selected
					if(BaseClass::get_mark(e) != RM_REFINE)
						BaseClass::mark(e, RM_REFINE);

				//	this edge possibly connects to an unselected degenerated neighbor.
				//	If this is the case, we'll have to mark it and push it to the queue.
					CollectAssociated(faces, grid, e);
					for(size_t i_face = 0; i_face < faces.size(); ++i_face){
						Face* nbr = faces[i_face];
						if(!sel.is_selected(nbr)){
							if(aspect_ratio(nbr) < m_aspectRatioThreshold){
							//	push it to the queue.
								m_queDegeneratedFaces.push(nbr);
							}
						}
					}
				}
				else{
				//	degenerated edge. unmark it
					BaseClass::mark(e, RM_NONE);
					++numDeg;
				}
			}

		//	if all edges are degenerate, we will have to perform regular refinement
			if(numDeg == edges.size()){
				BaseClass::mark(f, RM_REFINE);
				for(size_t i = 0; i < edges.size(); ++i)
					BaseClass::mark(edges[i], RM_REFINE);
			}
		}

	//	now call the base implementation. If degenerated faces are selected during
	//	that step, then we have to process them too.
		BaseClass::collect_objects_for_refine();
	}while(!m_queDegeneratedFaces.empty());
}