Example #1
0
void make_screenshot(
	std::unique_ptr<Example>& example,
	GLuint width,
	GLuint height,
	const char* screenshot_path
)
{
	double s = example->HeatUpTime();
	double t = example->ScreenshotTime();
	double dt = example->FrameTime();

	ExampleClock clock(s);

	while(true)
	{
		s += dt;
		clock.Update(s);
		example->Render(clock);
		if(s < t) glfwSwapBuffers();
		else break;
	}
	glFinish();

	std::vector<char> pixels(width * height * 3);
	glReadPixels(
		0, 0,
		width,
		height,
		GL_RGB,
		GL_UNSIGNED_BYTE,
		pixels.data()
	);
	std::ofstream output(screenshot_path, std::ios::binary); // raw pixel dump, so write in binary mode
	output.write(pixels.data(), pixels.size());
	glfwSwapBuffers();
}
Example #2
0
int bmp_image::output_to_file(const std::string& file_name) const {
    std::ofstream out(file_name.c_str(), std::ios_base::binary | std::ios_base::out);

    if ( !out ) {
        return 1;
    }
    //full header size 122
    /* BMP structure
     *    File Header
     * 0   Magic number 0x42, 0x4d (2 bytes)
     * 2   File size = w*h*4 + 122 (4 bytes)
     * 6   Unused (4 bytes)
     * 10  Pixel array offset = 122 (4 bytes)
     *    DIB Header
     * 14  Bytes in DIB Header = 108 (4 bytes)
     * 18  Bitmap width (4 bytes)
     * 22  Bitmap height (4 bytes)
     * 26  Color planes = 1 (2 bytes)
     * 28  Bits/pixel = 32 (2 bytes)
     * 30  BI_BITFIELDS = 3 (no compression used) (4 bytes)
     * 34  Size of the raw data in the Pixel Array = w*h*4 (no row padding needed at 32 bits/pixel) (4 bytes)
     * 38  horizontal pixels/meter = 2835 (4 bytes)
     * 42  vertical pixels/meter = 2835 (4 bytes)
     * 46  Number of colors in the palette = 0 (4 bytes)
     * 50  Important colors = 0 (4 bytes)
     * 54  Red channel bit mask = 0x00FF0000 (4 bytes)
     * 58  Green channel bit mask = 0x0000FF00 (4 bytes)
     * 62  Blue channel bit mask = 0x000000FF (4 bytes)
     * 66  Alpha channel bit mask = 0xFF000000 (4 bytes)
     * 70  Color space type = LCS_WINDOWS_COLOR_SPACE = 0x57696E20 ('Win ') (4 bytes)
     * 74  CIEXYZTRIPLE Color Space (unused) (36 bytes)
     * 110 red gamma = unused (4 bytes)
     * 114 green gamma = unused (4 bytes)
     * 118 blue gamma = unused (4 bytes)
     * 122 <Pixel Data>
     */

    //TODO endianness: multi-byte fields below are written in host byte order (little-endian assumed)

    //const boost::uint8_t unused_8 = 0;
    //const boost::uint16_t unused_16 = 0;
    const boost::uint32_t unused_32 = 0;

    //File Header
    out.write("\x42\x4d", 2); //magic number

    const boost::uint32_t file_size = width*height*4 + 122; 
    out.write( (const char *)(&file_size), 4); //file_size

    out.write( (const char *)(&unused_32), 4); //unused

    const boost::uint32_t pixel_array_offset = 122;
    out.write( (const char *)(&pixel_array_offset), 4); //pixel_array_offset

    const boost::uint32_t dib_header_size = 108;
    out.write( (const char *)(&dib_header_size), 4); //dib_header_size

    const boost::uint32_t bitmap_width = width;
    const boost::uint32_t bitmap_height = height;
    out.write( (const char *)(&bitmap_width), 4); //bitmap_width
    out.write( (const char *)(&bitmap_height), 4); //bitmap_height

    const boost::uint16_t color_planes = 1;
    out.write( (const char *)(&color_planes), 2); //color_planes

    const boost::uint16_t bits_per_pixel = 32;
    out.write( (const char *)(&bits_per_pixel), 2); //bits_per_pixel

    const boost::uint32_t bitfields = 3;
    out.write( (const char *)(&bitfields), 4); //bitfields

    const boost::uint32_t pixel_array_size = width*height*4;
    out.write( (const char *)(&pixel_array_size), 4); //pixel_array_size

    const boost::uint32_t horizontal_physical_resolution = 2835;
    const boost::uint32_t vertical_physical_resolution = 2835;
    out.write( (const char *)(&horizontal_physical_resolution), 4); //horizontal_physical_resolution
    out.write( (const char *)(&vertical_physical_resolution), 4); //vertical_physical_resolution

    out.write( (const char *)(&unused_32), 4); //num of colors on palette
    out.write( (const char *)(&unused_32), 4); //num of important colors

    const boost::uint32_t red_channel_bit_mask = bmp_impl::endian_swap( 0x00FF0000U );
    const boost::uint32_t green_channel_bit_mask = bmp_impl::endian_swap( 0x0000FF00U );
    const boost::uint32_t blue_channel_bit_mask = bmp_impl::endian_swap( 0x000000FFU );
    const boost::uint32_t alpha_channel_bit_mask = bmp_impl::endian_swap( 0xFF000000U );
    out.write( (const char *)(&red_channel_bit_mask), 4); //red_channel_bit_mask    
    out.write( (const char *)(&green_channel_bit_mask), 4); //green_channel_bit_mask
    out.write( (const char *)(&blue_channel_bit_mask), 4); //blue_channel_bit_mask
    out.write( (const char *)(&alpha_channel_bit_mask), 4); //alpha_channel_bit_mask

    const boost::uint32_t color_space_type = bmp_impl::endian_swap( 0x206E6957U );
    out.write( (const char *)(&color_space_type), 4); //color_space_type

    //CIEXYZTRIPLE Color Space (unused) (36 bytes)
    for(unsigned i = 0; i < 36/4; ++i) {
        out.write( (const char *)(&unused_32), 4);
    }

    out.write( (const char *)(&unused_32), 4); //red gamma
    out.write( (const char *)(&unused_32), 4); //green gamma
    out.write( (const char *)(&unused_32), 4); //blue gamma

    for(unsigned cy = 0; cy < pixels.height(); ++cy) {
        for(unsigned x = 0; x < pixels.width(); ++x) {

            unsigned y = pixels.height() - cy - 1;
           
            const single_color_t alpha = get_alpha(pixels(x, y));
            const single_color_t red = get_red(pixels(x, y));
            const single_color_t green = get_green(pixels(x, y));
            const single_color_t blue = get_blue(pixels(x, y));
            
            out.write((const char *)&alpha, 1);
            out.write((const char *)&red, 1);
            out.write((const char *)&green, 1);
            out.write((const char *)&blue, 1);
            
            

        }
        //no padding required as the pixels themselves are 4 bytes long
    }

    out.close();

    return 0;
}
Example #3
0
    Image* loadBMP( std::string filename) {
        ifstream input;
        input.open(filename.c_str(), ifstream::binary);
        assert(!input.fail() || !"Could not find file");
        char buffer[2];
        input.read(buffer, 2);
        assert((buffer[0] == 'B' && buffer[1] == 'M') || !"Not a bitmap file");
        input.ignore(8);
        int dataOffset = readInt(input);
        
        //Read the header
        int headerSize = readInt(input);
        int width;
        int height;
        switch(headerSize) {
            case 40:
                //V3
                width = readInt(input);
                height = readInt(input);
                input.ignore(2);
                assert(readShort(input) == 24 || !"Image is not 24 bits per pixel");
                assert(readShort(input) == 0 || !"Image is compressed");
                break;
            case 12:
                //OS/2 V1
                width = readShort(input);
                height = readShort(input);
                input.ignore(2);
                assert(readShort(input) == 24 || !"Image is not 24 bits per pixel");
                break;
            case 64:
                //OS/2 V2
                assert(!"Can't load OS/2 V2 bitmaps");
                break;
            case 108:
                //Windows V4
                assert(!"Can't load Windows V4 bitmaps");
                break;
            case 124:
                //Windows V5
                assert(!"Can't load Windows V5 bitmaps");
                break;
            default:
                assert(!"Unknown bitmap format");
        }
        
        //Read the data
        //Each row of a 24-bit BMP is padded to a multiple of 4 bytes
        int bytesPerRow = ((width * 3 + 3) / 4) * 4;
        int size = bytesPerRow * height;
        auto_array<char> pixels(new char[size]);
        input.seekg(dataOffset, ios_base::beg);
        input.read(pixels.get(), size);

        //Get the data into the right format
        auto_array<char> pixels2(new char[width * height * 3]);
        for(int y = 0; y < height; y++) {
            for(int x = 0; x < width; x++) {
                for(int c = 0; c < 3; c++) {
                    pixels2[3 * (width * y + x) + c] =
                        pixels[bytesPerRow * y + 3 * x + (2 - c)];
                }
            }
        }
        
        input.close();
        return new Image(pixels2.release(), width, height);
    }
Example #4
0
	const PixelC* pixels (const CSite& st) const {return pixels (st.x, st.y);}
 bool empty() { return pixels() == NULL; }
Example #6
0
static void track_baseline(const struct rect *rect, const TTF_Font *a,
                           struct rect *aligned, const TTF_Font *b)
{
    split(*rect, pixels(from_top(TTF_FontAscent(a)  - TTF_FontAscent(b), 0)),
          NULL, aligned);
}
status_t
EXRTranslator::DerivedTranslate(BPositionIO* source,
	const translator_info* info, BMessage* settings,
	uint32 outType, BPositionIO* target, int32 baseType)
{
	if (!outType)
		outType = B_TRANSLATOR_BITMAP;
	if (outType != B_TRANSLATOR_BITMAP || baseType != 0)
		return B_NO_TRANSLATOR;

	status_t err = B_NO_TRANSLATOR;
	try {
		IStreamWrapper istream("filename", source);
		RgbaInputFile in(istream);

		//Imath::Box2i dw = in.dataWindow();
		const Imath::Box2i &displayWindow = in.displayWindow();
		const Imath::Box2i &dataWindow = in.dataWindow();
		//float a = in.pixelAspectRatio(); // TODO take into account the aspect ratio
		int dataWidth = dataWindow.max.x - dataWindow.min.x + 1;
		int dataHeight = dataWindow.max.y - dataWindow.min.y + 1;
		int displayWidth = displayWindow.max.x - displayWindow.min.x + 1;
		int displayHeight = displayWindow.max.y - displayWindow.min.y + 1;

		// Write out the data to outDestination
		// Construct and write Be bitmap header
		TranslatorBitmap bitsHeader;
		bitsHeader.magic = B_TRANSLATOR_BITMAP;
		bitsHeader.bounds.left = 0;
		bitsHeader.bounds.top = 0;
		bitsHeader.bounds.right = displayWidth - 1;
		bitsHeader.bounds.bottom = displayHeight - 1;
		bitsHeader.rowBytes = 4 * displayWidth;
		bitsHeader.colors = B_RGBA32;
		bitsHeader.dataSize = bitsHeader.rowBytes * displayHeight;
		if (swap_data(B_UINT32_TYPE, &bitsHeader,
			sizeof(TranslatorBitmap), B_SWAP_HOST_TO_BENDIAN) != B_OK) {
			return B_ERROR;
		}
		target->Write(&bitsHeader, sizeof(TranslatorBitmap));

		Array2D <Rgba> pixels(dataHeight, dataWidth);
		in.setFrameBuffer (&pixels[0][0] - dataWindow.min.y * dataWidth - dataWindow.min.x, 1, dataWidth);
		in.readPixels (dataWindow.min.y, dataWindow.max.y);

		float	_gamma = 0.4545f;
		float	_exposure = 0.0f;
		float	_defog = 0.0f;
		float	_kneeLow = 0.0f;
		float	_kneeHigh = 5.0f;

		float	_fogR = 0.0f;
		float	_fogG = 0.0f;
		float	_fogB = 0.0f;

		halfFunction<float>
		rGamma (Gamma (_gamma,
					_exposure,
					_defog * _fogR,
					_kneeLow,
					_kneeHigh),
			-HALF_MAX, HALF_MAX,
			0.f, 255.f, 0.f, 0.f);

		halfFunction<float>
		gGamma (Gamma (_gamma,
					_exposure,
					_defog * _fogG,
					_kneeLow,
					_kneeHigh),
			-HALF_MAX, HALF_MAX,
			0.f, 255.f, 0.f, 0.f);

		halfFunction<float>
		bGamma (Gamma (_gamma,
					_exposure,
					_defog * _fogB,
					_kneeLow,
					_kneeHigh),
			-HALF_MAX, HALF_MAX,
			0.f, 255.f, 0.f, 0.f);

		for (int y = displayWindow.min.y; y <= displayWindow.max.y; ++y) {
			if (y < dataWindow.min.y
				|| y > dataWindow.max.y) {
				unsigned char sp[4];
				sp[0] = 128;
				sp[1] = 128;
				sp[2] = 128;
				sp[3] = 255;
				for (int x = displayWindow.min.x; x <= displayWindow.max.x; ++x) {
					target->Write(sp, 4);
				}
				continue;
			}

			for (int x = displayWindow.min.x; x <= displayWindow.max.x; ++x) {
				unsigned char sp[4];
				if (x < dataWindow.min.x
					|| x > dataWindow.max.x) {
					sp[0] = 128;
					sp[1] = 128;
					sp[2] = 128;
					sp[3] = 255;
				} else {
					const Imf::Rgba &rp = pixels[y][x];

					sp[0] = (unsigned char)bGamma(rp.b);
					sp[1] = (unsigned char)gGamma(rp.g);
					sp[2] = (unsigned char)rGamma(rp.r);
					sp[3] = 255;
				}
				target->Write(sp, 4);
			}
		}

		err = B_OK;
	} catch (const std::exception &e) {
		std::cerr << e.what() << std::endl;
	}
	return err;
}
int _tmain(int argc, _TCHAR* argv[])
{
    CoInitializeEx(nullptr, COINIT_MULTITHREADED);

    // Make a big bitmap.
    std::vector<BYTE> pixels(300000000);

    // Initialize it with the pattern 0,1,2,0,1,2...
    ColorGen colorGen;
    std::generate(pixels.begin(), pixels.end(), colorGen);

    // Total times for each implementation.
    long long elapsed_While = 0;
    long long elapsed_Serial = 0;
    long long elapsed_Task = 0;
    long long elapsed_ParallelInvoke = 0;
    long long elapsed_ParallelReduce = 0;
    long long elapsed_ParallelInvokeReduce = 0;
    long long elapsed_StdThread = 0;
    long long elapsed_StdThreadRecursive = 0;
    long long elapsed_StdAsync = 0;
    long long elapsed_ParallelAccumulate = 0;

    const size_t iterations = 10;

    for (size_t i = 0; i < iterations; ++i)
    {
        DWORD averageColor;
        long long elapsed;

        elapsed = time_call(
            [&]
        {
            averageColor = AverageColor_While(pixels.cbegin(), pixels.cend());
        });

        std::wcout << averageColor << L" Elapsed time AverageColor_While(): " << elapsed << L" ms" << std::endl;
        elapsed_While += elapsed;

        elapsed = time_call(
            [&]
        {
            averageColor = AverageColor_Serial(pixels.cbegin(), pixels.cend());
        });

        std::wcout << averageColor << L" Elapsed time AverageColor_Serial(): " << elapsed << L" ms" << std::endl;
        elapsed_Serial += elapsed;

        elapsed = time_call(
            [&]
        {
            averageColor = AverageColor_Task(pixels.cbegin(), pixels.cend());
        });

        std::wcout << averageColor << L" Elapsed time AverageColor_Task(): " << elapsed << L" ms" << std::endl;
        elapsed_Task += elapsed;

        elapsed = time_call(
            [&]
        {
            averageColor = AverageColor_ParallelInvoke(pixels.cbegin(), pixels.cend());
        });

        std::wcout << averageColor << L" Elapsed time AverageColor_ParallelInvoke(): " << elapsed << L" ms" << std::endl;
        elapsed_ParallelInvoke += elapsed;

        elapsed = time_call(
            [&]
        {
            averageColor = AverageColor_ParallelReduce(pixels.cbegin(), pixels.cend());
        });

        std::wcout << averageColor << L" Elapsed time AverageColor_ParallelReduce(): " << elapsed << L" ms" << std::endl;
        elapsed_ParallelReduce += elapsed;

        elapsed = time_call(
            [&]
        {
            averageColor = AverageColor_ParallelInvokeReduce(pixels.cbegin(), pixels.cend());
        });

        std::wcout << averageColor << L" Elapsed time AverageColor_ParallelInvokeReduce(): " << elapsed << L" ms" << std::endl;
        elapsed_ParallelInvokeReduce += elapsed;

        elapsed = time_call(
            [&]
        {
            averageColor = AverageColor_StdThread(pixels.cbegin(), pixels.cend());
        });

        std::wcout << averageColor << L" Elapsed time AverageColor_StdThread(): " << elapsed << L" ms" << std::endl;
        elapsed_StdThread += elapsed;

        elapsed = time_call(
            [&]
        {
            averageColor = AverageColor_StdThreadRecursive(pixels.cbegin(), pixels.cend());
        });

        std::wcout << averageColor << L" Elapsed time AverageColor_StdThreadRecursive(): " << elapsed << L" ms" << std::endl;
        elapsed_StdThreadRecursive += elapsed;

        elapsed = time_call(
            [&]
        {
            averageColor = AverageColor_StdAsync(pixels.cbegin(), pixels.cend());
        });

        std::wcout << averageColor << L" Elapsed time AverageColor_StdAsync(): " << elapsed << L" ms" << std::endl;
        elapsed_StdAsync += elapsed;

        elapsed = time_call(
            [&]
        {
            averageColor = AverageColor_ParallelAccumulate(pixels.cbegin(), pixels.cend());
        });

        std::wcout << averageColor << L" Elapsed time AverageColor_ParallelAccumulate(): " << elapsed << L" ms" << std::endl;
        elapsed_ParallelAccumulate += elapsed;


    }

    std::wcout << L"Average AverageColor_While(): " << elapsed_While / iterations << L" ms" << std::endl;
    std::wcout << L"Average AverageColor_Serial(): " << elapsed_Serial / iterations << L" ms" << std::endl;
    std::wcout << L"Average AverageColor_Task(): " << elapsed_Task / iterations << L" ms" << std::endl;
    std::wcout << L"Average AverageColor_ParallelInvoke(): " << elapsed_ParallelInvoke / iterations << L" ms" << std::endl;
    std::wcout << L"Average AverageColor_ParallelReduce(): " << elapsed_ParallelReduce / iterations << L" ms" << std::endl;
    std::wcout << L"Average AverageColor_ParallelInvokeReduce(): " << elapsed_ParallelInvokeReduce / iterations << L" ms" << std::endl;
    std::wcout << L"Average AverageColor_StdThread(): " << elapsed_StdThread / iterations << L" ms" << std::endl;
    std::wcout << L"Average AverageColor_StdThreadRecursive(): " << elapsed_StdThreadRecursive / iterations << L" ms" << std::endl;
    std::wcout << L"Average AverageColor_StdAsync(): " << elapsed_StdAsync / iterations << L" ms" << std::endl;
    std::wcout << L"Average AverageColor_ParallelAccumulate(): " << elapsed_ParallelAccumulate / iterations << L" ms" << std::endl;

    CoUninitialize();
    return 0;
}
Example #9
0
int main(int argc, char** argv)
{
	if(argc != 3)
	{
		std::cerr << "syntax: " << argv[0] << " input_sky.hdr output.ppm" << std::endl;
		return EXIT_FAILURE;
	}

	// Load the sky
	mcvoxel::sky sky(argv[1]);

	// Size of output
	const int w(850), h(480);

	// Create a camera
	mcvoxel::camera camera;
	camera.set_centre(Eigen::Vector3f(0.f, 128.f, 0.f));
	camera.set_focal_length(h);
	camera.yaw_left(190.f * (2.f * M_PI / 360.f));
	//camera.pitch_up(-20.f * (2.f * M_PI / 360.f));
	camera.pitch_up(20.f * (2.f * M_PI / 360.f));

	// What is the ray corresponding to (0,0)
	mcvoxel::ray origin_ray(camera.eye_ray(0.f,0.f));
	std::cout << "Camera origin: " << origin_ray.origin().transpose() << '\n';
	std::cout << "   Looking at: " << origin_ray.direction().transpose() << '\n';

	// Create a collection of pixel samples which is 3x(w*h); zero-initialise it
	// since chrominance is accumulated into the columns below
	Eigen::ArrayXXf samples(Eigen::ArrayXXf::Zero(3, w*h));

	// Get camera vectors
	Eigen::Vector3f centre, look_at, up, right;
	camera.get_frame(centre, look_at, up, right);

	// Draw samples from the sky.
	for(int j=0; j<15; ++j)
	{
		std::cout << "j: " << j << std::endl;
		for(int i=0; i<w*h; ++i)
		{
			Eigen::Vector3f direction, chrominance;
			sky.sample_direction(direction, chrominance);

			// Get the image-plane co-ordinates for that ray
			float x, y;
			if(!camera.pixel_coord(direction, x, y))
				continue;

			// convert to image pixel co-ordinate
			x = floor(x + 0.5f * w); y = floor(-y + 0.5f * h);

			if((x < 0.f) || (x >= w) || (y < 0.f) || (y >= h))
				continue;

			int idx = static_cast<int>(y) * w + static_cast<int>(x);
			samples.matrix().col(idx) += chrominance;
		}
	}

	std::cout << "Rendering done." << std::endl;

	// Poor-man's tone-mapping
	Eigen::ArrayXXf tone_mapped_samples((samples / std::max(1e-3f, samples.maxCoeff())).cwiseSqrt());

	std::vector<data::pixel<uint8_t> > pixels(w*h, data::pixel<uint8_t>(0,0,0));
	for(int i=0; i<w*h; ++i)
	{
		pixels[i].r = static_cast<uint8_t>(255.f * tone_mapped_samples(0,i));
		pixels[i].g = static_cast<uint8_t>(255.f * tone_mapped_samples(1,i));
		pixels[i].b = static_cast<uint8_t>(255.f * tone_mapped_samples(2,i));
	}
	std::ofstream output(argv[2], std::ios::binary);
	io::write_ppm(output, &(pixels[0]), w, h);

	return EXIT_SUCCESS;
}
/***********************************************************************//**
 * @brief Set Monte Carlo simulation cone
 *
 * @param[in] centre Simulation cone centre.
 * @param[in] radius Simulation cone radius (degrees).
 *
 * Sets the simulation cone centre and radius that defines the directions
 * that will be simulated using the mc() method.
 ***************************************************************************/
void GModelSpatialDiffuseCube::set_mc_cone(const GSkyDir& centre,
                                           const double&  radius)
{
    // Initialise cache
    m_mc_cache.clear();
    m_mc_spectrum.clear();

    // Fetch cube
    fetch_cube();

    // Determine number of cube pixels and maps
    int npix  = pixels();
    int nmaps = maps();

    // Continue only if there are pixels and maps
    if (npix > 0 && nmaps > 0) {

        // Reserve space for all pixels in cache
        m_mc_cache.reserve((npix+1)*nmaps);

        // Loop over all maps
        for (int i = 0; i < nmaps; ++i) {

            // Compute pixel offset
            int offset = i * (npix+1);

            // Set first cache value to 0
            m_mc_cache.push_back(0.0);

            // Initialise cache with cumulative pixel fluxes and compute
            // total flux in skymap for normalization. Negative pixels are
            // excluded from the cumulative map.
            double total_flux = 0.0;
            for (int k = 0; k < npix; ++k) {

                // Derive effective pixel radius from half opening angle
                // that corresponds to the pixel's solid angle. For security,
                // the radius is enhanced by 50%.
                double pixel_radius =
                       std::acos(1.0 - m_cube.solidangle(k)/gammalib::twopi) *
                       gammalib::rad2deg * 1.5;

                // Add up flux with simulation cone radius + effective pixel
                // radius. The effective pixel radius is added to make sure
                // that all pixels that overlap with the simulation cone are
                // taken into account. There is no problem of having even
                // pixels outside the simulation cone taken into account as
                // long as the mc() method has an explicit test of whether a
                // simulated event is contained in the simulation cone.
                double distance = centre.dist_deg(m_cube.pix2dir(k));
                if (distance <= radius+pixel_radius) {
                    double flux = m_cube(k,i) * m_cube.solidangle(k);
                    if (flux > 0.0) {
                        total_flux += flux;
                    }
                }

                // Push back flux
                m_mc_cache.push_back(total_flux); // units: ph/cm2/s/MeV
            }

            // Normalize cumulative pixel fluxes so that the values in the
            // cache run from 0 to 1
            if (total_flux > 0.0) {
                for (int k = 0; k < npix; ++k) {
                    m_mc_cache[k+offset] /= total_flux;
                }
            }

            // Make sure that last pixel in the cache is >1
            m_mc_cache[npix+offset] = 1.0001;

            // Store centre flux in node array
            if (m_logE.size() == nmaps) {
                GEnergy energy;
                energy.log10MeV(m_logE[i]);
                
                // Only append node if flux > 0
                if (total_flux > 0.0) {
                    m_mc_spectrum.append(energy, total_flux);
                }

            }

        } // endfor: looped over all maps

        // Dump cache values for debugging
        #if defined(G_DEBUG_CACHE)
        for (int i = 0; i < m_mc_cache.size(); ++i) {
            std::cout << "i=" << i;
            std::cout << " c=" << m_mc_cache[i] << std::endl;
        }
        #endif

    } // endif: there were cube pixels and maps

    // Return
    return;
}
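The cumulative cache that set_mc_cone() fills above is essentially a per-map inverse-CDF table: entry 0 is 0, each later entry adds one pixel's (non-negative) flux, and the table is normalised to run from 0 to just above 1. Below is a minimal standalone sketch of that construction using plain std::vector instead of gammalib's classes; the flux, solid-angle and distance inputs are hypothetical stand-ins, not the library's API.

#include <cmath>
#include <cstddef>
#include <vector>

// Build a normalised cumulative-flux table for one map.
// fluxes[k]       intensity of pixel k
// solid_angles[k] solid angle of pixel k (steradian)
// dist_deg[k]     angular distance (deg) of pixel k's centre from the cone centre
std::vector<double> build_mc_cache(const std::vector<double>& fluxes,
                                   const std::vector<double>& solid_angles,
                                   const std::vector<double>& dist_deg,
                                   double                     cone_radius_deg)
{
    const double pi      = 3.14159265358979323846;
    const double rad2deg = 180.0 / pi;

    std::vector<double> cache;
    cache.reserve(fluxes.size() + 1);
    cache.push_back(0.0);                 // cumulative flux starts at 0

    double total = 0.0;
    for (std::size_t k = 0; k < fluxes.size(); ++k) {
        // Effective pixel radius: half opening angle of a spherical cap with the
        // pixel's solid angle, Omega = 2*pi*(1 - cos(theta)) => theta = acos(1 - Omega/(2*pi)),
        // enlarged by 50% as a safety margin (the same rule as in set_mc_cone() above).
        double pixel_radius =
            std::acos(1.0 - solid_angles[k] / (2.0 * pi)) * rad2deg * 1.5;

        // Only positive fluxes inside the (enlarged) simulation cone contribute.
        if (dist_deg[k] <= cone_radius_deg + pixel_radius && fluxes[k] > 0.0) {
            total += fluxes[k] * solid_angles[k];
        }
        cache.push_back(total);
    }

    // Normalise so the table runs from 0 to 1; nudge the last entry above 1,
    // mirroring the 1.0001 sentinel used above.
    if (total > 0.0) {
        for (std::size_t k = 1; k < cache.size(); ++k) {
            cache[k] /= total;
        }
    }
    cache.back() = 1.0001;
    return cache;
}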
Example #11
0
void SDLRenderer::drawRect(unsigned x, unsigned y, unsigned xs, unsigned ys) {
	SDL_LockSurface(_remap);
	memcpy(_remap->pixels, pixels(), width() * height() * 3 * sizeof(uint8_t));
	SDL_UnlockSurface(_remap);
	flushRect(x, y, xs, ys);
}
/***********************************************************************//**
 * @brief Returns MC sky direction
 *
 * @param[in] energy Photon energy.
 * @param[in] time Photon arrival time.
 * @param[in,out] ran Random number generator.
 * @return Sky direction.
 *
 * @exception GException::invalid_value
 *            No energy boundaries specified, or energy boundaries do not
 *            cover the specified @p energy.
 *
 * Returns a random sky direction according to the intensity distribution of
 * the model sky map and the specified energy. The method makes use of a
 * cache array that contains the normalised cumulative flux values for each
 * of the sky maps in the cube. The specified energy is used to select the
 * appropriate cache array from the cube. Using a uniform random number, the
 * selected cache array is scanned using a bi-section method to determine
 * the skymap pixel for which the position should be returned. To avoid
 * binning problems, the exact position within the pixel is set by a uniform
 * random number generator (neglecting thus pixel distortions). The
 * fractional skymap pixel is then converted into a sky direction.
 ***************************************************************************/
GSkyDir GModelSpatialDiffuseCube::mc(const GEnergy& energy,
                                     const GTime&   time,
                                     GRan&          ran) const
{
    // Allocate sky direction
    GSkyDir dir;

    // Fetch cube
    fetch_cube();

    // Determine number of skymap pixels
    int npix = pixels();

    // Continue only if there are skymap pixels
    if (npix > 0) {

        // If no energy boundaries are defined, throw an exception
        if (m_ebounds.size() < 1) {
            std::string msg = "The energy boundaries of the maps in the cube"
                              " have not been defined. Maybe the map cube file"
                              " is missing the \"ENERGIES\" extension which"
                              " defines the energy of each map in the cube.\n"
                              "Please provide the energy information."; 
            throw GException::invalid_value(G_MC, msg);
        }

        // Determine the map that corresponds best to the specified energy.
        // This is not 100% clean, as ideally some map interpolation should
        // be done to the exact energy specified. However, as long as the map
        // does not change drastically with energy, taking the closest map
        // seems to be fine.
        int i = m_ebounds.index(energy);
        if (i < 0) {
            if (energy <= m_ebounds.emin()) {
                i = 0;
            }
            else if (energy >= m_ebounds.emax()) {
                i = m_ebounds.size()-1;
            }
            else {
                std::string msg = "The specified energy "+energy.print()+" does"
                                  " not fall in any of the energy boundaries of"
                                  " the map cube.\n"
                                  "Please make sure that the map cube energies"
                                  " are properly defined.";
                throw GException::invalid_value(G_MC, msg);
            }
        }
        
        // Get uniform random number
        double u = ran.uniform();

        // Get pixel index according to random number. We use a bi-section
        // method to find the corresponding skymap pixel
        int offset = i * (npix+1);
        int low    = offset;
        int high   = offset + npix;
        while ((high - low) > 1) {
            int mid = (low+high) / 2;
            if (u < m_mc_cache[mid]) {
                high = mid;
            }
            else if (m_mc_cache[mid] <= u) {
                low = mid;
            }
        }

        // Convert sky map index to sky map pixel
        GSkyPixel pixel = m_cube.inx2pix(low-offset);

        // Randomize pixel
        pixel.x(pixel.x() + ran.uniform() - 0.5);
        pixel.y(pixel.y() + ran.uniform() - 0.5);

        // Get sky direction
        dir = m_cube.pix2dir(pixel);
    
    } // endif: there were pixels in sky map

    // Return sky direction
    return dir;
}
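The bi-section loop in mc() above is a standard inverse-CDF lookup: a uniform deviate u is bracketed in the normalised cumulative table, and the index of the bracketing interval is the sampled pixel. Here is a self-contained sketch of just that step in plain C++, with std::mt19937 standing in for GRan and a hypothetical 4-pixel table; it illustrates the technique rather than the library's code.

#include <cstddef>
#include <random>
#include <vector>

// Given a normalised cumulative table (first entry 0, last entry slightly above 1),
// return the index k whose interval [cache[k], cache[k+1]) contains u.
std::size_t sample_pixel(const std::vector<double>& cache, double u)
{
    std::size_t low  = 0;
    std::size_t high = cache.size() - 1;
    while (high - low > 1) {              // bi-section, as in mc() above
        std::size_t mid = (low + high) / 2;
        if (u < cache[mid]) {
            high = mid;
        } else {
            low = mid;
        }
    }
    return low;
}

int main()
{
    // Hypothetical map with pixel fluxes 1, 3, 0, 6 -> normalised cumulative table.
    std::vector<double> cache = {0.0, 0.1, 0.4, 0.4, 1.0001};

    std::mt19937 rng(42);
    std::uniform_real_distribution<double> uniform(0.0, 1.0);

    std::vector<int> counts(4, 0);
    for (int i = 0; i < 100000; ++i) {
        ++counts[sample_pixel(cache, uniform(rng))];
    }
    // counts[] should come out roughly proportional to 10%, 30%, 0%, 60%;
    // a real implementation would then randomise the position within the pixel
    // and convert the pixel index to a sky direction, as mc() does above.
    return 0;
}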
Example #13
0
	//TODO proper error handling
	TextureParameters PNGTexture::init_texture() 
	{
		TextureParameters ret = {false};
		FILE* fi;

		std::string dir = resource;

		try {
			if(Util::ext_fopen(&fi, dir.c_str(), "rb") != 0) {
				std::cout << "Warning(PNGTexture): " << dir << " is not a valid resource." << std::endl;
				return ret;
			}
		}
		catch(const std::exception& e) {
			std::cout << "Exception(PNGTexture): " << e.what() << std::endl;
			return ret; // fi is not valid if ext_fopen threw
		}


		char sig[8];
		fread(sig, 1, 8, fi);

		if(png_sig_cmp((png_const_bytep)sig, 0, 8) != 0) {
			fclose(fi);

			std::cout << "Warning(PNGTexture): " << dir << " has an invalid signature." << std::endl;
			return ret;
		}

		png_structp png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, nullptr, nullptr, nullptr);
		png_infop info_ptr = png_create_info_struct(png_ptr);
		png_infop end_info = png_create_info_struct(png_ptr);

		png_init_io(png_ptr, fi);
		png_set_sig_bytes(png_ptr, 8);
		png_read_info(png_ptr, info_ptr);

		ret.valid = true;

		ret.height = png_get_image_height(png_ptr, info_ptr);
		ret.width  = png_get_image_width(png_ptr, info_ptr);
		ret.format = convert_png_colortype(png_get_color_type(png_ptr, info_ptr));
		
		int rowbytes = png_get_rowbytes(png_ptr, info_ptr);
		rowbytes += 3 - ((rowbytes-1)%4); //align row to 4 bytes

		image_byte* data = new image_byte[rowbytes*(1+ret.height)];
		png_bytepp row_pointers = (png_bytepp)(malloc(ret.height*sizeof(png_bytep)));

		unsigned height = ret.height;
		for(unsigned i = 0; i < height; i++) {
			row_pointers[i] = data + i*rowbytes;
		}

		png_read_image(png_ptr, row_pointers);
		
		png_destroy_read_struct(&png_ptr, &info_ptr, &end_info);
		free(row_pointers);

		std::shared_ptr<image_byte> pixels(data, std::default_delete<image_byte[]>()); // data came from new[], so use an array deleter
		ret.pixels = pixels;
		this->pixel_data = pixels;

		fclose(fi);

		return ret;
	}
Example #14
0
void example_wrapper::render(void)
{
	assert(_example);

	bool save_frame = _params.doing_framedump();
	save_frame |= _params.doing_screenshot() &&
		(_state.exec_time() >= _params.screenshot_time());

	if(_state.multiple_tiles())
	{
		assert(_state.first_tile());
		do {
			if(_params.auto_tiles())
			{
				glScissor(
					_state.tile_x(),
					_state.tile_y(),
					_state.tile_w(),
					_state.tile_h()
				);
			}

			_example->render(_state);
			glFlush();
			
		} while(!_state.next_tile());
	}
	else _example->render(_state);

	if(save_frame)
	{
		glReadPixels(
			0, 0,
			GLsizei(_state.width()),
			GLsizei(_state.height()),
			GL_RGBA,
			GL_UNSIGNED_BYTE,
			pixels().data()
		);

		std::stringstream filename;

		if(_params.doing_framedump())
		{
			filename <<
				_params.framedump_prefix() <<
				std::setfill('0') << std::setw(6) <<
				_state.frame_number() << ".rgba";
		}
		else if(_params.doing_screenshot())
		{
			filename << _params.screenshot_path();
		}

		std::ofstream file(filename.str(), std::ios::binary); // raw RGBA dump, so write in binary mode
		file.write(pixels().data(), std::streamsize(pixels().size()));
		file.flush();

		if(_params.doing_framedump())
		{
			std::cout << filename.str() << std::endl;
			textbuf(filename.str().size()+1);

			std::cin.getline(
				_textbuf.data(),
				std::streamsize(_textbuf.size())
			);

			if(std::strncmp(
				filename.str().c_str(),
				_textbuf.data(),
				_textbuf.size()
			) != 0)
			{
				throw std::runtime_error(
					"Expected frame-dump filepath on stdin."
				);
			}
		}
		else if(_params.doing_screenshot())
		{
			_screenshot_done = true;
		}
	}
}
/************************************
verificar:
Function that validates an attribute and
the value assigned to that attribute,
using auxiliary helper functions.
*************************************/
void verificar(char* tag, char* att,char* valor){
int atr;
if (strcmp(tag,"a")==0){
	len=(sizeof(a)/sizeof(a[0]));
	atr=verificar_atributo(a,att,"a");
        if(atr==1){
             if(strcmp(att,"href")==0)
                  url(att,valor);
             else if(strcmp(att,"shape")==0){
                  len_v=(sizeof(shape)/sizeof(shape[0]));
                  verificar_valor(shape,valor,"shape"); }
             else texto(att,valor);// accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"b")==0){
	len=(sizeof(b)/sizeof(b[0]));
	atr=verificar_atributo(b,att,"b");
        if(atr==1){
             texto(att,valor);// accepts the values of the general attributes, which are text: "type","id","class"
             }}
else if (strcmp(tag,"blockquote")==0){
	len=(sizeof(blockquote)/sizeof(blockquote[0]));
	atr=verificar_atributo(blockquote,att,"blockquote");
	if(atr==1){
             url(att,valor); //cite ->url
             }}
else if (strcmp(tag,"body")==0){
	len=(sizeof(body)/sizeof(body[0]));
	atr=verificar_atributo(body,att,"body");
        if(atr==1){
	len_v=(sizeof(color)/sizeof(color[0]));
        verificar_valor(color,valor,att);}}
else if (strcmp(tag,"br")==0){
	len=(sizeof(br)/sizeof(br[0]));
	atr=verificar_atributo(br,att,"br");
        if(atr==1){
	     texto(att,valor);// accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"button")==0){
	len=(sizeof(button)/sizeof(button[0]));
	atr=verificar_atributo(button,att,"button");
	if(atr==1){
             if(strcmp(att,"disabled")==0){
                  len_v=(sizeof(disabled)/sizeof(disabled[0]));
                  verificar_valor(disabled,valor,"disabled");}
	     else if(strcmp(att,"autofocus")==0){
                  len_v=(sizeof(autofocus)/sizeof(autofocus[0]));
                  verificar_valor(autofocus,valor,"autofocus");}
             else texto(att,valor);  // accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"caption")==0){
	len=(sizeof(caption)/sizeof(caption[0]));
	atr=verificar_atributo(caption,att,"caption");
        if(atr==1){
	len_v=(sizeof(align)/sizeof(align[0]));
        verificar_valor(align,valor,"align");}}
else if (strcmp(tag,"code")==0){
	len=(sizeof(code)/sizeof(code[0]));
	atr=verificar_atributo(code,att,"code");
        if(atr==1){
	     texto(att,valor);// accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"footer")==0){
	len=(sizeof(footer)/sizeof(footer[0]));
	atr=verificar_atributo(footer,att,"footer");
  if(atr==1){
	     texto(att,valor);
}}
else if (strcmp(tag,"div")==0){
	len=(sizeof(div_)/sizeof(div_[0]));
	atr=verificar_atributo(div_,att,"div");
        if(atr==1){
           if(strcmp(att,"align")==0 ){
                  len_v=(sizeof(align)/sizeof(align[0]));
                  verificar_valor(align,valor,"align");}
             else texto(att,valor);  // accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"dl")==0){
	len=(sizeof(dl)/sizeof(dl[0]));
	atr=verificar_atributo(dl,att,"dl");
         if(atr==1){
	     texto(att,valor);// accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"dt")==0){
	len=(sizeof(dt)/sizeof(dt[0]));
	atr=verificar_atributo(dt,att,"dt");
         if(atr==1){
	     texto(att,valor);// accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"dd")==0){
	len=(sizeof(dd)/sizeof(dd[0]));
	atr=verificar_atributo(dd,att,"dd");
         if(atr==1){
	     texto(att,valor);// accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"em")==0){
	len=(sizeof(em)/sizeof(em[0]));
	atr=verificar_atributo(em,att,"em");
          if(atr==1){
	     texto(att,valor);// accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"embed")==0){
	len=(sizeof(embed)/sizeof(embed[0]));
	atr=verificar_atributo(embed,att,"embed");
       if(atr==1){
            if(strcmp(att,"height")==0 | strcmp(att,"width")==0 )
                  pixels(att,valor);
             else if(strcmp(att,"src")==0)
                  url(att,valor);
	     else texto(att,valor);// accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"pre")==0){
	len=(sizeof(pre)/sizeof(pre[0]));
	atr=verificar_atributo(pre,att,"pre");
        if(atr==1){
            if(strcmp(att,"width")==0 )
                  pixels(att,valor);
             else 
                  texto(att,valor);// accepts the values of the general attributes, which are text: "type","id","class"

}}
else if (strcmp(tag,"form")==0){
	len=(sizeof(form)/sizeof(form[0]));
	atr=verificar_atributo(form,att,"form");
	if(atr==1){
            if(strcmp(att,"method")==0 ){
                  len_v=(sizeof(method)/sizeof(method[0]));
                  verificar_valor(method,valor,"method");}
             else if(strcmp(att,"action")==0)
                  url(att,valor);
	     else texto(att,valor); // accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"headings")==0){
	len=(sizeof(headings)/sizeof(headings[0]));
	atr=verificar_atributo(headings,att,"headings");
         if(atr==1){
           if(strcmp(att,"align")==0 ){
                  len_v=(sizeof(align)/sizeof(align[0]));
                  verificar_valor(align,valor,"align");}
             else {
			texto(att,valor); // accepts the values of the general attributes, which are text: "type","id","class"
                  }
}}
else if (strcmp(tag,"head")==0){
	len=(sizeof(head)/sizeof(head[0]));
	atr=verificar_atributo(head,att,"head");
        if(atr==1){
	     texto(att,valor); // accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"header")==0){
	len=(sizeof(header)/sizeof(header[0]));
	atr=verificar_atributo(header,att,"header");
        if(atr==1){
	     texto(att,valor); // accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"hr")==0){
	len=(sizeof(hr)/sizeof(hr[0]));
	atr=verificar_atributo(hr,att,"hr");
        if(atr==1){
             if(strcmp(att,"size")==0 | strcmp(att,"width")==0  )
                   pixels(att,valor);
           else if(strcmp(att,"align")==0 ){
                  len_v=(sizeof(align)/sizeof(align[0]));
                  verificar_valor(align,valor,"align");}
             else texto(att,valor);  // accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"html")==0){
	len=(sizeof(html)/sizeof(html[0]));
	atr=verificar_atributo(html,att,"html");
        if(atr==1){
           if(strcmp(att,"xmlns")==0 ){
                  len_v=(sizeof(xmlns)/sizeof(xmlns[0]));
                  verificar_valor(xmlns,valor,"xmlns");}
             else texto(att,valor); // accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"img")==0){
	len=(sizeof(img)/sizeof(img[0]));
	atr=verificar_atributo(img,att,"img");
        if(atr==1){
             if(strcmp(att,"align")==0){
                  len_v=(sizeof(align)/sizeof(align[0]));
                  verificar_valor(align,valor,"align");}
             else if(strcmp(att,"height")==0 | strcmp(att,"width")==0)
                  pixels(att,valor);
             else if(strcmp(att,"src")==0)
                   url(att,valor);
             else  texto(att,valor);  // accepts the values of the general attributes, which are text: "type","id","class"
 }}
else if (strcmp(tag,"input")==0){
	len=(sizeof(input)/sizeof(input[0]));
	atr=verificar_atributo(input,att,"input");
         if(atr==1){
            if(strcmp(att,"height")==0 | strcmp(att,"width")==0 | strcmp(att,"size")==0)
                  pixels(att,valor);
             else if(strcmp(att,"src")==0)
                   url(att,valor);
             else  texto(att,valor);     // accepts the values of the general attributes, which are text: "type","id","class"
 }}
else if (strcmp(tag,"li")==0){
	len=(sizeof(li)/sizeof(li[0]));
	atr=verificar_atributo(li,att,"li");
           if(atr==1){
	     texto(att,valor);// accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"link")==0){
	len=(sizeof(link)/sizeof(link[0]));
	atr=verificar_atributo(link,att,"link");
        if(atr==1){
            if(strcmp(att,"href")==0)
                  url(att,valor);
             else if(strcmp(att,"rel")==0) {
                   len_v=(sizeof(rel)/sizeof(rel[0]));
                  verificar_valor(rel,valor,"rel");}
             else  texto(att,valor);  // accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"meta")==0){
	len=(sizeof(meta)/sizeof(meta[0]));
	atr=verificar_atributo(meta,att,"meta");
       if(atr==1){
	     texto(att,valor);// accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"object")==0){
	len=(sizeof(object)/sizeof(object[0]));
	atr=verificar_atributo(object,att,"object");
        if(atr==1){
             if(strcmp(att,"hight")==0 | strcmp(att,"width")==0  )
                   pixels(att,valor);
           else if(strcmp(att,"align")==0 ){
                  len_v=(sizeof(align)/sizeof(align[0]));
                  verificar_valor(align,valor,"align");}
             else texto(att,valor);// accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"ol")==0){
	len=(sizeof(ol)/sizeof(ol[0]));
	atr=verificar_atributo(ol,att,"ol");
        if(atr==1){
           if(strcmp(att,"start")==0 )
                  numero(att,valor);
             else texto(att,valor);  // accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"option")==0){
	len=(sizeof(option)/sizeof(option[0]));
	atr=verificar_atributo(option,att,"option");
        if(atr==1){
	     texto(att,valor);// accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"p")==0){
	len=(sizeof(p)/sizeof(p[0]));
	atr=verificar_atributo(p,att,"p");
        if(atr==1){
           if(strcmp(att,"align")==0 ){
                  len_v=(sizeof(align)/sizeof(align[0]));
                  verificar_valor(align,valor,"align");}
             else texto(att,valor);  // accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"span")==0){
	len=(sizeof(span)/sizeof(span[0]));
	atr=verificar_atributo(span,att,"span");
        if(atr==1){
	     texto(att,valor);   // accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"strong")==0){
	len=(sizeof(strong)/sizeof(strong[0]));
	atr=verificar_atributo(strong,att,"strong");
        if(atr==1){
	     texto(att,valor);   // accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"style")==0){
	len=(sizeof(style)/sizeof(style[0]));
	atr=verificar_atributo(style,att,"style");
        if(atr==1){
	     texto(att,valor);
}}
else if (strcmp(tag,"select")==0){
	len=(sizeof(select_)/sizeof(select_[0]));
	atr=verificar_atributo(select_,att,"select");
        if(atr==1){
           if(strcmp(att,"size")==0 )
                  pixels(att,valor);                  
             else texto(att,valor);   // accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"table")==0){
	len=(sizeof(table)/sizeof(table[0]));
	atr=verificar_atributo(table,att,"table");
        if(atr==1){
             if(strcmp(att,"bgcolor")==0){
                  len_v=(sizeof(color)/sizeof(color[0]));
                  verificar_valor(color,valor,"bgcolor"); }
             else if(strcmp(att,"border")==0){
                  len_v=(sizeof(border)/sizeof(border[0]));
                  verificar_valor(border,valor,"border"); }
             else if(strcmp(att,"align")==0){
                  len_v=(sizeof(align)/sizeof(align[0]));
                  verificar_valor(align,valor,"align"); }
             else if(strcmp(att,"width")==0)
                   pixels(att,valor);
             else texto(att,valor);// accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"td")==0){
	len=(sizeof(td)/sizeof(td[0]));
	atr=verificar_atributo(td,att,"td");
        if(atr==1){
             if(strcmp(att,"colspan")==0 | strcmp(att,"rowspan")==0){
                  numero(att,valor); }
             else if(strcmp(att,"bgcolor")==0){
                  len_v=(sizeof(color)/sizeof(color[0]));
                  verificar_valor(color,valor,"bgcolor"); }
             else if(strcmp(att,"align")==0){
                  len_v=(sizeof(align)/sizeof(align[0]));
                  verificar_valor(align,valor,"align"); }
             else if(strcmp(att,"width")==0 | strcmp(att,"height")==0)
                   pixels(att,valor);
             else texto(att,valor);// accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"th")==0){
	len=(sizeof(th)/sizeof(th[0]));
	atr=verificar_atributo(th,att,"th");
        if(atr==1){
             if(strcmp(att,"colspan")==0 | strcmp(att,"rowspan")==0){
                  numero(att,valor); }
             else if(strcmp(att,"bgcolor")==0){
                  len_v=(sizeof(color)/sizeof(color[0]));
                  verificar_valor(color,valor,"bgcolor"); }
             else if(strcmp(att,"align")==0){
                  len_v=(sizeof(align)/sizeof(align[0]));
                  verificar_valor(align,valor,"align"); }
             else if(strcmp(att,"width")==0 | strcmp(att,"height")==0)
                   pixels(att,valor);
             else texto(att,valor);// accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"title")==0){
	len=(sizeof(title)/sizeof(title[0]));
	atr=verificar_atributo(title,att,"title");
        if(atr==1){
	     texto(att,valor);
}}
else if (strcmp(tag,"tr")==0){
	len=(sizeof(tr)/sizeof(tr[0]));
	atr=verificar_atributo(tr,att,"tr");
         if(atr==1){
           if(strcmp(att,"bgcolor")==0 ){
                  len_v=(sizeof(color)/sizeof(color[0]));
                  verificar_valor(color,valor,"bgcolor"); }
             else texto(att,valor);  // accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"textarea")==0){
	len=(sizeof(textarea)/sizeof(textarea[0]));
	atr=verificar_atributo(textarea,att,"textarea");
        if(atr==1){
           if(strcmp(att,"cols")==0 | strcmp(att,"rows")==0)
                  numero(att,valor);
             else texto(att,valor);// accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"ul")==0){
	len=(sizeof(ul)/sizeof(ul[0]));
	atr=verificar_atributo(ul,att,"ul");
        if(atr==1){
	     texto(att,valor);// accepts the values of the general attributes, which are text: "type","id","class"
}}
else if (strcmp(tag,"script")==0){
	len=(sizeof(script)/sizeof(script[0]));
	atr=verificar_atributo(script,att,"script");
        if(atr==1){
	     url(att,valor);
}}
else{ 
	printf("Etiqueta invalida\n");
	}


}
Example #16
0
// A function for printing the results of the mesh to a file and/or the screen
void Vol2mesh :: print_results(double t, bool disable_screen, bool enable_file){
       
	// Convert CGAL object to a vtkUnstructuredGrid 
	// (http://cgal-discuss.949826.n4.nabble.com/mesh-to-vtk-output-td3586974.html)
	vtkUnstructuredGrid *uGrid;
	uGrid = CGAL::output_c3t3_to_vtk_unstructured_grid(c3t3_); 
	uGrid->Squeeze();
              
    // Compute mesh quality information 
    vtkMeshQuality *q = vtkMeshQuality::New();
    q->SetInput(uGrid);
    
    // Variables for storing quality statistics
    matrix q_mat;
    vector<string> q_name;
    
    // Gather statistics for the mesh
    get_all_quality_stats(q, q_mat, q_name);
    
    // Define variables for building the vector or strings for display    
    int w = 85; // must be greater than 85
    char c[w];    
    vector<string> s;
    string tmp;
    
    // File information header
    tmp.assign("FILE INFORMATION ");
    tmp.append(w - tmp.size() - 1, '-');
    tmp.append("\n");
    s.push_back(tmp);
    
    // File input and output names
    sprintf(c, " %12s: %s\n", "input-file", input_file_.c_str());         
		s.push_back(c);
    sprintf(c, " %12s: %s\n\n", "output-file", output_file_.c_str());     
		s.push_back(c);
   
    // Input parameters header
    tmp.assign("DEFAULT MESH CRITERIA ");
    tmp.append(w - tmp.size() - 1, '-');
    tmp.append("\n");
    s.push_back(tmp);
 
    // User-supplied options
    sprintf(c, " %23s: %6.3f\n", "facet-angle", default_criteria_.facet_angle);            
		s.push_back(c);
    sprintf(c, " %23s: %6.3f\n", "facet-size", default_criteria_.facet_size);              
		s.push_back(c);
    sprintf(c, " %23s: %6.3f\n", "facet-distance", default_criteria_.facet_distance);      
		s.push_back(c);
    sprintf(c, " %23s: %6.3f\n", "cell-radius-edge-ratio", 
		default_criteria_.cell_radius_edge_ratio);      
        s.push_back(c);
    sprintf(c, " %23s: %6.3f\n\n", "cell-size", default_criteria_.cell_size); 
		s.push_back(c);

	// Mesh results header
    tmp.assign("MESH RESULTS ");
    tmp.append(w - tmp.size() - 1, '-');
    tmp.append("\n");
    s.push_back(tmp);
     
    // Mesh results
    vector<int> pix = pixels();
    vector<double> vox = voxels();

    sprintf(c, " %23s: %6.3f\n", "execution time (sec.)", t);            
		s.push_back(c);
	sprintf(c, " %23s: %d, %d, %d\n", "num. of pixels (x,y,z)",
		pix[0], pix[1], pix[2]);
		s.push_back(c);	
	sprintf(c, " %23s: %6.3f, %6.3f, %6.3f\n", "pixel dim. (x,y,z)",
		vox[0], vox[1], vox[2]);
		s.push_back(c);	
	sprintf(c, " %23s: %6.3f, %6.3f, %6.3f\n", "image dim. (x,y,z)",
		pix[0]*vox[0], pix[1]*vox[1], pix[2]*vox[2]);
		s.push_back(c);				
    sprintf(c, " %23s: %d\n", "num. of elements", (int)c3t3_.number_of_cells()); 
		s.push_back(c);
	sprintf(c, " %23s: %d\n\n", "num. of faces", (int)c3t3_.number_of_facets()); 
		s.push_back(c);
		
	// Mesh quality header
	tmp.assign("TETRAHEDRAL QUALITY ");
    tmp.append(w - tmp.size() - 1, '-');
    tmp.append("\n");
    s.push_back(tmp);			
		
	// Print the mesh quality table labels
	sprintf(c,"%24s%10s%10s%10s%10s%10s\n", 
		"Name", "Lower", "Upper", "Average", "Std. dev.", "COV (%)");
		s.push_back(c);
		
	// Print each of the mesh quality results			
	for(int i = 0; i < q_name.size(); ++i){
		sprintf(c,"%24s%10.3f%10.3f%10.3f%10.3f%10.3f\n", q_name[i].c_str(), 
			q_mat[i][0], q_mat[i][1], q_mat[i][2], q_mat[i][3],
			q_mat[i][3] / q_mat[i][2] * 100);
		s.push_back(c);		
	}

	// Add sub-domain mesh criteria
	if(!subdomain_criteria_.empty()){
		for(int i = 0; i < subdomain_criteria_.size(); i++){
			// Input parameters header
			sprintf(c, "SUBDOMAIN %d: MESH CRITERIA ", subdomain_criteria_[i].id);
			tmp.assign(c);
			tmp.append(w - tmp.size() - 1, '-');
			tmp.append("\n");
			s.push_back(tmp);

			// User-supplied options
			sprintf(c, " %23s: %6.3f\n", "facet-angle", subdomain_criteria_[i].facet_angle);            
				s.push_back(c);
			sprintf(c, " %23s: %6.3f\n", "facet-size", subdomain_criteria_[i].facet_size);              
				s.push_back(c);
			sprintf(c, " %23s: %6.3f\n", "facet-distance", subdomain_criteria_[i].facet_distance);      
				s.push_back(c);
			sprintf(c, " %23s: %6.3f\n", "cell-radius-edge-ratio", 
				subdomain_criteria_[i].cell_radius_edge_ratio);      
				s.push_back(c);
			sprintf(c, " %23s: %6.3f\n\n", "cell-size", subdomain_criteria_[i].cell_size); 
				s.push_back(c);	
		}
	}

	// Output the message to the screen
    if (!disable_screen){
        printf("\n\n");
        for (int i = 0; i < s.size(); ++i){
            printf("%s", s[i].c_str());
        }
        printf("\n\n");
    }

	// Output the message to a file
    if (enable_file){
        string hdr_file;
        hdr_file = output_file_ + (string)".info";
        FILE* fid = fopen(hdr_file.c_str(), "w");
        if (fid != NULL){
            for (int i = 0; i < s.size(); ++i){
                fprintf(fid, "%s", s[i].c_str());
            }
            fclose(fid);
        }
    }
}
void OpenGLViewer::snapshot(const char* name, int number, bool transparent)
{
   char path[256];
   int pixel = transparent ? 4 : 3;
   int savewidth = width;
   int saveheight = height;
   static int counter = 0;
   if (number == -1)
   {
      number = counter;
      counter++;
   }

   //Redraw blended for output as transparent PNG
   if (transparent)
      blend_mode = BLEND_PNG;

   //Re-render at specified output size (in a framebuffer object if available)
   if (outwidth > 0 && (outwidth != width || outheight != height))
   {
      if (!outheight)
      {
         float ratio = height / (float)width;
         outheight = outwidth * ratio;
      }
#ifdef FBO_SUPPORTED
      //Switch to a framebuffer object
      if (fbo_frame > 0)
      {
         fbo_enabled = true;
         renderBuffer = GL_COLOR_ATTACHMENT0_EXT;
         //glBindTexture(GL_TEXTURE_2D, fbo_texture);
         glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo_frame);
      }
      //else
      //   fbo(outwidth, outheight);

      setsize(outwidth, outheight);
#else
      setsize(outwidth, outheight);
#endif
   }
   display();

   // Read the pixels
   GLubyte *image = new GLubyte[width * height * pixel];
#ifdef HAVE_LIBPNG
   pixels(image, transparent);
#else
   pixels(image, false, true);
#endif
   //Write PNG or JPEG
   sprintf(path, "%s%s.%05d", output_path, name, number);
   writeImage(image, width, height, path, transparent);

   delete[] image;

   blend_mode = BLEND_NORMAL;
   if (outwidth > 0 && outwidth != savewidth)
   {
#ifdef FBO_SUPPORTED
      show();  //Disables fbo mode
      resize(savewidth, saveheight); //Resized callback
#else
      setsize(savewidth, saveheight);
#endif
   }
}
Example #18
0
	bool Image2D::LoadBMP(char* filename)
	{
		std::ifstream input;
		input.open(filename, std::ifstream::binary);
		assert(!input.fail() || !"Could not find file");
		char buffer[2];
		input.read(buffer, 2);
		assert((buffer[0] == 'B' && buffer[1] == 'M') || !"Not a bitmap file");
		input.ignore(8);
		int dataOffset = readInt(input);
		
		//Read the header
		int headerSize = readInt(input);
		switch(headerSize) 
		{
			case 40:
				//V3
				this->width = readInt(input);
				this->height = readInt(input);
				input.ignore(2);
				assert(readShort(input) == 24 || !"Image is not 24 bits per pixel");
				assert(readShort(input) == 0 || !"Image is compressed");
				break;
			case 12:
				//OS/2 V1
				this->width = readShort(input);
				this->height = readShort(input);
				input.ignore(2);
				assert(readShort(input) == 24 || !"Image is not 24 bits per pixel");
				break;
			case 64:
				//OS/2 V2
				assert(!"Can't load OS/2 V2 bitmaps");
				break;
			case 108:
				//Windows V4
				assert(!"Can't load Windows V4 bitmaps");
				break;
			case 124:
				//Windows V5
				assert(!"Can't load Windows V5 bitmaps");
				break;
			default:
				assert(!"Unknown bitmap format");
		}
		
		//Read the data
		//Each row of a 24-bit BMP is padded to a multiple of 4 bytes
		int bytesPerRow = ((this->width * 3 + 3) / 4) * 4;
		int size = bytesPerRow * this->height;
		cgl::auto_array<char> pixels(new char[size]);
		input.seekg(dataOffset, std::ios_base::beg);
		input.read(pixels.Get(), size);
		
		//Get the data into the right format
		cgl::auto_array<char> pixels2(new char[this->width * this->height * 3]);
		for(int y = 0; y < this->height; y++) 
		{
			for(int x = 0; x < this->width; x++) 
			{
				for(int c = 0; c < 3; c++) 
				{
					pixels2[3 * (this->width * y + x) + c] = pixels[bytesPerRow * y + 3 * x + (2 - c)];
				}
			}
		}	
		input.close();
		this->dataBMP = pixels2.Release();
		glGenTextures(1, &this->ID); //Make room for our texture
		glBindTexture(GL_TEXTURE_2D, this->ID); //Tell OpenGL which texture to edit
		glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
		glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
		glTexImage2D(GL_TEXTURE_2D,                //Always GL_TEXTURE_2D
					 0,                            //0 for now
					 GL_RGB,                       //Format OpenGL uses for image
					 this->width, this->height,  //Width and height
					 0,                            //The border of the image
					 GL_RGB, //GL_RGB, because pixels are stored in RGB format
					 GL_UNSIGNED_BYTE, //GL_UNSIGNED_BYTE, because pixels are stored
									   //as unsigned numbers
					 this->dataBMP);               //The actual pixel data*/

		return true;
	}
Example #19
0
void MeshRenderer::draw2D(
                cl::BufferGL PBO,
                uint width,
                uint height,
                Eigen::Transform<float, 3, Eigen::Affine> pixelToViewportTransform,
                float PBOspacing,
                Vector2f translation
        ) {
    boost::lock_guard<boost::mutex> lock(mMutex);

    OpenCLDevice::pointer device = getMainDevice();
    cl::CommandQueue queue = device->getCommandQueue();
    std::vector<cl::Memory> v;
    v.push_back(PBO);
    queue.enqueueAcquireGLObjects(&v);

    // Map would probably be better here, but doesn't work on NVIDIA, segfault surprise!
    //float* pixels = (float*)queue.enqueueMapBuffer(PBO, CL_TRUE, CL_MAP_WRITE, 0, width*height*sizeof(float)*4);
    // Host-side copy of the PBO: width*height RGBA float pixels
    boost::shared_array<float> pixels(new float[width*height*4]);
    queue.enqueueReadBuffer(PBO, CL_TRUE, 0, width*height*4*sizeof(float), pixels.get());

    boost::unordered_map<uint, Mesh::pointer>::iterator it;
    for(it = mMeshToRender.begin(); it != mMeshToRender.end(); it++) {
    	Mesh::pointer mesh = it->second;
    	if(mesh->getDimensions() != 2) // Mesh must be 2D
    		continue;

		Color color = mDefaultColor;
        ProcessObjectPort port = getInputPort(it->first);
        if(mInputColors.count(port) > 0) {
            color = mInputColors[port];
        }

    	MeshAccess::pointer access = mesh->getMeshAccess(ACCESS_READ);
        std::vector<VectorXui> lines = access->getLines();
        std::vector<MeshVertex> vertices = access->getVertices();

        // Draw each line
        for(int i = 0; i < lines.size(); ++i) {
        	Vector2ui line = lines[i];
        	Vector2f a = vertices[line.x()].getPosition();
        	Vector2f b = vertices[line.y()].getPosition();
        	Vector2f direction = b - a;
        	float lengthInPixels = ceil(direction.norm() / PBOspacing);

        	// Draw the line
        	for(int j = 0; j <= lengthInPixels; ++j) {
        		Vector2f positionInMM = a + direction*((float)j/lengthInPixels);
        		Vector2f positionInPixels = positionInMM / PBOspacing;

        		int x = round(positionInPixels.x());
        		int y = round(positionInPixels.y());
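        		// Flip the vertical axis before indexing into the PBO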
        		y = height - 1 - y;
        		if(x < 0 || y < 0 || x >= width || y >= height)
        			continue;

        		pixels[4*(x + y*width)] = color.getRedValue();
        		pixels[4*(x + y*width) + 1] = color.getGreenValue();
        		pixels[4*(x + y*width) + 2] = color.getBlueValue();
        	}
        }
    }

    //queue.enqueueUnmapMemObject(PBO, pixels);
    queue.enqueueWriteBuffer(PBO, CL_TRUE, 0, width*height*4*sizeof(float), pixels.get());
    queue.enqueueReleaseGLObjects(&v);
}
Example #20
0
Image Texture::copyToImage() const
{
    // Easy case: empty texture
    if (!m_texture)
        return Image();

    ensureGlContext();

    // Make sure that the current texture binding will be preserved
    priv::TextureSaver save;

    // Create an array of pixels
    std::vector<Uint8> pixels(m_size.x * m_size.y * 4);

#ifdef SFML_OPENGL_ES

    // OpenGL ES doesn't have the glGetTexImage function, the only way to read
    // from a texture is to bind it to a FBO and use glReadPixels
    GLuint frameBuffer = 0;
    glCheck(GLEXT_glGenFramebuffers(1, &frameBuffer));
    if (frameBuffer)
    {
        GLint previousFrameBuffer;
        glCheck(glGetIntegerv(GLEXT_GL_FRAMEBUFFER_BINDING, &previousFrameBuffer));

        glCheck(GLEXT_glBindFramebuffer(GLEXT_GL_FRAMEBUFFER, frameBuffer));
        glCheck(GLEXT_glFramebufferTexture2D(GLEXT_GL_FRAMEBUFFER, GLEXT_GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, m_texture, 0));
        glCheck(glReadPixels(0, 0, m_size.x, m_size.y, GL_RGBA, GL_UNSIGNED_BYTE, &pixels[0]));
        glCheck(GLEXT_glDeleteFramebuffers(1, &frameBuffer));

        glCheck(GLEXT_glBindFramebuffer(GLEXT_GL_FRAMEBUFFER, previousFrameBuffer));
    }

#else

    if ((m_size == m_actualSize) && !m_pixelsFlipped)
    {
        // Texture is not padded nor flipped, we can use a direct copy
        glCheck(glBindTexture(GL_TEXTURE_2D, m_texture));
        glCheck(glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_UNSIGNED_BYTE, &pixels[0]));
    }
    else
    {
        // Texture is either padded or flipped, we have to use a slower algorithm

        // All the pixels will first be copied to a temporary array
        std::vector<Uint8> allPixels(m_actualSize.x * m_actualSize.y * 4);
        glCheck(glBindTexture(GL_TEXTURE_2D, m_texture));
        glCheck(glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_UNSIGNED_BYTE, &allPixels[0]));

        // Then we copy the useful pixels from the temporary array to the final one
        const Uint8* src = &allPixels[0];
        Uint8* dst = &pixels[0];
        int srcPitch = m_actualSize.x * 4;
        int dstPitch = m_size.x * 4;

        // Handle the case where source pixels are flipped vertically
        if (m_pixelsFlipped)
        {
            src += srcPitch * (m_size.y - 1);
            srcPitch = -srcPitch;
        }

        for (unsigned int i = 0; i < m_size.y; ++i)
        {
            std::memcpy(dst, src, dstPitch);
            src += srcPitch;
            dst += dstPitch;
        }
    }

#endif // SFML_OPENGL_ES

    // Create the image
    Image image;
    image.create(m_size.x, m_size.y, &pixels[0]);

    return image;
}
Example #21
0
void run_framedump_loop(
	eglplus::Surface& surface,
	std::unique_ptr<Example>& example,
	ExampleClock& clock,
	const ExampleOptions& opts
)
{
	std::vector<char> txtbuf(1024);
	std::cin.getline(txtbuf.data(), txtbuf.size());
	if(std::strcmp(opts.framedump_prefix, txtbuf.data()) != 0) return;

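	// Build control points for a pseudo-random mouse path traced as a cubic Bezier loop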
	const std::size_t mouse_path_pts = 7;
	std::vector<Vec2f> mouse_path_pos(mouse_path_pts);
	std::vector<Vec2f> mouse_path_dir(mouse_path_pts);

	for(std::size_t p=0; p!= mouse_path_pts; ++p)
	{
		mouse_path_pos[p] = Vec2f(
			std::rand() % opts.width,
			std::rand() % opts.height
		);
		mouse_path_dir[p] = Vec2f(
			(std::rand()%2?1.0:-1.0)*10.0f*
			(0.2+float(std::rand())/float(RAND_MAX)*0.8),
			(std::rand()%2?1.0:-1.0)*10.0f*
			(0.2+float(std::rand())/float(RAND_MAX)*0.8)
		);
	}

	typedef CubicBezierLoop<Vec2f, double> Loop;

	double t = 0.0;
	double period = 1.0 / 25.0;
	GLuint frame_no = 0;
	std::vector<char> pixels(opts.width * opts.height * 4);

	GLuint border = 32;

	glEnable(GL_MULTISAMPLE);

	while(true)
	{
		Vec2f mouse_pos = Loop(mouse_path_pos).Position(t*0.2);

		for(std::size_t p=0; p!= mouse_path_pts; ++p)
		{
			Vec2f dir = mouse_path_dir[p];
			Vec2f pos = mouse_path_pos[p];

			if((pos.x() < border) && (dir.x() < 0.0))
			{
				dir = Vec2f(-dir.x(), dir.y());
			}
			if((pos.y() < border) && (dir.y() < 0.0))
			{
				dir = Vec2f( dir.x(),-dir.y());
			}
			if((pos.x() > opts.width-border) && (dir.x() > 0.0))
			{
				dir = Vec2f(-dir.x(), dir.y());
			}
			if((pos.y() > opts.height-border) && (dir.y() > 0.0))
			{
				dir = Vec2f( dir.x(),-dir.y());
			}

			mouse_path_dir[p] = dir;
			mouse_path_pos[p] = pos + dir;
		}

		float mouse_x = mouse_pos.x();
		float mouse_y = mouse_pos.y();

		if(mouse_x < 0.0f) mouse_x = 0.0f;
		if(mouse_y < 0.0f) mouse_y = 0.0f;
		if(mouse_x > opts.width) mouse_x = opts.width;
		if(mouse_y > opts.height) mouse_y = opts.height;

		example->MouseMove(
			GLuint(mouse_x),
			GLuint(mouse_y),
			opts.width,
			opts.height
		);

		t += period;
		clock.Update(t);

		if(!example->Continue(clock)) break;

		unsigned part_no = 0;
		double comp = 0.0;
		do
		{
			comp = example->RenderPart(part_no++, clock);
		}
		while(comp < 1.0);

		glFinish();
		glReadPixels(
			0, 0,
			opts.width,
			opts.height,
			GL_RGBA,
			GL_UNSIGNED_BYTE,
			pixels.data()
		);
		glFinish();
		surface.SwapBuffers();
		std::stringstream filename;
		filename <<
			opts.framedump_prefix <<
			std::setfill('0') << std::setw(6) <<
			frame_no << ".rgba";
		{
			std::ofstream file(filename.str());
			file.write(pixels.data(), pixels.size());
			file.flush();
		}
		std::cout << filename.str() << std::endl;
		++frame_no;

		txtbuf.resize(filename.str().size()+1);
		std::cin.getline(txtbuf.data(), txtbuf.size());

		if(std::strncmp(
			filename.str().c_str(),
			txtbuf.data(),
			txtbuf.size()
		) != 0) break;
	}
}
Example #22
0
void Detector::setPixel(int i, float value) {
    pixels(i) = value;
}
Example #23
0
    bool DecodePNG ( Image& aImage, size_t aBufferSize, const void* aBuffer )
    {
        if ( png_sig_cmp ( static_cast<uint8_t*> ( const_cast<void*> ( aBuffer ) ), 0, 8 ) != 0 )
        {
            return false;
        }
        try
        {
            png_structp png_ptr =
                png_create_read_struct ( PNG_LIBPNG_VER_STRING,
                                         nullptr, nullptr, nullptr );
            if ( png_ptr == nullptr )
            {
                throw std::runtime_error ( "png_create_read_struct failed." );
            }
            png_infop info_ptr = png_create_info_struct ( png_ptr );
            if ( info_ptr == nullptr )
            {
                throw std::runtime_error ( "png_create_info_struct failed." );
            }
            if ( setjmp ( png_jmpbuf ( png_ptr ) ) )
            {
                throw std::runtime_error ( "Error during init_io." );
            }
            png_read_memory_struct read_memory_struct = {static_cast<const uint8_t*> ( aBuffer ), static_cast<const uint8_t*> ( aBuffer ) + 8,
                                                         static_cast<png_size_t> ( aBufferSize * sizeof ( uint8_t ) )
                                                        };
            png_set_read_fn ( png_ptr, &read_memory_struct, png_read_memory_data );
            png_set_sig_bytes ( png_ptr, 8 );

            png_read_info ( png_ptr, info_ptr );

            uint32_t width = png_get_image_width ( png_ptr, info_ptr );
            uint32_t height = png_get_image_height ( png_ptr, info_ptr );
            png_byte color_type = png_get_color_type ( png_ptr, info_ptr );
            png_byte bit_depth = png_get_bit_depth ( png_ptr, info_ptr );

            Image::ImageFormat format;
            Image::ImageType type;
            if ( ( color_type == PNG_COLOR_TYPE_RGB ) || ( color_type == PNG_COLOR_TYPE_RGBA ) )
            {
                format = ( color_type == PNG_COLOR_TYPE_RGB ) ? Image::ImageFormat::RGB : Image::ImageFormat::RGBA;
                type   = ( bit_depth == 8 ) ? Image::ImageType::UNSIGNED_BYTE : Image::ImageType::UNSIGNED_SHORT;
            }
            else
            {
                throw std::runtime_error ( "PNG image color type not supported...yet" );
            }

            /*int number_of_passes =*/ png_set_interlace_handling ( png_ptr );
            png_read_update_info ( png_ptr, info_ptr );

            /* read file */
            if ( setjmp ( png_jmpbuf ( png_ptr ) ) )
            {
                throw std::runtime_error ( "Error during read_image." );
            }
            // --------------------------------------
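            // Allocate one contiguous pixel buffer and point each PNG row pointer into it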
            png_size_t rowbytes = png_get_rowbytes ( png_ptr, info_ptr );
            std::vector<uint8_t*> row_pointers ( height );
            std::vector<uint8_t> pixels ( width * height * GetPixelSize ( format, type ) );
            for ( png_uint_32 y = 0; y < height; ++y )
            {
                row_pointers[y] = pixels.data() + ( rowbytes * y );
            }
            // --------------------------------------
            png_read_image ( png_ptr, row_pointers.data() );
            png_destroy_read_struct ( &png_ptr, &info_ptr, ( png_infopp ) nullptr );
            aImage.Initialize ( width, height, format, type, pixels.data() );
        }
        catch ( std::runtime_error& e )
        {
            std::cout << e.what() << std::endl;
            return false;
        }
        return true;
    }
Example #24
0
int main(int argc, char** argv)
{
    ros::init(argc, argv, "gaussian_tracker");
    ros::NodeHandle nh("~");

    /* ------------------------------ */
    /* - Parameters                 - */
    /* ------------------------------ */
    // tracker's main parameter container
    dbot::GaussianTrackerBuilder::Parameters params;

    // camera data
    dbot::CameraData::Resolution resolution;
    std::string camera_info_topic;
    std::string depth_image_topic;
    int downsampling_factor;

    // object data
    std::string object_package;
    std::string object_directory;
    std::vector<std::string> object_meshes;

    // parameter shorthand prefix
    std::string pre = "gaussian_filter/";

    /* ------------------------------ */
    /* - Read out parameters        - */
    /* ------------------------------ */
    // get object parameters
    /// \todo nh.getParam does not check whether the parameter exists in the
    /// config file. this is dangerous, we should use ri::read instead

    nh.getParam("object/meshes", object_meshes);
    nh.getParam("object/package", object_package);
    nh.getParam("object/directory", object_directory);

    params.ori.package_path(ros::package::getPath(object_package));
    params.ori.directory(object_directory);
    params.ori.meshes(object_meshes);

    // get filter parameters
    nh.getParam(pre + "unscented_transform/alpha", params.ut_alpha);
    nh.getParam(pre + "moving_average_update_rate",
                params.moving_average_update_rate);
    nh.getParam(pre + "center_object_frame", params.center_object_frame);

    nh.getParam(pre + "observation/tail_weight",
                params.observation.tail_weight);
    nh.getParam(pre + "observation/bg_depth", params.observation.bg_depth);
    nh.getParam(pre + "observation/fg_noise_std",
                params.observation.fg_noise_std);
    nh.getParam(pre + "observation/bg_noise_std",
                params.observation.bg_noise_std);
    nh.getParam(pre + "observation/uniform_tail_max",
                params.observation.uniform_tail_max);
    nh.getParam(pre + "observation/uniform_tail_min",
                params.observation.uniform_tail_min);

    // state transition parameters
    nh.getParam(pre + "object_transition/linear_sigma_x",
                params.object_transition.linear_sigma_x);
    nh.getParam(pre + "object_transition/linear_sigma_y",
                params.object_transition.linear_sigma_y);
    nh.getParam(pre + "object_transition/linear_sigma_z",
                params.object_transition.linear_sigma_z);

    nh.getParam(pre + "object_transition/angular_sigma_x",
                params.object_transition.angular_sigma_x);
    nh.getParam(pre + "object_transition/angular_sigma_y",
                params.object_transition.angular_sigma_y);
    nh.getParam(pre + "object_transition/angular_sigma_z",
                params.object_transition.angular_sigma_z);

    nh.getParam(pre + "object_transition/velocity_factor",
                params.object_transition.velocity_factor);
    params.object_transition.part_count = object_meshes.size();

    // camera parameters
    nh.getParam("camera_info_topic", camera_info_topic);
    nh.getParam("depth_image_topic", depth_image_topic);
    nh.getParam("downsampling_factor", downsampling_factor);
    nh.getParam("resolution/width", resolution.width);
    nh.getParam("resolution/height", resolution.height);

    /* ------------------------------ */
    /* - Setup camera data          - */
    /* ------------------------------ */
    // setup camera data
    auto camera_data_provider = std::shared_ptr<dbot::CameraDataProvider>(
        new dbot::RosCameraDataProvider(nh,
                                        camera_info_topic,
                                        depth_image_topic,
                                        resolution,
                                        downsampling_factor,
                                        2.0));
    auto camera_data = std::make_shared<dbot::CameraData>(camera_data_provider);

    // finally, set number of pixels
    params.observation.sensors = camera_data->pixels();

    /* ------------------------------ */
    /* - Initialize interactively   - */
    /* ------------------------------ */
    opi::InteractiveMarkerInitializer object_initializer(
        camera_data->frame_id(),
        params.ori.package(),
        params.ori.directory(),
        params.ori.meshes(),
        {},
        true);
    if (!object_initializer.wait_for_object_poses())
    {
        ROS_INFO("Setting object poses was interrupted.");
        return 0;
    }

    auto initial_ros_poses = object_initializer.poses();
    std::vector<Tracker::State> initial_poses;
    initial_poses.push_back(Tracker::State(params.ori.count_meshes()));
    int i = 0;
    for (auto& ros_pose : initial_ros_poses)
    {
        initial_poses[0].component(i++) = ri::to_pose_velocity_vector(ros_pose);
    }

    /* ------------------------------ */
    /* - Create the tracker         - */
    /* ------------------------------ */
    auto tracker_builder =
        dbot::GaussianTrackerBuilder(params, camera_data);

    auto tracker = tracker_builder.build();
    tracker->initialize(initial_poses);

    /* ------------------------------ */
    /* - Tracker publisher          - */
    /* ------------------------------ */
    int object_color[3];
    nh.getParam(pre + "object_color/R", object_color[0]);
    nh.getParam(pre + "object_color/G", object_color[1]);
    nh.getParam(pre + "object_color/B", object_color[2]);
    auto tracker_publisher = dbot::ObjectStatePublisher(
        params.ori, object_color[0], object_color[1], object_color[2]);

    /* ------------------------------ */
    /* - Create and run tracker     - */
    /* - node                       - */
    /* ------------------------------ */
    dbot::ObjectTrackerRos<dbot::GaussianTracker>
        ros_object_tracker(tracker, camera_data, params.ori.count_meshes());

    ros::Subscriber subscriber =
        nh.subscribe(depth_image_topic,
                     1,
                     &dbot::ObjectTrackerRos<
                         dbot::GaussianTracker>::update_obsrv,
                     &ros_object_tracker);

    while (ros::ok())
    {
        if (ros_object_tracker.run_once())
        {
            tracker_publisher.publish(
                ros_object_tracker.current_state_messages());
        }
        ros::spinOnce();
    }

    ros::spin();

    return 0;
}
Example #25
0
        bool TImage::LoadData(size_t type, const size_t *offsets, IStream *pStream)
        {
            AD_FUNCTION_PERFORMANCE_TEST
            size_t packet_size = 1;
            if (info.depth > 8)
                packet_size++;
            std::vector<unsigned char> pixels((info.width + 256)*packet_size, 0);

            std::vector<unsigned char> compact_pixels;
            if (info.compression == RLECompression)
            {
                size_t length = 0;
                for (size_t y = 0; y < info.height; y++)
                {
                    if(length < offsets[y])
                        length = offsets[y];
                }
                compact_pixels.resize(length, 0);
            }
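            // Read the image scanline by scanline, decoding RLE-compressed rows when needed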
            for(size_t y = 0; y < info.height; y++)
            {        
                ULONG count = 0;  
                if (info.depth == 1)
                {

                    size_t length = (info.width + 7)/8;
                    if (info.compression != RLECompression)
                        pStream->Read(&pixels[0], (ULONG)length, &count);
                    else
                    {
                        pStream->Read(&compact_pixels[0], (ULONG)offsets[y], &count);
                        if ((size_t)count != offsets[y])
                            break;
                        count = (ULONG)DecodePixels(&compact_pixels[0], offsets[y], (size_t)123456, &pixels[0], length);
                    }
                    if ((size_t)count < length)
                        break;
                }
                else
                {
                    size_t length = packet_size*info.width;
                    if (info.compression != RLECompression)
                        pStream->Read(&pixels[0], (ULONG)length, &count);
                    else
                    {
                        pStream->Read(&compact_pixels[0], (ULONG)offsets[y], &count);
                        if ((size_t)count != offsets[y])
                            break;
                        count = (ULONG)DecodePixels(&compact_pixels[0], offsets[y], info.depth, &pixels[0], length);
                    }
                    if ((size_t)count < length)
                        break;
                }

                TPixel *q = &data[y*info.width];
                const unsigned char *p = &pixels[0];
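                // Write the decoded scanline into the channel selected by 'type' (0=R, 1=G, 2=B, 3/4=A)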
                for (size_t x = 0; x < info.width; x++)
                {
                    unsigned char pixel;
                    if (packet_size == 1)
                        pixel = *p++;
                    else
                        pixel = ScaleShortToChar(ReadShort(p));
                    switch (type)
                    {
                    case 0:
                        q->red = pixel;
                        if (info.channels == 1)
                        {
                            q->green = q->red;
                            q->blue = q->red;
                            q->alpha = CHANNEL_MAX;
                        }
                        break;
                    case 1:
                        q->green = pixel;
                        break;
                    case 2:
                        q->blue = pixel;
                        if (info.channels == 3)
                            q->alpha = CHANNEL_MAX;
                        break;
                    case 3:
                        q->alpha = pixel;
                        break;
                    case 4:
                        q->alpha = pixel;
                        break;
                    default:
                        break;
                    }
                    q++;
                }
            }
            return true;
        }
Example #26
0
static bool
convert_file (const std::string &in_filename, const std::string &out_filename)
{
    if (noclobber && Filesystem::exists(out_filename)) {
        std::cerr << "iconvert ERROR: Output file already exists \""
                  << out_filename << "\"\n";
        return false;
    }

    if (verbose)
        std::cout << "Converting " << in_filename << " to " << out_filename << "\n";

    std::string tempname = out_filename;
    if (tempname == in_filename) {
        tempname = out_filename + ".tmp"
                    + Filesystem::extension (out_filename);
    }

    // Find an ImageIO plugin that can open the input file, and open it.
    ImageInput *in = ImageInput::open (in_filename.c_str());
    if (! in) {
        std::string err = geterror();
        std::cerr << "iconvert ERROR: " 
                  << (err.length() ? err : Strutil::format("Could not open \"%s\"", in_filename))
                  << "\n";
        delete in;
        return false;
    }
    ImageSpec inspec = in->spec();
    std::string metadatatime = inspec.get_string_attribute ("DateTime");

    // Find an ImageIO plugin that can open the output file, and open it
    ImageOutput *out = ImageOutput::create (tempname.c_str());
    if (! out) {
        std::cerr 
            << "iconvert ERROR: Could not find an ImageIO plugin to write \"" 
            << out_filename << "\" :" << geterror() << "\n";
        delete in;
        return false;
    }

    // In order to deal with formats that support subimages, but not
    // subimage appending, we gather them all first.
    std::vector<ImageSpec> subimagespecs;
    if (out->supports("multiimage") && !out->supports("appendsubimage")) {
        ImageCache *imagecache = ImageCache::create ();
        int nsubimages = 0;
        ustring ufilename (in_filename);
        imagecache->get_image_info (ufilename, 0, 0, ustring("subimages"),
                                    TypeDesc::TypeInt, &nsubimages);
        if (nsubimages > 1) {
            subimagespecs.resize (nsubimages);
            for (int i = 0;  i < nsubimages;  ++i) {
                ImageSpec inspec = *imagecache->imagespec (ufilename, i, 0,
                                                           true /*native*/);
                subimagespecs[i] = inspec;
                adjust_spec (in, out, inspec, subimagespecs[i]);
            }
        }
        ImageCache::destroy (imagecache);
    }

    bool ok = true;
    bool mip_to_subimage_warning = false;
    for (int subimage = 0;
           ok && in->seek_subimage(subimage,0,inspec);
           ++subimage) {

        if (subimage > 0 &&  !out->supports ("multiimage")) {
            std::cerr << "iconvert WARNING: " << out->format_name()
                      << " does not support multiple subimages.\n";
            std::cerr << "\tOnly the first subimage has been copied.\n";
            break;  // we're done
        }

        int miplevel = 0;
        do {
            // Copy the spec, with possible change in format
            ImageSpec outspec = inspec;
            bool nocopy = adjust_spec (in, out, inspec, outspec);
        
            if (miplevel > 0) {
                // Moving to next MIP level
                ImageOutput::OpenMode mode;
                if (out->supports ("mipmap"))
                    mode = ImageOutput::AppendMIPLevel;
                else if (out->supports ("multiimage") &&
                         out->supports ("appendsubimage")) {
                    mode = ImageOutput::AppendSubimage; // use if we must
                    if (! mip_to_subimage_warning
                        && strcmp(out->format_name(),"tiff")) {
                        std::cerr << "iconvert WARNING: " << out->format_name()
                                  << " does not support MIPmaps.\n";
                        std::cerr << "\tStoring the MIPmap levels in subimages.\n";
                    }
                    mip_to_subimage_warning = true;
                } else {
                    std::cerr << "iconvert WARNING: " << out->format_name()
                              << " does not support MIPmaps.\n";
                    std::cerr << "\tOnly the first level has been copied.\n";
                    break;  // on to the next subimage
                }
                ok = out->open (tempname.c_str(), outspec, mode);
            } else if (subimage > 0) {
                // Moving to next subimage
                ok = out->open (tempname.c_str(), outspec,
                                ImageOutput::AppendSubimage);
            } else {
                // First time opening
                if (subimagespecs.size())
                    ok = out->open (tempname.c_str(), int(subimagespecs.size()),
                                    &subimagespecs[0]);
                else
                    ok = out->open (tempname.c_str(), outspec, ImageOutput::Create);
            }
            if (! ok) {
                std::string err = out->geterror();
                std::cerr << "iconvert ERROR: " 
                          << (err.length() ? err : Strutil::format("Could not open \"%s\"", out_filename))
                          << "\n";
                ok = false;
                break;
            }

            if (! nocopy) {
                ok = out->copy_image (in);
                if (! ok)
                    std::cerr << "iconvert ERROR copying \"" << in_filename 
                              << "\" to \"" << out_filename << "\" :\n\t" 
                              << out->geterror() << "\n";
            } else {
                // Need to do it by hand for some reason.  Future expansion in which
                // only a subset of channels are copied, or some such.
                std::vector<char> pixels ((size_t)outspec.image_bytes(true));
                ok = in->read_image (outspec.format, &pixels[0]);
                if (! ok) {
                    std::cerr << "iconvert ERROR reading \"" << in_filename 
                              << "\" : " << in->geterror() << "\n";
                } else {
                    ok = out->write_image (outspec.format, &pixels[0]);
                    if (! ok)
                        std::cerr << "iconvert ERROR writing \"" << out_filename 
                                  << "\" : " << out->geterror() << "\n";
                }
            }
        
            ++miplevel;
        } while (ok && in->seek_subimage(subimage,miplevel,inspec));
    }

    out->close ();
    delete out;
    in->close ();
    delete in;

    // Figure out a time for the input file -- either one supplied by
    // the metadata, or the actual time stamp of the input file.
    std::time_t in_time;
    if (metadatatime.empty() ||
           ! DateTime_to_time_t (metadatatime.c_str(), in_time))
        in_time = Filesystem::last_write_time (in_filename);

    if (out_filename != tempname) {
        if (ok) {
            Filesystem::remove (out_filename);
            Filesystem::rename (tempname, out_filename);
        }
        else
            Filesystem::remove (tempname);
    }

    // If user requested, try to adjust the file's modification time to
    // the creation time indicated by the file's DateTime metadata.
    if (ok && adjust_time)
        Filesystem::last_write_time (out_filename, in_time);

    return ok;
}
Example #27
0
void run_framedump_loop(
	const x11::Display& display,
	const x11::Window& win,
	const glx::Context& ctx,
	std::unique_ptr<Example>& example,
	ExampleClock& clock,
	GLuint width,
	GLuint height,
	const char* framedump_prefix
)
{
	std::vector<char> txtbuf(1024);
	std::cin.getline(txtbuf.data(), txtbuf.size());
	if(std::strcmp(framedump_prefix, txtbuf.data()) != 0) return;

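	// Build control points for a pseudo-random mouse path traced as a cubic Bezier loop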
	const std::size_t mouse_path_pts = 7;
	std::vector<Vec2f> mouse_path_pos(mouse_path_pts);
	std::vector<Vec2f> mouse_path_dir(mouse_path_pts);

	for(std::size_t p=0; p!= mouse_path_pts; ++p)
	{
		mouse_path_pos[p] = Vec2f(
			std::rand() % width,
			std::rand() % height
		);
		mouse_path_dir[p] = Vec2f(
			(std::rand()%2?1.0:-1.0)*10.0f*
			(0.2+float(std::rand())/float(RAND_MAX)*0.8),
			(std::rand()%2?1.0:-1.0)*10.0f*
			(0.2+float(std::rand())/float(RAND_MAX)*0.8)
		);
	}

	typedef CubicBezierLoop<Vec2f, double> Loop;

	double t = 0.0;
	double period = 1.0 / 25.0;
	GLuint frame_no = 0;
	std::vector<char> pixels(width * height * 4);

	GLuint border = 32;

	XEvent event;

	while(true)
	{
		while(display.NextEvent(event));

		Vec2f mouse_pos = Loop(mouse_path_pos).Position(t*0.2);

		for(std::size_t p=0; p!= mouse_path_pts; ++p)
		{
			Vec2f dir = mouse_path_dir[p];
			Vec2f pos = mouse_path_pos[p];

			if((pos.x() < border) && (dir.x() < 0.0))
				dir = Vec2f(-dir.x(), dir.y());
			if((pos.y() < border) && (dir.y() < 0.0))
				dir = Vec2f( dir.x(),-dir.y());
			if((pos.x() > width-border) && (dir.x() > 0.0))
				dir = Vec2f(-dir.x(), dir.y());
			if((pos.y() > height-border) && (dir.y() > 0.0))
				dir = Vec2f( dir.x(),-dir.y());

			mouse_path_dir[p] = dir;
			mouse_path_pos[p] = pos + dir;
		}

		float mouse_x = mouse_pos.x();
		float mouse_y = mouse_pos.y();

		if(mouse_x < 0.0f) mouse_x = 0.0f;
		if(mouse_y < 0.0f) mouse_y = 0.0f;
		if(mouse_x > width) mouse_x = width;
		if(mouse_y > height) mouse_y = height;

		example->MouseMove(
			GLuint(mouse_x),
			GLuint(mouse_y),
			width,
			height
		);

		t += period;
		clock.Update(t);
		if(!example->Continue(clock)) break;
		example->Render(clock);
		glFinish();
		glReadPixels(
			0, 0,
			width,
			height,
			GL_RGBA,
			GL_UNSIGNED_BYTE,
			pixels.data()
		);
		glFinish();
		ctx.SwapBuffers(win);
		std::stringstream filename;
		filename <<
			framedump_prefix <<
			std::setfill('0') << std::setw(6) <<
			frame_no << ".rgba";
		{
			std::ofstream file(filename.str());
			file.write(pixels.data(), pixels.size());
			file.flush();
		}
		std::cout << filename.str() << std::endl;
		++frame_no;

		txtbuf.resize(filename.str().size()+1);
		std::cin.getline(txtbuf.data(), txtbuf.size());

		if(std::strncmp(
			filename.str().c_str(),
			txtbuf.data(),
			txtbuf.size()
		) != 0) break;
	}
	while(display.NextEvent(event));
}
Example #28
0
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(EGLImageTest, reporter, context0, glCtx0) {
    // Try to create a second GL context and then check if the contexts have necessary
    // extensions to run this test.

    if (kGLES_GrGLStandard != glCtx0->gl()->fStandard) {
        return;
    }
    GrGLGpu* gpu0 = static_cast<GrGLGpu*>(context0->getGpu());
    if (!gpu0->glCaps().glslCaps()->externalTextureSupport()) {
        return;
    }

    SkAutoTDelete<SkGLContext> glCtx1 = glCtx0->createNew();
    if (!glCtx1) {
        return;
    }
    GrContext* context1 = GrContext::Create(kOpenGL_GrBackend, (GrBackendContext)glCtx1->gl());
    const GrGLTextureInfo* backendTexture1 = nullptr;
    GrEGLImage image = GR_EGL_NO_IMAGE;
    GrGLTextureInfo externalTexture;
    externalTexture.fID = 0;

    if (!context1) {
        cleanup(glCtx0, externalTexture.fID, glCtx1, context1, backendTexture1, image);
        return;
    }

    if (!glCtx1->gl()->hasExtension("EGL_KHR_image") ||
        !glCtx1->gl()->hasExtension("EGL_KHR_gl_texture_2D_image")) {
        cleanup(glCtx0, externalTexture.fID, glCtx1, context1, backendTexture1, image);
        return;
    }

    ///////////////////////////////// CONTEXT 1 ///////////////////////////////////

    // Use GL Context 1 to create a texture unknown to GrContext.
    context1->flush();
    GrGpu* gpu1 = context1->getGpu();
    static const int kSize = 100;
    backendTexture1 = reinterpret_cast<const GrGLTextureInfo*>(
        gpu1->createTestingOnlyBackendTexture(nullptr, kSize, kSize, kRGBA_8888_GrPixelConfig));
    if (!backendTexture1 || !backendTexture1->fID) {
        ERRORF(reporter, "Error creating texture for EGL Image");
        cleanup(glCtx0, externalTexture.fID, glCtx1, context1, backendTexture1, image);
        return;
    }
    if (GR_GL_TEXTURE_2D != backendTexture1->fTarget) {
        ERRORF(reporter, "Expected backend texture to be 2D");
        cleanup(glCtx0, externalTexture.fID, glCtx1, context1, backendTexture1, image);
        return;
    }

    // Wrap the texture in an EGLImage
    image = glCtx1->texture2DToEGLImage(backendTexture1->fID);
    if (GR_EGL_NO_IMAGE == image) {
        ERRORF(reporter, "Error creating EGL Image from texture");
        cleanup(glCtx0, externalTexture.fID, glCtx1, context1, backendTexture1, image);
        return;
    }

    // Populate the texture using GL context 1. Important to use TexSubImage as TexImage orphans
    // the EGL image. Also, this must be done after creating the EGLImage as the texture
    // contents may not be preserved when the image is created.
    SkAutoTMalloc<uint32_t> pixels(kSize * kSize);
    for (int i = 0; i < kSize*kSize; ++i) {
        pixels.get()[i] = 0xDDAABBCC;
    }
    GR_GL_CALL(glCtx1->gl(), ActiveTexture(GR_GL_TEXTURE0));
    GR_GL_CALL(glCtx1->gl(), BindTexture(backendTexture1->fTarget, backendTexture1->fID));
    GR_GL_CALL(glCtx1->gl(), TexSubImage2D(backendTexture1->fTarget, 0, 0, 0, kSize, kSize,
                                           GR_GL_RGBA, GR_GL_UNSIGNED_BYTE, pixels.get()));
    GR_GL_CALL(glCtx1->gl(), Finish());
    // We've been making direct GL calls in GL context 1, let GrContext 1 know its internal
    // state is invalid.
    context1->resetContext();

    ///////////////////////////////// CONTEXT 0 ///////////////////////////////////

    // Make a new texture ID in GL Context 0 from the EGL Image
    glCtx0->makeCurrent();
    externalTexture.fTarget = GR_GL_TEXTURE_EXTERNAL;
    externalTexture.fID = glCtx0->eglImageToExternalTexture(image);

    // Wrap this texture ID in a GrTexture
    GrBackendTextureDesc externalDesc;
    externalDesc.fConfig = kRGBA_8888_GrPixelConfig;
    externalDesc.fWidth = kSize;
    externalDesc.fHeight = kSize;
    externalDesc.fTextureHandle = reinterpret_cast<GrBackendObject>(&externalTexture);
    SkAutoTUnref<GrTexture> externalTextureObj(
        context0->textureProvider()->wrapBackendTexture(externalDesc));
    if (!externalTextureObj) {
        ERRORF(reporter, "Error wrapping external texture in GrTexture.");
        cleanup(glCtx0, externalTexture.fID, glCtx1, context1, backendTexture1, image);
        return;
    }

    // Should not be able to wrap as a RT
    externalDesc.fFlags = kRenderTarget_GrBackendTextureFlag;
    SkAutoTUnref<GrTexture> externalTextureRTObj(
        context0->textureProvider()->wrapBackendTexture(externalDesc));
    if (externalTextureRTObj) {
        ERRORF(reporter, "Should not be able to wrap an EXTERNAL texture as a RT.");
    }
    externalDesc.fFlags = kNone_GrBackendTextureFlag;

    // Should not be able to wrap with a sample count
    externalDesc.fSampleCnt = 4;
    SkAutoTUnref<GrTexture> externalTextureMSAAObj(
        context0->textureProvider()->wrapBackendTexture(externalDesc));
    if (externalTextureMSAAObj) {
        ERRORF(reporter, "Should not be able to wrap an EXTERNAL texture with MSAA.");
    }
    externalDesc.fSampleCnt = 0;

    test_read_pixels(reporter, context0, externalTextureObj, pixels.get());

    test_write_pixels(reporter, context0, externalTextureObj);

    test_copy_surface(reporter, context0, externalTextureObj, pixels.get());

    cleanup(glCtx0, externalTexture.fID, glCtx1, context1, backendTexture1, image);
}
Example #29
0
File: heat.cpp Project: ttk592/fdm
int main(int argc, char** argv)
{

    if(argc<1) {
        printf("usage: %s <>\n", argv[0]);
        exit(EXIT_FAILURE);
    }

    fdm::enableexcept();    // for testing, raise fpu exceptions


    boost::timer	time_all;

    // reading pde parameters
    double	T=20.0;
    double	diffusion=0.5;
    fdm::SVector<double,2>	convection;
    fdm::SVector<double,2>	x;
    convection[0]=1.0;
    convection[1]=0.5;

    x[0]=0.0;
    x[1]=0.0;


    // reading calculation parameters
    int	n_time=(int) (T*25);
    int	n_x=720;
    int	n_y=480;

    // n_x=360;
    // n_y=240;


    // generating grids
    fdm::Grid<1>	grid_time;
    fdm::GridC2<2>	grid_space;

    grid_time.set_uniform(0,0.0,T,n_time+1);
    grid_space.set_uniform(0,-7.2,7.2,n_x);
    grid_space.set_uniform(1,-4.8,4.8,n_y);
    grid_space.calculate();


    // defining the pde
    fdm::PDEHeat2D		pde;

    pde.Diffusion[0][0] = diffusion;
    pde.Diffusion[1][1] = diffusion;
    pde.Diffusion[0][1] = 0.0;
    pde.Diffusion[1][0] = 0.0;

    pde.Convection[0] = convection[0];
    pde.Convection[1] = convection[1];

    pde.Const = 0.0;

    // defining the boundary condition
    // make sure we prescribe fixed values at boundaries with incoming
    // convection, and free boundary condition for out flowing boundaries
    fdm::BoundaryConst<2>	boundary;

    boundary.TypeLower[0]=fdm::BoundaryType::Value;
    boundary.ValueLower[0]=0.0;
    boundary.TypeUpper[0]=fdm::BoundaryType::Free;
    boundary.ValueUpper[0]=0.0;

    boundary.TypeLower[1]=fdm::BoundaryType::Value;
    boundary.ValueLower[1]=0.0;
    boundary.TypeUpper[1]=fdm::BoundaryType::Free;
    boundary.ValueUpper[1]=0.0;


    // setting initial conditions
    fdm::GridFunction<2>    u(grid_space);
    u.iterator.begin();
    while(u.iterator.isend()==false) {
        if( u.coord().norm() <3.0 ) {
            u.value()=1.0;
        } else {
            u.value()=0.0;
        }
        u.iterator++;
    }


    // solving the pde
    boost::multi_array<double, 2>	pixels(boost::extents[n_y][n_x]);
    fdm::SVector<int,2>	idx;
    char	filename[30];
    for(size_t k=1; k<grid_time.size(0); k++) {
        fdm::FDMSolveStep(pde,boundary,u,grid_time,x,k,fdm::type::pred_corr,true,false);
        // saving the result in a 2d array
        for(int i=0; i<n_y; i++) {
            for(int j=0; j<n_x; j++) {
                idx[0]=j;
                idx[1]=i;
                pixels[i][j]=u.value(idx);
            }
        }
        // output of the result in a png in form of a heat map
        sprintf(filename,"out/heatmap-%.6lu.png",k);
        png::write_png(pixels,filename);
    }
    double	value=u(x);

    printf("-------------------- result --------------------\n");

    printf("Numerical value:\t %12.10f\n",value);
    printf("elapsed time: %.1f s:\n", time_all.elapsed());

    printf("\n");
    printf("generate an animation by calling\n\n");
    printf("mencoder \"mf://out/*.png\" -mf fps=25 -o out.avi -ovc lavc -lavcopts vcodec=mpeg4\n\n");

    return 0;		// success
}
Example #30
0
    virtual void startup() override {
        glGenBuffers(1, &m_vbo);
        glBindBuffer(GL_ARRAY_BUFFER, m_vbo);

        GLfloat vertexPositions[] = {0.5,  0.5, 0.0, 1.0, 0.5,  -0.5, 0.0, 1.0,
                                     -0.5, 0.5, 0.0, 1.0, -0.5, -0.5, 0.0, 1.0};
        glBufferData(GL_ARRAY_BUFFER, sizeof(vertexPositions), vertexPositions,
                     GL_STATIC_DRAW);
        glEnableVertexAttribArray(0);
        glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 0, NULL);

        glClearColor(0.0, 0.0, 0.0, 0.0);
        glClearDepthf(1.0);

        const char* vshSrc =
#ifndef OPENGL_ES2
                "#version 120\n"
#endif
                "attribute vec4 position;\n"
                "varying vec2 texPos;\n"
                "\n"
                "void main() {\n"
                "    gl_Position = position;\n"
                "    texPos = position.xy * 0.5 + 0.5;\n"
                "}\n";

        const char* fshSrc =
#ifdef OPENGL_ES2
                "precision mediump float;\n"
#else
                "#version 120\n"
#endif
                "uniform sampler2D sampler0;\n"
                "varying vec2 texPos;\n"
                "void main()\n"
                "{\n"
                "    gl_FragColor = texture2D(sampler0, texPos);\n"
                "}\n";

        m_program = gl::CreateProgram(vshSrc, fshSrc);
        glUseProgram(m_program->Name());

        glGenTextures(1, &m_tex);
        glBindTexture(GL_TEXTURE_2D, m_tex);

        GLubyte colors[5][4] = {
            {102, 127, 204, 255},
            { 140, 32, 48, 223}, 
            { 74, 189, 232, 239}, 
            { 214, 72, 239, 87},
            { 144, 223, 142, 223 },
        };

        int level = 0;

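        // Upload a five-level mipmap chain (16x8 down to 1x1), filling each level with a single RGBA color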
        for (size_t i = 16; i > 0; i /= 2) {
            const int width = i; 
            const int height = (i + 1) / 2;
            std::vector<GLubyte> pixels( width * height * 4);
            for (size_t j = 0; j < pixels.size(); j++) {
                pixels[j] = colors[level][j % 4];
            }
#ifdef OPENGL_ES2
            glTexImage2D(GL_TEXTURE_2D, level, GL_RGBA, width, height, 0, GL_RGBA,
                GL_UNSIGNED_BYTE, &pixels[0]);
#else
            glTexImage2D(GL_TEXTURE_2D, level, GL_RGBA8, width, height, 0, GL_RGBA,
                GL_UNSIGNED_BYTE, &pixels[0]);
#endif
            level++;
        }

        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    }