Example #1
	int Surface::setFormat(State & state, SDL_Surface * surface){
		PixelFormat * interfacePixelFormat = state.getInterface<PixelFormat>("LuaSDL_PixelFormat");
	
		SDL_PixelFormat * format = interfacePixelFormat->get(1);
		if (format){
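			// Copy every field of the supplied SDL_PixelFormat into the surface's existing format in place.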
			surface->format->format = format->format;
			surface->format->palette = format->palette;
			surface->format->BitsPerPixel = format->BitsPerPixel;
			surface->format->BytesPerPixel = format->BytesPerPixel;
			surface->format->Rmask = format->Rmask;
			surface->format->Gmask = format->Gmask;
			surface->format->Bmask = format->Bmask;
			surface->format->Amask = format->Amask;

			surface->format->Rloss = format->Rloss;
			surface->format->Gloss = format->Gloss;
			surface->format->Bloss = format->Bloss;
			surface->format->Aloss = format->Aloss;
			surface->format->Rshift = format->Rshift;
			surface->format->Gshift = format->Gshift;
			surface->format->Bshift = format->Bshift;
			surface->format->Ashift = format->Ashift;
		}
		return 0;
	}
Example #2
bool RemoteViewerCore::updatePixelFormat()
{
  PixelFormat pxFormat;
  m_logWriter.debug(_T("Check pixel format change..."));
  {
    AutoLock al(&m_pixelFormatLock);
    if (!m_isNewPixelFormat)
      return false;
    m_isNewPixelFormat = false;
    pxFormat = m_viewerPixelFormat;
  }

  // Use the snapshot taken under the pixel-format lock instead of re-reading the member.
  int bitsPerPixel = pxFormat.bitsPerPixel;
  if (bitsPerPixel != 8 && bitsPerPixel != 16 && bitsPerPixel != 32) {
    throw Exception(_T("Only 8, 16 or 32 bits per pixel supported!"));
  }

  {
    AutoLock al(&m_fbLock);
    // FIXME: the true-colour flag is not taken into account here,
    // so the formats may compare equal even when that flag differs.
    if (pxFormat.isEqualTo(&m_frameBuffer.getPixelFormat())) {
      return false;
    }
    if (m_frameBuffer.getBuffer() != 0)
      setFbProperties(&m_frameBuffer.getDimension(), &pxFormat);
  }

  RfbSetPixelFormatClientMessage pixelFormatMessage(&pxFormat);
  pixelFormatMessage.send(m_output);

  return true;
}
Example #3
bool WindowsScreenGrabber::getPixelFormatChanged()
{
  m_screen.update();

  PixelFormat currentPF = m_screen.getPixelFormat();
  PixelFormat frameBufferPF = m_workFrameBuffer.getPixelFormat();

  return !frameBufferPF.isEqualTo(&currentPF);
}
Example #4
	void WindowRendererSupportGLWin32::setPixelFormat(const DeviceContext* deviceContext, const PixelFormat& pixelFormat) const
	{
		HDC hdc = static_cast<const DeviceContextWin32*>(deviceContext)->getHDC();

		ushort colourBits = pixelFormat.getRedBits() + pixelFormat.getGreenBits() + pixelFormat.getBlueBits();
		ushort alphaBits = pixelFormat.getAlphaBits();
		ushort depthBits = 24;
		ushort stencilBits = 8;
		ushort samples = 4;

		PIXELFORMATDESCRIPTOR pfd;
		memset(&pfd, 0, sizeof(pfd));
		pfd.nSize = sizeof(pfd);
		pfd.nVersion = 1;
		pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
		pfd.iPixelType = PFD_TYPE_RGBA;
		pfd.cColorBits = (BYTE)colourBits;
		pfd.cAlphaBits = (BYTE)alphaBits;
		pfd.cDepthBits = (BYTE)depthBits;
		pfd.cStencilBits = (BYTE)stencilBits;

		int format = 0;

		if(mHasMultisample)
		{
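			// Build a WGL attribute list so a multisampled (and optionally sRGB-capable) format can be requested via wglChoosePixelFormatARB.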
			vector<int> attribList;
			attribList.push_back(WGL_DRAW_TO_WINDOW_ARB);	attribList.push_back(GL_TRUE);
			attribList.push_back(WGL_SUPPORT_OPENGL_ARB);	attribList.push_back(GL_TRUE);
			attribList.push_back(WGL_DOUBLE_BUFFER_ARB);	attribList.push_back(GL_TRUE);
			attribList.push_back(WGL_SAMPLE_BUFFERS_ARB);	attribList.push_back(GL_TRUE);
			attribList.push_back(WGL_ACCELERATION_ARB);		attribList.push_back(WGL_FULL_ACCELERATION_ARB);
			attribList.push_back(WGL_COLOR_BITS_ARB);		attribList.push_back(colourBits);
			attribList.push_back(WGL_ALPHA_BITS_ARB);		attribList.push_back(alphaBits);
			attribList.push_back(WGL_DEPTH_BITS_ARB);		attribList.push_back(depthBits);
			attribList.push_back(WGL_STENCIL_BITS_ARB);		attribList.push_back(stencilBits);
			attribList.push_back(WGL_SAMPLES_ARB);			attribList.push_back(samples);

			if(mHasHardwareGamma)
			{
				attribList.push_back(WGL_FRAMEBUFFER_SRGB_CAPABLE_EXT);	attribList.push_back(GL_TRUE);
			}

			attribList.push_back(0);

			UINT nformats;
			wglChoosePixelFormatARB(hdc, &(attribList[0]), nullptr, 1, &format, &nformats);
		}
		else
		{
			format = ChoosePixelFormat(hdc, &pfd);
		}

		SetPixelFormat(hdc, format, &pfd);
	}
Example #5
File: Pixel.cpp Project: tapio/Wendy
bool RGBtoRGBA::supports(const PixelFormat& targetFormat,
                         const PixelFormat& sourceFormat)
{
  if (targetFormat.type() != sourceFormat.type())
    return false;

  if (targetFormat.semantic() != PixelFormat::RGBA ||
      sourceFormat.semantic() != PixelFormat::RGB)
  {
    return false;
  }

  return true;
}
Example #6
void Bitmap::ReformatImage(const PixelFormat& pxf)
{
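	// Re-encode the existing pixel data into the requested format; indexed targets are handled by quantization instead.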
	assert( image );
	if ( format == pxf )
		return;

	// indexed reformat means color quantization
	if ( pxf.IsIndexed() )
	{
		QuantizeImage();
		return;
	}

	// conversion
	Bitmap temp(width,height,pxf);
	temp.BlitImage(*this,BLIT_COPY);
	
	// swap pointers
	delete[] image;
	image = temp.image;
	temp.image = NULL;
	
	pitch = temp.pitch;
	format = pxf;
}
Example #7
bool WindowsScreenGrabber::getPixelFormatChanged()
{
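  // Compare the current screen pixel format against the working frame buffer's format.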
  BMI bmi;
  if (!getBMI(&bmi)) {
    return false;
  }

  PixelFormat currentPF;
  PixelFormat frameBufferPF = m_workFrameBuffer.getPixelFormat();
  fillPixelFormat(&currentPF, &bmi);

  if (!frameBufferPF.isEqualTo(&currentPF)) {
    return true;
  }

  return false;
}
Example #8
File: Pixel.cpp Project: tapio/Wendy
void RGBtoRGBA::convert(void* target,
                        const PixelFormat& targetFormat,
                        const void* source,
                        const PixelFormat& sourceFormat,
                        size_t count)
{
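  // Expand each tightly packed RGB pixel to RGBA by appending a zero-filled alpha channel.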
  size_t channelSize = targetFormat.channelSize();
  size_t targetSize = targetFormat.size();
  size_t sourceSize = sourceFormat.size();

  while (count--)
  {
    std::memcpy(target, source, sourceSize);
    std::memset((char*) target + sourceSize, 0, channelSize);
    target = (char*) target + targetSize;
    source = (char*) source + sourceSize;
  }
}
Example #9
Bitmap::Bitmap(int xsize, int ysize, const PixelFormat& pxf)
{
	assert( xsize > 0 && ysize > 0 );

	width  = xsize;
	height = ysize;
	pitch  = width * pxf.GetBytes();
	format = pxf;
	image  = new uint8[height*pitch];
}
Example #10
void Bitmap::SetImage(int xsize, int ysize, const PixelFormat& pxf, void* i)
{
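	// Adopt the supplied buffer as the bitmap's pixel storage, releasing the previous image data.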
	assert( xsize > 0 && ysize > 0 );

	delete[] image;	

	width  = xsize;
	height = ysize;
	pitch  = xsize * pxf.GetBytes();
	format = pxf;
	image  = reinterpret_cast<uint8*>(i);
}
Example #11
	int Surface::convert(State & state, SDL_Surface * surface){
		Stack * stack = state.stack;
		PixelFormat * interfacePixelFormat = state.getInterface<PixelFormat>("LuaSDL_PixelFormat");
		Surface * interfaceSurface = state.getInterface<Surface>("LuaSDL_Surface");
	
		Uint32 flags;
		if (stack->is<LUA_TNUMBER>(2)){
			flags = stack->to<int>(2);
		}else{
			flags = 0;
		}

		SDL_PixelFormat * pf = interfacePixelFormat->get(1);

		if (pf){
			SDL_Surface * newSurface = SDL_ConvertSurface(surface, pf, flags);
			if (newSurface){
				interfaceSurface->push(newSurface, true);
				return 1;
			}
		}

		return 0;
	}
Example #12
GLenum ConvertToGL( const PixelFormat& format, bool sRGB )
{
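	// Map the engine pixel format (component type + semantic) to the matching OpenGL internal format enum.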
	switch(format.componentType())
	{
		case PixelComponent_UInt8:
		{
			switch(format.semantic())
			{
				case PixelSemantic_Luminance:
				{
					if(sRGB)
						return GL_SLUMINANCE8;
					else
						return GL_LUMINANCE8;
				}
				
				case PixelSemantic_LuminanceAlpha:
				{
					if(sRGB)
						return GL_SLUMINANCE8_ALPHA8;
					else
						return GL_LUMINANCE8_ALPHA8;
				}
				
				case PixelSemantic_RGB:
				{
					if(sRGB)
						return GL_SRGB8;
					else
						return GL_RGB8;
				}
				
				case PixelSemantic_RGBA:
				{
					if(sRGB)
						return GL_SRGB8_ALPHA8;
					else
						return GL_RGBA8;
				}
				
				default:
					;
			}
			
			break;
		}
		
		case PixelComponent_UInt16:
		{
			if(format.semantic() == PixelSemantic_Depth)
				return GL_DEPTH_COMPONENT16;
			break;
		}
		
		case PixelComponent_UInt24:
		{
			if(format.semantic() == PixelSemantic_Depth)
				return GL_DEPTH_COMPONENT24;
			break;
		}
		
		case PixelComponent_UInt32:
		{
			if(format.semantic() == PixelSemantic_Depth)
				return GL_DEPTH_COMPONENT32;
			break;
		}
		
		case PixelComponent_Float16:
		{
			switch(format.semantic())
			{
				case PixelSemantic_Luminance:
				{
					if(!GLEW_ARB_texture_float)
					{
						FatalError("GLEW_ARB_texture_float not supported");
						break;
					}
					
					return GL_LUMINANCE16F_ARB;
				}
				
				case PixelSemantic_LuminanceAlpha:
				{
					if(!GLEW_ARB_texture_float)
					{
						FatalError("GLEW_ARB_texture_float not supported");
						break;
					}
					
					return GL_LUMINANCE_ALPHA16F_ARB;
				}
				
				case PixelSemantic_RGB:
					return GL_RGB16F;
				
				case PixelSemantic_RGBA:
					return GL_RGBA16F;
				
				default:
					;
			}
			
			break;
		}
		
		case PixelComponent_Float32:
		{
			switch(format.semantic())
			{
				case PixelSemantic_Luminance:
				{
					if(!GLEW_ARB_texture_float)
					{
						FatalError("GLEW_ARB_texture_float not supported");
						break;
					}
					
					return GL_LUMINANCE32F_ARB;
				}
				
				case PixelSemantic_LuminanceAlpha:
				{
					if(!GLEW_ARB_texture_float)
					{
						FatalError("GLEW_ARB_texture_float not supported");
						break;
					}
					
					return GL_LUMINANCE_ALPHA32F_ARB;
				}
				
				case PixelSemantic_RGB:
					return GL_RGB32F;
				
				case PixelSemantic_RGBA:
					return GL_RGBA32F;
				
				default:
					;
			}
			
			break;
		}
		
		default:
			;
	}
	
	FatalError("Pixel format '%s' not supported!", format.asString().c_str());
	return 0;
}
Example #13
bool StreamerPNG::saveBitmap(const Bitmap & bitmap, std::ostream & output) {
	volatile int colorType = 0; // volatile is needed because of the setjmp later on.
	volatile int transforms = 0;

	const PixelFormat & pixelFormat = bitmap.getPixelFormat();
	if(pixelFormat == PixelFormat::RGBA) {
		colorType = PNG_COLOR_TYPE_RGB_ALPHA;
		transforms = PNG_TRANSFORM_IDENTITY;
	} else if(pixelFormat == PixelFormat::BGRA) {
		colorType = PNG_COLOR_TYPE_RGB_ALPHA;
		transforms = PNG_TRANSFORM_BGR;
	} else if(pixelFormat == PixelFormat::RGB) {
		colorType = PNG_COLOR_TYPE_RGB;
		transforms = PNG_TRANSFORM_IDENTITY;
	} else if(pixelFormat == PixelFormat::BGR) {
		colorType = PNG_COLOR_TYPE_RGB;
		transforms = PNG_TRANSFORM_BGR;
	} else if(pixelFormat == PixelFormat::MONO) {
		colorType = PNG_COLOR_TYPE_GRAY;
		transforms = PNG_TRANSFORM_IDENTITY;
	} else if(pixelFormat == PixelFormat::MONO_FLOAT) {
		Reference<Bitmap> tmp = BitmapUtils::convertBitmap(bitmap, PixelFormat::MONO);
		return saveBitmap(*tmp.get(), output);
	} else {
		WARN("Unable to save PNG file. Unsupported color type.");
		return false;
	}

	// Set up the necessary structures for libpng.
	png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, nullptr, nullptr, nullptr);
	if (!png_ptr) {
		return false;
	}

	png_infop info_ptr = png_create_info_struct(png_ptr);
	if (!info_ptr) {
		png_destroy_write_struct(&png_ptr, nullptr);
		return false;
	}
	if (setjmp(png_jmpbuf(png_ptr))) {
		png_destroy_write_struct(&png_ptr, &info_ptr);
		return false;
	}

	struct PNGFunctions {
			static void writeData(png_structp write_ptr, png_bytep data, png_size_t length) {
				std::ostream * out = reinterpret_cast<std::ostream *>(png_get_io_ptr(write_ptr));
				out->write(reinterpret_cast<const char*>(data), static_cast<std::streamsize>(length));
			}
			static void flushData(png_structp flush_ptr) {
				std::ostream * out = reinterpret_cast<std::ostream *>(png_get_io_ptr(flush_ptr));
				out->flush();
			}
	};

	png_set_write_fn(png_ptr, reinterpret_cast<png_voidp>(&output), PNGFunctions::writeData, PNGFunctions::flushData);

	const uint32_t width = bitmap.getWidth();
	const uint32_t height = bitmap.getHeight();

	png_set_IHDR(png_ptr, info_ptr, width, height, 8, colorType, PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);

	// Write the image.
	std::vector<png_bytep> row_pointers;
	row_pointers.reserve(height);
	const uint8_t bytes = pixelFormat.getBytesPerPixel();
	for (uint_fast32_t row = 0; row < height; ++row) {
		// Take over rows in the same order.
		row_pointers.push_back(reinterpret_cast<png_bytep>(const_cast<uint8_t *>(bitmap.data()) + row * width * bytes));
	}
	png_set_rows(png_ptr, info_ptr, row_pointers.data());

	png_write_png(png_ptr, info_ptr, transforms, nullptr);

	// Clean up.
	png_destroy_write_struct(&png_ptr, &info_ptr);

	return true;
}
Example #14
	int Surface::getFormat(State & state, SDL_Surface * surface){
		PixelFormat * interfacePixelFormat = state.getInterface<PixelFormat>("LuaSDL_PixelFormat");
		interfacePixelFormat->push(surface->format);
		return 1;
	}
Example #15
File: GLHelper.cpp Project: tapio/Wendy
GLenum convertToGL(const PixelFormat& format, bool sRGB)
{
  switch (format.type())
  {
    case PixelFormat::UINT8:
    {
      switch (format.semantic())
      {
        case PixelFormat::L:
        {
          if (sRGB)
            return GL_SLUMINANCE8;
          else
            return GL_LUMINANCE8;
        }

        case PixelFormat::LA:
        {
          if (sRGB)
            return GL_SLUMINANCE8_ALPHA8;
          else
            return GL_LUMINANCE8_ALPHA8;
        }

        case PixelFormat::RGB:
        {
          if (sRGB)
            return GL_SRGB8;
          else
            return GL_RGB8;
        }

        case PixelFormat::RGBA:
        {
          if (sRGB)
            return GL_SRGB8_ALPHA8;
          else
            return GL_RGBA8;
        }

        default:
          break;
      }

      break;
    }

    case PixelFormat::UINT16:
    {
      if (format.semantic() == PixelFormat::DEPTH)
        return GL_DEPTH_COMPONENT16;

      break;
    }

    case PixelFormat::UINT24:
    {
      if (format.semantic() == PixelFormat::DEPTH)
        return GL_DEPTH_COMPONENT24;

      break;
    }

    case PixelFormat::UINT32:
    {
      if (format.semantic() == PixelFormat::DEPTH)
        return GL_DEPTH_COMPONENT32;

      break;
    }

    case PixelFormat::FLOAT16:
    {
      switch (format.semantic())
      {
        case PixelFormat::L:
        {
          if (!GLEW_ARB_texture_float)
          {
            logError("Half-precision floating point textures not supported; "
                     "cannot convert pixel format");
            return 0;
          }

          return GL_LUMINANCE16F_ARB;
        }

        case PixelFormat::LA:
        {
          if (!GLEW_ARB_texture_float)
          {
            logError("Half-precision floating point textures not supported; "
                     "cannot convert pixel format");
            return 0;
          }

          return GL_LUMINANCE_ALPHA16F_ARB;
        }

        case PixelFormat::RGB:
          return GL_RGB16F;
        case PixelFormat::RGBA:
          return GL_RGBA16F;
        default:
          break;
      }

      break;
    }

    case PixelFormat::FLOAT32:
    {
      switch (format.semantic())
      {
        case PixelFormat::L:
        {
          if (!GLEW_ARB_texture_float)
          {
            logError("Floating point textures not supported; cannot convert pixel format");
            return 0;
          }

          return GL_LUMINANCE32F_ARB;
        }

        case PixelFormat::LA:
        {
          if (!GLEW_ARB_texture_float)
          {
            logError("Floating point textures not supported; cannot convert pixel format");
            return 0;
          }

          return GL_LUMINANCE_ALPHA32F_ARB;
        }

        case PixelFormat::RGB:
          return GL_RGB32F;
        case PixelFormat::RGBA:
          return GL_RGBA32F;
        default:
          break;
      }

      break;
    }

    default:
      break;
  }

  logError("No OpenGL equivalent for pixel format %s",
           format.asString().c_str());
  return 0;
}
Example #16
PixelFormat Renderbuffer::changePixelFormat(PixelFormat const& pf)
{
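    // Reallocate the colour and depth/stencil renderbuffer storage for the requested format and return the format actually applied.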
    PixelFormat ret = getPixelFormat();
    if (pf.isRenderable())
    {
        initStorage(getColorRenderbuffer(), pf.pfcolorcode(), pf.getSamplesCount(), size().width(), size().height());
        ret.copyColor(pf);
    }
    if (pf.matchesDepthStencil())
    {
        depthstencil = true;
        initStorage(getDepthStencilRenderbuffer(), pf.pfdepthstencilcode(), pf.getSamplesCount(), size().width(), size().height());
    }
    else
    {
        depthstencil = false;
        initStorage(getDepthRenderbuffer(), pf.pfdepthcode(), pf.getSamplesCount(), size().width(), size().height());
        initStorage(getStencilRenderbuffer(), pf.pfstencilcode(), pf.getSamplesCount(), size().width(), size().height());
    }

    ret.setSamplesCount(pf.getSamplesCount());
    ret.copyDepthStencil(pf);
    return ret;
}
Example #17
bool Image::read(Stream stream)
{
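    // Decode the image from the stream via FreeImage, derive a PixelFormat from its colour masks, and repack the pixels as tightly packed RGB(A) bytes.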
    BytesArray mem;

    mem = stream.process();
    if (!stream.ok())
        return false;

    FIMEMORY* fimem = FreeImage_OpenMemory(mem.raw(), mem.size());
    if (!fimem)
        return false;

    FREE_IMAGE_FORMAT fiformat = FreeImage_GetFileTypeFromMemory(fimem);
    FIBITMAP* fibit = FreeImage_LoadFromMemory(fiformat, fimem);
    FreeImage_CloseMemory(fimem);
    if (!fibit)
        return false;

    //FIBITMAP* fitmp = fibit;
    //fibit = FreeImage_ConvertTo32Bits(fitmp);
    //FreeImage_Unload(fitmp);

    uint64 red_mask = FreeImage_GetRedMask(fibit);
    uint64 green_mask = FreeImage_GetGreenMask(fibit);
    uint64 blue_mask = FreeImage_GetBlueMask(fibit);
    uint64 alpha_mask = 0;
    

    PixelFormat tmpformat;
    tmpformat.setDepthBits(0);
    tmpformat.setStencilBits(0);
    tmpformat.setColorBitsFromMasks(&red_mask, &green_mask, &blue_mask, &alpha_mask);

    if (FreeImage_GetBPP(fibit) != tmpformat.bitsPerPixel())
    {
        alpha_mask = ~(uint32)(red_mask | green_mask | blue_mask);
        tmpformat.setColorBitsFromMasks(&red_mask, &green_mask, &blue_mask, &alpha_mask);
    }

    format = TransferFormat(tmpformat.getRedBits(), tmpformat.getGreenBits(), tmpformat.getBlueBits(), tmpformat.getAlphaBits());

    MIRROR_ASSERT(FreeImage_GetBPP(fibit) == format.bitsPerPixel(), "Conversion from FreeImage bitmap to Mirror bitmap failed!\nBits-per-pixel conversion from "
        "FreeImage color masks probably returned bad bits-per-channel values for the red, green and blue channels, as"
        " stored inside the PixelFormat structure - see PixelFormat::setColorBitsFromMasks for the calculations!");

    my_size = { FreeImage_GetWidth(fibit), FreeImage_GetHeight(fibit) };
    uint32 req_bits = my_size.width()*my_size.height()*format.bitsPerPixel();
    pixels.resize(PixelFormat::bitsToFullBytes(req_bits));

    const uint32 pitch = FreeImage_GetPitch(fibit);
    const uint32 lineSize = FreeImage_GetLine(fibit);

    uint32 pixidx = 0;
    BYTE* line = FreeImage_GetBits(fibit);
    for (uint32 y = 0; y < my_size.height(); ++y, line += pitch)         
        for (BYTE* pixel = line; pixel < line + lineSize; pixel += format.bytesPerPixel())
        {
            pixels[pixidx++] = pixel[2]; //red
            pixels[pixidx++] = pixel[1]; //green
            pixels[pixidx++] = pixel[0]; //blue
            if (alpha_mask)
                pixels[pixidx++] = pixel[3]; //alpha
        }

    ////transfer pixels from FreeImage to our buffer as raw bits, should be in RGBA order thanks to our new color masks
    //FreeImage_ConvertToRawBits(pixels.raw(), fibit, format.bitsPerPixel()*my_size.width(), format.bitsPerPixel(), 0, 0, 0, TRUE);
    FreeImage_Unload(fibit);
    return true;
}
Example #18
Reference<Bitmap> StreamerPNG::loadBitmap(std::istream & input) {
	char header[8];
	input.read(header, 8);
	const int is_png = !png_sig_cmp(reinterpret_cast<png_byte *>(header), 0, 8);
	if(!is_png) {
		WARN("File is not a valid PNG image.");
		return nullptr;
	}

	// Set up the necessary structures for libpng.
	png_structp png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, nullptr, nullptr, nullptr);
	if(!png_ptr) {
		return nullptr;
	}

	png_infop info_ptr = png_create_info_struct(png_ptr);
	if(!info_ptr) {
		png_destroy_read_struct(&png_ptr, static_cast<png_infopp>(nullptr), static_cast<png_infopp>(nullptr));
		return nullptr;
	}

	if(setjmp(png_jmpbuf(png_ptr))) {
		png_destroy_read_struct(&png_ptr, &info_ptr, static_cast<png_infopp>(nullptr));
		return nullptr;
	}

	struct PNGFunctions {
			static void readData(png_structp read_ptr, png_bytep data, png_size_t length) {
				std::istream * in = reinterpret_cast<std::istream *>(png_get_io_ptr(read_ptr));
				if(in == nullptr || !in->good()) {
					png_error(read_ptr, "Error in input stream.");
				}
				in->read(reinterpret_cast<char *>(data), static_cast<std::streamsize>(length));
				if(in->gcount() != static_cast<std::streamsize>(length)) {
					png_error(read_ptr, "Requested amount of data could not be extracted from input stream");
				}
			}
	};

	png_set_read_fn(png_ptr, reinterpret_cast<png_voidp>(&input), PNGFunctions::readData);

	png_set_sig_bytes(png_ptr, 8);

	png_read_info(png_ptr, info_ptr);

	png_uint_32 width;
	png_uint_32 height;
	int bit_depth;
	int color_type;
	png_get_IHDR(	png_ptr, info_ptr,
					&width, &height,
					&bit_depth, &color_type, nullptr, nullptr, nullptr);

	PixelFormat pixelFormat = PixelFormat::RGB;
	switch(color_type) {
		case PNG_COLOR_TYPE_GRAY:
			// Convert bpp less than 8 to 8 bits.
			if(bit_depth < 8) {
				png_set_expand_gray_1_2_4_to_8(png_ptr);
			}
			pixelFormat = PixelFormat::MONO;
			break;
		case PNG_COLOR_TYPE_GRAY_ALPHA:
			// Convert grayscale with alpha to RGBA.
			png_set_expand(png_ptr);
			png_set_gray_to_rgb(png_ptr);
			pixelFormat = PixelFormat::RGBA;
			break;
		case PNG_COLOR_TYPE_PALETTE:
			// Convert color palette to RGB(A).
			png_set_expand(png_ptr);

			// Check if the color palette contains transparent colors.
#if PNG_LIBPNG_VER >= 10300
			if(png_get_valid(png_ptr, info_ptr, PNG_INFO_tRNS)){
				pixelFormat = PixelFormat::RGBA;
			}
#else
			if(info_ptr->valid & PNG_INFO_tRNS) {
				pixelFormat = PixelFormat::RGBA;
			}
#endif
			break;
		case PNG_COLOR_TYPE_RGB_ALPHA:
			pixelFormat = PixelFormat::RGBA;
			break;
		default:
			// Already set to RGB above.
			break;
	}
	// Convert 16 bpp to 8 bits.
	if (bit_depth == 16) {
		png_set_strip_16(png_ptr);
	}

	// Create the bitmap to store the data.
	Reference<Bitmap> bitmap = new Bitmap(width, height, pixelFormat);

	auto row_pointers = new png_bytep[height];
	const uint8_t bytes = pixelFormat.getBytesPerPixel();
	for (uint_fast32_t row = 0; row < height; ++row) {
		// Take over rows in the same order.
		row_pointers[row] = reinterpret_cast<png_bytep>(bitmap->data() + row * width * bytes);
	}

	// This function automatically handles interlacing.
	png_read_image(png_ptr, row_pointers);

	png_read_end(png_ptr, nullptr);
	// Release auxiliary image data before destroying the read structs; calling
	// png_free_data after png_destroy_read_struct would use already-freed pointers.
	png_free_data(png_ptr, info_ptr, PNG_FREE_ALL, -1);
	png_destroy_read_struct(&png_ptr, &info_ptr, static_cast<png_infopp>(nullptr));
	delete [] row_pointers;

	return bitmap;
}
Example #19
File: Pixel.cpp Project: elmindreda/Nori
std::string stringCast(PixelFormat format)
{
  return std::string(stringCast(format.semantic())) + stringCast(format.type());
}
Example #20
//
// ISDL12Window::setMode
//
// Sets the window size to the specified size and frees the existing primary
// surface before instantiating a new primary surface. This function performs
// no sanity checks on the desired video mode.
// 
// NOTE: If a hardware surface is obtained, or the surface's pitch would cause
// cache thrashing (detected by pitch & 511 == 0), an SDL software surface is
// created and used for drawing video frames. This software surface is then
// blitted to the screen at the end of the frame, prior to calling SDL_Flip.
//
bool ISDL12Window::setMode(uint16_t video_width, uint16_t video_height, uint8_t video_bpp,
							bool video_fullscreen, bool vsync)
{
	uint32_t flags = 0;

	if (vsync)
		flags |= SDL_HWSURFACE | SDL_DOUBLEBUF;
	else
		flags |= SDL_SWSURFACE;

	if (video_fullscreen)
		flags |= SDL_FULLSCREEN;
	else
		flags |= SDL_RESIZABLE;

	if (video_fullscreen && video_bpp == 8)
		flags |= SDL_HWPALETTE;

	// TODO: check for multicore
	flags |= SDL_ASYNCBLIT;

	//if (video_fullscreen)
	//	flags = ((flags & (~SDL_SWSURFACE)) | SDL_HWSURFACE);

	#ifdef SDL_GL_SWAP_CONTROL
	SDL_GL_SetAttribute(SDL_GL_SWAP_CONTROL, vsync);
	#endif

	// [SL] SDL_SetVideoMode reinitializes DirectInput if DirectX is being used.
	// This interferes with RawWin32Mouse's input handlers so we need to
	// disable them prior to reinitializing DirectInput...
	I_PauseMouse();

	SDL_Surface* sdl_surface = SDL_SetVideoMode(video_width, video_height, video_bpp, flags);
	if (sdl_surface == NULL)
	{
		I_FatalError("I_SetVideoMode: unable to set video mode %ux%ux%u (%s): %s\n",
				video_width, video_height, video_bpp, video_fullscreen ? "fullscreen" : "windowed",
				SDL_GetError());
		return false;
	}

	assert(sdl_surface == SDL_GetVideoSurface());

	// [SL] ...and re-enable RawWin32Mouse's input handlers after
	// DirectInput is reinitialized.
	I_ResumeMouse();

	PixelFormat format;
	I_BuildPixelFormatFromSDLSurface(sdl_surface, &format);

	// just in case SDL couldn't set the exact video mode we asked for...
	mWidth = sdl_surface->w;
	mHeight = sdl_surface->h;
	mBitsPerPixel = format.getBitsPerPixel();
	mIsFullScreen = (sdl_surface->flags & SDL_FULLSCREEN) == SDL_FULLSCREEN;
	mUseVSync = vsync;

	if (SDL_MUSTLOCK(sdl_surface))
		SDL_LockSurface(sdl_surface);		// lock prior to accessing pixel format

	delete mSurfaceManager;

	bool got_hardware_surface = (sdl_surface->flags & SDL_HWSURFACE) == SDL_HWSURFACE;

	bool create_software_surface = 
					(sdl_surface->pitch & 511) == 0 ||	// pitch is a multiple of 512 (thrashes the cache)
					got_hardware_surface;				// drawing directly to hardware surfaces is slower

	if (create_software_surface)
		mSurfaceManager = new ISDL12SoftwareWindowSurfaceManager(mWidth, mHeight, &format);
	else
		mSurfaceManager = new ISDL12DirectWindowSurfaceManager(mWidth, mHeight, &format);

	assert(mSurfaceManager != NULL);
	assert(getPrimarySurface() != NULL);

	if (SDL_MUSTLOCK(sdl_surface))
		SDL_UnlockSurface(sdl_surface);

	mVideoMode = IVideoMode(mWidth, mHeight, mBitsPerPixel, mIsFullScreen);

	assert(mWidth >= 0 && mWidth <= MAXWIDTH);
	assert(mHeight >= 0 && mHeight <= MAXHEIGHT);
	assert(mBitsPerPixel == 8 || mBitsPerPixel == 32);

	// Tell argb_t the pixel format
	if (format.getBitsPerPixel() == 32)
		argb_t::setChannels(format.getAPos(), format.getRPos(), format.getGPos(), format.getBPos());
	else
		argb_t::setChannels(3, 2, 1, 0);

	// [SL] SDL can create SDL_VIDEORESIZE events in response to SDL_SetVideoMode
	// and we need to filter those out.
	mIgnoreResize = true;

	return true;
}