////////////////////////////////////////////////////////////////////// // readback // ////////////////////////////////////////////////////////////////////// bool CheckBackBuffer::readback( GLuint width, GLuint height ) { bool ret = false; if (m_bUsePBO) { // binds the PBO for readback bindReadback(); // Initiate the readback BLT from BackBuffer->PBO->membuf glReadPixels(0, 0, width, height, getPixelFormat(), GL_UNSIGNED_BYTE, BUFFER_OFFSET(0)); ret = checkStatus(__FILE__, __LINE__, true); if (!ret) printf("CheckBackBuffer::glReadPixels() checkStatus = %d\n", ret); // map - unmap simulates readback without the copy void *ioMem = glMapBufferARB(GL_PIXEL_PACK_BUFFER_ARB, GL_READ_ONLY_ARB); memcpy(m_pImageData, ioMem, width*height*m_Bpp); glUnmapBufferARB(GL_PIXEL_PACK_BUFFER_ARB); // release the PBO unbindReadback(); } else { // reading direct from the backbuffer glReadBuffer(GL_FRONT); glReadPixels(0, 0, width, height, getPixelFormat(), GL_UNSIGNED_BYTE, m_pImageData); } return ret; }
void VideoReader::init() { // analyse InputFile avtranscoder::NoDisplayProgress p; _inputFile->analyse( p ); _streamProperties = &_inputFile->getProperties().getStreamPropertiesWithIndex(_streamIndex); _videoStreamProperties = static_cast<const VideoProperties*>(_streamProperties); _inputFile->activateStream( _streamIndex ); // setup decoder _decoder = new VideoDecoder( _inputFile->getStream( _streamIndex ) ); _decoder->setupDecoder(); // create src frame _srcFrame = new VideoFrame( _inputFile->getStream( _streamIndex ).getVideoCodec().getVideoFrameDesc() ); VideoFrame* srcFrame = static_cast<VideoFrame*>(_srcFrame); // create dst frame if( _width == 0 ) _width = srcFrame->desc().getWidth(); if( _height == 0 ) _height = srcFrame->desc().getHeight(); VideoFrameDesc videoFrameDescToDisplay( _width, _height, getPixelFormat() ); _dstFrame = new VideoFrame( videoFrameDescToDisplay ); // create transform _transform = new VideoTransform(); }
FrameAvailableCode SyncVideoDecoder::getRenderedBmps(vector<BitmapPtr>& pBmps, float timeWanted)
{
    AVG_ASSERT(getState() == DECODING);
    ScopeTimer timer(RenderToBmpProfilingZone);

    // Decode either the next frame unconditionally (timeWanted == -1) or the
    // frame matching the requested time.
    FrameAvailableCode frameAvailable;
    if (timeWanted != -1) {
        frameAvailable = readFrameForTime(m_pFrame, timeWanted);
    } else {
        readFrame(m_pFrame);
        frameAvailable = FA_NEW_FRAME;
    }

    // Nothing new to render: keep displaying the previous frame.
    if (frameAvailable == FA_USE_LAST_FRAME || isEOF()) {
        return FA_USE_LAST_FRAME;
    }

    allocFrameBmps(pBmps);
    if (!pixelFormatIsPlanar(getPixelFormat())) {
        m_pFrameDecoder->convertFrameToBmp(m_pFrame, pBmps[0]);
    } else {
        // Planar formats: copy each plane into its own bitmap.
        ScopeTimer copyTimer(CopyImageProfilingZone);
        for (unsigned planeIdx = 0; planeIdx < pBmps.size(); ++planeIdx) {
            m_pFrameDecoder->copyPlaneToBmp(pBmps[planeIdx], m_pFrame->data[planeIdx],
                    m_pFrame->linesize[planeIdx]);
        }
    }
    return FA_NEW_FRAME;
}
const Graphics::Surface *QuickTimeDecoder::decodeNextFrame() {
    if (!_nextVideoTrack)
        return 0;

    const Graphics::Surface *frame = _nextVideoTrack->decodeNextFrame();

    // Record the wall-clock start time on the first decoded frame.
    if (!_setStartTime) {
        _setStartTime = true;
        _startTime = g_system->getMillis();
    }

    _nextVideoTrack = findNextVideoTrack();
    _needUpdate = false;

    // Update audio buffers too (needs to be done after we find the next track).
    updateAudioBuffer();

    const bool needsScaling = (_scaleFactorX != 1 || _scaleFactorY != 1);
    if (!frame || !needsScaling)
        return frame;

    // Lazily create the scaled surface, then scale the frame into it.
    if (!_scaledSurface) {
        _scaledSurface = new Graphics::Surface();
        _scaledSurface->create(_width, _height, getPixelFormat());
    }
    scaleSurface(frame, _scaledSurface, _scaleFactorX, _scaleFactorY);
    return _scaledSurface;
}
void ofxEmscriptenVideoGrabber::update(){ if(html5video_grabber_update(id,usePixels,pixels.getPixels())){ texture.texData.width = html5video_grabber_width(id); texture.texData.height = html5video_grabber_height(id); texture.texData.tex_w = texture.texData.width; texture.texData.tex_h = texture.texData.height; if(texture.texData.textureID!=html5video_grabber_texture_id(id)){ texture.texData.textureID = html5video_grabber_texture_id(id); texture.texData.bUseExternalTextureID = true; texture.texData.bFlipTexture = false; switch(getPixelFormat()){ case OF_PIXELS_RGBA: texture.texData.glTypeInternal = GL_RGBA; break; case OF_PIXELS_RGB: texture.texData.glTypeInternal = GL_RGB; break; case OF_PIXELS_MONO: texture.texData.glTypeInternal = GL_LUMINANCE; break; default: ofLogError() << "unknown pixel format, can't allocating texture"; break; } texture.texData.tex_u = 1; texture.texData.tex_t = 1; texture.texData.textureTarget = GL_TEXTURE_2D; texture.texData.bAllocated = true; } } }
void QuickTimeDecoder::init() {
	// Runs the shared QuickTime audio initialization, locates the first video
	// track, starts audio if available, and initializes the video codecs
	// (plus an optional scaled surface when a scale factor is in effect).
	Audio::QuickTimeAudioDecoder::init();

	_videoTrackIndex = -1;
	_startTime = 0;

	// Find video streams (only the first video track is used)
	for (uint32 i = 0; i < _tracks.size(); i++)
		if (_tracks[i]->codecType == CODEC_TYPE_VIDEO && _videoTrackIndex < 0)
			_videoTrackIndex = i;

	// Start the audio codec if we've got one that we can handle
	if (_audStream) {
		startAudio();
		_audioStartOffset = Audio::Timestamp(0);
	}

	// Initialize video, if present
	if (_videoTrackIndex >= 0) {
		// Initialize a codec for every sample description of the track
		for (uint32 i = 0; i < _tracks[_videoTrackIndex]->sampleDescs.size(); i++)
			((VideoSampleDesc *)_tracks[_videoTrackIndex]->sampleDescs[i])->initCodec();

		if (getScaleFactorX() != 1 || getScaleFactorY() != 1) {
			// We have to initialize the scaled surface
			_scaledSurface = new Graphics::Surface();
			_scaledSurface->create(getWidth(), getHeight(), getPixelFormat());
		}
	}
}
void ofxEmscriptenVideoPlayer::update(){ gotFirstFrame = pixels.isAllocated(); if(html5video_player_update(id,pixels.isAllocated() && usePixels,pixels.getPixels())){ if(texture.texData.width!=html5video_player_width(id) || texture.texData.height!=html5video_player_height(id)){ texture.texData.width = html5video_player_width(id); texture.texData.height = html5video_player_height(id); texture.texData.tex_w = texture.texData.width; texture.texData.tex_h = texture.texData.height; switch(getPixelFormat()){ case OF_PIXELS_RGBA: pixels.allocate(texture.texData.width,texture.texData.height,4); break; case OF_PIXELS_RGB: pixels.allocate(texture.texData.width,texture.texData.height,3); break; case OF_PIXELS_MONO: pixels.allocate(texture.texData.width,texture.texData.height,1); break; default: ofLogError() << "unknown pixel format, can't allocating texture"; break; } } if(texture.texData.textureID!=html5video_player_texture_id(id)){ texture.texData.bFlipTexture = false; switch(getPixelFormat()){ case OF_PIXELS_RGBA: texture.texData.glInternalFormat = GL_RGBA; break; case OF_PIXELS_RGB: texture.texData.glInternalFormat = GL_RGB; break; case OF_PIXELS_MONO: texture.texData.glInternalFormat = GL_LUMINANCE; break; default: ofLogError() << "unknown pixel format, can't allocating texture"; break; } texture.texData.tex_u = 1; texture.texData.tex_t = 1; texture.texData.textureTarget = GL_TEXTURE_2D; texture.texData.bAllocated = true; texture.setUseExternalTextureID(html5video_player_texture_id(id)); } } }
////////////////////////////////////////////////////////////////////// // readback // // Code to handle reading back of the FBO data // ////////////////////////////////////////////////////////////////////// bool CheckFBO::readback( GLuint width, GLuint height ) { bool ret = false; if (m_bUsePBO) { // binds the PBO for readback bindReadback(); // bind FBO buffer (we want to transfer FBO -> PBO) glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, m_pFrameBufferObject->getFbo()); // Now initiate the readback to PBO glReadPixels(0, 0, width, height, getPixelFormat(), GL_UNSIGNED_BYTE, BUFFER_OFFSET(0)); ret = checkStatus(__FILE__, __LINE__, true); if (!ret) printf("CheckFBO::readback() FBO->PBO checkStatus = %d\n", ret); // map - unmap simulates readback without the copy void *ioMem = glMapBufferARB(GL_PIXEL_PACK_BUFFER_ARB, GL_READ_ONLY_ARB); memcpy(m_pImageData, ioMem, width*height*m_Bpp); glUnmapBufferARB(GL_PIXEL_PACK_BUFFER_ARB); // release the FBO glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0); // release the PBO unbindReadback(); } else { // Reading back from FBO using glReadPixels glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, m_pFrameBufferObject->getFbo()); ret = checkStatus(__FILE__, __LINE__, true); if (!ret) printf("CheckFBO::readback::glBindFramebufferEXT() checkStatus = %d\n", ret); glReadBuffer(static_cast<GLenum>(GL_COLOR_ATTACHMENT0_EXT)); ret &= checkStatus(__FILE__, __LINE__, true); if (!ret) printf("CheckFBO::readback::glReadBuffer() checkStatus = %d\n", ret); glReadPixels(0, 0, width, height, getPixelFormat(), GL_UNSIGNED_BYTE, m_pImageData); glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0); } return CHECK_FBO; }
// Creates the render-to-texture resources for one Rtt slot: a movable clip
// plane (attached to the shared scene node), a manual render texture sized to
// the configured or viewport dimensions, and a viewport configured from the
// remaining arguments. Any previous resources for the slot are removed first.
void RttManager::_initializeRtt(const RttType& Rtt, const Ogre::Vector3& PlaneNormal,
                                const Ogre::ColourValue& BackgroundColour, const bool& RenderSky,
                                Ogre::RenderTargetListener* RTListener, const Ogre::String& MaterialScheme,
                                const bool& ShadowsEnabled)
{
    remove(Rtt);

    // Clip plane through the origin with the requested orientation.
    mPlanes[Rtt] = new Ogre::MovablePlane(mRttOptions[Rtt].Name + "Plane");
    mPlanes[Rtt]->d = 0;
    mPlanes[Rtt]->normal = PlaneNormal;

    Ogre::MeshManager::getSingleton().
        createPlane(mRttOptions[Rtt].Name + "ClipPlane",
                    Ogre::ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME,
                    *mPlanes[Rtt],
                    mHydrax->getMesh()->getSize().Width,mHydrax->getMesh()->getSize().Height, // PG grid.. <-- look into
                    10,10, true, 1,5,5, Ogre::Vector3::UNIT_Z);

    mPlanes[Rtt]->setCastShadows(false);
    mPlanesSceneNode->attachObject(mPlanes[Rtt]);

    // A zero-sized option means "match the main viewport".
    Size TSize = mRttOptions[Rtt].Size_;
    if (TSize.Width == 0 || TSize.Height == 0)
    {
        TSize.Width = mHydrax->getViewport()->getActualWidth();
        TSize.Height = mHydrax->getViewport()->getActualHeight();
    }

    mTextures[Rtt] = Ogre::TextureManager::getSingleton()
        .createManual(mRttOptions[Rtt].Name,
                      Ogre::ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME,
                      Ogre::TEX_TYPE_2D,
                      TSize.Width,
                      TSize.Height,
                      0,
                      getPixelFormat(Rtt),
                      Ogre::TU_RENDERTARGET);

    // Configure the render target's viewport from the caller's options.
    Ogre::RenderTarget* RT_Texture = mTextures[Rtt]->getBuffer()->getRenderTarget();

    Ogre::Viewport *RT_Texture_Viewport = RT_Texture->addViewport(mHydrax->getCamera());
    RT_Texture_Viewport->setClearEveryFrame(true);
    RT_Texture_Viewport->setBackgroundColour(BackgroundColour);
    RT_Texture_Viewport->setOverlaysEnabled(false);
    RT_Texture_Viewport->setShadowsEnabled(ShadowsEnabled);
    if (MaterialScheme != "")
    {
        RT_Texture_Viewport->setMaterialScheme(MaterialScheme);
    }
    RT_Texture_Viewport->setSkiesEnabled(RenderSky);

    RT_Texture->addListener(RTListener);
}
void VideoFrame::assign(const unsigned char* ptrValue)
{
    // Point the underlying AVPicture planes at the caller-supplied buffer.
    const int fillStatus = avpicture_fill(reinterpret_cast<AVPicture*>(_frame), ptrValue,
            getPixelFormat(), getWidth(), getHeight());
    if(fillStatus >= 0)
        return;

    // Build a descriptive error from the libav status and bail out.
    std::stringstream os;
    os << "Unable to assign an image buffer of " << getSize() << " bytes: "
       << getDescriptionFromErrorCode(fillStatus);
    throw std::runtime_error(os.str());
}
void QuickTimeDecoder::init() {
	// Runs the shared QuickTime audio init, creates per-track handlers for
	// audio and video tracks, prepares the first video track (including an
	// optional scaled surface), and finally starts audio playback.
	Audio::QuickTimeAudioDecoder::init();

	_startTime = 0;
	_setStartTime = false;

	// Initialize all the audio tracks
	if (!_audioTracks.empty()) {
		_audioHandles.resize(_audioTracks.size());

		for (uint32 i = 0; i < _audioTracks.size(); i++)
			_handlers.push_back(new AudioTrackHandler(this, _audioTracks[i]));
	}

	// Initialize all the video tracks (one codec per sample description)
	for (uint32 i = 0; i < _tracks.size(); i++) {
		if (_tracks[i]->codecType == CODEC_TYPE_VIDEO) {
			for (uint32 j = 0; j < _tracks[i]->sampleDescs.size(); j++)
				((VideoSampleDesc *)_tracks[i]->sampleDescs[j])->initCodec();

			_handlers.push_back(new VideoTrackHandler(this, _tracks[i]));
		}
	}

	// Prepare the first video track
	_nextVideoTrack = findNextVideoTrack();

	if (_nextVideoTrack) {
		// Initialize the scaled surface; the decoder's reported dimensions
		// come from the scaled surface when a scale factor is active.
		if (_scaleFactorX != 1 || _scaleFactorY != 1) {
			// We have to initialize the scaled surface
			_scaledSurface = new Graphics::Surface();
			_scaledSurface->create((_nextVideoTrack->getWidth() / _scaleFactorX).toInt(),
					(_nextVideoTrack->getHeight() / _scaleFactorY).toInt(), getPixelFormat());
			_width = _scaledSurface->w;
			_height = _scaledSurface->h;
		} else {
			_width = _nextVideoTrack->getWidth().toInt();
			_height = _nextVideoTrack->getHeight().toInt();
		}

		_needUpdate = true;
	} else {
		_needUpdate = false;
	}

	// Now start any audio
	if (!_audioTracks.empty()) {
		startAudio();
		_audioStartOffset = Audio::Timestamp(0);
	}
}
QuickTimeDecoder::VideoTrackHandler::VideoTrackHandler(QuickTimeDecoder *decoder, Common::QuickTimeParser::Track *parent) : TrackHandler(decoder, parent) {
	// Only tracks with a non-unity scale factor need a scaled surface.
	const bool needsScaling = (_parent->scaleFactorX != 1 || _parent->scaleFactorY != 1);
	_scaledSurface = 0;
	if (needsScaling) {
		_scaledSurface = new Graphics::Surface();
		_scaledSurface->create(getWidth().toInt(), getHeight().toInt(), getPixelFormat());
	}

	enterNewEditList(false);

	// Reset per-track playback bookkeeping.
	_holdNextFrameStartTime = false;
	_curFrame = -1;
	_durationOverride = -1;
}
void SurfaceSDL::createPalette() {
	ASSERT_LOG(surface_ != nullptr, "No internal surface for createPalette.");
	ASSERT_LOG(surface_->format != nullptr, "No internal format field.");
	auto sdl_palette = surface_->format->palette;
	if(sdl_palette == nullptr) {
		// Non-palettized surface: nothing to copy.
		return;
	}

	// Mirror every SDL palette entry into our own Color palette.
	palette_.resize(sdl_palette->ncolors);
	for(int idx = 0; idx != sdl_palette->ncolors; ++idx) {
		const auto& entry = sdl_palette->colors[idx];
		palette_[idx] = Color(entry.r, entry.g, entry.b, entry.a);
	}

	// Propagate the palette onto our pixel-format wrapper.
	auto pf = std::dynamic_pointer_cast<SDLPixelFormat>(getPixelFormat());
	ASSERT_LOG(pf != nullptr, "Couldn't cast pixelformat -- this is an error.");
	SDL_SetPixelFormatPalette(pf->get(), surface_->format->palette);
}
bool ofPixels_<PixelType>::resize(int dstWidth, int dstHeight, ofInterpolationMethod interpMethod){
	// Resizes this pixel buffer in place to dstWidth x dstHeight using the
	// given interpolation method. Returns false for invalid dimensions, an
	// unallocated buffer, or a failed resizeTo().
	if ((dstWidth<=0) || (dstHeight<=0) || !(isAllocated())) return false;

	// Render the resized image into a temporary buffer first.
	ofPixels_<PixelType> dstPixels;
	dstPixels.allocate(dstWidth, dstHeight, getPixelFormat());

	if(!resizeTo(dstPixels,interpMethod)) return false;

	// Steal dstPixels' buffer: free our old one, take over the new pointer,
	// and clear dstPixels' owner flag so its destructor won't free it.
	delete [] pixels;
	pixels = dstPixels.getData();
	width  = dstWidth;
	height = dstHeight;
	pixelsSize = dstPixels.size();
	dstPixels.pixelsOwner = false;
	return true;
}
void ofPixels_<PixelType>::mirrorTo(ofPixels_<PixelType> & dst, bool vertically, bool horizontal) const{
	// Writes a mirrored copy of this buffer into dst. `vertically` flips the
	// rows top/bottom; `horizontal` flips the columns left/right.
	if(&dst == this){
		dst.mirror(vertically,horizontal);
		return;
	}

	if (!vertically && !horizontal){
		dst = *this;
		return;
	}

	int bytesPerPixel = getNumChannels();
	dst.allocate(width, height, getPixelFormat());

	if(vertically && !horizontal){
		// Copy whole rows in reverse order.
		auto dstLines = dst.getLines();
		auto lineSrc = getConstLines().begin();
		auto line = --dstLines.end();
		auto stride = line.getStride();

		for(; line>=dstLines.begin(); --line, ++lineSrc){
			memcpy(line.begin(), lineSrc.begin(), stride);
		}
	}else if (!vertically && horizontal){
		int wToDo = width/2;
		int hToDo = height;
		for (int i = 0; i < wToDo; i++){
			for (int j = 0; j < hToDo; j++){
				// BUGFIX: pixelb was previously just `i`, ignoring both the
				// row (j) and the mirroring itself, so the horizontal flip
				// copied the wrong pixels for every row. The mirror partner
				// of column i in row j is column (width - i - 1) of row j.
				int pixelb = j*width + (width - i - 1);
				int pixela = j*width + i;
				for (int k = 0; k < bytesPerPixel; k++){
					dst[pixela*bytesPerPixel + k] = pixels[pixelb*bytesPerPixel + k];
					dst[pixelb*bytesPerPixel + k] = pixels[pixela*bytesPerPixel + k];
				}
			}
		}
		// BUGFIX: for odd widths the centre column has no mirror partner and
		// was previously left unwritten in dst; copy it straight across.
		if (width % 2 == 1){
			int c = width/2;
			for (int j = 0; j < hToDo; j++){
				for (int k = 0; k < bytesPerPixel; k++){
					dst[(j*width + c)*bytesPerPixel + k] = pixels[(j*width + c)*bytesPerPixel + k];
				}
			}
		}
	} else {
		// I couldn't think of a good way to do this in place. I'm sure there is.
		mirrorTo(dst,true, false);
		dst.mirror(false, true);
	}
}
const Graphics::Surface *QuickTimeDecoder::decodeNextFrame() {
	const Graphics::Surface *frame = VideoDecoder::decodeNextFrame();

	// Update audio buffers too (needs to be done after we find the next track).
	updateAudioBuffer();

	const bool needsScaling = (_scaleFactorX != 1 || _scaleFactorY != 1);
	if (!frame || !needsScaling)
		return frame;

	// Lazily create the scaled surface, then scale the frame into it.
	if (!_scaledSurface) {
		_scaledSurface = new Graphics::Surface();
		_scaledSurface->create(_width, _height, getPixelFormat());
	}
	scaleSurface(frame, _scaledSurface, _scaleFactorX, _scaleFactorY);
	return _scaledSurface;
}
// Creates (and optionally fills) a D3D9 texture. `fmt` is translated through
// getPixelFormat(); `c` is the source channel count used when copying the
// `pixel` data into mip level 0. Returns 0 on creation failure.
IDirect3DTexture9 * TextureCache::_addTex( uint fmt, uint w, uint h, uint c, const unsigned char * pixel, uint _usage )
{
    uint usage = _usage;
    uint pool = D3DPOOL_MANAGED;

    if ( usage & D3DUSAGE_DYNAMIC ) // Dynamic textures can only live in D3DPOOL_DEFAULT.
    {
        pool = D3DPOOL_DEFAULT;
    }

    if ( usage & D3DUSAGE_RENDERTARGET ) // The resource will be a render target. D3DUSAGE_RENDERTARGET can only be used with D3DPOOL_DEFAULT.
    {
        pool = D3DPOOL_DEFAULT;
    }
    else // not a render target: have D3D generate mipmaps automatically
    {
        usage |= D3DUSAGE_AUTOGENMIPMAP;
    }

    //printf( "%s %s %x\n", __FUNCTION__, ("nil"), fmt);

    IDirect3DTexture9 * tx = createTexture( w, h, getPixelFormat(fmt), usage, pool );
    if ( ! tx )
    {
        printf( __FUNCTION__ " ERROR : could not create tex \n" );
        return 0;
    }

    if ( pixel )
    {
        // Upload the initial pixel data into the top-level surface.
        IDirect3DSurface9 *surf = 0;
        HRESULT hr = tx->GetSurfaceLevel( 0, & surf );
        E_TRACE(hr);
        // NOTE(review): the copy result is captured but never checked — a
        // failed upload is silently ignored; confirm that is acceptable.
        uint r = SurfaceHelper::copyToSurface( w, h, c, pixel, surf );
        COM_RELEASE( surf );
    }

    return tx;
}
// Decodes the track's next frame, advances the frame-start clock (honouring
// seek holds and edit-list duration overrides), moves to the next edit entry
// when the current one is exhausted, and returns either the raw frame or a
// lazily-created scaled surface when a scale factor is active.
const Graphics::Surface *QuickTimeDecoder::VideoTrackHandler::decodeNextFrame() {
	if (endOfTrack())
		return 0;

	const Graphics::Surface *frame = bufferNextFrame();

	if (_holdNextFrameStartTime) {
		// Don't set the next frame start time here; we just did a seek
		_holdNextFrameStartTime = false;
	} else if (_durationOverride >= 0) {
		// Use our own duration from the edit list calculation
		_nextFrameStartTime += _durationOverride;
		_durationOverride = -1;
	} else {
		_nextFrameStartTime += getFrameDuration();
	}

	// Update the edit list, if applicable
	// HACK: We're also accepting the time minus one because edit lists
	// aren't as accurate as one would hope.
	if (!endOfTrack() && getRateAdjustedFrameTime() >= getCurEditTimeOffset() + getCurEditTrackDuration() - 1) {
		_curEdit++;

		if (!endOfTrack())
			enterNewEditList(true);
	}

	if (frame && (_parent->scaleFactorX != 1 || _parent->scaleFactorY != 1)) {
		// Lazily create the scaled surface, then scale the frame into it.
		if (!_scaledSurface) {
			_scaledSurface = new Graphics::Surface();
			_scaledSurface->create(getWidth().toInt(), getHeight().toInt(), getPixelFormat());
		}

		_decoder->scaleSurface(frame, _scaledSurface, _parent->scaleFactorX, _parent->scaleFactorY);
		return _scaledSurface;
	}

	return frame;
}
void CameraNode::open() { m_pCamera->startCapture(); setViewport(-32767, -32767, -32767, -32767); PixelFormat pf = getPixelFormat(); IntPoint size = getMediaSize(); bool bMipmap = getMaterial().getUseMipmaps(); m_pTex = GLTexturePtr(new GLTexture(size, pf, bMipmap)); m_pTex->enableStreaming(); getSurface()->create(pf, m_pTex); BitmapPtr pBmp = m_pTex->lockStreamingBmp(); if (pf == B8G8R8X8 || pf == B8G8R8A8) { FilterFill<Pixel32> Filter(Pixel32(0,0,0,255)); Filter.applyInPlace(pBmp); } else if (pf == I8) { FilterFill<Pixel8> Filter(0); Filter.applyInPlace(pBmp); } m_pTex->unlockStreamingBmp(true); setupFX(true); }
void Format::printAll() { // All format information... printf( "Image size info:\n" \ " width:\t\t %d\n" \ " height:\t\t %d\n" \ " bytes per line:\t %d\n" \ " image buffer size:\t %d\n" \ "\nImage format info:\n" \ " pixel format:\t %s\n" \ " colorspace:\t\t %s\n" \ " field:\t\t %s\n", getWidth(), getHeight(), getBytesPerLine(), getImageSize(), getPixelFormat(), getColorspace(), getField() ); switch(state) { case FORMAT_IS_DIRTY: printf("\nFormat state has not yet been applied.\n"); break; case FORMAT_IS_NULL: printf("\nFormat has not yet been fetched.\n"); break; case FORMAT_FAILED_UPDATE: printf("\nFormat state update failed to apply cleanly.\n"); break; case FORMAT_IS_CLEAN: default: break; } }
bool ofxEmscriptenVideoGrabber::initGrabber(int w, int h){
	// Initializes the HTML5 grabber and allocates the pixel buffer with a
	// channel count matching the current pixel format. Returns false when no
	// grabber id is available.
	if(id==-1){
		return false;
	}
	html5video_grabber_init(id,w,h,desiredFramerate);
	switch(getPixelFormat()){
	case OF_PIXELS_RGBA:
		pixels.allocate(w,h,4);
		break;
	case OF_PIXELS_RGB:
		pixels.allocate(w,h,3);
		break;
	case OF_PIXELS_MONO:
		pixels.allocate(w,h,1);
		break;
	default:
		// BUGFIX: message previously read "can't allocating texture".
		ofLogError() << "unknown pixel format, can't allocate texture";
		break;
	}
	pixels.set(0);
	return true;
}
// Builds (or rebuilds) the OpenGL texture for this sprite from its backing
// BasicImage, uploading the pixels via gluBuild2DMipmaps.
void SpriteGL::buildGLTexture()
{
	// Do not continue if the image isn't loaded
	if(!getBasicImage())
		return;

	destroyGLTexture();
	m_engineCreationTimestamp = 0;

	if(!g_running)
		return;

	//m_multiplierx = nextpow(this->getWidth()) / this->getWidth();
	//m_multipliery = nextpow(this->getHeight()) / this->getHeight();
	//Stretch(nextpow(this->getWidth()), nextpow(this->getHeight()), true);
	//printf("Stretch %p\n", this);
	printf(".");
	m_multiplierx = 1.;
	m_multipliery = 1.;

	// NOTE(review): these channel masks are computed but never used below —
	// presumably left over from an SDL_CreateRGBSurface path; confirm.
#if SDL_BYTEORDER == SDL_BIG_ENDIAN
	uint32_t rmask = 0xff000000;
	uint32_t gmask = 0x00ff0000;
	uint32_t bmask = 0x0000ff00;
	uint32_t amask = 0x000000ff;
#else
	uint32_t rmask = 0x000000ff;
	uint32_t gmask = 0x0000ff00;
	uint32_t bmask = 0x00ff0000;
	uint32_t amask = 0xff000000;
#endif

	glEnable(GL_TEXTURE_2D);
	glGenTextures(1, &m_texture);

	/*
	std::stringstream sbmp;
	sbmp << "gl_debug_"<<m_texture<<".bmp";
	std::string fout = yatc_findfile(sbmp.str().c_str(),"w");
	SDL_SaveBMP(sfc,fout.c_str());
	*/

	char* pixels = getBasicImage()->lockSelf();

	glBindTexture(GL_TEXTURE_2D, m_texture);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); //GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
	// Upload with mipmaps; falls back to GL_BGRA (after logging a warning)
	// when the pixel format is unknown.
	GLint ret = gluBuild2DMipmaps(GL_TEXTURE_2D, GL_RGBA, getBasicImage()->getW(), getBasicImage()->getH(),
			(getPixelFormat() == GL_NONE ?
				(DEBUGPRINT(DEBUGPRINT_WARNING, DEBUGPRINT_LEVEL_OBLIGATORY, "[SpriteGL::SpriteGL] Invalid pixelformat\n"), GL_BGRA)
				: getPixelFormat()),
			GL_UNSIGNED_BYTE, pixels);
	if(ret != 0){
		DEBUGPRINT(DEBUGPRINT_ERROR, DEBUGPRINT_LEVEL_OBLIGATORY, "Error [SpriteGL::SpriteGL] Cant build 2DMipmaps: %s\n", gluErrorString(ret));
	}
	glDisable(GL_TEXTURE_2D);

	getBasicImage()->unlockSelf();

	m_engineCreationTimestamp = g_engine->m_creationTimestamp;
}
int Image::getRowStrideSize(int pixels_per_row) const
{
    // Returns the row stride in bytes for a row of `pixels_per_row` pixels
    // in this image's pixel format.
    // BUGFIX: the parameter was previously ignored and m_width used instead,
    // so the function only ever answered for the image's own width.
    return calculate_rowstride_bytes(getPixelFormat(), pixels_per_row);
}
// Creates the FBO backing this render context from the requested config:
// a color renderbuffer (possibly multisampled), an optional combined
// depth/stencil renderbuffer, and the framebuffer that ties them together.
// Throws tcu::NotSupportedError when the config cannot be satisfied.
void FboRenderContext::createFramebuffer (const RenderConfig& config)
{
	DE_ASSERT(m_framebuffer == 0 && m_colorBuffer == 0 && m_depthStencilBuffer == 0);

	const glw::Functions&	gl					= m_context->getFunctions();
	const deUint32			colorFormat			= chooseColorFormat(config);
	const deUint32			depthStencilFormat	= chooseDepthStencilFormat(config);
	int						width				= config.width;
	int						height				= config.height;
	tcu::PixelFormat		pixelFormat;
	int						depthBits			= 0;
	int						stencilBits			= 0;

	if (config.numSamples > 0 && !gl.renderbufferStorageMultisample)
		throw tcu::NotSupportedError("Multisample FBO is not supported");

	if (colorFormat == 0)
		throw tcu::NotSupportedError("Unsupported color attachment format");

	// DONT_CARE dimensions are resolved to the implementation maximum.
	if (width == glu::RenderConfig::DONT_CARE || height == glu::RenderConfig::DONT_CARE)
	{
		int maxSize = 0;
		gl.getIntegerv(GL_MAX_RENDERBUFFER_SIZE, &maxSize);

		width	= (width == glu::RenderConfig::DONT_CARE) ? maxSize : width;
		height	= (height == glu::RenderConfig::DONT_CARE) ? maxSize : height;
	}

	{
		// Color renderbuffer.
		pixelFormat = getPixelFormat(colorFormat);

		gl.genRenderbuffers(1, &m_colorBuffer);
		gl.bindRenderbuffer(GL_RENDERBUFFER, m_colorBuffer);

		if (config.numSamples > 0)
			gl.renderbufferStorageMultisample(GL_RENDERBUFFER, config.numSamples, colorFormat, width, height);
		else
			gl.renderbufferStorage(GL_RENDERBUFFER, colorFormat, width, height);

		gl.bindRenderbuffer(GL_RENDERBUFFER, 0);
		GLU_EXPECT_NO_ERROR(gl.getError(), "Creating color renderbuffer");
	}

	if (depthStencilFormat != GL_NONE)
	{
		// Combined depth/stencil renderbuffer.
		getDepthStencilBits(depthStencilFormat, &depthBits, &stencilBits);

		gl.genRenderbuffers(1, &m_depthStencilBuffer);
		gl.bindRenderbuffer(GL_RENDERBUFFER, m_depthStencilBuffer);

		if (config.numSamples > 0)
			gl.renderbufferStorageMultisample(GL_RENDERBUFFER, config.numSamples, depthStencilFormat, width, height);
		else
			gl.renderbufferStorage(GL_RENDERBUFFER, depthStencilFormat, width, height);

		gl.bindRenderbuffer(GL_RENDERBUFFER, 0);
		GLU_EXPECT_NO_ERROR(gl.getError(), "Creating depth / stencil renderbuffer");
	}

	// Assemble the framebuffer; attach the same renderbuffer to both depth
	// and stencil points when the combined format provides both.
	gl.genFramebuffers(1, &m_framebuffer);
	gl.bindFramebuffer(GL_FRAMEBUFFER, m_framebuffer);

	if (m_colorBuffer)
		gl.framebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, m_colorBuffer);

	if (m_depthStencilBuffer)
	{
		if (depthBits > 0)
			gl.framebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, m_depthStencilBuffer);

		if (stencilBits > 0)
			gl.framebufferRenderbuffer(GL_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_RENDERBUFFER, m_depthStencilBuffer);
	}

	GLU_EXPECT_NO_ERROR(gl.getError(), "Creating framebuffer");

	if (gl.checkFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
		throw tcu::NotSupportedError("Framebuffer is not complete");

	// Set up correct viewport for first test case.
	gl.viewport(0, 0, width, height);

	m_renderTarget = tcu::RenderTarget(width, height, pixelFormat, depthBits, stencilBits, config.numSamples);
}
void FileGrabForeground::draw(DrawActionBase *action, Viewport *port)
{
    // Grabs the rendered viewport into the foreground's Image and writes it
    // to a file whose name is produced from the printf-style name template
    // and the current frame counter.
    if(getActive() == false)
        return;

    if(getName().empty())
    {
        FWARNING(("FileGrabForeground::draw: no name ?!?\n"));
        return;
    }

    // Default to RGB when no pixel format was configured.
    Image::PixelFormat pixelFormat = (Image::PixelFormat)getPixelFormat();
    pixelFormat = (pixelFormat == 0) ? Image::OSG_RGB_PF : pixelFormat;

    // do we have an image yet? If not, create one.
    if(getImage() == NullFC)
    {
        beginEditCP(this->getPtr(), FileGrabForeground::ImageFieldMask);
        {
            ImagePtr iPtr = Image::create();
            iPtr->set(pixelFormat, 1);
            setImage(iPtr);
        }
        endEditCP (this->getPtr(), FileGrabForeground::ImageFieldMask);
    }
    else if(getImage()->getPixelFormat() != pixelFormat)
    {
        ImagePtr iPtr = getImage();
        beginEditCP(iPtr, Image::PixelFormatFieldMask);
        {
            iPtr->reformat(pixelFormat);
        }
        endEditCP (iPtr, Image::PixelFormatFieldMask);
    }

    // read pixels
    Inherited::draw(action,port);

    // Expand the frame counter into the name template.
    // BUGFIX: use snprintf so an over-long expansion can never overrun the
    // heap buffer; the "+ 32" headroom for the digits is otherwise a guess.
    const size_t nameSize = getName().size() + 32;
    Char8 *name = new Char8 [ nameSize ];
    snprintf(name, nameSize, getName().c_str(), getFrame());

    ImagePtr i = getImage();
    i->write(name);

    delete [] name;

    // Optionally advance the frame counter for the next grab.
    if(getIncrement() != false)
    {
        beginEditCP(this->getPtr(), FileGrabForeground::FrameFieldMask);
        {
            setFrame(getFrame() + 1);
        }
        endEditCP (this->getPtr(), FileGrabForeground::FrameFieldMask);
    }
}
// Decodes the track's next frame, supporting reversed playback (by decoding
// forward from the nearest key frame), advances or rewinds the frame-start
// clock, maintains the edit list, and returns either the raw frame or a
// lazily-created scaled surface when a scale factor is active.
const Graphics::Surface *QuickTimeDecoder::VideoTrackHandler::decodeNextFrame() {
	if (endOfTrack())
		return 0;

	if (_reversed) {
		// Subtract one to place us on the frame before the current displayed frame.
		_curFrame--;

		// We have one "dummy" frame at the end to so the last frame is displayed
		// for the right amount of time.
		if (_curFrame < 0)
			return 0;

		// Decode from the last key frame to the frame before the one we need.
		// TODO: Probably would be wise to do some caching
		int targetFrame = _curFrame;
		_curFrame = findKeyFrame(targetFrame) - 1;
		while (_curFrame != targetFrame - 1)
			bufferNextFrame();
	}

	const Graphics::Surface *frame = bufferNextFrame();

	if (_reversed) {
		if (_holdNextFrameStartTime) {
			// Don't set the next frame start time here; we just did a seek
			_holdNextFrameStartTime = false;
		} else {
			// Just need to subtract the time
			_nextFrameStartTime -= getFrameDuration();
		}
	} else {
		if (_holdNextFrameStartTime) {
			// Don't set the next frame start time here; we just did a seek
			_holdNextFrameStartTime = false;
		} else if (_durationOverride >= 0) {
			// Use our own duration from the edit list calculation
			_nextFrameStartTime += _durationOverride;
			_durationOverride = -1;
		} else {
			_nextFrameStartTime += getFrameDuration();
		}

		// Update the edit list, if applicable
		// HACK: We're also accepting the time minus one because edit lists
		// aren't as accurate as one would hope.
		if (!atLastEdit() && getRateAdjustedFrameTime() >= getCurEditTimeOffset() + getCurEditTrackDuration() - 1) {
			_curEdit++;

			if (!atLastEdit())
				enterNewEditList(true);
		}
	}

	if (frame && (_parent->scaleFactorX != 1 || _parent->scaleFactorY != 1)) {
		// Lazily create the scaled surface, then scale the frame into it.
		if (!_scaledSurface) {
			_scaledSurface = new Graphics::Surface();
			_scaledSurface->create(getScaledWidth().toInt(), getScaledHeight().toInt(), getPixelFormat());
		}

		_decoder->scaleSurface(frame, _scaledSurface, _parent->scaleFactorX, _parent->scaleFactorY);
		return _scaledSurface;
	}

	return frame;
}
//////////////////////////////////////////////////////////////////////
// readback
//
// Code to handle reading back of the FBO data (but with a specified FBO pointer)
//
//////////////////////////////////////////////////////////////////////
bool CheckFBO::readback( GLuint width, GLuint height, GLuint bufObject )
{
    bool ret = false;

    if (m_bUseFBO) {
        if (m_bUsePBO) {
            shrLog("CheckFBO::readback() FBO->PBO->m_pImageData\n");
            // binds the PBO for readback
            bindReadback();

            // bind FBO buffer (we want to transfer FBO -> PBO)
            glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, bufObject );

            // Now initiate the readback to PBO
            glReadPixels(0, 0, width, height, getPixelFormat(), GL_UNSIGNED_BYTE, BUFFER_OFFSET(0));
            ret = checkStatus(__FILE__, __LINE__, true);
            if (!ret) shrLog("CheckFBO::readback() FBO->PBO checkStatus = %d\n", ret);

            // map - unmap simulates readback without the copy
            // NOTE(review): ioMem is not NULL-checked before the memcpy — a
            // failed glMapBufferARB would crash here; confirm it can't fail.
            void *ioMem = glMapBufferARB(GL_PIXEL_PACK_BUFFER_ARB, GL_READ_ONLY_ARB);
            memcpy(m_pImageData, ioMem, width*height*m_Bpp);
            glUnmapBufferARB(GL_PIXEL_PACK_BUFFER_ARB);

            // release the FBO
            glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);

            // release the PBO
            unbindReadback();
        } else {
            shrLog("CheckFBO::readback() FBO->m_pImageData\n");
            // Reading direct to FBO using glReadPixels
            glBindFramebufferEXT( GL_FRAMEBUFFER_EXT, bufObject );
            ret = checkStatus(__FILE__, __LINE__, true);
            if (!ret) shrLog("CheckFBO::readback::glBindFramebufferEXT() fbo=%d checkStatus = %d\n", (int)bufObject, (int)ret);

            glReadBuffer(static_cast<GLenum>(GL_COLOR_ATTACHMENT0_EXT));
            ret &= checkStatus(__FILE__, __LINE__, true);
            if (!ret) shrLog("CheckFBO::readback::glReadBuffer() fbo=%d checkStatus = %d\n", (int)bufObject, (int)ret);

            glReadPixels(0, 0, width, height, getPixelFormat(), GL_UNSIGNED_BYTE, m_pImageData);
            glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
        }
    } else {
        shrLog("CheckFBO::readback() PBO->m_pImageData\n");

        // read from bufObject (PBO) to system memorys image
        glBindBufferARB(GL_PIXEL_PACK_BUFFER_ARB, bufObject);    // Bind the PBO

        // map - unmap simulates readback without the copy
        void *ioMem = glMapBufferARB(GL_PIXEL_PACK_BUFFER_ARB, GL_READ_ONLY_ARB);
        memcpy(m_pImageData, ioMem, width*height*m_Bpp);
        glUnmapBufferARB(GL_PIXEL_PACK_BUFFER_ARB);

        // read from bufObject (PBO) to system memory image
        glBindBufferARB(GL_PIXEL_PACK_BUFFER_ARB, 0);    // unBind the PBO
    }

    // NOTE(review): returns the CHECK_FBO constant rather than `ret`, so the
    // status checks above only affect logging — verify this is intended.
    return CHECK_FBO;
}
void QuickTimeDecoder::init() {
	// Prunes non-audio/video streams, normalizes stream time bases, picks
	// the first audio and video streams, then initializes audio playback and
	// the per-entry video codecs (plus an optional scaled surface).

	// Remove non-Video/Audio streams (compacting the array in place)
	for (uint32 i = 0; i < _numStreams;) {
		if (_streams[i]->codec_type == CODEC_TYPE_MOV_OTHER) {
			delete _streams[i];
			for (uint32 j = i + 1; j < _numStreams; j++)
				_streams[j - 1] = _streams[j];
			_numStreams--;
		} else
			i++;
	}

	// Adjust time/duration (defaulting missing rates/scales) and pick the
	// first audio and video stream indices
	for (uint32 i = 0; i < _numStreams; i++) {
		MOVStreamContext *sc = _streams[i];

		if (!sc->time_rate)
			sc->time_rate = 1;

		if (!sc->time_scale)
			sc->time_scale = _timeScale;

		sc->duration /= sc->time_rate;

		if (sc->codec_type == CODEC_TYPE_VIDEO && _videoStreamIndex < 0)
			_videoStreamIndex = i;
		else if (sc->codec_type == CODEC_TYPE_AUDIO && _audioStreamIndex < 0)
			_audioStreamIndex = i;
	}

	// Initialize audio, if present
	if (_audioStreamIndex >= 0) {
		STSDEntry *entry = &_streams[_audioStreamIndex]->stsdEntries[0];

		if (checkAudioCodecSupport(entry->codecTag)) {
			_audStream = Audio::makeQueuingAudioStream(entry->sampleRate, entry->channels == 2);
			_curAudioChunk = 0;

			// Make sure the bits per sample transfers to the sample size
			if (entry->codecTag == MKID_BE('raw ') || entry->codecTag == MKID_BE('twos'))
				_streams[_audioStreamIndex]->sample_size = (entry->bitsPerSample / 8) * entry->channels;

			startAudio();
		}

		_audioStartOffset = Audio::Timestamp(0);
	}

	// Initialize video, if present
	if (_videoStreamIndex >= 0) {
		// Low 5 bits of bitsPerSample carry the actual depth
		for (uint32 i = 0; i < _streams[_videoStreamIndex]->stsdEntryCount; i++) {
			STSDEntry *entry = &_streams[_videoStreamIndex]->stsdEntries[i];
			entry->videoCodec = createCodec(entry->codecTag, entry->bitsPerSample & 0x1F);
		}

		if (getScaleFactorX() != 1 || getScaleFactorY() != 1) {
			// We have to initialize the scaled surface
			// NOTE(review): this create() overload takes a byte depth
			// (getPixelFormat().bytesPerPixel), unlike the newer variants
			// elsewhere that take the pixel format itself — confirm the API.
			_scaledSurface = new Graphics::Surface();
			_scaledSurface->create(getWidth(), getHeight(), getPixelFormat().bytesPerPixel);
		}
	}
}
////////////////////////////////////////////////////////////////////// // readback // // Code to handle reading back of the FBO data (but with a specified FBO pointer) // ////////////////////////////////////////////////////////////////////// bool CheckBackBuffer::readback( GLuint width, GLuint height, GLuint bufObject ) { bool ret = false; if (m_bUseFBO) { if (m_bUsePBO) { printf("CheckBackBuffer::readback() FBO->PBO->m_pImageData\n"); // binds the PBO for readback bindReadback(); // bind FBO buffer (we want to transfer FBO -> PBO) glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, bufObject ); // Now initiate the readback to PBO glReadPixels(0, 0, width, height, getPixelFormat(), GL_UNSIGNED_BYTE, BUFFER_OFFSET(0)); ret = checkStatus(__FILE__, __LINE__, true); if (!ret) printf("CheckBackBuffer::readback() FBO->PBO checkStatus = %d\n", ret); // map - unmap simulates readback without the copy void *ioMem = glMapBufferARB(GL_PIXEL_PACK_BUFFER_ARB, GL_READ_ONLY_ARB); memcpy(m_pImageData, ioMem, width*height*m_Bpp); glUnmapBufferARB(GL_PIXEL_PACK_BUFFER_ARB); // release the FBO glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0); // release the PBO unbindReadback(); } else { printf("CheckBackBuffer::readback() FBO->m_pImageData\n"); // Reading direct to FBO using glReadPixels glBindFramebufferEXT( GL_FRAMEBUFFER_EXT, bufObject ); ret = checkStatus(__FILE__, __LINE__, true); if (!ret) printf("CheckBackBuffer::readback::glBindFramebufferEXT() fbo=%d checkStatus = %d\n", bufObject, ret); glReadBuffer(static_cast<GLenum>(GL_COLOR_ATTACHMENT0_EXT)); ret &= checkStatus(__FILE__, __LINE__, true); if (!ret) printf("CheckBackBuffer::readback::glReadBuffer() fbo=%d checkStatus = %d\n", bufObject, ret); glReadPixels(0, 0, width, height, getPixelFormat(), GL_UNSIGNED_BYTE, m_pImageData); glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0); } } else { printf("CheckBackBuffer::readback() PBO->m_pImageData\n"); // read from bufObject (PBO) to system memorys image glBindBufferARB(GL_PIXEL_PACK_BUFFER_ARB, 
bufObject); // Bind the PBO // map - unmap simulates readback without the copy void *ioMem = glMapBufferARB(GL_PIXEL_PACK_BUFFER_ARB, GL_READ_ONLY_ARB); // allocate a buffer so we can flip the image unsigned char * temp_buf = (unsigned char *)malloc(width*height*m_Bpp); memcpy( temp_buf, ioMem, width*height*m_Bpp ); // let's flip the image as we copy for (unsigned int y = 0; y < height; y++) { memcpy( (void *)&(m_pImageData[(height-y)*width*m_Bpp]), (void *)&(temp_buf[y*width*m_Bpp]), width*m_Bpp); } free(temp_buf); glUnmapBufferARB(GL_PIXEL_PACK_BUFFER_ARB); // read from bufObject (PBO) to system memory image glBindBufferARB(GL_PIXEL_PACK_BUFFER_ARB, 0); // unBind the PBO } return CHECK_FBO; }
void Texture::initialize(const PackedFrameDescription &description, const GLvoid * pData) { const auto packFormat = description.glPackFormat; initialize(description, getAdaptedInternalFormat(packFormat), getPixelFormat(packFormat), getPixelType(packFormat), pData); }