GonkVideoDecoderManager::GonkVideoDecoderManager(
  MediaTaskQueue* aTaskQueue,
  mozilla::layers::ImageContainer* aImageContainer,
  const mp4_demuxer::VideoDecoderConfig& aConfig)
  : GonkDecoderManager(aTaskQueue)
  , mImageContainer(aImageContainer)
  , mReaderCallback(nullptr)
  , mColorConverterBufferSize(0)
  , mNativeWindow(nullptr)
  , mPendingVideoBuffersLock("GonkVideoDecoderManager::mPendingVideoBuffersLock")
{
  NS_ASSERTION(!NS_IsMainThread(), "Should not be on main thread.");
  MOZ_ASSERT(mImageContainer);
  MOZ_COUNT_CTOR(GonkVideoDecoderManager);
  mVideoWidth = aConfig.display_width;
  mVideoHeight = aConfig.display_height;
  mDisplayWidth = aConfig.display_width;
  mDisplayHeight = aConfig.display_height;
  mInfo.mVideo.mHasVideo = true;
  nsIntSize displaySize(mDisplayWidth, mDisplayHeight);
  mInfo.mVideo.mDisplay = displaySize;

  nsIntRect pictureRect(0, 0, mVideoWidth, mVideoHeight);
  nsIntSize frameSize(mVideoWidth, mVideoHeight);
  mPicture = pictureRect;
  mInitialFrame = frameSize;
  mHandler = new MessageHandler(this);
  mVideoListener = new VideoResourceListener(this);
}
nsresult MediaOmxReader::ReadMetadata(VideoInfo* aInfo, MetadataTags** aTags)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");

  *aTags = nullptr;

  if (!mOmxDecoder.get()) {
    mOmxDecoder = new OmxDecoder(mDecoder->GetResource(), mDecoder);
    if (!mOmxDecoder->Init()) {
      return NS_ERROR_FAILURE;
    }
  }

  // Set the total duration (the max of the audio and video track).
  int64_t durationUs;
  mOmxDecoder->GetDuration(&durationUs);
  if (durationUs) {
    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
    mDecoder->SetMediaDuration(durationUs);
  }

  if (mOmxDecoder->HasVideo()) {
    int32_t width, height;
    mOmxDecoder->GetVideoParameters(&width, &height);
    nsIntRect pictureRect(0, 0, width, height);

    // Validate the container-reported frame and pictureRect sizes. This ensures
    // that our video frame creation code doesn't overflow.
    nsIntSize displaySize(width, height);
    nsIntSize frameSize(width, height);
    if (!VideoInfo::ValidateVideoRegion(frameSize, pictureRect, displaySize)) {
      return NS_ERROR_FAILURE;
    }

    // Video track's frame sizes will not overflow. Activate the video track.
    mHasVideo = mInfo.mHasVideo = true;
    mInfo.mDisplay = displaySize;
    mPicture = pictureRect;
    mInitialFrame = frameSize;
    VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
    if (container) {
      container->SetCurrentFrame(gfxIntSize(displaySize.width, displaySize.height),
                                 nullptr,
                                 mozilla::TimeStamp::Now());
    }
  }

  if (mOmxDecoder->HasAudio()) {
    int32_t numChannels, sampleRate;
    mOmxDecoder->GetAudioParameters(&numChannels, &sampleRate);
    mHasAudio = mInfo.mHasAudio = true;
    mInfo.mAudioChannels = numChannels;
    mInfo.mAudioRate = sampleRate;
  }

  *aInfo = mInfo;

  return NS_OK;
}
RefPtr<MediaDataDecoder::InitPromise>
GonkVideoDecoderManager::Init()
{
  nsIntSize displaySize(mDisplayWidth, mDisplayHeight);
  nsIntRect pictureRect(0, 0, mVideoWidth, mVideoHeight);

  uint32_t maxWidth, maxHeight;
  char propValue[PROPERTY_VALUE_MAX];
  property_get("ro.moz.omx.hw.max_width", propValue, "-1");
  maxWidth = -1 == atoi(propValue) ? MAX_VIDEO_WIDTH : atoi(propValue);
  property_get("ro.moz.omx.hw.max_height", propValue, "-1");
  maxHeight = -1 == atoi(propValue) ? MAX_VIDEO_HEIGHT : atoi(propValue);

  if (mVideoWidth * mVideoHeight > maxWidth * maxHeight) {
    GVDM_LOG("Video resolution exceeds hw codec capability");
    return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__);
  }

  // Validate the container-reported frame and pictureRect sizes. This ensures
  // that our video frame creation code doesn't overflow.
  nsIntSize frameSize(mVideoWidth, mVideoHeight);
  if (!IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
    GVDM_LOG("It is not a valid region");
    return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__);
  }

  mReaderTaskQueue = AbstractThread::GetCurrent()->AsTaskQueue();
  MOZ_ASSERT(mReaderTaskQueue);

  if (mDecodeLooper.get() != nullptr) {
    return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__);
  }

  if (!InitLoopers(MediaData::VIDEO_DATA)) {
    return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__);
  }

  RefPtr<InitPromise> p = mInitPromise.Ensure(__func__);
  android::sp<GonkVideoDecoderManager> self = this;
  mVideoCodecRequest.Begin(mVideoListener->Init()
    ->Then(mReaderTaskQueue, __func__,
      [self] (bool) -> void {
        self->mVideoCodecRequest.Complete();
        self->codecReserved();
      },
      [self] (bool) -> void {
        self->mVideoCodecRequest.Complete();
        self->codecCanceled();
      }));

  mDecoder = MediaCodecProxy::CreateByType(mDecodeLooper, mMimeType.get(), false, mVideoListener);
  mDecoder->AsyncAskMediaCodec();

  uint32_t capability = MediaCodecProxy::kEmptyCapability;
  if (mDecoder->getCapability(&capability) == OK &&
      (capability & MediaCodecProxy::kCanExposeGraphicBuffer)) {
    mNativeWindow = new GonkNativeWindow();
  }

  return p;
}
bool
CGUIDriverGL::Init( GUI::TWindowContextPtr windowContext )
{
    if ( !m_ceGuiInitialized )
    {
        try
        {
            CEGUI::Sizef displaySize( (float) windowContext->GetWidth(), (float) windowContext->GetHeight() );
            m_guiRenderer = &CEGUI::OpenGL3Renderer::create( displaySize ); //, CEGUI::OpenGLRenderer::TTT_AUTO );
            m_guiSystem = &CEGUI::System::create( *m_guiRenderer, &m_vfsResourceProvider, &m_xmlParserAdapter, m_imageCodecAdapter );

            // setup default group for validation schemas
            CEGUI::XMLParser* parser = m_guiSystem->getXMLParser();
            if ( nullptr != parser && parser->isPropertyPresent( "SchemaDefaultResourceGroup" ) )
                parser->setProperty( "SchemaDefaultResourceGroup", m_schemasResourceGroup );

            // Load the fonts
            CEGUI::FontManager::getSingleton().createAll( m_defaultFont, CEGUI::Font::getDefaultResourceGroup() );

            // Load the scheme
            try
            {
                CEGUI::SchemeManager::getSingleton().createFromFile( "Generic.scheme" );
            }
            catch ( CEGUI::Exception& e )
            {
                CORE::CString info = e.getMessage() + " - at - " + e.getFileName() + ":" + e.getFunctionName() + ":" + CORE::UInt32ToString( e.getLine() ).STL_String();
                GUCEF_EXCEPTION_LOG( CORE::LOGLEVEL_IMPORTANT, "Unhandled exception during CEGUI initialization: " + info );
            }
            CEGUI::SchemeManager::getSingleton().createFromFile( m_schemeToUse );

            // Set the defaults
            CEGUI::System::getSingleton().getDefaultGUIContext().setDefaultFont( m_defaultFont );
            CEGUI::System::getSingleton().getDefaultGUIContext().getMouseCursor().setDefaultImage( m_defaultCursorImage );

            CEGUI::Window* rootWindow = CEGUI::WindowManager::getSingleton().createWindow( "DefaultWindow", "root" );
            CEGUI::System::getSingleton().getDefaultGUIContext().setRootWindow( rootWindow );

            // clearing this queue actually makes sure it's created(!)
            CEGUI::System::getSingleton().getDefaultGUIContext().clearGeometry( CEGUI::RQ_OVERLAY );

            m_ceGuiInitialized = true;
        }
        catch ( CEGUI::Exception& e )
        {
            CORE::CString info = e.getMessage() + " - at - " + e.getFileName() + ":" + e.getFunctionName() + ":" + CORE::UInt32ToString( e.getLine() ).STL_String();
            GUCEF_EXCEPTION_LOG( CORE::LOGLEVEL_IMPORTANT, "Unhandled exception during CEGUI initialization: " + info );
            m_ceGuiInitialized = false;
        }
    }
    return m_ceGuiInitialized;
}
nsRefPtr<MediaDataDecoder::InitPromise>
GonkVideoDecoderManager::Init(MediaDataDecoderCallback* aCallback)
{
  nsIntSize displaySize(mDisplayWidth, mDisplayHeight);
  nsIntRect pictureRect(0, 0, mVideoWidth, mVideoHeight);

  // Validate the container-reported frame and pictureRect sizes. This ensures
  // that our video frame creation code doesn't overflow.
  nsIntSize frameSize(mVideoWidth, mVideoHeight);
  if (!IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
    GVDM_LOG("It is not a valid region");
    return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__);
  }

  mReaderCallback = aCallback;
  mReaderTaskQueue = AbstractThread::GetCurrent()->AsTaskQueue();
  MOZ_ASSERT(mReaderTaskQueue);

  if (mLooper.get() != nullptr) {
    return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__);
  }

  // Create ALooper
  mLooper = new ALooper;
  mManagerLooper = new ALooper;
  mManagerLooper->setName("GonkVideoDecoderManager");
  // Register AMessage handler to ALooper.
  mManagerLooper->registerHandler(mHandler);
  // Start ALooper thread.
  if (mLooper->start() != OK || mManagerLooper->start() != OK) {
    return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__);
  }

  nsRefPtr<InitPromise> p = mInitPromise.Ensure(__func__);

  mDecoder = MediaCodecProxy::CreateByType(mLooper, mMimeType.get(), false, mVideoListener);
  mDecoder->AsyncAskMediaCodec();

  uint32_t capability = MediaCodecProxy::kEmptyCapability;
  if (mDecoder->getCapability(&capability) == OK &&
      (capability & MediaCodecProxy::kCanExposeGraphicBuffer)) {
    mNativeWindow = new GonkNativeWindow();
  }

  return p;
}
bool
GonkVideoDecoderManager::SetVideoFormat()
{
  // read video metadata from MediaCodec
  sp<AMessage> codecFormat;
  if (mDecoder->getOutputFormat(&codecFormat) == OK) {
    AString mime;
    int32_t width = 0;
    int32_t height = 0;
    int32_t stride = 0;
    int32_t slice_height = 0;
    int32_t color_format = 0;
    int32_t crop_left = 0;
    int32_t crop_top = 0;
    int32_t crop_right = 0;
    int32_t crop_bottom = 0;
    if (!codecFormat->findString("mime", &mime) ||
        !codecFormat->findInt32("width", &width) ||
        !codecFormat->findInt32("height", &height) ||
        !codecFormat->findInt32("stride", &stride) ||
        !codecFormat->findInt32("slice-height", &slice_height) ||
        !codecFormat->findInt32("color-format", &color_format) ||
        !codecFormat->findRect("crop", &crop_left, &crop_top, &crop_right, &crop_bottom)) {
      GVDM_LOG("Failed to find values");
      return false;
    }
    mFrameInfo.mWidth = width;
    mFrameInfo.mHeight = height;
    mFrameInfo.mStride = stride;
    mFrameInfo.mSliceHeight = slice_height;
    mFrameInfo.mColorFormat = color_format;

    nsIntSize displaySize(width, height);
    if (!IsValidVideoRegion(mInitialFrame, mPicture, displaySize)) {
      GVDM_LOG("It is not a valid region");
      return false;
    }
    return true;
  }
  GVDM_LOG("Fail to get output format");
  return false;
}
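Several of the functions above gate frame allocation on IsValidVideoRegion() (or VideoInfo::ValidateVideoRegion()) without showing its body. Below is a minimal sketch of the kind of overflow guard such a helper performs; the bound constants, the helper name, and the exact set of checks are assumptions for illustration, not the actual Gecko implementation.

// Sketch only: assumed bounds; the real limits are defined in the Gecko sources.
static const int32_t kSketchMaxVideoWidth  = 4000;
static const int32_t kSketchMaxVideoHeight = 3000;

// Assumed behavior: every extent is positive and bounded, and the picture
// rect lies entirely inside the decoded frame, so later width*height and
// offset arithmetic cannot overflow.
static bool
IsValidVideoRegionSketch(const nsIntSize& aFrame,
                         const nsIntRect& aPicture,
                         const nsIntSize& aDisplay)
{
  return aFrame.width > 0 && aFrame.width <= kSketchMaxVideoWidth &&
         aFrame.height > 0 && aFrame.height <= kSketchMaxVideoHeight &&
         aPicture.width > 0 && aPicture.height > 0 &&
         aPicture.x >= 0 && aPicture.XMost() <= aFrame.width &&
         aPicture.y >= 0 && aPicture.YMost() <= aFrame.height &&
         aDisplay.width > 0 && aDisplay.width <= kSketchMaxVideoWidth &&
         aDisplay.height > 0 && aDisplay.height <= kSketchMaxVideoHeight;
}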
int cmd_memory(char *param)
{
	displayString(TEXT_MEMORY_ENVIRONMENT, mcb_length(env_glbSeg), env_freeCount(env_glbSeg));
	displayString(TEXT_MEMORY_CONTEXT, mcb_length(ctxtSegm), env_freeCount(ctxtSegm));
	displayTag(TEXT_MEMORY_CTXT_ALIAS, CTXT_TAG_ALIAS);
	displayTag(TEXT_MEMORY_CTXT_HISTORY, CTXT_TAG_HISTORY);
	displayTag(TEXT_MEMORY_CTXT_DIRSTACK, CTXT_TAG_DIRSTACK);
	displayTag1(TEXT_MEMORY_CTXT_LASTDIR, CTXT_TAG_LASTDIR);
	displayTag1(TEXT_MEMORY_CTXT_ARG, CTXT_TAG_ARG);
	displayTag1(TEXT_MEMORY_CTXT_SWAPINFO, CTXT_TAG_SWAPINFO);
	displayTag1(TEXT_MEMORY_CTXT_EXEC, CTXT_TAG_EXEC);
	displayTag1(TEXT_MEMORY_CTXT_STRING, CTXT_TAG_STRING);
	displayTag2(TEXT_MEMORY_CTXT_FLAG, CTXT_TAG_FLAG);
	displayTag2(TEXT_MEMORY_CTXT_IVAR, CTXT_TAG_IVAR);
	displayString(TEXT_MEMORY_HEAP, (unsigned long)coreleft(), getFree);
	displaySize(TEXT_MEMORY_DOSMEM, DOSalloc(0, 0x80 | 0x10));

	return 0;
}
android::sp<MediaCodecProxy>
GonkVideoDecoderManager::Init(MediaDataDecoderCallback* aCallback)
{
  nsIntSize displaySize(mDisplayWidth, mDisplayHeight);
  nsIntRect pictureRect(0, 0, mVideoWidth, mVideoHeight);

  // Validate the container-reported frame and pictureRect sizes. This ensures
  // that our video frame creation code doesn't overflow.
  nsIntSize frameSize(mVideoWidth, mVideoHeight);
  if (!IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
    GVDM_LOG("It is not a valid region");
    return nullptr;
  }

  mReaderCallback = aCallback;

  if (mLooper.get() != nullptr) {
    return nullptr;
  }

  // Create ALooper
  mLooper = new ALooper;
  mManagerLooper = new ALooper;
  mManagerLooper->setName("GonkVideoDecoderManager");
  // Register AMessage handler to ALooper.
  mManagerLooper->registerHandler(mHandler);
  // Start ALooper thread.
  if (mLooper->start() != OK || mManagerLooper->start() != OK) {
    return nullptr;
  }

  mDecoder = MediaCodecProxy::CreateByType(mLooper, mMimeType.get(), false, mVideoListener);
  mDecoder->AskMediaCodecAndWait();

  uint32_t capability = MediaCodecProxy::kEmptyCapability;
  if (mDecoder->getCapability(&capability) == OK &&
      (capability & MediaCodecProxy::kCanExposeGraphicBuffer)) {
    mNativeWindow = new GonkNativeWindow();
  }

  return mDecoder;
}
void MediaOmxReader::HandleResourceAllocated()
{
  EnsureActive();

  // After resources are available, set the metadata.
  if (!mOmxDecoder->EnsureMetadata()) {
    mMetadataPromise.Reject(ReadMetadataFailureReason::METADATA_ERROR, __func__);
    return;
  }

  bool isMP3 = mDecoder->GetResource()->GetContentType().EqualsASCII(AUDIO_MP3);
  if (isMP3 && mMP3FrameParser.IsMP3()) {
    // Check if the MP3 frame parser found a duration.
    mLastParserDuration = mMP3FrameParser.GetDuration();
  }

  if (mLastParserDuration >= 0) {
    // Prefer the parser duration if we have it.
    mInfo.mMetadataDuration = Some(TimeUnit::FromMicroseconds(mLastParserDuration));
  } else {
    // MP3 parser failed to find a duration.
    // Set the total duration (the max of the audio and video track).
    int64_t durationUs;
    mOmxDecoder->GetDuration(&durationUs);
    if (durationUs) {
      mInfo.mMetadataDuration = Some(TimeUnit::FromMicroseconds(durationUs));
    }
  }

  if (mOmxDecoder->HasVideo()) {
    int32_t displayWidth, displayHeight, width, height;
    mOmxDecoder->GetVideoParameters(&displayWidth, &displayHeight,
                                    &width, &height);
    nsIntRect pictureRect(0, 0, width, height);

    // Validate the container-reported frame and pictureRect sizes. This ensures
    // that our video frame creation code doesn't overflow.
    nsIntSize displaySize(displayWidth, displayHeight);
    nsIntSize frameSize(width, height);
    if (!IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
      mMetadataPromise.Reject(ReadMetadataFailureReason::METADATA_ERROR, __func__);
      return;
    }

    // Video track's frame sizes will not overflow. Activate the video track.
    mHasVideo = true;
    mInfo.mVideo.mDisplay = displaySize;
    mPicture = pictureRect;
    mInitialFrame = frameSize;
    VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
    if (container) {
      container->ClearCurrentFrame(gfxIntSize(displaySize.width, displaySize.height));
    }
  }

  if (mOmxDecoder->HasAudio()) {
    int32_t numChannels, sampleRate;
    mOmxDecoder->GetAudioParameters(&numChannels, &sampleRate);
    mHasAudio = true;
    mInfo.mAudio.mChannels = numChannels;
    mInfo.mAudio.mRate = sampleRate;
  }

  nsRefPtr<MetadataHolder> metadata = new MetadataHolder();
  metadata->mInfo = mInfo;
  metadata->mTags = nullptr;

#ifdef MOZ_AUDIO_OFFLOAD
  CheckAudioOffload();
#endif

  mMetadataPromise.Resolve(metadata, __func__);
}
nsresult MediaOmxReader::ReadMetadata(MediaInfo* aInfo, MetadataTags** aTags)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
  EnsureActive();

  *aTags = nullptr;

  // Initialize the internal OMX Decoder.
  nsresult rv = InitOmxDecoder();
  if (NS_FAILED(rv)) {
    return rv;
  }

  bool isMP3 = mDecoder->GetResource()->GetContentType().EqualsASCII(AUDIO_MP3);
  if (isMP3) {
    // When a file on the sdcard is read on the b2g platform in the
    // constructor, mDecoder->GetResource()->GetLength() returns -1, so
    // setting the total duration is deferred to this function.
    mMP3FrameParser.SetLength(mDecoder->GetResource()->GetLength());
    ProcessCachedData(0, true);
  }

  if (!mOmxDecoder->AllocateMediaResources()) {
    return NS_ERROR_FAILURE;
  }
  // Bug 1050667: both MediaDecoderStateMachine and MediaOmxReader rely on the
  // IsWaitingMediaResources() function, and the waiting state is changed by a
  // binder thread, so we store the waiting state in a cached value to keep
  // them in a consistent state.
  UpdateIsWaitingMediaResources();
  if (IsWaitingMediaResources()) {
    return NS_OK;
  }
  // After resources are available, set the metadata.
  if (!mOmxDecoder->EnsureMetadata()) {
    return NS_ERROR_FAILURE;
  }

  if (isMP3 && mMP3FrameParser.IsMP3()) {
    // The MP3FrameParser may have found a duration;
    // GetDuration() returns -1 if no frame has been parsed.
    int64_t duration = mMP3FrameParser.GetDuration();
    if (duration >= 0) {
      ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
      mUseParserDuration = true;
      mLastParserDuration = duration;
      mDecoder->SetMediaDuration(mLastParserDuration);
    }
  } else {
    // Set the total duration (the max of the audio and video track).
    int64_t durationUs;
    mOmxDecoder->GetDuration(&durationUs);
    if (durationUs) {
      ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
      mDecoder->SetMediaDuration(durationUs);
    }
  }

  if (mOmxDecoder->HasVideo()) {
    int32_t displayWidth, displayHeight, width, height;
    mOmxDecoder->GetVideoParameters(&displayWidth, &displayHeight,
                                    &width, &height);
    nsIntRect pictureRect(0, 0, width, height);

    // Validate the container-reported frame and pictureRect sizes. This ensures
    // that our video frame creation code doesn't overflow.
    nsIntSize displaySize(displayWidth, displayHeight);
    nsIntSize frameSize(width, height);
    if (!IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
      return NS_ERROR_FAILURE;
    }

    // Video track's frame sizes will not overflow. Activate the video track.
    mHasVideo = mInfo.mVideo.mHasVideo = true;
    mInfo.mVideo.mDisplay = displaySize;
    mPicture = pictureRect;
    mInitialFrame = frameSize;
    VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
    if (container) {
      container->SetCurrentFrame(gfxIntSize(displaySize.width, displaySize.height),
                                 nullptr,
                                 mozilla::TimeStamp::Now());
    }
  }

  if (mOmxDecoder->HasAudio()) {
    int32_t numChannels, sampleRate;
    mOmxDecoder->GetAudioParameters(&numChannels, &sampleRate);
    mHasAudio = mInfo.mAudio.mHasAudio = true;
    mInfo.mAudio.mChannels = numChannels;
    mInfo.mAudio.mRate = sampleRate;
  }

  *aInfo = mInfo;

#ifdef MOZ_AUDIO_OFFLOAD
  CheckAudioOffload();
#endif

  return NS_OK;
}
nsresult MediaOmxReader::ReadMetadata(MediaInfo* aInfo, MetadataTags** aTags)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
  EnsureActive();

  *aTags = nullptr;

  // Initialize the internal OMX Decoder.
  nsresult rv = InitOmxDecoder();
  if (NS_FAILED(rv)) {
    return rv;
  }
  if (!mOmxDecoder->TryLoad()) {
    return NS_ERROR_FAILURE;
  }

#ifdef MOZ_AUDIO_OFFLOAD
  CheckAudioOffload();
#endif

  if (IsWaitingMediaResources()) {
    return NS_OK;
  }

  // Set the total duration (the max of the audio and video track).
  int64_t durationUs;
  mOmxDecoder->GetDuration(&durationUs);
  if (durationUs) {
    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
    mDecoder->SetMediaDuration(durationUs);
  }

  if (mOmxDecoder->HasVideo()) {
    int32_t displayWidth, displayHeight, width, height;
    mOmxDecoder->GetVideoParameters(&displayWidth, &displayHeight,
                                    &width, &height);
    nsIntRect pictureRect(0, 0, width, height);

    // Validate the container-reported frame and pictureRect sizes. This ensures
    // that our video frame creation code doesn't overflow.
    nsIntSize displaySize(displayWidth, displayHeight);
    nsIntSize frameSize(width, height);
    if (!IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
      return NS_ERROR_FAILURE;
    }

    // Video track's frame sizes will not overflow. Activate the video track.
    mHasVideo = mInfo.mVideo.mHasVideo = true;
    mInfo.mVideo.mDisplay = displaySize;
    mPicture = pictureRect;
    mInitialFrame = frameSize;
    VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
    if (container) {
      container->SetCurrentFrame(gfxIntSize(displaySize.width, displaySize.height),
                                 nullptr,
                                 mozilla::TimeStamp::Now());
    }
  }

  if (mOmxDecoder->HasAudio()) {
    int32_t numChannels, sampleRate;
    mOmxDecoder->GetAudioParameters(&numChannels, &sampleRate);
    mHasAudio = mInfo.mAudio.mHasAudio = true;
    mInfo.mAudio.mChannels = numChannels;
    mInfo.mAudio.mRate = sampleRate;
  }

  *aInfo = mInfo;

  return NS_OK;
}
void SlideEffect::paintScreen( int mask, QRegion region, ScreenPaintData& data )
    {
    if( mTimeLine.value() == 0 )
        {
        effects->paintScreen( mask, region, data );
        return;
        }
    /*
     Transformations are done by remembering the starting position of the change
     and its progress; the destination is computed from the current desktop.
     Desktop positions are identified by their topleft corner.
    */
    QPoint destPos = desktopRect( effects->currentDesktop() ).topLeft();
    QPoint diffPos = destPos - slide_start_pos;
    int w = 0;
    int h = 0;
    if( effects->optionRollOverDesktops())
        {
        w = effects->workspaceWidth();
        h = effects->workspaceHeight();
        // wrap around if shorter
        if( diffPos.x() > 0 && diffPos.x() > w / 2 )
            diffPos.setX( diffPos.x() - w );
        if( diffPos.x() < 0 && abs( diffPos.x()) > w / 2 )
            diffPos.setX( diffPos.x() + w );
        if( diffPos.y() > 0 && diffPos.y() > h / 2 )
            diffPos.setY( diffPos.y() - h );
        if( diffPos.y() < 0 && abs( diffPos.y()) > h / 2 )
            diffPos.setY( diffPos.y() + h );
        }
    QPoint currentPos = slide_start_pos + mTimeLine.value() * diffPos;
    QSize displaySize( displayWidth(), displayHeight());
    QRegion currentRegion = QRect( currentPos, displaySize );
    if( effects->optionRollOverDesktops())
        {
        currentRegion |= ( currentRegion & QRect( -w, 0, w, h )).translated( w, 0 );
        currentRegion |= ( currentRegion & QRect( 0, -h, w, h )).translated( 0, h );
        currentRegion |= ( currentRegion & QRect( w, 0, w, h )).translated( -w, 0 );
        currentRegion |= ( currentRegion & QRect( 0, h, w, h )).translated( 0, -h );
        }
    bool do_sticky = true;
    for( int desktop = 1; desktop <= effects->numberOfDesktops(); ++desktop )
        {
        QRect rect = desktopRect( desktop );
        if( currentRegion.contains( rect )) // part of the desktop needs painting
            {
            painting_desktop = desktop;
            slide_painting_sticky = do_sticky;
            slide_painting_diff = rect.topLeft() - currentPos;
            if( effects->optionRollOverDesktops())
                {
                if( slide_painting_diff.x() > displayWidth())
                    slide_painting_diff.setX( slide_painting_diff.x() - w );
                if( slide_painting_diff.x() < -displayWidth())
                    slide_painting_diff.setX( slide_painting_diff.x() + w );
                if( slide_painting_diff.y() > displayHeight())
                    slide_painting_diff.setY( slide_painting_diff.y() - h );
                if( slide_painting_diff.y() < -displayHeight())
                    slide_painting_diff.setY( slide_painting_diff.y() + h );
                }
            do_sticky = false; // paint on-all-desktop windows only once
            ScreenPaintData d = data;
            d.xTranslate += slide_painting_diff.x();
            d.yTranslate += slide_painting_diff.y();
            // TODO mask parts that are not visible?
            effects->paintScreen( mask, region, d );
            }
        }
    }
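The roll-over branch above always slides along the shorter of the two paths across a wrapping desktop layout. The same 1-D computation, extracted as a standalone sketch (the helper name is illustrative and not part of KWin):

// Given an offset along one axis of a layout that wraps at `extent`,
// return the equivalent offset with the smaller travel distance.
static int shortestWrappedOffset( int offset, int extent )
    {
    if( offset > 0 && offset > extent / 2 )
        offset -= extent; // crossing the wrap backwards is shorter
    if( offset < 0 && -offset > extent / 2 )
        offset += extent; // crossing the wrap forwards is shorter
    return offset;
    }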
bool
CGUIDriverOgre::Init( GUI::TWindowContextPtr windowContext )
{
    if ( !m_ceGuiInitialized )
    {
        try
        {
            Ogre::RenderTarget* renderTarget = nullptr;
            CORE::CString renderTargetPtrStr = windowContext->GetProperty( "Ogre::RenderTarget" );
            if ( !renderTargetPtrStr.IsNULLOrEmpty() )
            {
                renderTarget = static_cast< Ogre::RenderTarget* >( CORE::StringToPointer( renderTargetPtrStr ) );
            }

            Ogre::SceneManager* sceneManager = nullptr;
            CORE::CString sceneManagerPtrStr = windowContext->GetProperty( "Ogre::SceneManager" );
            if ( !sceneManagerPtrStr.IsNULLOrEmpty() )
            {
                sceneManager = static_cast< Ogre::SceneManager* >( CORE::StringToPointer( sceneManagerPtrStr ) );
            }

            // We cannot initialize without a valid render target and scene manager
            if ( nullptr == renderTarget || nullptr == sceneManager )
                return false;

            // Auto-create a viewport here if none exists yet
            unsigned short viewportCount = renderTarget->getNumViewports();
            if ( 0 == viewportCount )
            {
                Ogre::Camera* camera = sceneManager->createCamera( "CEGUI" );
                camera->setPosition( Ogre::Vector3( 0, 0, 500 ) );
                camera->lookAt( Ogre::Vector3( 0, 0, -300 ) );
                camera->setNearClipDistance( 5 );

                // Create a viewport covering the whole window
                Ogre::Viewport* viewport = renderTarget->addViewport( camera );
                viewport->setBackgroundColour( Ogre::ColourValue( 0.0f, 0.0f, 0.0f, 0.0f ) );
                viewport->setOverlaysEnabled( true );

                // Update the camera aspect ratio to that of the viewport
                camera->setAspectRatio( Ogre::Real( viewport->getActualWidth() ) / Ogre::Real( viewport->getActualHeight() ) );
            }

            CEGUI::Sizef displaySize( (float) windowContext->GetWidth(), (float) windowContext->GetHeight() );
            m_guiRenderer = &CEGUI::OgreRenderer::create( *renderTarget ); // displaySize );//, CEGUI::OpenGLRenderer::TTT_AUTO );
            m_guiRenderer->setDefaultRootRenderTarget( *renderTarget );
            m_guiSystem = &CEGUI::System::create( *m_guiRenderer, &m_vfsResourceProvider, &m_xmlParserAdapter, m_imageCodecAdapter );

            // setup default group for validation schemas
            CEGUI::XMLParser* parser = m_guiSystem->getXMLParser();
            if ( nullptr != parser && parser->isPropertyPresent( "SchemaDefaultResourceGroup" ) )
                parser->setProperty( "SchemaDefaultResourceGroup", m_schemasResourceGroup );

            // Load the fonts
            CEGUI::FontManager::getSingleton().createAll( m_defaultFont, CEGUI::Font::getDefaultResourceGroup() );

            // Load the scheme
            try
            {
                CEGUI::SchemeManager::getSingleton().createFromFile( "Generic.scheme" );
            }
            catch ( CEGUI::Exception& e )
            {
                CORE::CString info = e.getMessage() + " - at - " + e.getFileName() + ":" + e.getFunctionName() + ":" + CORE::UInt32ToString( e.getLine() ).STL_String();
                GUCEF_EXCEPTION_LOG( CORE::LOGLEVEL_IMPORTANT, "Unhandled exception during CEGUI initialization: " + info );
            }
            CEGUI::SchemeManager::getSingleton().createFromFile( m_schemeToUse );

            // Set the defaults
            CEGUI::System::getSingleton().getDefaultGUIContext().setDefaultFont( m_defaultFont );
            CEGUI::System::getSingleton().getDefaultGUIContext().getMouseCursor().setDefaultImage( m_defaultCursorImage );

            CEGUI::Window* rootWindow = CEGUI::WindowManager::getSingleton().createWindow( "DefaultWindow", "root" );
            CEGUI::System::getSingleton().getDefaultGUIContext().setRootWindow( rootWindow );

            // clearing this queue actually makes sure it's created(!)
            CEGUI::System::getSingleton().getDefaultGUIContext().clearGeometry( CEGUI::RQ_OVERLAY );

            m_ceGuiInitialized = true;
        }
        catch ( CEGUI::Exception& e )
        {
            CORE::CString info = e.getMessage() + " - at - " + e.getFileName() + ":" + e.getFunctionName() + ":" + CORE::UInt32ToString( e.getLine() ).STL_String();
            GUCEF_EXCEPTION_LOG( CORE::LOGLEVEL_IMPORTANT, "Unhandled exception during CEGUI initialization: " + info );
            m_ceGuiInitialized = false;
        }
    }
    return m_ceGuiInitialized;
}
int main(int argc, char **argv) {

    if (argc > 3) {
        std::cout << "Only the path of a SVO or a InitParams file can be passed in arg." << std::endl;
        return -1;
    }

    // Quick check input arguments
    bool readSVO = false;
    std::string SVOName;
    bool loadParams = false;
    std::string ParamsName;
    if (argc > 1) {
        std::string _arg;
        for (int i = 1; i < argc; i++) {
            _arg = argv[i];
            if (_arg.find(".svo") != std::string::npos) {
                // If a SVO is given we save its name
                readSVO = true;
                SVOName = _arg;
            }
            if (_arg.find(".ZEDinitParam") != std::string::npos) {
                // If a parameter file is given we save its name
                loadParams = true;
                ParamsName = _arg;
            }
        }
    }

    sl::zed::Camera* zed;
    if (!readSVO) // Live Mode
        zed = new sl::zed::Camera(sl::zed::HD720);
    else // SVO playback mode
        zed = new sl::zed::Camera(SVOName);

    // Define a struct of parameters for the initialization
    sl::zed::InitParams params;
    if (loadParams) // A parameters file was given in argument, we load it
        params.load(ParamsName);

    // Enables verbosity in the console
    params.verbose = true;

    sl::zed::ERRCODE err = zed->init(params);
    std::cout << "Error code : " << sl::zed::errcode2str(err) << std::endl;
    if (err != sl::zed::SUCCESS) { // Exit if an error occurred
        delete zed;
        return 1;
    }

    // Save the initialization parameters
    // The file can be used later in any zed based application
    params.save("MyParam");

    char key = ' ';
    int viewID = 0;
    int confidenceThres = 100;

    bool displayDisp = true;
    bool displayConfidenceMap = false;

    int width = zed->getImageSize().width;
    int height = zed->getImageSize().height;

    cv::Mat disp(height, width, CV_8UC4);
    cv::Mat anaglyph(height, width, CV_8UC4);
    cv::Mat confidencemap(height, width, CV_8UC4);

    cv::Size displaySize(720, 404);
    cv::Mat dispDisplay(displaySize, CV_8UC4);
    cv::Mat anaglyphDisplay(displaySize, CV_8UC4);
    cv::Mat confidencemapDisplay(displaySize, CV_8UC4);

    sl::zed::SENSING_MODE dm_type = sl::zed::STANDARD;

    // Mouse callback initialization
    sl::zed::Mat depth;
    zed->grab(dm_type);
    depth = zed->retrieveMeasure(sl::zed::MEASURE::DEPTH); // Get the pointer
    // Set the structure
    mouseStruct._image = cv::Size(width, height);
    mouseStruct._resize = displaySize;
    mouseStruct.data = (float*) depth.data;
    mouseStruct.step = depth.step;
    mouseStruct.name = "DEPTH";
    mouseStruct.unit = unit2str(params.unit);

    // The depth is limited to 20 METERS, as defined in zed::init()
    zed->setDepthClampValue(10000);

    // Create OpenCV Windows
    // NOTE: You may encounter an issue with OpenGL support. To solve it, either
    // use the default rendering by removing ' | cv::WINDOW_OPENGL' from the flags,
    // or recompile OpenCV with OpenGL support (you may also need the gtk OpenGL
    // extension on Linux, provided by the packages libgtkglext1 libgtkglext1-dev).
    cv::namedWindow(mouseStruct.name, cv::WINDOW_AUTOSIZE | cv::WINDOW_OPENGL);
    cv::setMouseCallback(mouseStruct.name, onMouseCallback, (void*) &mouseStruct);
    cv::namedWindow("VIEW", cv::WINDOW_AUTOSIZE | cv::WINDOW_OPENGL);

    std::cout << "Press 'q' to exit" << std::endl;

    // Jetson only. Execute the calling thread on core 2
    sl::zed::Camera::sticktoCPUCore(2);

    sl::zed::ZED_SELF_CALIBRATION_STATUS old_self_calibration_status = sl::zed::SELF_CALIBRATION_NOT_CALLED;

    // Loop until 'q' is pressed
    while (key != 'q') {

        // Disparity Map filtering
        zed->setConfidenceThreshold(confidenceThres);

        // Get frames and launch the computation
        bool res = zed->grab(dm_type);

        if (!res) {
            if (old_self_calibration_status != zed->getSelfCalibrationStatus()) {
                std::cout << "Self Calibration Status : " << sl::zed::statuscode2str(zed->getSelfCalibrationStatus()) << std::endl;
                old_self_calibration_status = zed->getSelfCalibrationStatus();
            }

            depth = zed->retrieveMeasure(sl::zed::MEASURE::DEPTH); // Get the pointer

            // The following is the best way to retrieve a disparity map / image / confidence map in an OpenCV Mat.
            // If the buffer is not duplicated, it will be replaced by the next retrieve (retrieveImage, normalizeMeasure, getView...)
            // Disparity, depth, confidence are 32F buffers by default and 8UC4 buffers in normalized format (displayable grayscale)

            // -- The next part is about displaying the data --

            // Normalize the disparity / depth map in order to use the full color range of a gray level image
            if (displayDisp)
                slMat2cvMat(zed->normalizeMeasure(sl::zed::MEASURE::DISPARITY)).copyTo(disp);
            else
                slMat2cvMat(zed->normalizeMeasure(sl::zed::MEASURE::DEPTH)).copyTo(disp);

            // To get the depth at a given position, click on the disparity / depth map image
            cv::resize(disp, dispDisplay, displaySize);
            imshow(mouseStruct.name, dispDisplay);

            if (displayConfidenceMap) {
                slMat2cvMat(zed->normalizeMeasure(sl::zed::MEASURE::CONFIDENCE)).copyTo(confidencemap);
                cv::resize(confidencemap, confidencemapDisplay, displaySize);
                imshow("confidence", confidencemapDisplay);
            }

            // 'viewID' can be in 'SIDE mode' or 'VIEW mode'
            if (viewID >= sl::zed::LEFT && viewID < sl::zed::LAST_SIDE)
                slMat2cvMat(zed->retrieveImage(static_cast<sl::zed::SIDE> (viewID))).copyTo(anaglyph);
            else
                slMat2cvMat(zed->getView(static_cast<sl::zed::VIEW_MODE> (viewID - (int) sl::zed::LAST_SIDE))).copyTo(anaglyph);

            cv::resize(anaglyph, anaglyphDisplay, displaySize);
            imshow("VIEW", anaglyphDisplay);

            key = cv::waitKey(5);

            // Keyboard shortcuts
            switch (key) {
                case 'b':
                    if (confidenceThres >= 10)
                        confidenceThres -= 10;
                    break;
                case 'n':
                    if (confidenceThres <= 90)
                        confidenceThres += 10;
                    break;

                // From 'SIDE' enum
                case '0': // Left
                    viewID = 0;
                    std::cout << "Current View switched to Left (rectified/aligned)" << std::endl;
                    break;
                case '1': // Right
                    viewID = 1;
                    std::cout << "Current View switched to Right (rectified/aligned)" << std::endl;
                    break;

                // From 'VIEW' enum
                case '2': // Side by Side
                    viewID = 10;
                    std::cout << "Current View switched to Side by Side mode" << std::endl;
                    break;
                case '3': // Overlay
                    viewID = 11;
                    std::cout << "Current View switched to Overlay mode" << std::endl;
                    break;
                case '4': // Difference
                    viewID = 9;
                    std::cout << "Current View switched to Difference mode" << std::endl;
                    break;
                case '5': // Anaglyph
                    viewID = 8;
                    std::cout << "Current View switched to Anaglyph mode" << std::endl;
                    break;

                case 'c':
                    displayConfidenceMap = !displayConfidenceMap;
                    break;
                case 's':
                    dm_type = (dm_type == sl::zed::SENSING_MODE::STANDARD) ? sl::zed::SENSING_MODE::FILL : sl::zed::SENSING_MODE::STANDARD;
                    std::cout << "SENSING_MODE " << sensing_mode2str(dm_type) << std::endl;
                    break;
                case 'd':
                    displayDisp = !displayDisp;
                    break;
            }
        } else
            key = cv::waitKey(5);
    }

    delete zed;
    return 0;
}