// Points mVPXImageWrapper's plane pointers/strides at I420 data for aChunk's
// frame, converting from NV12/NV21/I444/I422 into the scratch buffer
// mI420Frame when the source layout is not already 4:2:0 planar.
// Returns NS_ERROR_FAILURE for non-planar or invalid images and
// NS_ERROR_NOT_IMPLEMENTED for planar layouts we cannot convert.
nsresult VP8TrackEncoder::PrepareRawFrame(VideoChunk &aChunk)
{
  nsRefPtr<Image> img;
  if (aChunk.mFrame.GetForceBlack() || aChunk.IsNull()) {
    // Muted / null chunks are encoded as a single lazily-created black frame
    // that is cached across calls.
    if (!mMuteFrame) {
      mMuteFrame = VideoFrame::CreateBlackImage(gfxIntSize(mFrameWidth, mFrameHeight));
      MOZ_ASSERT(mMuteFrame);
    }
    img = mMuteFrame;
  } else {
    img = aChunk.mFrame.GetImage();
  }
  ImageFormat format = img->GetFormat();
  if (format != ImageFormat::PLANAR_YCBCR) {
    VP8LOG("Unsupported video format\n");
    return NS_ERROR_FAILURE;
  }

  // Cast away constness b/c some of the accessors are non-const
  PlanarYCbCrImage* yuv =
    const_cast<PlanarYCbCrImage *>(static_cast<const PlanarYCbCrImage *>(img.get()));
  // Big-time assumption here that this is all contiguous data coming
  // from getUserMedia or other sources.
  MOZ_ASSERT(yuv);
  if (!yuv->IsValid()) {
    NS_WARNING("PlanarYCbCrImage is not valid");
    return NS_ERROR_FAILURE;
  }
  const PlanarYCbCrImage::Data *data = yuv->GetData();

  if (isYUV420(data) && !data->mCbSkip) { // 420 planar
    // Already the layout libvpx wants: alias the source planes directly,
    // no copy needed.
    mVPXImageWrapper->planes[VPX_PLANE_Y] = data->mYChannel;
    mVPXImageWrapper->planes[VPX_PLANE_U] = data->mCbChannel;
    mVPXImageWrapper->planes[VPX_PLANE_V] = data->mCrChannel;
    mVPXImageWrapper->stride[VPX_PLANE_Y] = data->mYStride;
    mVPXImageWrapper->stride[VPX_PLANE_U] = data->mCbCrStride;
    mVPXImageWrapper->stride[VPX_PLANE_V] = data->mCbCrStride;
  } else {
    // Convert into mI420Frame. Chroma planes are half-resolution, rounded up.
    uint32_t yPlaneSize = mFrameWidth * mFrameHeight;
    uint32_t halfWidth = (mFrameWidth + 1) / 2;
    uint32_t halfHeight = (mFrameHeight + 1) / 2;
    uint32_t uvPlaneSize = halfWidth * halfHeight;
    // Scratch buffer is allocated once and reused for subsequent frames
    // (frame dimensions are fixed for the lifetime of the encoder).
    if (mI420Frame.IsEmpty()) {
      mI420Frame.SetLength(yPlaneSize + uvPlaneSize * 2);
    }
    MOZ_ASSERT(mI420Frame.Length() >= (yPlaneSize + uvPlaneSize * 2));
    uint8_t *y = mI420Frame.Elements();
    uint8_t *cb = mI420Frame.Elements() + yPlaneSize;
    uint8_t *cr = mI420Frame.Elements() + yPlaneSize + uvPlaneSize;

    if (isYUV420(data) && data->mCbSkip) {
      // If mCbSkip is set, we assume it's nv12 or nv21.
      // Interleaved chroma: channel ordering distinguishes the two.
      if (data->mCbChannel < data->mCrChannel) { // nv12
        libyuv::NV12ToI420(data->mYChannel, data->mYStride,
                           data->mCbChannel, data->mCbCrStride,
                           y, mFrameWidth,
                           cb, halfWidth,
                           cr, halfWidth,
                           mFrameWidth, mFrameHeight);
      } else { // nv21
        libyuv::NV21ToI420(data->mYChannel, data->mYStride,
                           data->mCrChannel, data->mCbCrStride,
                           y, mFrameWidth,
                           cb, halfWidth,
                           cr, halfWidth,
                           mFrameWidth, mFrameHeight);
      }
    } else if (isYUV444(data) && !data->mCbSkip) {
      libyuv::I444ToI420(data->mYChannel, data->mYStride,
                         data->mCbChannel, data->mCbCrStride,
                         data->mCrChannel, data->mCbCrStride,
                         y, mFrameWidth,
                         cb, halfWidth,
                         cr, halfWidth,
                         mFrameWidth, mFrameHeight);
    } else if (isYUV422(data) && !data->mCbSkip) {
      libyuv::I422ToI420(data->mYChannel, data->mYStride,
                         data->mCbChannel, data->mCbCrStride,
                         data->mCrChannel, data->mCbCrStride,
                         y, mFrameWidth,
                         cb, halfWidth,
                         cr, halfWidth,
                         mFrameWidth, mFrameHeight);
    } else {
      VP8LOG("Unsupported planar format\n");
      return NS_ERROR_NOT_IMPLEMENTED;
    }

    mVPXImageWrapper->planes[VPX_PLANE_Y] = y;
    mVPXImageWrapper->planes[VPX_PLANE_U] = cb;
    mVPXImageWrapper->planes[VPX_PLANE_V] = cr;
    mVPXImageWrapper->stride[VPX_PLANE_Y] = mFrameWidth;
    mVPXImageWrapper->stride[VPX_PLANE_U] = halfWidth;
    mVPXImageWrapper->stride[VPX_PLANE_V] = halfWidth;
  }
  return NS_OK;
}
// Pushes the container's current image to the compositor through
// mDeprecatedTextureClient, choosing an upload path per image format.
// Returns false on failure; true if the image was already painted
// (serial match) or was forwarded successfully.
bool
DeprecatedImageClientSingle::UpdateImage(ImageContainer* aContainer,
                                         uint32_t aContentFlags)
{
  AutoLockImage autoLock(aContainer);
  Image *image = autoLock.GetImage();
  if (!image) {
    return false;
  }
  // Serial unchanged since the last paint: nothing to upload.
  if (mLastPaintedImageSerial == image->GetSerial()) {
    return true;
  }

  if (image->GetFormat() == PLANAR_YCBCR &&
      EnsureDeprecatedTextureClient(TEXTURE_YCBCR)) {
    PlanarYCbCrImage* ycbcr = static_cast<PlanarYCbCrImage*>(image);

    if (ycbcr->AsDeprecatedSharedPlanarYCbCrImage()) {
      // Image already lives in shared memory: swap its descriptor in,
      // destroying any previously-held surface first.
      AutoLockDeprecatedTextureClient lock(mDeprecatedTextureClient);

      SurfaceDescriptor sd;
      if (!ycbcr->AsDeprecatedSharedPlanarYCbCrImage()->ToSurfaceDescriptor(sd)) {
        return false;
      }

      if (IsSurfaceDescriptorValid(*lock.GetSurfaceDescriptor())) {
        GetForwarder()->DestroySharedSurface(lock.GetSurfaceDescriptor());
      }

      *lock.GetSurfaceDescriptor() = sd;
    } else {
      // Non-shared YCbCr image: copy the planes into the texture client.
      AutoLockYCbCrClient clientLock(mDeprecatedTextureClient);
      if (!clientLock.Update(ycbcr)) {
        NS_WARNING("failed to update DeprecatedTextureClient (YCbCr)");
        return false;
      }
    }
  } else if (image->GetFormat() == SHARED_TEXTURE &&
             EnsureDeprecatedTextureClient(TEXTURE_SHARED_GL_EXTERNAL)) {
    // GL shared handle: forward the handle, no pixel copy.
    SharedTextureImage* sharedImage = static_cast<SharedTextureImage*>(image);
    const SharedTextureImage::Data *data = sharedImage->GetData();
    SharedTextureDescriptor texture(data->mShareType, data->mHandle, data->mSize, data->mInverted);
    mDeprecatedTextureClient->SetDescriptor(SurfaceDescriptor(texture));
  } else if (image->GetFormat() == SHARED_RGB &&
             EnsureDeprecatedTextureClient(TEXTURE_SHMEM)) {
    nsIntRect rect(0, 0,
                   image->GetSize().width,
                   image->GetSize().height);
    UpdatePictureRect(rect);

    AutoLockDeprecatedTextureClient lock(mDeprecatedTextureClient);

    SurfaceDescriptor desc;
    if (!static_cast<DeprecatedSharedRGBImage*>(image)->ToSurfaceDescriptor(desc)) {
      return false;
    }
    mDeprecatedTextureClient->SetDescriptor(desc);
#ifdef MOZ_WIDGET_GONK
  } else if (image->GetFormat() == GONK_IO_SURFACE &&
             EnsureDeprecatedTextureClient(TEXTURE_SHARED_GL_EXTERNAL)) {
    nsIntRect rect(0, 0,
                   image->GetSize().width,
                   image->GetSize().height);
    UpdatePictureRect(rect);
    AutoLockDeprecatedTextureClient lock(mDeprecatedTextureClient);
    SurfaceDescriptor desc = static_cast<GonkIOSurfaceImage*>(image)->GetSurfaceDescriptor();
    if (!IsSurfaceDescriptorValid(desc)) {
      return false;
    }
    mDeprecatedTextureClient->SetDescriptor(desc);
  } else if (image->GetFormat() == GRALLOC_PLANAR_YCBCR) {
    // NOTE(review): unlike the sibling branches, this result of
    // EnsureDeprecatedTextureClient is not checked — presumably it cannot
    // fail for TEXTURE_SHARED_GL_EXTERNAL on gonk; verify.
    EnsureDeprecatedTextureClient(TEXTURE_SHARED_GL_EXTERNAL);
    nsIntRect rect(0, 0,
                   image->GetSize().width,
                   image->GetSize().height);
    UpdatePictureRect(rect);
    AutoLockDeprecatedTextureClient lock(mDeprecatedTextureClient);
    SurfaceDescriptor desc = static_cast<GrallocPlanarYCbCrImage*>(image)->GetSurfaceDescriptor();
    if (!IsSurfaceDescriptorValid(desc)) {
      return false;
    }
    mDeprecatedTextureClient->SetDescriptor(desc);
#endif
  } else {
    // Fallback: rasterize the image to a surface and upload via shmem.
    nsRefPtr<gfxASurface> surface = image->GetAsSurface();
    MOZ_ASSERT(surface);
    EnsureDeprecatedTextureClient(TEXTURE_SHMEM);
    MOZ_ASSERT(mDeprecatedTextureClient, "Failed to create texture client");
    AutoLockShmemClient clientLock(mDeprecatedTextureClient);
    if (!clientLock.Update(image, aContentFlags, surface)) {
      NS_WARNING("failed to update DeprecatedTextureClient");
      return false;
    }
  }
  Updated();
  if (image->GetFormat() == PLANAR_YCBCR) {
    PlanarYCbCrImage* ycbcr = static_cast<PlanarYCbCrImage*>(image);
    UpdatePictureRect(ycbcr->GetData()->GetPictureRect());
  }
  mLastPaintedImageSerial = image->GetSerial();
  aContainer->NotifyPaintedImage(image);
  return true;
}
void ImageLayerD3D10::RenderLayer() { ImageContainer *container = GetContainer(); if (!container) { return; } AutoLockImage autoLock(container); Image *image = autoLock.GetImage(); if (!image) { return; } gfxIntSize size = mScaleMode == SCALE_NONE ? image->GetSize() : mScaleToSize; SetEffectTransformAndOpacity(); ID3D10EffectTechnique *technique; if (image->GetFormat() == Image::CAIRO_SURFACE || image->GetFormat() == Image::REMOTE_IMAGE_BITMAP) { bool hasAlpha = false; if (image->GetFormat() == Image::REMOTE_IMAGE_BITMAP) { RemoteBitmapImage *remoteImage = static_cast<RemoteBitmapImage*>(image); if (!image->GetBackendData(LayerManager::LAYERS_D3D10)) { nsAutoPtr<TextureD3D10BackendData> dat = new TextureD3D10BackendData(); dat->mTexture = DataToTexture(device(), remoteImage->mData, remoteImage->mStride, remoteImage->mSize); if (dat->mTexture) { device()->CreateShaderResourceView(dat->mTexture, NULL, getter_AddRefs(dat->mSRView)); image->SetBackendData(LayerManager::LAYERS_D3D10, dat.forget()); } } hasAlpha = remoteImage->mFormat == RemoteImageData::BGRA32; } else { CairoImage *cairoImage = static_cast<CairoImage*>(image); if (!cairoImage->mSurface) { return; } if (!image->GetBackendData(LayerManager::LAYERS_D3D10)) { nsAutoPtr<TextureD3D10BackendData> dat = new TextureD3D10BackendData(); dat->mTexture = SurfaceToTexture(device(), cairoImage->mSurface, cairoImage->mSize); if (dat->mTexture) { device()->CreateShaderResourceView(dat->mTexture, NULL, getter_AddRefs(dat->mSRView)); image->SetBackendData(LayerManager::LAYERS_D3D10, dat.forget()); } } hasAlpha = cairoImage->mSurface->GetContentType() == gfxASurface::CONTENT_COLOR_ALPHA; } TextureD3D10BackendData *data = static_cast<TextureD3D10BackendData*>(image->GetBackendData(LayerManager::LAYERS_D3D10)); if (!data) { return; } nsRefPtr<ID3D10Device> dev; data->mTexture->GetDevice(getter_AddRefs(dev)); if (dev != device()) { return; } if (hasAlpha) { if (mFilter == gfxPattern::FILTER_NEAREST) { technique = 
effect()->GetTechniqueByName("RenderRGBALayerPremulPoint"); } else { technique = effect()->GetTechniqueByName("RenderRGBALayerPremul"); } } else { if (mFilter == gfxPattern::FILTER_NEAREST) { technique = effect()->GetTechniqueByName("RenderRGBLayerPremulPoint"); } else { technique = effect()->GetTechniqueByName("RenderRGBLayerPremul"); } } effect()->GetVariableByName("tRGB")->AsShaderResource()->SetResource(data->mSRView); effect()->GetVariableByName("vLayerQuad")->AsVector()->SetFloatVector( ShaderConstantRectD3D10( (float)0, (float)0, (float)size.width, (float)size.height) ); } else if (image->GetFormat() == Image::PLANAR_YCBCR) { PlanarYCbCrImage *yuvImage = static_cast<PlanarYCbCrImage*>(image); if (!yuvImage->mBufferSize) { return; } if (!yuvImage->GetBackendData(LayerManager::LAYERS_D3D10)) { AllocateTexturesYCbCr(yuvImage); } PlanarYCbCrD3D10BackendData *data = static_cast<PlanarYCbCrD3D10BackendData*>(yuvImage->GetBackendData(LayerManager::LAYERS_D3D10)); if (!data) { return; } nsRefPtr<ID3D10Device> dev; data->mYTexture->GetDevice(getter_AddRefs(dev)); if (dev != device()) { return; } // TODO: At some point we should try to deal with mFilter here, you don't // really want to use point filtering in the case of NEAREST, since that // would also use point filtering for Chroma upsampling. Where most likely // the user would only want point filtering for final RGB image upsampling. 
technique = effect()->GetTechniqueByName("RenderYCbCrLayer"); effect()->GetVariableByName("tY")->AsShaderResource()->SetResource(data->mYView); effect()->GetVariableByName("tCb")->AsShaderResource()->SetResource(data->mCbView); effect()->GetVariableByName("tCr")->AsShaderResource()->SetResource(data->mCrView); /* * Send 3d control data and metadata to NV3DVUtils */ if (GetNv3DVUtils()) { Nv_Stereo_Mode mode; switch (yuvImage->mData.mStereoMode) { case STEREO_MODE_LEFT_RIGHT: mode = NV_STEREO_MODE_LEFT_RIGHT; break; case STEREO_MODE_RIGHT_LEFT: mode = NV_STEREO_MODE_RIGHT_LEFT; break; case STEREO_MODE_BOTTOM_TOP: mode = NV_STEREO_MODE_BOTTOM_TOP; break; case STEREO_MODE_TOP_BOTTOM: mode = NV_STEREO_MODE_TOP_BOTTOM; break; case STEREO_MODE_MONO: mode = NV_STEREO_MODE_MONO; break; } // Send control data even in mono case so driver knows to leave stereo mode. GetNv3DVUtils()->SendNv3DVControl(mode, true, FIREFOX_3DV_APP_HANDLE); if (yuvImage->mData.mStereoMode != STEREO_MODE_MONO) { // Dst resource is optional GetNv3DVUtils()->SendNv3DVMetaData((unsigned int)yuvImage->mSize.width, (unsigned int)yuvImage->mSize.height, (HANDLE)(data->mYTexture), (HANDLE)(NULL)); } } effect()->GetVariableByName("vLayerQuad")->AsVector()->SetFloatVector( ShaderConstantRectD3D10( (float)0, (float)0, (float)size.width, (float)size.height) ); effect()->GetVariableByName("vTextureCoords")->AsVector()->SetFloatVector( ShaderConstantRectD3D10( (float)yuvImage->mData.mPicX / yuvImage->mData.mYSize.width, (float)yuvImage->mData.mPicY / yuvImage->mData.mYSize.height, (float)yuvImage->mData.mPicSize.width / yuvImage->mData.mYSize.width, (float)yuvImage->mData.mPicSize.height / yuvImage->mData.mYSize.height) ); } bool resetTexCoords = image->GetFormat() == Image::PLANAR_YCBCR; image = nsnull; autoLock.Unlock(); technique->GetPassByIndex(0)->Apply(0); device()->Draw(4, 0); if (resetTexCoords) { effect()->GetVariableByName("vTextureCoords")->AsVector()-> SetFloatVector(ShaderConstantRectD3D10(0, 0, 
1.0f, 1.0f)); } GetContainer()->NotifyPaintedImage(image); }
void ImageLayerD3D10::RenderLayer() { ImageContainer *container = GetContainer(); if (!container) { return; } AutoLockImage autoLock(container); Image *image = autoLock.GetImage(); if (!image) { return; } gfxIntSize size = image->GetSize(); SetEffectTransformAndOpacity(); ID3D10EffectTechnique *technique; nsRefPtr<IDXGIKeyedMutex> keyedMutex; if (image->GetFormat() == ImageFormat::CAIRO_SURFACE || image->GetFormat() == ImageFormat::REMOTE_IMAGE_BITMAP || image->GetFormat() == ImageFormat::REMOTE_IMAGE_DXGI_TEXTURE || image->GetFormat() == ImageFormat::D3D9_RGB32_TEXTURE) { NS_ASSERTION(image->GetFormat() != ImageFormat::CAIRO_SURFACE || !static_cast<CairoImage*>(image)->mSurface || static_cast<CairoImage*>(image)->mSurface->GetContentType() != gfxASurface::CONTENT_ALPHA, "Image layer has alpha image"); bool hasAlpha = false; nsRefPtr<ID3D10ShaderResourceView> srView = GetImageSRView(image, hasAlpha, getter_AddRefs(keyedMutex)); if (!srView) { return; } uint8_t shaderFlags = SHADER_PREMUL; shaderFlags |= LoadMaskTexture(); shaderFlags |= hasAlpha ? SHADER_RGBA : SHADER_RGB; shaderFlags |= mFilter == gfxPattern::FILTER_NEAREST ? 
SHADER_POINT : SHADER_LINEAR; technique = SelectShader(shaderFlags); effect()->GetVariableByName("tRGB")->AsShaderResource()->SetResource(srView); effect()->GetVariableByName("vLayerQuad")->AsVector()->SetFloatVector( ShaderConstantRectD3D10( (float)0, (float)0, (float)size.width, (float)size.height) ); } else if (image->GetFormat() == ImageFormat::PLANAR_YCBCR) { PlanarYCbCrImage *yuvImage = static_cast<PlanarYCbCrImage*>(image); if (!yuvImage->IsValid()) { return; } if (!yuvImage->GetBackendData(mozilla::layers::LAYERS_D3D10)) { AllocateTexturesYCbCr(yuvImage); } PlanarYCbCrD3D10BackendData *data = static_cast<PlanarYCbCrD3D10BackendData*>(yuvImage->GetBackendData(mozilla::layers::LAYERS_D3D10)); if (!data) { return; } nsRefPtr<ID3D10Device> dev; data->mYTexture->GetDevice(getter_AddRefs(dev)); if (dev != device()) { return; } // TODO: At some point we should try to deal with mFilter here, you don't // really want to use point filtering in the case of NEAREST, since that // would also use point filtering for Chroma upsampling. Where most likely // the user would only want point filtering for final RGB image upsampling. 
technique = SelectShader(SHADER_YCBCR | LoadMaskTexture()); effect()->GetVariableByName("tY")->AsShaderResource()->SetResource(data->mYView); effect()->GetVariableByName("tCb")->AsShaderResource()->SetResource(data->mCbView); effect()->GetVariableByName("tCr")->AsShaderResource()->SetResource(data->mCrView); effect()->GetVariableByName("vLayerQuad")->AsVector()->SetFloatVector( ShaderConstantRectD3D10( (float)0, (float)0, (float)size.width, (float)size.height) ); effect()->GetVariableByName("vTextureCoords")->AsVector()->SetFloatVector( ShaderConstantRectD3D10( (float)yuvImage->GetData()->mPicX / yuvImage->GetData()->mYSize.width, (float)yuvImage->GetData()->mPicY / yuvImage->GetData()->mYSize.height, (float)yuvImage->GetData()->mPicSize.width / yuvImage->GetData()->mYSize.width, (float)yuvImage->GetData()->mPicSize.height / yuvImage->GetData()->mYSize.height) ); } bool resetTexCoords = image->GetFormat() == ImageFormat::PLANAR_YCBCR; image = nullptr; autoLock.Unlock(); technique->GetPassByIndex(0)->Apply(0); device()->Draw(4, 0); if (keyedMutex) { keyedMutex->ReleaseSync(0); } if (resetTexCoords) { effect()->GetVariableByName("vTextureCoords")->AsVector()-> SetFloatVector(ShaderConstantRectD3D10(0, 0, 1.0f, 1.0f)); } GetContainer()->NotifyPaintedImage(image); }
// Forwards the container's current image list to the compositor as timed
// texture clients, reusing previously-created TextureClients (tracked in
// mBuffers by image serial) and creating new ones for images not backed by a
// TextureClient. Returns false on an unrecoverable failure.
bool
ImageClientSingle::UpdateImage(ImageContainer* aContainer, uint32_t aContentFlags)
{
  AutoTArray<ImageContainer::OwningImage,4> images;
  uint32_t generationCounter;
  aContainer->GetCurrentImages(&images, &generationCounter);

  // Nothing changed in the container since the last update.
  if (mLastUpdateGenerationCounter == generationCounter) {
    return true;
  }
  mLastUpdateGenerationCounter = generationCounter;

  // Iterate in reverse so RemoveElementAt doesn't disturb unvisited indices.
  for (int32_t i = images.Length() - 1; i >= 0; --i) {
    if (!images[i].mImage->IsValid()) {
      // Don't try to update to an invalid image.
      images.RemoveElementAt(i);
    }
  }

  if (images.IsEmpty()) {
    // This can happen if a ClearAllImages raced with SetCurrentImages from
    // another thread and ClearImagesFromImageBridge ran after the
    // SetCurrentImages call but before UpdateImageClientNow.
    // This can also happen if all images in the list are invalid.
    // We return true because the caller would attempt to recreate the
    // ImageClient otherwise, and that isn't going to help.
    return true;
  }

  nsTArray<Buffer> newBuffers;
  AutoTArray<CompositableForwarder::TimedTextureClient,4> textures;

  for (auto& img : images) {
    Image* image = img.mImage;

#ifdef MOZ_WIDGET_GONK
    if (image->GetFormat() == ImageFormat::OVERLAY_IMAGE) {
      // Overlay images bypass the texture path entirely and are forwarded as
      // an OverlaySource.
      OverlayImage* overlayImage = static_cast<OverlayImage*>(image);
      OverlaySource source;
      if (overlayImage->GetSidebandStream().IsValid()) {
        // Duplicate GonkNativeHandle::NhObj for ipc,
        // since ParamTraits<GonkNativeHandle>::Write() absorbs native_handle_t.
        RefPtr<GonkNativeHandle::NhObj> nhObj =
          overlayImage->GetSidebandStream().GetDupNhObj();
        GonkNativeHandle handle(nhObj);
        if (!handle.IsValid()) {
          gfxWarning() << "ImageClientSingle::UpdateImage failed in GetDupNhObj";
          return false;
        }
        source.handle() = OverlayHandle(handle);
      } else {
        source.handle() = OverlayHandle(overlayImage->GetOverlayId());
      }
      source.size() = overlayImage->GetSize();
      GetForwarder()->UseOverlaySource(this, source, image->GetPictureRect());
      continue;
    }
#endif

    RefPtr<TextureClient> texture = image->GetTextureClient(this);
    const bool hasTextureClient = !!texture;

    // Reuse the TextureClient previously created for this image serial, if
    // any; surviving entries in mBuffers are retired at the end.
    for (int32_t i = mBuffers.Length() - 1; i >= 0; --i) {
      if (mBuffers[i].mImageSerial == image->GetSerial()) {
        if (hasTextureClient) {
          MOZ_ASSERT(image->GetTextureClient(this) == mBuffers[i].mTextureClient);
        } else {
          texture = mBuffers[i].mTextureClient;
        }
        // Remove this element from mBuffers so mBuffers only contains
        // images that aren't present in 'images'
        mBuffers.RemoveElementAt(i);
      }
    }

    if (!texture) {
      // Slow path, we should not be hitting it very often and if we do it means
      // we are using an Image class that is not backed by textureClient and we
      // should fix it.
      if (image->GetFormat() == ImageFormat::PLANAR_YCBCR) {
        PlanarYCbCrImage* ycbcr = static_cast<PlanarYCbCrImage*>(image);
        const PlanarYCbCrData* data = ycbcr->GetData();
        if (!data) {
          return false;
        }
        texture = TextureClient::CreateForYCbCr(GetForwarder(),
          data->mYSize, data->mCbCrSize, data->mStereoMode,
          TextureFlags::DEFAULT | mTextureFlags
        );
        if (!texture) {
          return false;
        }

        TextureClientAutoLock autoLock(texture, OpenMode::OPEN_WRITE_ONLY);
        if (!autoLock.Succeeded()) {
          return false;
        }

        bool status = UpdateYCbCrTextureClient(texture, *data);
        MOZ_ASSERT(status);
        if (!status) {
          return false;
        }
      } else if (image->GetFormat() == ImageFormat::SURFACE_TEXTURE ||
                 image->GetFormat() == ImageFormat::EGLIMAGE) {
        gfx::IntSize size = image->GetSize();

        if (image->GetFormat() == ImageFormat::EGLIMAGE) {
          EGLImageImage* typedImage = image->AsEGLImageImage();
          texture = EGLImageTextureData::CreateTextureClient(
            typedImage, size, GetForwarder(), mTextureFlags);
#ifdef MOZ_WIDGET_ANDROID
        } else if (image->GetFormat() == ImageFormat::SURFACE_TEXTURE) {
          SurfaceTextureImage* typedImage = image->AsSurfaceTextureImage();
          texture = AndroidSurfaceTextureData::CreateTextureClient(
            typedImage->GetSurfaceTexture(), size, typedImage->GetOriginPos(),
            GetForwarder(), mTextureFlags
          );
#endif
        } else {
          MOZ_ASSERT(false, "Bad ImageFormat.");
        }
      } else {
        // Generic fallback: rasterize to a SourceSurface and copy it into a
        // freshly created drawing TextureClient.
        RefPtr<gfx::SourceSurface> surface = image->GetAsSourceSurface();
        MOZ_ASSERT(surface);
        texture = CreateTextureClientForDrawing(surface->GetFormat(), image->GetSize(),
                                                BackendSelector::Content, mTextureFlags);
        if (!texture) {
          return false;
        }

        MOZ_ASSERT(texture->CanExposeDrawTarget());

        if (!texture->Lock(OpenMode::OPEN_WRITE_ONLY)) {
          return false;
        }

        {
          // We must not keep a reference to the DrawTarget after it has been unlocked.
          DrawTarget* dt = texture->BorrowDrawTarget();
          if (!dt) {
            gfxWarning() << "ImageClientSingle::UpdateImage failed in BorrowDrawTarget";
            return false;
          }
          MOZ_ASSERT(surface.get());
          dt->CopySurface(surface, IntRect(IntPoint(), surface->GetSize()), IntPoint());
        }

        texture->Unlock();
      }
    }
    if (!texture || !AddTextureClient(texture)) {
      return false;
    }

    CompositableForwarder::TimedTextureClient* t = textures.AppendElement();
    t->mTextureClient = texture;
    t->mTimeStamp = img.mTimeStamp;
    t->mPictureRect = image->GetPictureRect();
    t->mFrameID = img.mFrameID;
    t->mProducerID = img.mProducerID;

    Buffer* newBuf = newBuffers.AppendElement();
    newBuf->mImageSerial = image->GetSerial();
    newBuf->mTextureClient = texture;

    texture->SyncWithObject(GetForwarder()->GetSyncObject());
  }

  GetForwarder()->UseTextures(this, textures);

  // Whatever is left in mBuffers was not reused this frame; release it.
  for (auto& b : mBuffers) {
    RemoveTexture(b.mTextureClient);
  }
  mBuffers.SwapElements(newBuffers);

  return true;
}
// Forwards the container's current (single) image to the compositor via a
// front-buffer TextureClient, creating one for images that are not already
// texture-backed. Returns false on an unrecoverable failure.
bool
ImageClientSingle::UpdateImage(ImageContainer* aContainer,
                               uint32_t aContentFlags)
{
  AutoLockImage autoLock(aContainer);

  Image *image = autoLock.GetImage();
  if (!image) {
    return false;
  }

  // Don't try to update to an invalid image. We return true because the caller
  // would attempt to recreate the ImageClient otherwise, and that isn't going
  // to help.
  if (!image->IsValid()) {
    return true;
  }

  // Serial unchanged since the last paint: nothing to upload.
  if (mLastPaintedImageSerial == image->GetSerial()) {
    return true;
  }

  RefPtr<TextureClient> texture = image->GetTextureClient(this);

  // The old front buffer is retired once we commit a different texture.
  AutoRemoveTexture autoRemoveTexture(this);
  if (texture != mFrontBuffer) {
    autoRemoveTexture.mTexture = mFrontBuffer;
    mFrontBuffer = nullptr;
  }

  if (!texture) {
    // Slow path, we should not be hitting it very often and if we do it means
    // we are using an Image class that is not backed by textureClient and we
    // should fix it.
    if (image->GetFormat() == ImageFormat::PLANAR_YCBCR) {
      PlanarYCbCrImage* ycbcr = static_cast<PlanarYCbCrImage*>(image);
      const PlanarYCbCrData* data = ycbcr->GetData();
      if (!data) {
        return false;
      }
      texture = TextureClient::CreateForYCbCr(GetForwarder(),
                                              data->mYSize, data->mCbCrSize, data->mStereoMode,
                                              TextureFlags::DEFAULT | mTextureFlags
                                              );
      if (!texture || !texture->Lock(OpenMode::OPEN_WRITE_ONLY)) {
        return false;
      }
      bool status = texture->AsTextureClientYCbCr()->UpdateYCbCr(*data);
      MOZ_ASSERT(status);

      texture->Unlock();
      if (!status) {
        return false;
      }

    } else if (image->GetFormat() == ImageFormat::SURFACE_TEXTURE ||
               image->GetFormat() == ImageFormat::EGLIMAGE) {
      gfx::IntSize size = image->GetSize();

      if (image->GetFormat() == ImageFormat::EGLIMAGE) {
        EGLImageImage* typedImage = static_cast<EGLImageImage*>(image);
        texture = new EGLImageTextureClient(GetForwarder(),
                                            mTextureFlags,
                                            typedImage,
                                            size);
#ifdef MOZ_WIDGET_ANDROID
      } else if (image->GetFormat() == ImageFormat::SURFACE_TEXTURE) {
        SurfaceTextureImage* typedImage = static_cast<SurfaceTextureImage*>(image);
        const SurfaceTextureImage::Data* data = typedImage->GetData();
        texture = new SurfaceTextureClient(GetForwarder(), mTextureFlags,
                                           data->mSurfTex, size,
                                           data->mOriginPos);
#endif
      } else {
        MOZ_ASSERT(false, "Bad ImageFormat.");
      }
    } else {
      // Generic fallback: rasterize to a SourceSurface and copy it into a
      // freshly created drawing TextureClient.
      RefPtr<gfx::SourceSurface> surface = image->GetAsSourceSurface();
      MOZ_ASSERT(surface);
      texture = CreateTextureClientForDrawing(surface->GetFormat(), image->GetSize(),
                                              gfx::BackendType::NONE, mTextureFlags);
      if (!texture) {
        return false;
      }

      MOZ_ASSERT(texture->CanExposeDrawTarget());

      if (!texture->Lock(OpenMode::OPEN_WRITE_ONLY)) {
        return false;
      }

      {
        // We must not keep a reference to the DrawTarget after it has been unlocked.
        DrawTarget* dt = texture->BorrowDrawTarget();
        if (!dt) {
          gfxWarning() << "ImageClientSingle::UpdateImage failed in BorrowDrawTarget";
          return false;
        }
        MOZ_ASSERT(surface.get());
        dt->CopySurface(surface, IntRect(IntPoint(), surface->GetSize()), IntPoint());
      }

      texture->Unlock();
    }
  }
  if (!texture || !AddTextureClient(texture)) {
    return false;
  }

  mFrontBuffer = texture;
  GetForwarder()->UseTexture(this, texture);

  UpdatePictureRect(image->GetPictureRect());

  mLastPaintedImageSerial = image->GetSerial();
  aContainer->NotifyPaintedImage(image);

  texture->SyncWithObject(GetForwarder()->GetSyncObject());

  return true;
}
// Pushes the container's current image to the compositor through
// mTextureClient, choosing an upload path per image format. Returns false on
// failure; true if the image was already painted (serial match) or was
// forwarded successfully.
bool
ImageClientSingle::UpdateImage(ImageContainer* aContainer,
                               uint32_t aContentFlags)
{
  AutoLockImage autoLock(aContainer);

  Image *image = autoLock.GetImage();
  if (!image) {
    return false;
  }

  // Serial unchanged since the last paint: nothing to upload.
  if (mLastPaintedImageSerial == image->GetSerial()) {
    return true;
  }

  if (image->GetFormat() == PLANAR_YCBCR) {
    // NOTE(review): the return value of EnsureTextureClient is not checked in
    // any of these branches (unlike the Deprecated variant of this function,
    // which tests it in the condition) — presumably it cannot fail here;
    // verify before relying on mTextureClient below.
    EnsureTextureClient(TEXTURE_YCBCR);
    PlanarYCbCrImage* ycbcr = static_cast<PlanarYCbCrImage*>(image);

    if (ycbcr->AsSharedPlanarYCbCrImage()) {
      // Image already lives in shared memory: swap its descriptor in,
      // destroying any previously-held surface first.
      AutoLockTextureClient lock(mTextureClient);

      SurfaceDescriptor sd;
      if (!ycbcr->AsSharedPlanarYCbCrImage()->ToSurfaceDescriptor(sd)) {
        return false;
      }

      if (IsSurfaceDescriptorValid(*lock.GetSurfaceDescriptor())) {
        GetForwarder()->DestroySharedSurface(lock.GetSurfaceDescriptor());
      }

      *lock.GetSurfaceDescriptor() = sd;
    } else {
      // Non-shared YCbCr image: copy the planes into the texture client.
      AutoLockYCbCrClient clientLock(mTextureClient);

      if (!clientLock.Update(ycbcr)) {
        NS_WARNING("failed to update TextureClient (YCbCr)");
        return false;
      }
    }
  } else if (image->GetFormat() == SHARED_TEXTURE) {
    // GL shared handle: forward the handle, no pixel copy.
    EnsureTextureClient(TEXTURE_SHARED_GL_EXTERNAL);
    SharedTextureImage* sharedImage = static_cast<SharedTextureImage*>(image);
    const SharedTextureImage::Data *data = sharedImage->GetData();

    SharedTextureDescriptor texture(data->mShareType,
                                    data->mHandle,
                                    data->mSize,
                                    data->mInverted);
    mTextureClient->SetDescriptor(SurfaceDescriptor(texture));
  } else if (image->GetFormat() == SHARED_RGB) {
    EnsureTextureClient(TEXTURE_SHMEM);
    nsIntRect rect(0, 0,
                   image->GetSize().width,
                   image->GetSize().height);
    UpdatePictureRect(rect);

    AutoLockTextureClient lock(mTextureClient);

    SurfaceDescriptor desc;
    if (!static_cast<SharedRGBImage*>(image)->ToSurfaceDescriptor(desc)) {
      return false;
    }
    mTextureClient->SetDescriptor(desc);
  } else {
    // Fallback: rasterize the image to a surface and upload via shmem using
    // this client's filter.
    nsRefPtr<gfxASurface> surface = image->GetAsSurface();
    MOZ_ASSERT(surface);

    EnsureTextureClient(TEXTURE_SHMEM);

    nsRefPtr<gfxPattern> pattern = new gfxPattern(surface);
    pattern->SetFilter(mFilter);

    AutoLockShmemClient clientLock(mTextureClient);
    if (!clientLock.Update(image, aContentFlags, pattern)) {
      NS_WARNING("failed to update TextureClient");
      return false;
    }
  }

  Updated();

  if (image->GetFormat() == PLANAR_YCBCR) {
    PlanarYCbCrImage* ycbcr = static_cast<PlanarYCbCrImage*>(image);
    UpdatePictureRect(ycbcr->GetData()->GetPictureRect());
  }

  mLastPaintedImageSerial = image->GetSerial();
  aContainer->NotifyPaintedImage(image);
  return true;
}
// Creates a VideoData holding a PLANAR_YCBCR image copied from aBuffer.
// Validates plane sizes and the picture rect (including overflow checks)
// before copying; returns nsnull and warns/errors on invalid input.
VideoData* VideoData::Create(nsVideoInfo& aInfo,
                             ImageContainer* aContainer,
                             PRInt64 aOffset,
                             PRInt64 aTime,
                             PRInt64 aEndTime,
                             const YCbCrBuffer& aBuffer,
                             bool aKeyframe,
                             PRInt64 aTimecode,
                             nsIntRect aPicture)
{
  if (!aContainer) {
    return nsnull;
  }

  // The following situation should never happen unless there is a bug
  // in the decoder
  if (aBuffer.mPlanes[1].mWidth != aBuffer.mPlanes[2].mWidth ||
      aBuffer.mPlanes[1].mHeight != aBuffer.mPlanes[2].mHeight) {
    NS_ERROR("C planes with different sizes");
    return nsnull;
  }

  // The following situations could be triggered by invalid input
  if (aPicture.width <= 0 || aPicture.height <= 0) {
    NS_WARNING("Empty picture rect");
    return nsnull;
  }
  if (!ValidatePlane(aBuffer.mPlanes[0]) || !ValidatePlane(aBuffer.mPlanes[1]) ||
      !ValidatePlane(aBuffer.mPlanes[2])) {
    NS_WARNING("Invalid plane size");
    return nsnull;
  }

  // Ensure the picture size specified in the headers can be extracted out of
  // the frame we've been supplied without indexing out of bounds.
  PRUint32 xLimit;
  PRUint32 yLimit;
  if (!AddOverflow32(aPicture.x, aPicture.width, xLimit) ||
      xLimit > aBuffer.mPlanes[0].mStride ||
      !AddOverflow32(aPicture.y, aPicture.height, yLimit) ||
      yLimit > aBuffer.mPlanes[0].mHeight)
  {
    // The specified picture dimensions can't be contained inside the video
    // frame, we'll stomp memory if we try to copy it. Fail.
    NS_WARNING("Overflowing picture rect");
    return nsnull;
  }

  nsAutoPtr<VideoData> v(new VideoData(aOffset,
                                       aTime,
                                       aEndTime,
                                       aKeyframe,
                                       aTimecode,
                                       aInfo.mDisplay));
  // Currently our decoder only knows how to output to PLANAR_YCBCR
  // format.
  Image::Format format = Image::PLANAR_YCBCR;
  v->mImage = aContainer->CreateImage(&format, 1);
  if (!v->mImage) {
    return nsnull;
  }
  NS_ASSERTION(v->mImage->GetFormat() == Image::PLANAR_YCBCR,
               "Wrong format?");
  PlanarYCbCrImage* videoImage = static_cast<PlanarYCbCrImage*>(v->mImage.get());

  // Describe the source planes and picture rect; SetData copies the pixels
  // into image-owned storage.
  PlanarYCbCrImage::Data data;
  data.mYChannel = aBuffer.mPlanes[0].mData;
  data.mYSize = gfxIntSize(aBuffer.mPlanes[0].mWidth, aBuffer.mPlanes[0].mHeight);
  data.mYStride = aBuffer.mPlanes[0].mStride;
  data.mCbChannel = aBuffer.mPlanes[1].mData;
  data.mCrChannel = aBuffer.mPlanes[2].mData;
  data.mCbCrSize = gfxIntSize(aBuffer.mPlanes[1].mWidth, aBuffer.mPlanes[1].mHeight);
  data.mCbCrStride = aBuffer.mPlanes[1].mStride;
  data.mPicX = aPicture.x;
  data.mPicY = aPicture.y;
  data.mPicSize = gfxIntSize(aPicture.width, aPicture.height);
  data.mStereoMode = aInfo.mStereoMode;

  videoImage->SetData(data); // Copies buffer
  return v.forget();
}
// Draws the container's current image with OpenGL, dispatching on image
// format: planar YCbCr (three textures + YCbCr program), Cairo surface
// (single RGB texture), GL shared-handle textures, and (on gonk) external
// buffers. Per-image GL backend data is (re)allocated when missing or when
// it was created by a different GL context.
void
ImageLayerOGL::RenderLayer(int,
                           const nsIntPoint& aOffset)
{
  nsRefPtr<ImageContainer> container = GetContainer();

  if (!container || mOGLManager->CompositingDisabled())
    return;

  mOGLManager->MakeCurrent();

  AutoLockImage autoLock(container);

  Image *image = autoLock.GetImage();
  if (!image) {
    return;
  }

  NS_ASSERTION(image->GetFormat() != REMOTE_IMAGE_BITMAP,
    "Remote images aren't handled yet in OGL layers!");

  if (image->GetFormat() == PLANAR_YCBCR) {
    PlanarYCbCrImage *yuvImage = static_cast<PlanarYCbCrImage*>(image);

    if (!yuvImage->IsValid()) {
      return;
    }

    PlanarYCbCrOGLBackendData *data =
      static_cast<PlanarYCbCrOGLBackendData*>(yuvImage->GetBackendData(LAYERS_OPENGL));

    if (data && data->mTextures->GetGLContext() != gl()) {
      // If these textures were allocated by another layer manager,
      // clear them out and re-allocate below.
      data = nullptr;
      yuvImage->SetBackendData(LAYERS_OPENGL, nullptr);
    }

    if (!data) {
      AllocateTexturesYCbCr(yuvImage);
      data = static_cast<PlanarYCbCrOGLBackendData*>(yuvImage->GetBackendData(LAYERS_OPENGL));
    }

    if (!data || data->mTextures->GetGLContext() != gl()) {
      // XXX - Can this ever happen? If so I need to fix this!
      return;
    }

    gl()->MakeCurrent();
    // Bind Y/Cb/Cr planes to texture units 0/1/2 with the layer's filter.
    gl()->fActiveTexture(LOCAL_GL_TEXTURE2);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, data->mTextures[2].GetTextureID());
    gl()->ApplyFilterToBoundTexture(mFilter);
    gl()->fActiveTexture(LOCAL_GL_TEXTURE1);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, data->mTextures[1].GetTextureID());
    gl()->ApplyFilterToBoundTexture(mFilter);
    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, data->mTextures[0].GetTextureID());
    gl()->ApplyFilterToBoundTexture(mFilter);

    ShaderProgramOGL *program = mOGLManager->GetProgram(YCbCrLayerProgramType,
                                                        GetMaskLayer());

    program->Activate();
    program->SetLayerQuadRect(nsIntRect(0, 0,
                                        yuvImage->GetSize().width,
                                        yuvImage->GetSize().height));
    program->SetLayerTransform(GetEffectiveTransform());
    program->SetLayerOpacity(GetEffectiveOpacity());
    program->SetRenderOffset(aOffset);
    program->SetYCbCrTextureUnits(0, 1, 2);
    program->LoadMask(GetMaskLayer());

    // Draw only the picture sub-rect of the Y plane.
    mOGLManager->BindAndDrawQuadWithTextureRect(program,
                                                yuvImage->GetData()->GetPictureRect(),
                                                nsIntSize(yuvImage->GetData()->mYSize.width,
                                                          yuvImage->GetData()->mYSize.height));

    // We shouldn't need to do this, but do it anyway just in case
    // someone else forgets.
    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
  } else if (image->GetFormat() == CAIRO_SURFACE) {
    CairoImage *cairoImage = static_cast<CairoImage*>(image);

    if (!cairoImage->mSurface) {
      return;
    }

    NS_ASSERTION(cairoImage->mSurface->GetContentType() != gfxASurface::CONTENT_ALPHA,
                 "Image layer has alpha image");

    CairoOGLBackendData *data =
      static_cast<CairoOGLBackendData*>(cairoImage->GetBackendData(LAYERS_OPENGL));

    if (data && data->mTexture.GetGLContext() != gl()) {
      // If this texture was allocated by another layer manager, clear
      // it out and re-allocate below.
      data = nullptr;
      cairoImage->SetBackendData(LAYERS_OPENGL, nullptr);
    }

    if (!data) {
      AllocateTexturesCairo(cairoImage);
      data = static_cast<CairoOGLBackendData*>(cairoImage->GetBackendData(LAYERS_OPENGL));
    }

    if (!data || data->mTexture.GetGLContext() != gl()) {
      // XXX - Can this ever happen? If so I need to fix this!
      return;
    }

    gl()->MakeCurrent();

    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, data->mTexture.GetTextureID());

    ShaderProgramOGL *program = mOGLManager->GetProgram(data->mLayerProgram,
                                                        GetMaskLayer());

    gl()->ApplyFilterToBoundTexture(mFilter);

    program->Activate();
    program->SetLayerQuadRect(nsIntRect(0, 0,
                                        cairoImage->GetSize().width,
                                        cairoImage->GetSize().height));
    program->SetLayerTransform(GetEffectiveTransform());
    program->SetLayerOpacity(GetEffectiveOpacity());
    program->SetRenderOffset(aOffset);
    program->SetTextureUnit(0);
    program->LoadMask(GetMaskLayer());

    mOGLManager->BindAndDrawQuad(program);
  } else if (image->GetFormat() == SHARED_TEXTURE) {
    SharedTextureImage* texImage = static_cast<SharedTextureImage*>(image);
    const SharedTextureImage::Data* data = texImage->GetData();

    GLContext::SharedHandleDetails handleDetails;
    if (!gl()->GetSharedHandleDetails(data->mShareType, data->mHandle, handleDetails)) {
      NS_ERROR("Failed to get shared handle details");
      return;
    }

    ShaderProgramOGL* program = mOGLManager->GetProgram(handleDetails.mProgramType,
                                                        GetMaskLayer());

    program->Activate();
    if (handleDetails.mProgramType == gl::RGBARectLayerProgramType) {
      // 2DRect case, get the multiplier right for a sampler2DRect
      program->SetTexCoordMultiplier(data->mSize.width, data->mSize.height);
    }
    program->SetLayerTransform(GetEffectiveTransform());
    program->SetLayerOpacity(GetEffectiveOpacity());
    program->SetRenderOffset(aOffset);
    program->SetTextureUnit(0);
    program->SetTextureTransform(handleDetails.mTextureTransform);
    program->LoadMask(GetMaskLayer());

    if (!texImage->GetBackendData(LAYERS_OPENGL)) {
      AllocateTextureSharedTexture(texImage, gl(), handleDetails.mTarget);
    }

    ImageOGLBackendData *backendData =
      static_cast<ImageOGLBackendData*>(texImage->GetBackendData(LAYERS_OPENGL));
    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
    gl()->fBindTexture(handleDetails.mTarget, backendData->mTexture.GetTextureID());

    // The shared handle must be attached for the draw and detached after it.
    if (!gl()->AttachSharedHandle(data->mShareType, data->mHandle)) {
      NS_ERROR("Failed to bind shared texture handle");
      return;
    }

    gl()->ApplyFilterToBoundTexture(handleDetails.mTarget, mFilter);
    program->SetLayerQuadRect(nsIntRect(nsIntPoint(0, 0), data->mSize));
    mOGLManager->BindAndDrawQuad(program, data->mInverted);
    gl()->fBindTexture(handleDetails.mTarget, 0);
    gl()->DetachSharedHandle(data->mShareType, data->mHandle);
#ifdef MOZ_WIDGET_GONK
  } else if (image->GetFormat() == GONK_IO_SURFACE) {

    GonkIOSurfaceImage *ioImage = static_cast<GonkIOSurfaceImage*>(image);
    if (!ioImage) {
      return;
    }

    gl()->MakeCurrent();
    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);

    if (!ioImage->GetBackendData(LAYERS_OPENGL)) {
      AllocateTextureIOSurface(ioImage, gl());
    }

    GonkIOSurfaceImageOGLBackendData *data =
      static_cast<GonkIOSurfaceImageOGLBackendData*>(ioImage->GetBackendData(LAYERS_OPENGL));

    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
    gl()->BindExternalBuffer(data->mTexture.GetTextureID(), ioImage->GetNativeBuffer());

    ShaderProgramOGL *program = mOGLManager->GetProgram(RGBAExternalLayerProgramType,
                                                        GetMaskLayer());

    gl()->ApplyFilterToBoundTexture(mFilter);

    program->Activate();
    program->SetLayerQuadRect(nsIntRect(0, 0,
                                        ioImage->GetSize().width,
                                        ioImage->GetSize().height));
    program->SetLayerTransform(GetEffectiveTransform());
    program->SetLayerOpacity(GetEffectiveOpacity());
    program->SetRenderOffset(aOffset);
    program->SetTextureUnit(0);
    program->LoadMask(GetMaskLayer());

    mOGLManager->BindAndDrawQuadWithTextureRect(program,
                                                GetVisibleRegion().GetBounds(),
                                                nsIntSize(ioImage->GetSize().width,
                                                          ioImage->GetSize().height));
#endif
  }
  GetContainer()->NotifyPaintedImage(image);
}
// Custom FFmpeg get_buffer-style callback: allocates the decoder's YUV420P
// output frame inside a gecko PlanarYCbCrImage so the decoded planes can be
// handed to the compositor without an extra copy.
// Returns 0 on success, -1 if the backing buffer could not be allocated.
int
FFmpegH264Decoder::AllocateYUV420PVideoBuffer(AVCodecContext* aCodecContext,
                                              AVFrame* aFrame)
{
  // Older versions of ffmpeg require that edges be allocated *around* the
  // actual image.
  int edgeWidth = avcodec_get_edge_width();
  int decodeWidth = aCodecContext->width + edgeWidth * 2;
  int decodeHeight = aCodecContext->height + edgeWidth * 2;

  // Align width and height to possibly speed up decode.
  int stride_align[AV_NUM_DATA_POINTERS];
  avcodec_align_dimensions2(aCodecContext, &decodeWidth, &decodeHeight,
                            stride_align);

  // Get strides for each plane.
  av_image_fill_linesizes(aFrame->linesize, aCodecContext->pix_fmt,
                          decodeWidth);

  // Let FFmpeg set up its YUV plane pointers and tell us how much memory we
  // need.
  // Note that we're passing |nullptr| here as the base address as we haven't
  // allocated our image yet. We will adjust |aFrame->data| below.
  size_t allocSize =
    av_image_fill_pointers(aFrame->data, aCodecContext->pix_fmt, decodeHeight,
                           nullptr /* base address */, aFrame->linesize);

  nsRefPtr<Image> image =
    mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
  // NOTE(review): a sibling version of this function uses static_cast here;
  // reinterpret_cast works only if PlanarYCbCrImage is the dynamic type —
  // presumably guaranteed by the PLANAR_YCBCR format request; confirm.
  PlanarYCbCrImage* ycbcr =
    reinterpret_cast<PlanarYCbCrImage*>(image.get());
  uint8_t* buffer = ycbcr->AllocateAndGetNewBuffer(allocSize);

  if (!buffer) {
    NS_WARNING("Failed to allocate buffer for FFmpeg video decoding");
    return -1;
  }

  // Now that we've allocated our image, we can add its address to the offsets
  // set by |av_image_fill_pointers| above. We also have to add |edgeWidth|
  // pixels of padding here.
  for (uint32_t i = 0; i < AV_NUM_DATA_POINTERS; i++) {
    // The C planes are half the resolution of the Y plane, so we need to halve
    // the edge width here.
    uint32_t planeEdgeWidth = edgeWidth / (i ? 2 : 1);

    // Add buffer offset, plus a horizontal bar |edgeWidth| pixels high at the
    // top of the frame, plus |edgeWidth| pixels from the left of the frame.
    // |aFrame->data[i]| currently holds an offset relative to a null base
    // (see av_image_fill_pointers call above); adding the real buffer
    // address rebases it to point into our image's storage.
    aFrame->data[i] += reinterpret_cast<ptrdiff_t>(
      buffer + planeEdgeWidth * aFrame->linesize[i] + planeEdgeWidth);
  }

  // Unused, but needs to be non-zero to keep ffmpeg happy.
  aFrame->type = GECKO_FRAME_TYPE;

  aFrame->extended_data = aFrame->data;
  aFrame->width = aCodecContext->width;
  aFrame->height = aCodecContext->height;

  // Publish the plane pointers/strides into the image without copying the
  // pixel data; the image shares the buffer we just filled in.
  mozilla::layers::PlanarYCbCrData data;
  PlanarYCbCrDataFromAVFrame(data, aFrame);
  ycbcr->SetDataNoCopy(data);

  // Keep a handle to the image; presumably retrieved by the caller once the
  // decode of this frame completes — confirm against the decode path.
  mCurrentImage.swap(image);

  return 0;
}
void ImageLayerD3D10::RenderLayer() { ImageContainer *container = GetContainer(); if (!container) { return; } AutoLockImage autoLock(container); Image *image = autoLock.GetImage(); if (!image) { return; } gfxIntSize size = mScaleMode == SCALE_NONE ? image->GetSize() : mScaleToSize; SetEffectTransformAndOpacity(); ID3D10EffectTechnique *technique; if (image->GetFormat() == Image::CAIRO_SURFACE || image->GetFormat() == Image::REMOTE_IMAGE_BITMAP) { bool hasAlpha = false; nsRefPtr<ID3D10ShaderResourceView> srView = GetImageSRView(image, hasAlpha); if (!srView) { return; } PRUint8 shaderFlags = SHADER_PREMUL; shaderFlags |= LoadMaskTexture(); shaderFlags |= hasAlpha ? SHADER_RGBA : SHADER_RGB; shaderFlags |= mFilter == gfxPattern::FILTER_NEAREST ? SHADER_POINT : SHADER_LINEAR; technique = SelectShader(shaderFlags); effect()->GetVariableByName("tRGB")->AsShaderResource()->SetResource(srView); effect()->GetVariableByName("vLayerQuad")->AsVector()->SetFloatVector( ShaderConstantRectD3D10( (float)0, (float)0, (float)size.width, (float)size.height) ); } else if (image->GetFormat() == Image::PLANAR_YCBCR) { PlanarYCbCrImage *yuvImage = static_cast<PlanarYCbCrImage*>(image); if (!yuvImage->mBufferSize) { return; } if (!yuvImage->GetBackendData(LayerManager::LAYERS_D3D10)) { AllocateTexturesYCbCr(yuvImage); } PlanarYCbCrD3D10BackendData *data = static_cast<PlanarYCbCrD3D10BackendData*>(yuvImage->GetBackendData(LayerManager::LAYERS_D3D10)); if (!data) { return; } nsRefPtr<ID3D10Device> dev; data->mYTexture->GetDevice(getter_AddRefs(dev)); if (dev != device()) { return; } // TODO: At some point we should try to deal with mFilter here, you don't // really want to use point filtering in the case of NEAREST, since that // would also use point filtering for Chroma upsampling. Where most likely // the user would only want point filtering for final RGB image upsampling. 
technique = SelectShader(SHADER_YCBCR | LoadMaskTexture()); effect()->GetVariableByName("tY")->AsShaderResource()->SetResource(data->mYView); effect()->GetVariableByName("tCb")->AsShaderResource()->SetResource(data->mCbView); effect()->GetVariableByName("tCr")->AsShaderResource()->SetResource(data->mCrView); /* * Send 3d control data and metadata to NV3DVUtils */ if (GetNv3DVUtils()) { Nv_Stereo_Mode mode; switch (yuvImage->mData.mStereoMode) { case STEREO_MODE_LEFT_RIGHT: mode = NV_STEREO_MODE_LEFT_RIGHT; break; case STEREO_MODE_RIGHT_LEFT: mode = NV_STEREO_MODE_RIGHT_LEFT; break; case STEREO_MODE_BOTTOM_TOP: mode = NV_STEREO_MODE_BOTTOM_TOP; break; case STEREO_MODE_TOP_BOTTOM: mode = NV_STEREO_MODE_TOP_BOTTOM; break; case STEREO_MODE_MONO: mode = NV_STEREO_MODE_MONO; break; } // Send control data even in mono case so driver knows to leave stereo mode. GetNv3DVUtils()->SendNv3DVControl(mode, true, FIREFOX_3DV_APP_HANDLE); if (yuvImage->mData.mStereoMode != STEREO_MODE_MONO) { // Dst resource is optional GetNv3DVUtils()->SendNv3DVMetaData((unsigned int)yuvImage->mSize.width, (unsigned int)yuvImage->mSize.height, (HANDLE)(data->mYTexture), (HANDLE)(NULL)); } } effect()->GetVariableByName("vLayerQuad")->AsVector()->SetFloatVector( ShaderConstantRectD3D10( (float)0, (float)0, (float)size.width, (float)size.height) ); effect()->GetVariableByName("vTextureCoords")->AsVector()->SetFloatVector( ShaderConstantRectD3D10( (float)yuvImage->mData.mPicX / yuvImage->mData.mYSize.width, (float)yuvImage->mData.mPicY / yuvImage->mData.mYSize.height, (float)yuvImage->mData.mPicSize.width / yuvImage->mData.mYSize.width, (float)yuvImage->mData.mPicSize.height / yuvImage->mData.mYSize.height) ); } bool resetTexCoords = image->GetFormat() == Image::PLANAR_YCBCR; image = nsnull; autoLock.Unlock(); technique->GetPassByIndex(0)->Apply(0); device()->Draw(4, 0); if (resetTexCoords) { effect()->GetVariableByName("vTextureCoords")->AsVector()-> SetFloatVector(ShaderConstantRectD3D10(0, 0, 
1.0f, 1.0f)); } GetContainer()->NotifyPaintedImage(image); }
// Pushes the container's current image to the compositor, choosing one of
// four strategies based on the image's kind:
//  1. image already owns a TextureClient -> forward it directly (no copy);
//  2. planar YCbCr -> copy planes into a (possibly new) YCbCr buffer client;
//  3. shared GL texture -> wrap the handle in a SharedTextureClientOGL;
//  4. anything else -> read back to a surface and upload it.
// Returns true when the compositor is up to date (including the early-out
// when the image serial hasn't changed), false on any failure.
bool
ImageClientSingle::UpdateImage(ImageContainer* aContainer,
                               uint32_t aContentFlags)
{
  AutoLockImage autoLock(aContainer);

  Image *image = autoLock.GetImage();
  if (!image) {
    return false;
  }

  // Same image as last time: nothing to do.
  if (mLastPaintedImageSerial == image->GetSerial()) {
    return true;
  }

  if (image->AsSharedImage() && image->AsSharedImage()->GetTextureClient()) {
    // fast path: no need to allocate and/or copy image data
    RefPtr<TextureClient> texture = image->AsSharedImage()->GetTextureClient();

    if (texture->IsSharedWithCompositor()) {
      // XXX - temporary fix for bug 911941
      // This will be changed with bug 912907
      return false;
    }

    if (mFrontBuffer) {
      RemoveTextureClient(mFrontBuffer);
    }
    mFrontBuffer = texture;
    AddTextureClient(texture);
    GetForwarder()->UpdatedTexture(this, texture, nullptr);
    GetForwarder()->UseTexture(this, texture);
  } else if (image->GetFormat() == PLANAR_YCBCR) {
    PlanarYCbCrImage* ycbcr = static_cast<PlanarYCbCrImage*>(image);
    const PlanarYCbCrData* data = ycbcr->GetData();
    if (!data) {
      return false;
    }

    // An immutable front buffer can't be updated in place; drop it and
    // allocate a fresh one below.
    if (mFrontBuffer && mFrontBuffer->IsImmutable()) {
      RemoveTextureClient(mFrontBuffer);
      mFrontBuffer = nullptr;
    }

    bool bufferCreated = false;
    if (!mFrontBuffer) {
      mFrontBuffer = CreateBufferTextureClient(gfx::FORMAT_YUV, TEXTURE_FLAGS_DEFAULT);
      gfx::IntSize ySize(data->mYSize.width, data->mYSize.height);
      gfx::IntSize cbCrSize(data->mCbCrSize.width, data->mCbCrSize.height);
      if (!mFrontBuffer->AsTextureClientYCbCr()->AllocateForYCbCr(ySize, cbCrSize, data->mStereoMode)) {
        mFrontBuffer = nullptr;
        return false;
      }
      bufferCreated = true;
    }

    if (!mFrontBuffer->Lock(OPEN_READ_WRITE)) {
      return false;
    }
    bool status = mFrontBuffer->AsTextureClientYCbCr()->UpdateYCbCr(*data);
    mFrontBuffer->Unlock();

    // Register the new client only after it holds valid data.
    if (bufferCreated) {
      AddTextureClient(mFrontBuffer);
    }

    if (status) {
      GetForwarder()->UpdatedTexture(this, mFrontBuffer, nullptr);
      GetForwarder()->UseTexture(this, mFrontBuffer);
    } else {
      MOZ_ASSERT(false);
      return false;
    }
  } else if (image->GetFormat() == SHARED_TEXTURE) {
    SharedTextureImage* sharedImage = static_cast<SharedTextureImage*>(image);
    const SharedTextureImage::Data *data = sharedImage->GetData();

    gfx::IntSize size = gfx::IntSize(image->GetSize().width, image->GetSize().height);

    // Shared textures always get a fresh wrapper client.
    if (mFrontBuffer) {
      RemoveTextureClient(mFrontBuffer);
      mFrontBuffer = nullptr;
    }

    RefPtr<SharedTextureClientOGL> buffer = new SharedTextureClientOGL(mTextureFlags);
    buffer->InitWith(data->mHandle, size, data->mShareType, data->mInverted);
    mFrontBuffer = buffer;

    AddTextureClient(mFrontBuffer);
    GetForwarder()->UseTexture(this, mFrontBuffer);
  } else {
    // Generic fallback: read the image back into a surface and upload that.
    nsRefPtr<gfxASurface> surface = image->GetAsSurface();
    MOZ_ASSERT(surface);

    gfx::IntSize size = gfx::IntSize(image->GetSize().width, image->GetSize().height);

    // Reallocate when immutable or when the image size changed.
    if (mFrontBuffer &&
        (mFrontBuffer->IsImmutable() || mFrontBuffer->GetSize() != size)) {
      RemoveTextureClient(mFrontBuffer);
      mFrontBuffer = nullptr;
    }

    bool bufferCreated = false;
    if (!mFrontBuffer) {
      gfxImageFormat format
        = gfxPlatform::GetPlatform()->OptimalFormatForContent(surface->GetContentType());
      mFrontBuffer = CreateBufferTextureClient(gfx::ImageFormatToSurfaceFormat(format),
                                               TEXTURE_FLAGS_DEFAULT);
      MOZ_ASSERT(mFrontBuffer->AsTextureClientSurface());
      // NOTE(review): the return value of AllocateForSurface is ignored here,
      // unlike the YCbCr branch above — confirm allocation failure is caught
      // by the Lock() below.
      mFrontBuffer->AsTextureClientSurface()->AllocateForSurface(size);

      bufferCreated = true;
    }

    if (!mFrontBuffer->Lock(OPEN_READ_WRITE)) {
      return false;
    }
    bool status = mFrontBuffer->AsTextureClientSurface()->UpdateSurface(surface);
    mFrontBuffer->Unlock();

    if (bufferCreated) {
      AddTextureClient(mFrontBuffer);
    }

    if (status) {
      GetForwarder()->UpdatedTexture(this, mFrontBuffer, nullptr);
      GetForwarder()->UseTexture(this, mFrontBuffer);
    } else {
      return false;
    }
  }

  UpdatePictureRect(image->GetPictureRect());

  mLastPaintedImageSerial = image->GetSerial();
  aContainer->NotifyPaintedImage(image);
  return true;
}
// Receives one camera preview frame, converts it in place to planar YUV420
// if needed, wraps it in a PlanarYCbCrImage, and appends it to the preview
// video track. Frames are dropped while the input stream already has enough
// buffered data.
void
GonkCameraPreview::ReceiveFrame(PRUint8 *aData, PRUint32 aLength)
{
  DOM_CAMERA_LOGI("%s:%d : this=%p\n", __func__, __LINE__, this);

  // Back-pressure: drop frames while the track has enough buffered, and log
  // how many consecutive frames were discarded once it drains.
  if (mInput->HaveEnoughBuffered(TRACK_VIDEO)) {
    if (mDiscardedFrameCount == 0) {
      DOM_CAMERA_LOGI("mInput has enough data buffered, starting to discard\n");
    }
    ++mDiscardedFrameCount;
    return;
  } else if (mDiscardedFrameCount) {
    DOM_CAMERA_LOGI("mInput needs more data again; discarded %d frames in a row\n", mDiscardedFrameCount);
    mDiscardedFrameCount = 0;
  }

  switch (mFormat) {
    case GonkCameraHardware::PREVIEW_FORMAT_YUV420SP:
      {
        // de-interlace the u and v planes: the semi-planar buffer stores
        // interleaved UV pairs after the Y plane; split them into separate
        // U and V planes in place (via a temporary), 8 samples at a time.
        uint8_t* y = aData;
        uint32_t yN = mWidth * mHeight;
        NS_ASSERTION((yN & 0x3) == 0, "Invalid image dimensions!");
        uint32_t uvN = yN / 4;
        uint32_t* src = (uint32_t*)( y + yN );
        uint32_t* d = new uint32_t[ uvN / 2 ];
        uint32_t* u = d;
        uint32_t* v = u + uvN / 4;

        // we're handling pairs of 32-bit words, so divide by 8
        NS_ASSERTION((uvN & 0x7) == 0, "Invalid image dimensions!");
        uvN /= 8;

        while (uvN--) {
          uint32_t src0 = *src++;
          uint32_t src1 = *src++;

          uint32_t u0;
          uint32_t v0;
          uint32_t u1;
          uint32_t v1;

          DEINTERLACE( u0, v0, src0, src1 );

          src0 = *src++;
          src1 = *src++;

          DEINTERLACE( u1, v1, src0, src1 );

          *u++ = u0;
          *u++ = u1;
          *v++ = v0;
          *v++ = v1;
        }

        // Copy the de-interleaved planes back over the original UV area.
        memcpy(y + yN, d, yN / 2);
        delete[] d;
      }
      break;

    case GonkCameraHardware::PREVIEW_FORMAT_YUV420P:
      // no transformation required — already planar
      break;

    default:
      // in a format we don't handle, get out of here
      return;
  }

  Image::Format format = Image::PLANAR_YCBCR;
  nsRefPtr<Image> image = mImageContainer->CreateImage(&format, 1);
  // NOTE(review): explicit AddRef on top of the nsRefPtr — presumably an
  // intentional extra reference for the consumer of the video segment, but
  // confirm it is balanced by a Release elsewhere or this leaks the image.
  image->AddRef();
  PlanarYCbCrImage* videoImage = static_cast<PlanarYCbCrImage*>(image.get());

  /**
   * If you change either lumaBpp or chromaBpp, make sure the
   * assertions below still hold.
   */
  const PRUint8 lumaBpp = 8;
  const PRUint8 chromaBpp = 4;

  PlanarYCbCrImage::Data data;
  data.mYChannel = aData;
  data.mYSize = gfxIntSize(mWidth, mHeight);
  // Strides are computed in bits, then converted to bytes (divide by 8);
  // the assertions guarantee the division is exact.
  data.mYStride = mWidth * lumaBpp;
  NS_ASSERTION((data.mYStride & 0x7) == 0, "Invalid image dimensions!");
  data.mYStride /= 8;
  data.mCbCrStride = mWidth * chromaBpp;
  NS_ASSERTION((data.mCbCrStride & 0x7) == 0, "Invalid image dimensions!");
  data.mCbCrStride /= 8;
  data.mCbChannel = aData + mHeight * data.mYStride;
  data.mCrChannel = data.mCbChannel + mHeight * data.mCbCrStride / 2;
  data.mCbCrSize = gfxIntSize(mWidth / 2, mHeight / 2);
  data.mPicX = 0;
  data.mPicY = 0;
  data.mPicSize = gfxIntSize(mWidth, mHeight);
  data.mStereoMode = mozilla::layers::STEREO_MODE_MONO;

  videoImage->SetData(data); // copies buffer

  mVideoSegment.AppendFrame(videoImage, 1, gfxIntSize(mWidth, mHeight));
  mInput->AppendToTrack(TRACK_VIDEO, &mVideoSegment);

  // Periodic progress logging, once every 10 frames.
  mFrameCount += 1;

  if ((mFrameCount % 10) == 0) {
    DOM_CAMERA_LOGI("%s:%d : mFrameCount = %d\n", __func__, __LINE__, mFrameCount);
  }
}
// Creates a VideoData holding the given YCbCr buffer, either copying the
// planes into a newly created image (aImage == nullptr) or attaching the
// caller-supplied image. Validates plane geometry and the picture rect
// before touching any pixel data. Returns nullptr on any validation or
// allocation failure; returns an image-less VideoData when neither an image
// nor a container is supplied (used as a placeholder for media streams).
/* static */
already_AddRefed<VideoData>
VideoData::Create(const VideoInfo& aInfo,
                  ImageContainer* aContainer,
                  Image* aImage,
                  int64_t aOffset,
                  int64_t aTime,
                  int64_t aDuration,
                  const YCbCrBuffer& aBuffer,
                  bool aKeyframe,
                  int64_t aTimecode,
                  const IntRect& aPicture)
{
  if (!aImage && !aContainer) {
    // Create a dummy VideoData with no image. This gives us something to
    // send to media streams if necessary.
    RefPtr<VideoData> v(new VideoData(aOffset,
                                      aTime,
                                      aDuration,
                                      aKeyframe,
                                      aTimecode,
                                      aInfo.mDisplay,
                                      0));
    return v.forget();
  }

  // The following situation should never happen unless there is a bug
  // in the decoder
  if (aBuffer.mPlanes[1].mWidth != aBuffer.mPlanes[2].mWidth ||
      aBuffer.mPlanes[1].mHeight != aBuffer.mPlanes[2].mHeight) {
    NS_ERROR("C planes with different sizes");
    return nullptr;
  }

  // The following situations could be triggered by invalid input
  if (aPicture.width <= 0 || aPicture.height <= 0) {
    // In debug mode, makes the error more noticeable
    MOZ_ASSERT(false, "Empty picture rect");
    return nullptr;
  }
  if (!ValidatePlane(aBuffer.mPlanes[0]) ||
      !ValidatePlane(aBuffer.mPlanes[1]) ||
      !ValidatePlane(aBuffer.mPlanes[2])) {
    NS_WARNING("Invalid plane size");
    return nullptr;
  }

  // Ensure the picture size specified in the headers can be extracted out of
  // the frame we've been supplied without indexing out of bounds.
  CheckedUint32 xLimit = aPicture.x + CheckedUint32(aPicture.width);
  CheckedUint32 yLimit = aPicture.y + CheckedUint32(aPicture.height);
  if (!xLimit.isValid() ||
      xLimit.value() > aBuffer.mPlanes[0].mStride ||
      !yLimit.isValid() ||
      yLimit.value() > aBuffer.mPlanes[0].mHeight) {
    // The specified picture dimensions can't be contained inside the video
    // frame, we'll stomp memory if we try to copy it. Fail.
    NS_WARNING("Overflowing picture rect");
    return nullptr;
  }

  RefPtr<VideoData> v(new VideoData(aOffset,
                                    aTime,
                                    aDuration,
                                    aKeyframe,
                                    aTimecode,
                                    aInfo.mDisplay,
                                    0));
#ifdef MOZ_WIDGET_GONK
  const YCbCrBuffer::Plane &Y = aBuffer.mPlanes[0];
  const YCbCrBuffer::Plane &Cb = aBuffer.mPlanes[1];
  const YCbCrBuffer::Plane &Cr = aBuffer.mPlanes[2];
#endif

  if (!aImage) {
    // Currently our decoder only knows how to output to ImageFormat::PLANAR_YCBCR
    // format.
#ifdef MOZ_WIDGET_GONK
    // Prefer a gralloc-backed image on device (not in the emulator).
    if (IsYV12Format(Y, Cb, Cr) && !IsInEmulator()) {
      v->mImage = new layers::GrallocImage();
    }
#endif
    if (!v->mImage) {
      v->mImage = aContainer->CreatePlanarYCbCrImage();
    }
  } else {
    v->mImage = aImage;
  }

  if (!v->mImage) {
    return nullptr;
  }
  NS_ASSERTION(v->mImage->GetFormat() == ImageFormat::PLANAR_YCBCR ||
               v->mImage->GetFormat() == ImageFormat::GRALLOC_PLANAR_YCBCR,
               "Wrong format?");
  PlanarYCbCrImage* videoImage = v->mImage->AsPlanarYCbCrImage();
  MOZ_ASSERT(videoImage);

  // Only copy pixel data when we allocated the image ourselves; a
  // caller-supplied image already owns its data.
  bool shouldCopyData = (aImage == nullptr);
  if (!VideoData::SetVideoDataToImage(videoImage, aInfo, aBuffer, aPicture,
                                      shouldCopyData)) {
    return nullptr;
  }

#ifdef MOZ_WIDGET_GONK
  if (!videoImage->IsValid() && !aImage && IsYV12Format(Y, Cb, Cr)) {
    // Failed to allocate gralloc. Try fallback.
    v->mImage = aContainer->CreatePlanarYCbCrImage();
    if (!v->mImage) {
      return nullptr;
    }
    videoImage = v->mImage->AsPlanarYCbCrImage();
    if (!VideoData::SetVideoDataToImage(videoImage, aInfo, aBuffer, aPicture,
                                        true /* aCopyData */)) {
      return nullptr;
    }
  }
#endif
  return v.forget();
}
// Custom FFmpeg get_buffer-style callback: allocates the decoder's YUV420P
// output frame inside a gecko PlanarYCbCrImage so decoded planes reach the
// compositor without an extra copy. Ownership of the image is transferred
// into |aFrame->opaque| and recovered in the matching release callback.
// Returns 0 on success, -1 on allocation failure.
//
// Fix: the null check on |buffer| now happens *before* the alignment
// arithmetic. The previous ordering aligned first and tested after, which
// only worked by accident because ((0 + 63) & ~63) happens to be 0.
int
FFmpegH264Decoder<LIBAV_VER>::AllocateYUV420PVideoBuffer(
  AVCodecContext* aCodecContext, AVFrame* aFrame)
{
  bool needAlign = aCodecContext->codec->capabilities & CODEC_CAP_DR1;
  int edgeWidth = needAlign ? avcodec_get_edge_width() : 0;
  int decodeWidth = aCodecContext->width + edgeWidth * 2;
  // Make sure the decodeWidth is a multiple of 32, so a UV plane stride will be
  // a multiple of 16. FFmpeg uses SSE2 accelerated code to copy a frame line by
  // line.
  decodeWidth = (decodeWidth + 31) & ~31;
  int decodeHeight = aCodecContext->height + edgeWidth * 2;

  if (needAlign) {
    // Align width and height to account for CODEC_FLAG_EMU_EDGE.
    int stride_align[AV_NUM_DATA_POINTERS];
    avcodec_align_dimensions2(aCodecContext, &decodeWidth, &decodeHeight,
                              stride_align);
  }

  // Get strides for each plane.
  av_image_fill_linesizes(aFrame->linesize, aCodecContext->pix_fmt,
                          decodeWidth);

  // Let FFmpeg set up its YUV plane pointers and tell us how much memory we
  // need.
  // Note that we're passing |nullptr| here as the base address as we haven't
  // allocated our image yet. We will adjust |aFrame->data| below.
  size_t allocSize =
    av_image_fill_pointers(aFrame->data, aCodecContext->pix_fmt, decodeHeight,
                           nullptr /* base address */, aFrame->linesize);

  nsRefPtr<Image> image =
    mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
  PlanarYCbCrImage* ycbcr = static_cast<PlanarYCbCrImage*>(image.get());
  // Over-allocate by 64 bytes so the base pointer can be rounded up without
  // running past the end of the buffer.
  uint8_t* buffer = ycbcr->AllocateAndGetNewBuffer(allocSize + 64);
  if (!buffer) {
    NS_WARNING("Failed to allocate buffer for FFmpeg video decoding");
    return -1;
  }
  // FFmpeg requires a 16/32 bytes-aligned buffer, align it on 64 to be safe.
  buffer = reinterpret_cast<uint8_t*>(
    (reinterpret_cast<uintptr_t>(buffer) + 63) & ~63);

  // Now that we've allocated our image, we can add its address to the offsets
  // set by |av_image_fill_pointers| above. We also have to add |edgeWidth|
  // pixels of padding here.
  for (uint32_t i = 0; i < AV_NUM_DATA_POINTERS; i++) {
    // The C planes are half the resolution of the Y plane, so we need to halve
    // the edge width here.
    uint32_t planeEdgeWidth = edgeWidth / (i ? 2 : 1);

    // Add buffer offset, plus a horizontal bar |edgeWidth| pixels high at the
    // top of the frame, plus |edgeWidth| pixels from the left of the frame.
    aFrame->data[i] += reinterpret_cast<ptrdiff_t>(
      buffer + planeEdgeWidth * aFrame->linesize[i] + planeEdgeWidth);
  }

  // Unused, but needs to be non-zero to keep ffmpeg happy.
  aFrame->type = GECKO_FRAME_TYPE;

  aFrame->extended_data = aFrame->data;
  aFrame->width = aCodecContext->width;
  aFrame->height = aCodecContext->height;

  // Transfer ownership of |image| to the frame; the release callback takes
  // it back and releases the reference.
  aFrame->opaque = static_cast<void*>(image.forget().take());

  return 0;
}
// Custom FFmpeg get_buffer-style callback (self-computed plane layout
// variant): lays out a YUV420P frame inside a gecko PlanarYCbCrImage and
// hands ownership of the image to |aFrame->opaque|. Returns 0 on success,
// -1 on allocation failure.
//
// Fix: the null check on |buffer| now happens *before* the alignment
// arithmetic. The previous ordering aligned the possibly-null pointer first
// and only worked because ((0 + 63) & ~63) happens to be 0.
int
FFmpegH264Decoder<LIBAV_VER>::AllocateYUV420PVideoBuffer(
  AVCodecContext* aCodecContext, AVFrame* aFrame)
{
  bool needAlign = aCodecContext->codec->capabilities & CODEC_CAP_DR1;
  bool needEdge = !(aCodecContext->flags & CODEC_FLAG_EMU_EDGE);
  int edgeWidth = needEdge ? avcodec_get_edge_width() : 0;

  int decodeWidth = aCodecContext->width + edgeWidth * 2;
  int decodeHeight = aCodecContext->height + edgeWidth * 2;

  if (needAlign) {
    // Align width and height to account for CODEC_CAP_DR1.
    // Make sure the decodeWidth is a multiple of 64, so a UV plane stride
    // will be a multiple of 32. FFmpeg uses SSE3 accelerated code to copy a
    // frame line by line.
    // VP9 decoder uses MOVAPS/VEX.256 which requires 32-bytes aligned memory.
    decodeWidth = (decodeWidth + 63) & ~63;
    decodeHeight = (decodeHeight + 63) & ~63;
  }

  PodZero(&aFrame->data[0], AV_NUM_DATA_POINTERS);
  PodZero(&aFrame->linesize[0], AV_NUM_DATA_POINTERS);

  int pitch = decodeWidth;
  int chroma_pitch = (pitch + 1) / 2;
  int chroma_height = (decodeHeight + 1) / 2;

  // Get strides for each plane.
  aFrame->linesize[0] = pitch;
  aFrame->linesize[1] = aFrame->linesize[2] = chroma_pitch;

  // One full-resolution Y plane plus two quarter-resolution chroma planes.
  size_t allocSize = pitch * decodeHeight + (chroma_pitch * chroma_height) * 2;

  RefPtr<Image> image =
    mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
  PlanarYCbCrImage* ycbcr = static_cast<PlanarYCbCrImage*>(image.get());
  // Over-allocate by 64 bytes so the base pointer can be rounded up without
  // running past the end of the buffer.
  uint8_t* buffer = ycbcr->AllocateAndGetNewBuffer(allocSize + 64);
  if (!buffer) {
    NS_WARNING("Failed to allocate buffer for FFmpeg video decoding");
    return -1;
  }
  // FFmpeg requires a 16/32 bytes-aligned buffer, align it on 64 to be safe.
  buffer = reinterpret_cast<uint8_t*>(
    (reinterpret_cast<uintptr_t>(buffer) + 63) & ~63);

  // Byte offsets of the Y, Cb and Cr planes within the buffer.
  int offsets[3] = {
    0,
    pitch * decodeHeight,
    pitch * decodeHeight + chroma_pitch * chroma_height
  };

  // Add a horizontal bar |edgeWidth| pixels high at the
  // top of the frame, plus |edgeWidth| pixels from the left of the frame.
  // Chroma planes are half resolution, so they get half the edge.
  int planesEdgeWidth[3] = {
    edgeWidth * aFrame->linesize[0] + edgeWidth,
    edgeWidth / 2 * aFrame->linesize[1] + edgeWidth / 2,
    edgeWidth / 2 * aFrame->linesize[2] + edgeWidth / 2
  };

  for (uint32_t i = 0; i < 3; i++) {
    aFrame->data[i] = buffer + offsets[i] + planesEdgeWidth[i];
  }

  aFrame->extended_data = aFrame->data;
  aFrame->width = aCodecContext->width;
  aFrame->height = aCodecContext->height;

  // Transfer ownership of |image| to the frame; the release callback takes
  // it back and releases the reference.
  aFrame->opaque = static_cast<void*>(image.forget().take());

  aFrame->type = FF_BUFFER_TYPE_USER;
  aFrame->reordered_opaque = aCodecContext->reordered_opaque;
#if LIBAVCODEC_VERSION_MAJOR == 53
  if (aCodecContext->pkt) {
    aFrame->pkt_pts = aCodecContext->pkt->pts;
  }
#endif

  return 0;
}
// Pushes the container's current image to the compositor. Strategies, in
// order: (1) the image already owns a TextureClient for this client —
// forward it directly; (2) planar YCbCr — copy planes into a (possibly new)
// YCbCr buffer client; (3) shared GL texture — wrap the handle in a
// SharedTextureClientOGL; (4) anything else — read back to a SourceSurface
// and copy into a drawing texture client.
// *aIsSwapped is set to true only when a new image was actually forwarded.
// Returns true when the compositor is up to date (including the early-out
// on an unchanged image serial), false on any failure.
bool
ImageClientSingle::UpdateImageInternal(ImageContainer* aContainer,
                                       uint32_t aContentFlags,
                                       bool* aIsSwapped)
{
  AutoLockImage autoLock(aContainer);
  *aIsSwapped = false;

  Image *image = autoLock.GetImage();
  if (!image) {
    return false;
  }

  // Same image as last time: nothing to do.
  if (mLastPaintedImageSerial == image->GetSerial()) {
    return true;
  }

  if (image->AsSharedImage() && image->AsSharedImage()->GetTextureClient(this)) {
    // fast path: no need to allocate and/or copy image data
    RefPtr<TextureClient> texture = image->AsSharedImage()->GetTextureClient(this);

    if (mFrontBuffer) {
      GetForwarder()->RemoveTextureFromCompositable(this, mFrontBuffer);
    }
    mFrontBuffer = texture;
    if (!AddTextureClient(texture)) {
      mFrontBuffer = nullptr;
      return false;
    }
    GetForwarder()->UpdatedTexture(this, texture, nullptr);
    GetForwarder()->UseTexture(this, texture);
  } else if (image->GetFormat() == ImageFormat::PLANAR_YCBCR) {
    PlanarYCbCrImage* ycbcr = static_cast<PlanarYCbCrImage*>(image);
    const PlanarYCbCrData* data = ycbcr->GetData();
    if (!data) {
      return false;
    }

    // An immutable front buffer can't be updated in place; drop it and
    // allocate a fresh one below.
    if (mFrontBuffer && mFrontBuffer->IsImmutable()) {
      GetForwarder()->RemoveTextureFromCompositable(this, mFrontBuffer);
      mFrontBuffer = nullptr;
    }

    bool bufferCreated = false;
    if (!mFrontBuffer) {
      mFrontBuffer = CreateBufferTextureClient(gfx::SurfaceFormat::YUV, TEXTURE_FLAGS_DEFAULT);
      gfx::IntSize ySize(data->mYSize.width, data->mYSize.height);
      gfx::IntSize cbCrSize(data->mCbCrSize.width, data->mCbCrSize.height);
      if (!mFrontBuffer->AsTextureClientYCbCr()->AllocateForYCbCr(ySize, cbCrSize, data->mStereoMode)) {
        mFrontBuffer = nullptr;
        return false;
      }
      bufferCreated = true;
    }

    if (!mFrontBuffer->Lock(OPEN_WRITE_ONLY)) {
      return false;
    }
    bool status = mFrontBuffer->AsTextureClientYCbCr()->UpdateYCbCr(*data);
    mFrontBuffer->Unlock();

    // Register the new client only after it holds valid data.
    if (bufferCreated) {
      if (!AddTextureClient(mFrontBuffer)) {
        mFrontBuffer = nullptr;
        return false;
      }
    }

    if (status) {
      GetForwarder()->UpdatedTexture(this, mFrontBuffer, nullptr);
      GetForwarder()->UseTexture(this, mFrontBuffer);
    } else {
      MOZ_ASSERT(false);
      return false;
    }
  } else if (image->GetFormat() == ImageFormat::SHARED_TEXTURE) {
    SharedTextureImage* sharedImage = static_cast<SharedTextureImage*>(image);
    const SharedTextureImage::Data *data = sharedImage->GetData();

    gfx::IntSize size = gfx::IntSize(image->GetSize().width, image->GetSize().height);

    // Shared textures always get a fresh wrapper client.
    if (mFrontBuffer) {
      GetForwarder()->RemoveTextureFromCompositable(this, mFrontBuffer);
      mFrontBuffer = nullptr;
    }

    RefPtr<SharedTextureClientOGL> buffer = new SharedTextureClientOGL(mTextureFlags);
    buffer->InitWith(data->mHandle, size, data->mShareType, data->mInverted);
    mFrontBuffer = buffer;
    if (!AddTextureClient(mFrontBuffer)) {
      mFrontBuffer = nullptr;
      return false;
    }

    GetForwarder()->UseTexture(this, mFrontBuffer);
  } else {
    // Generic fallback: read the image back into a SourceSurface and copy it
    // into a drawing texture client.
    RefPtr<gfx::SourceSurface> surface = image->GetAsSourceSurface();
    MOZ_ASSERT(surface);

    gfx::IntSize size = image->GetSize();

    // Reallocate when immutable or when the image size changed.
    if (mFrontBuffer &&
        (mFrontBuffer->IsImmutable() || mFrontBuffer->GetSize() != size)) {
      GetForwarder()->RemoveTextureFromCompositable(this, mFrontBuffer);
      mFrontBuffer = nullptr;
    }

    bool bufferCreated = false;
    if (!mFrontBuffer) {
      gfxImageFormat format
        = gfxPlatform::GetPlatform()->OptimalFormatForContent(gfx::ContentForFormat(surface->GetFormat()));
      mFrontBuffer = CreateTextureClientForDrawing(gfx::ImageFormatToSurfaceFormat(format),
                                                   mTextureFlags, gfx::BackendType::NONE, size);
      MOZ_ASSERT(mFrontBuffer->AsTextureClientDrawTarget());
      if (!mFrontBuffer->AsTextureClientDrawTarget()->AllocateForSurface(size)) {
        mFrontBuffer = nullptr;
        return false;
      }

      bufferCreated = true;
    }

    if (!mFrontBuffer->Lock(OPEN_WRITE_ONLY)) {
      return false;
    }

    {
      // We must not keep a reference to the DrawTarget after it has been unlocked.
      RefPtr<DrawTarget> dt = mFrontBuffer->AsTextureClientDrawTarget()->GetAsDrawTarget();
      MOZ_ASSERT(surface.get());
      dt->CopySurface(surface, IntRect(IntPoint(), surface->GetSize()), IntPoint());
    }

    mFrontBuffer->Unlock();

    // Register the new client only after it holds valid data.
    if (bufferCreated) {
      if (!AddTextureClient(mFrontBuffer)) {
        mFrontBuffer = nullptr;
        return false;
      }
    }

    GetForwarder()->UpdatedTexture(this, mFrontBuffer, nullptr);
    GetForwarder()->UseTexture(this, mFrontBuffer);
  }

  UpdatePictureRect(image->GetPictureRect());

  mLastPaintedImageSerial = image->GetSerial();
  aContainer->NotifyPaintedImage(image);
  *aIsSwapped = true;
  return true;
}
// Draws this layer's current image with OpenGL. Branches on image format:
// planar YCbCr (three-texture YCbCr shader), Cairo surface (single RGB(A)
// texture), Mac IOSurface (XP_MACOSX only), and Gonk IOSurface
// (MOZ_WIDGET_GONK only). Per-format GL backend data is (re)allocated when
// missing or when it was created on a different GL context.
void ImageLayerOGL::RenderLayer(int,
                                const nsIntPoint& aOffset)
{
  nsRefPtr<ImageContainer> container = GetContainer();

  if (!container || mOGLManager->CompositingDisabled())
    return;

  mOGLManager->MakeCurrent();

  AutoLockImage autoLock(container);

  Image *image = autoLock.GetImage();
  if (!image) {
    return;
  }

  NS_ASSERTION(image->GetFormat() != REMOTE_IMAGE_BITMAP,
    "Remote images aren't handled yet in OGL layers!");

  if (image->GetFormat() == PLANAR_YCBCR) {
    PlanarYCbCrImage *yuvImage = static_cast<PlanarYCbCrImage*>(image);

    if (!yuvImage->IsValid()) {
      return;
    }

    PlanarYCbCrOGLBackendData *data =
      static_cast<PlanarYCbCrOGLBackendData*>(yuvImage->GetBackendData(LAYERS_OPENGL));

    if (data && data->mTextures->GetGLContext() != gl()) {
      // If these textures were allocated by another layer manager,
      // clear them out and re-allocate below.
      data = nullptr;
      yuvImage->SetBackendData(LAYERS_OPENGL, nullptr);
    }

    if (!data) {
      AllocateTexturesYCbCr(yuvImage);
      data = static_cast<PlanarYCbCrOGLBackendData*>(yuvImage->GetBackendData(LAYERS_OPENGL));
    }

    if (!data || data->mTextures->GetGLContext() != gl()) {
      // XXX - Can this ever happen? If so I need to fix this!
      return;
    }

    gl()->MakeCurrent();
    // Bind Y/Cb/Cr planes to texture units 0/1/2 (bound in reverse order so
    // unit 0 is left active).
    gl()->fActiveTexture(LOCAL_GL_TEXTURE2);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, data->mTextures[2].GetTextureID());
    gl()->ApplyFilterToBoundTexture(mFilter);
    gl()->fActiveTexture(LOCAL_GL_TEXTURE1);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, data->mTextures[1].GetTextureID());
    gl()->ApplyFilterToBoundTexture(mFilter);
    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, data->mTextures[0].GetTextureID());
    gl()->ApplyFilterToBoundTexture(mFilter);

    ShaderProgramOGL *program = mOGLManager->GetProgram(YCbCrLayerProgramType,
                                                        GetMaskLayer());

    program->Activate();
    program->SetLayerQuadRect(nsIntRect(0, 0,
                                        yuvImage->GetSize().width,
                                        yuvImage->GetSize().height));
    program->SetLayerTransform(GetEffectiveTransform());
    program->SetLayerOpacity(GetEffectiveOpacity());
    program->SetRenderOffset(aOffset);
    program->SetYCbCrTextureUnits(0, 1, 2);
    program->LoadMask(GetMaskLayer());

    mOGLManager->BindAndDrawQuadWithTextureRect(program,
                                                yuvImage->GetData()->GetPictureRect(),
                                                nsIntSize(yuvImage->GetData()->mYSize.width,
                                                          yuvImage->GetData()->mYSize.height));

    // We shouldn't need to do this, but do it anyway just in case
    // someone else forgets.
    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
  } else if (image->GetFormat() == CAIRO_SURFACE) {
    CairoImage *cairoImage = static_cast<CairoImage*>(image);

    if (!cairoImage->mSurface) {
      return;
    }

    NS_ASSERTION(cairoImage->mSurface->GetContentType() != gfxASurface::CONTENT_ALPHA,
                 "Image layer has alpha image");

    CairoOGLBackendData *data =
      static_cast<CairoOGLBackendData*>(cairoImage->GetBackendData(LAYERS_OPENGL));

    if (data && data->mTexture.GetGLContext() != gl()) {
      // If this texture was allocated by another layer manager, clear
      // it out and re-allocate below.
      data = nullptr;
      cairoImage->SetBackendData(LAYERS_OPENGL, nullptr);
    }

    if (!data) {
      AllocateTexturesCairo(cairoImage);
      data = static_cast<CairoOGLBackendData*>(cairoImage->GetBackendData(LAYERS_OPENGL));
    }

    if (!data || data->mTexture.GetGLContext() != gl()) {
      // XXX - Can this ever happen? If so I need to fix this!
      return;
    }

    gl()->MakeCurrent();

    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, data->mTexture.GetTextureID());

    ShaderProgramOGL *program =
      mOGLManager->GetProgram(data->mLayerProgram, GetMaskLayer());

    gl()->ApplyFilterToBoundTexture(mFilter);

    program->Activate();
    program->SetLayerQuadRect(nsIntRect(0, 0,
                                        cairoImage->GetSize().width,
                                        cairoImage->GetSize().height));
    program->SetLayerTransform(GetEffectiveTransform());
    program->SetLayerOpacity(GetEffectiveOpacity());
    program->SetRenderOffset(aOffset);
    program->SetTextureUnit(0);
    program->LoadMask(GetMaskLayer());

    mOGLManager->BindAndDrawQuad(program);
#ifdef XP_MACOSX
  } else if (image->GetFormat() == MAC_IO_SURFACE) {
     MacIOSurfaceImage *ioImage = static_cast<MacIOSurfaceImage*>(image);

     if (!mOGLManager->GetThebesLayerCallback()) {
       // If its an empty transaction we still need to update
       // the plugin IO Surface and make sure we grab the
       // new image
       ioImage->Update(GetContainer());
       image = nullptr;
       autoLock.Refresh();
       image = autoLock.GetImage();
       gl()->MakeCurrent();
       // NOTE(review): the refreshed |image| is cast without re-checking its
       // format; presumably it is still a MacIOSurfaceImage after Update() —
       // confirm.
       ioImage = static_cast<MacIOSurfaceImage*>(image);
     }

     if (!ioImage) {
       return;
     }

     gl()->fActiveTexture(LOCAL_GL_TEXTURE0);

     if (!ioImage->GetBackendData(LAYERS_OPENGL)) {
       AllocateTextureIOSurface(ioImage, gl());
     }

     MacIOSurfaceImageOGLBackendData *data =
      static_cast<MacIOSurfaceImageOGLBackendData*>(ioImage->GetBackendData(LAYERS_OPENGL));

     gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
     gl()->fBindTexture(LOCAL_GL_TEXTURE_RECTANGLE_ARB, data->mTexture.GetTextureID());

     ShaderProgramOGL *program =
       mOGLManager->GetProgram(gl::RGBARectLayerProgramType, GetMaskLayer());

     program->Activate();
     if (program->GetTexCoordMultiplierUniformLocation() != -1) {
       // 2DRect case, get the multiplier right for a sampler2DRect
       program->SetTexCoordMultiplier(ioImage->GetSize().width, ioImage->GetSize().height);
     } else {
       NS_ASSERTION(0, "no rects?");
     }

     program->SetLayerQuadRect(nsIntRect(0, 0,
                                         ioImage->GetSize().width,
                                         ioImage->GetSize().height));
     program->SetLayerTransform(GetEffectiveTransform());
     program->SetLayerOpacity(GetEffectiveOpacity());
     program->SetRenderOffset(aOffset);
     program->SetTextureUnit(0);
     program->LoadMask(GetMaskLayer());

     mOGLManager->BindAndDrawQuad(program);
     gl()->fBindTexture(LOCAL_GL_TEXTURE_RECTANGLE_ARB, 0);
#endif
#ifdef MOZ_WIDGET_GONK
  } else if (image->GetFormat() == GONK_IO_SURFACE) {

    GonkIOSurfaceImage *ioImage = static_cast<GonkIOSurfaceImage*>(image);
    if (!ioImage) {
      return;
    }

    gl()->MakeCurrent();
    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);

    if (!ioImage->GetBackendData(LAYERS_OPENGL)) {
      AllocateTextureIOSurface(ioImage, gl());
    }
    // NOTE(review): unlike the other branches, |data| is not null-checked
    // before use here — confirm AllocateTextureIOSurface cannot fail.
    GonkIOSurfaceImageOGLBackendData *data =
      static_cast<GonkIOSurfaceImageOGLBackendData*>(ioImage->GetBackendData(LAYERS_OPENGL));

    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
    gl()->BindExternalBuffer(data->mTexture.GetTextureID(), ioImage->GetNativeBuffer());

    ShaderProgramOGL *program = mOGLManager->GetProgram(RGBAExternalLayerProgramType, GetMaskLayer());

    gl()->ApplyFilterToBoundTexture(mFilter);

    program->Activate();
    program->SetLayerQuadRect(nsIntRect(0, 0,
                                        ioImage->GetSize().width,
                                        ioImage->GetSize().height));
    program->SetLayerTransform(GetEffectiveTransform());
    program->SetLayerOpacity(GetEffectiveOpacity());
    program->SetRenderOffset(aOffset);
    program->SetTextureUnit(0);
    program->LoadMask(GetMaskLayer());

    mOGLManager->BindAndDrawQuadWithTextureRect(program,
                                                GetVisibleRegion().GetBounds(),
                                                nsIntSize(ioImage->GetSize().width, ioImage->GetSize().height));
#endif
  }
  GetContainer()->NotifyPaintedImage(image);
}
// Creates a VideoData for the given YCbCr buffer. When no image is supplied
// a new one is created from the container (preferring gralloc for YV12 data)
// and the planes are copied into it; a caller-supplied image gets the plane
// pointers attached without copying. Validates plane geometry and the
// picture rect before touching pixel data; returns nullptr on failure, or an
// image-less placeholder VideoData when neither image nor container is given.
VideoData* VideoData::Create(VideoInfo& aInfo,
                             ImageContainer* aContainer,
                             Image* aImage,
                             int64_t aOffset,
                             int64_t aTime,
                             int64_t aDuration,
                             const YCbCrBuffer& aBuffer,
                             bool aKeyframe,
                             int64_t aTimecode,
                             nsIntRect aPicture)
{
  if (!aImage && !aContainer) {
    // Create a dummy VideoData with no image. This gives us something to
    // send to media streams if necessary.
    nsAutoPtr<VideoData> v(new VideoData(aOffset,
                                         aTime,
                                         aDuration,
                                         aKeyframe,
                                         aTimecode,
                                         aInfo.mDisplay));
    return v.forget();
  }

  // The following situation should never happen unless there is a bug
  // in the decoder
  if (aBuffer.mPlanes[1].mWidth != aBuffer.mPlanes[2].mWidth ||
      aBuffer.mPlanes[1].mHeight != aBuffer.mPlanes[2].mHeight) {
    NS_ERROR("C planes with different sizes");
    return nullptr;
  }

  // The following situations could be triggered by invalid input
  if (aPicture.width <= 0 || aPicture.height <= 0) {
    NS_WARNING("Empty picture rect");
    return nullptr;
  }
  if (!ValidatePlane(aBuffer.mPlanes[0]) ||
      !ValidatePlane(aBuffer.mPlanes[1]) ||
      !ValidatePlane(aBuffer.mPlanes[2])) {
    NS_WARNING("Invalid plane size");
    return nullptr;
  }

  // Ensure the picture size specified in the headers can be extracted out of
  // the frame we've been supplied without indexing out of bounds.
  CheckedUint32 xLimit = aPicture.x + CheckedUint32(aPicture.width);
  CheckedUint32 yLimit = aPicture.y + CheckedUint32(aPicture.height);
  if (!xLimit.isValid() ||
      xLimit.value() > aBuffer.mPlanes[0].mStride ||
      !yLimit.isValid() ||
      yLimit.value() > aBuffer.mPlanes[0].mHeight) {
    // The specified picture dimensions can't be contained inside the video
    // frame, we'll stomp memory if we try to copy it. Fail.
    NS_WARNING("Overflowing picture rect");
    return nullptr;
  }

  nsAutoPtr<VideoData> v(new VideoData(aOffset,
                                       aTime,
                                       aDuration,
                                       aKeyframe,
                                       aTimecode,
                                       aInfo.mDisplay));
  const YCbCrBuffer::Plane &Y = aBuffer.mPlanes[0];
  const YCbCrBuffer::Plane &Cb = aBuffer.mPlanes[1];
  const YCbCrBuffer::Plane &Cr = aBuffer.mPlanes[2];

  if (!aImage) {
    // Currently our decoder only knows how to output to PLANAR_YCBCR
    // format.
    // For YV12 data, offer both formats (gralloc preferred when available);
    // otherwise only plain planar YCbCr.
    ImageFormat format[2] = {PLANAR_YCBCR, GRALLOC_PLANAR_YCBCR};
    if (IsYV12Format(Y, Cb, Cr)) {
      v->mImage = aContainer->CreateImage(format, 2);
    } else {
      v->mImage = aContainer->CreateImage(format, 1);
    }
  } else {
    v->mImage = aImage;
  }

  if (!v->mImage) {
    return nullptr;
  }
  NS_ASSERTION(v->mImage->GetFormat() == PLANAR_YCBCR ||
               v->mImage->GetFormat() == GRALLOC_PLANAR_YCBCR,
               "Wrong format?");
  PlanarYCbCrImage* videoImage = static_cast<PlanarYCbCrImage*>(v->mImage.get());

  // Plane pointers honour each plane's offset; mSkip carries per-pixel
  // skip counts through to the image.
  PlanarYCbCrData data;
  data.mYChannel = Y.mData + Y.mOffset;
  data.mYSize = gfxIntSize(Y.mWidth, Y.mHeight);
  data.mYStride = Y.mStride;
  data.mYSkip = Y.mSkip;
  data.mCbChannel = Cb.mData + Cb.mOffset;
  data.mCrChannel = Cr.mData + Cr.mOffset;
  data.mCbCrSize = gfxIntSize(Cb.mWidth, Cb.mHeight);
  data.mCbCrStride = Cb.mStride;
  data.mCbSkip = Cb.mSkip;
  data.mCrSkip = Cr.mSkip;
  data.mPicX = aPicture.x;
  data.mPicY = aPicture.y;
  data.mPicSize = gfxIntSize(aPicture.width, aPicture.height);
  data.mStereoMode = aInfo.mStereoMode;

  videoImage->SetDelayedConversion(true);
  // Copy the planes only for images we allocated ourselves; a
  // caller-supplied image takes the pointers without copying.
  if (!aImage) {
    videoImage->SetData(data);
  } else {
    videoImage->SetDataNoCopy(data);
  }

  return v.forget();
}
// Draw this image layer's current frame with OpenGL.
//
// Dispatches on the image format: planar YCbCr (three GL textures + YCbCr
// shader program), Cairo surfaces (single RGBA texture, with a GLX
// texture-from-pixmap fast path on GTK2), and — on macOS — IOSurface-backed
// plugin images (rectangle textures). Per-format backend texture data is
// cached on the image and re-allocated if it was created by a different GL
// context.
void ImageLayerOGL::RenderLayer(int,
                                const nsIntPoint& aOffset)
{
  nsRefPtr<ImageContainer> container = GetContainer();

  if (!container)
    return;

  mOGLManager->MakeCurrent();

  // Keep the image alive and stable for the duration of the draw.
  AutoLockImage autoLock(container);

  Image *image = autoLock.GetImage();
  if (!image) {
    return;
  }

  NS_ASSERTION(image->GetFormat() != Image::REMOTE_IMAGE_BITMAP,
    "Remote images aren't handled yet in OGL layers!");
  NS_ASSERTION(mScaleMode == SCALE_NONE,
    "Scale modes other than none not handled yet in OGL layers!");

  if (image->GetFormat() == Image::PLANAR_YCBCR) {
    PlanarYCbCrImage *yuvImage = static_cast<PlanarYCbCrImage*>(image);

    if (!yuvImage->mBufferSize) {
      // No pixel data uploaded yet; nothing to draw.
      return;
    }

    PlanarYCbCrOGLBackendData *data =
      static_cast<PlanarYCbCrOGLBackendData*>(yuvImage->GetBackendData(LayerManager::LAYERS_OPENGL));

    if (data && data->mTextures->GetGLContext() != gl()) {
      // If these textures were allocated by another layer manager,
      // clear them out and re-allocate below.
      data = nsnull;
      yuvImage->SetBackendData(LayerManager::LAYERS_OPENGL, nsnull);
    }

    if (!data) {
      AllocateTexturesYCbCr(yuvImage);
      data = static_cast<PlanarYCbCrOGLBackendData*>(yuvImage->GetBackendData(LayerManager::LAYERS_OPENGL));
    }

    if (!data || data->mTextures->GetGLContext() != gl()) {
      // XXX - Can this ever happen? If so I need to fix this!
      return;
    }

    // Bind Y/Cb/Cr planes to texture units 0/1/2 (bound in reverse so we
    // finish on unit 0, which the quad-draw helpers expect to be active).
    gl()->MakeCurrent();
    gl()->fActiveTexture(LOCAL_GL_TEXTURE2);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, data->mTextures[2].GetTextureID());
    gl()->ApplyFilterToBoundTexture(mFilter);
    gl()->fActiveTexture(LOCAL_GL_TEXTURE1);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, data->mTextures[1].GetTextureID());
    gl()->ApplyFilterToBoundTexture(mFilter);
    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, data->mTextures[0].GetTextureID());
    gl()->ApplyFilterToBoundTexture(mFilter);

    YCbCrTextureLayerProgram *program = mOGLManager->GetYCbCrLayerProgram();

    program->Activate();
    program->SetLayerQuadRect(nsIntRect(0, 0,
                                        yuvImage->mSize.width,
                                        yuvImage->mSize.height));
    program->SetLayerTransform(GetEffectiveTransform());
    program->SetLayerOpacity(GetEffectiveOpacity());
    program->SetRenderOffset(aOffset);
    program->SetYCbCrTextureUnits(0, 1, 2);

    mOGLManager->BindAndDrawQuadWithTextureRect(program,
                                                yuvImage->mData.GetPictureRect(),
                                                nsIntSize(yuvImage->mData.mYSize.width,
                                                          yuvImage->mData.mYSize.height));

    // We shouldn't need to do this, but do it anyway just in case
    // someone else forgets.
    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
  } else if (image->GetFormat() == Image::CAIRO_SURFACE) {
    CairoImage *cairoImage = static_cast<CairoImage*>(image);

    if (!cairoImage->mSurface) {
      return;
    }

    CairoOGLBackendData *data =
      static_cast<CairoOGLBackendData*>(cairoImage->GetBackendData(LayerManager::LAYERS_OPENGL));

    if (data && data->mTexture.GetGLContext() != gl()) {
      // If this texture was allocated by another layer manager, clear
      // it out and re-allocate below.
      data = nsnull;
      cairoImage->SetBackendData(LayerManager::LAYERS_OPENGL, nsnull);
    }

    if (!data) {
      AllocateTexturesCairo(cairoImage);
      data = static_cast<CairoOGLBackendData*>(cairoImage->GetBackendData(LayerManager::LAYERS_OPENGL));
    }

    if (!data || data->mTexture.GetGLContext() != gl()) {
      // XXX - Can this ever happen? If so I need to fix this!
      return;
    }

    gl()->MakeCurrent();
    unsigned int iwidth = cairoImage->mSize.width;
    unsigned int iheight = cairoImage->mSize.height;
    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, data->mTexture.GetTextureID());

#if defined(MOZ_WIDGET_GTK2) && !defined(MOZ_PLATFORM_MAEMO)
    // Texture-from-pixmap: bind the surface's X pixmap directly as the
    // texture image, avoiding a copy.
    GLXPixmap pixmap;

    if (cairoImage->mSurface) {
        pixmap = sGLXLibrary.CreatePixmap(cairoImage->mSurface);
        NS_ASSERTION(pixmap, "Failed to create pixmap!");
        if (pixmap) {
            sGLXLibrary.BindTexImage(pixmap);
        }
    }
#endif

    ColorTextureLayerProgram *program =
      mOGLManager->GetColorTextureLayerProgram(data->mLayerProgram);

    gl()->ApplyFilterToBoundTexture(mFilter);

    program->Activate();
    // The following uniform controls the scaling of the vertex coords.
    // Instead of setting the scale here and using coords in the range [0,1], we
    // set an identity transform and use pixel coordinates below
    program->SetLayerQuadRect(nsIntRect(0, 0, 1, 1));
    program->SetLayerTransform(GetEffectiveTransform());
    program->SetLayerOpacity(GetEffectiveOpacity());
    program->SetRenderOffset(aOffset);
    program->SetTextureUnit(0);

    nsIntRect rect = GetVisibleRegion().GetBounds();

    GLContext::RectTriangles triangleBuffer;

    // Wrap texture coordinates so the image tiles across the visible rect.
    float tex_offset_u = float(rect.x % iwidth) / iwidth;
    float tex_offset_v = float(rect.y % iheight) / iheight;
    triangleBuffer.addRect(rect.x, rect.y,
                           rect.x + rect.width, rect.y + rect.height,
                           tex_offset_u, tex_offset_v,
                           tex_offset_u + float(rect.width) / float(iwidth),
                           tex_offset_v + float(rect.height) / float(iheight));

    GLuint vertAttribIndex =
        program->AttribLocation(LayerProgram::VertexAttrib);
    GLuint texCoordAttribIndex =
        program->AttribLocation(LayerProgram::TexCoordAttrib);
    NS_ASSERTION(texCoordAttribIndex != GLuint(-1), "no texture coords?");

    // Client-side vertex arrays: unbind any VBO first.
    gl()->fBindBuffer(LOCAL_GL_ARRAY_BUFFER, 0);

    gl()->fVertexAttribPointer(vertAttribIndex, 2,
                               LOCAL_GL_FLOAT, LOCAL_GL_FALSE, 0,
                               triangleBuffer.vertexPointer());

    gl()->fVertexAttribPointer(texCoordAttribIndex, 2,
                               LOCAL_GL_FLOAT, LOCAL_GL_FALSE, 0,
                               triangleBuffer.texCoordPointer());

    {
      gl()->fEnableVertexAttribArray(texCoordAttribIndex);
      {
        gl()->fEnableVertexAttribArray(vertAttribIndex);

        gl()->fDrawArrays(LOCAL_GL_TRIANGLES, 0, triangleBuffer.elements());

        gl()->fDisableVertexAttribArray(vertAttribIndex);
      }
      gl()->fDisableVertexAttribArray(texCoordAttribIndex);
    }

#if defined(MOZ_WIDGET_GTK2) && !defined(MOZ_PLATFORM_MAEMO)
    if (cairoImage->mSurface && pixmap) {
        sGLXLibrary.ReleaseTexImage(pixmap);
        sGLXLibrary.DestroyPixmap(pixmap);
    }
#endif
#ifdef XP_MACOSX
  } else if (image->GetFormat() == Image::MAC_IO_SURFACE) {
     MacIOSurfaceImage *ioImage =
       static_cast<MacIOSurfaceImage*>(image);

     if (!mOGLManager->GetThebesLayerCallback()) {
       // If its an empty transaction we still need to update
       // the plugin IO Surface and make sure we grab the
       // new image
       ioImage->Update(GetContainer());
       image = nsnull;
       // Refresh the lock so we observe the image swapped in by Update().
       autoLock.Refresh();
       image = autoLock.GetImage();
       gl()->MakeCurrent();
       ioImage = static_cast<MacIOSurfaceImage*>(image);
     }

     if (!ioImage) {
       return;
     }

     gl()->fActiveTexture(LOCAL_GL_TEXTURE0);

     if (!ioImage->GetBackendData(LayerManager::LAYERS_OPENGL)) {
       AllocateTextureIOSurface(ioImage, gl());
     }

     MacIOSurfaceImageOGLBackendData *data =
      static_cast<MacIOSurfaceImageOGLBackendData*>(ioImage->GetBackendData(LayerManager::LAYERS_OPENGL));

     gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
     gl()->fBindTexture(LOCAL_GL_TEXTURE_RECTANGLE_ARB, data->mTexture.GetTextureID());

     ColorTextureLayerProgram *program =
       mOGLManager->GetRGBARectLayerProgram();

     program->Activate();
     if (program->GetTexCoordMultiplierUniformLocation() != -1) {
       // 2DRect case, get the multiplier right for a sampler2DRect
       // (rectangle textures use unnormalized pixel coordinates).
       float f[] = { float(ioImage->GetSize().width), float(ioImage->GetSize().height) };
       program->SetUniform(program->GetTexCoordMultiplierUniformLocation(),
                           2, f);
     } else {
       NS_ASSERTION(0, "no rects?");
     }

     program->SetLayerQuadRect(nsIntRect(0, 0,
                                         ioImage->GetSize().width,
                                         ioImage->GetSize().height));
     program->SetLayerTransform(GetEffectiveTransform());
     program->SetLayerOpacity(GetEffectiveOpacity());
     program->SetRenderOffset(aOffset);
     program->SetTextureUnit(0);

     mOGLManager->BindAndDrawQuad(program);
     gl()->fBindTexture(LOCAL_GL_TEXTURE_RECTANGLE_ARB, 0);
#endif
  }

  GetContainer()->NotifyPaintedImage(image);
}
// Prepare the raw pixel data of a video chunk for the VP8 encoder.
//
// The encoder consumes I420 (4:2:0 planar YUV). Three paths:
//  1. The frame is already 420 planar with no chroma interleaving: point
//     mVPXImageWrapper directly at the image's planes (zero-copy).
//  2. The frame is some other YCbCr layout (NV12/NV21/I444/I422): convert
//     into the reusable mI420Frame buffer with libyuv.
//  3. The frame is not YCbCr at all: read it back as a source surface and
//     convert RGB(A)/RGB565 to I420 with libyuv.
//
// Muted or null chunks are replaced by a cached black frame. Frames whose
// size differs from the configured mFrameWidth/mFrameHeight are rejected,
// so the mI420Frame buffer never needs to grow after first allocation.
//
// Returns NS_OK on success, NS_ERROR_FAILURE for invalid/unconvertible
// frames, NS_ERROR_NOT_IMPLEMENTED for unsupported pixel layouts.
nsresult VP8TrackEncoder::PrepareRawFrame(VideoChunk &aChunk)
{
  RefPtr<Image> img;
  if (aChunk.mFrame.GetForceBlack() || aChunk.IsNull()) {
    if (!mMuteFrame) {
      // Lazily create and cache one black frame for muted/null chunks.
      mMuteFrame = VideoFrame::CreateBlackImage(gfx::IntSize(mFrameWidth, mFrameHeight));
      MOZ_ASSERT(mMuteFrame);
    }
    img = mMuteFrame;
  } else {
    img = aChunk.mFrame.GetImage();
  }

  if (img->GetSize() != IntSize(mFrameWidth, mFrameHeight)) {
    VP8LOG("Dynamic resolution changes (was %dx%d, now %dx%d) are unsupported\n",
           mFrameWidth, mFrameHeight, img->GetSize().width, img->GetSize().height);
    return NS_ERROR_FAILURE;
  }

  ImageFormat format = img->GetFormat();
  if (format == ImageFormat::PLANAR_YCBCR) {
    PlanarYCbCrImage* yuv = static_cast<PlanarYCbCrImage *>(img.get());

    MOZ_RELEASE_ASSERT(yuv);
    if (!yuv->IsValid()) {
      NS_WARNING("PlanarYCbCrImage is not valid");
      return NS_ERROR_FAILURE;
    }
    const PlanarYCbCrImage::Data *data = yuv->GetData();

    if (isYUV420(data) && !data->mCbSkip) {
      // 420 planar, no need for conversions — hand the encoder the image's
      // own planes and strides directly.
      mVPXImageWrapper->planes[VPX_PLANE_Y] = data->mYChannel;
      mVPXImageWrapper->planes[VPX_PLANE_U] = data->mCbChannel;
      mVPXImageWrapper->planes[VPX_PLANE_V] = data->mCrChannel;
      mVPXImageWrapper->stride[VPX_PLANE_Y] = data->mYStride;
      mVPXImageWrapper->stride[VPX_PLANE_U] = data->mCbCrStride;
      mVPXImageWrapper->stride[VPX_PLANE_V] = data->mCbCrStride;

      return NS_OK;
    }
  }

  // Not 420 planar, have to convert into the reusable I420 scratch buffer.
  uint32_t yPlaneSize = mFrameWidth * mFrameHeight;
  uint32_t halfWidth = (mFrameWidth + 1) / 2;
  uint32_t halfHeight = (mFrameHeight + 1) / 2;
  uint32_t uvPlaneSize = halfWidth * halfHeight;
  if (mI420Frame.IsEmpty()) {
    mI420Frame.SetLength(yPlaneSize + uvPlaneSize * 2);
  }
  // Frame size is fixed (checked above), so the cached buffer always fits.
  MOZ_ASSERT(mI420Frame.Length() >= (yPlaneSize + uvPlaneSize * 2));

  uint8_t *y = mI420Frame.Elements();
  uint8_t *cb = mI420Frame.Elements() + yPlaneSize;
  uint8_t *cr = mI420Frame.Elements() + yPlaneSize + uvPlaneSize;

  if (format == ImageFormat::PLANAR_YCBCR) {
    PlanarYCbCrImage* yuv = static_cast<PlanarYCbCrImage *>(img.get());

    MOZ_RELEASE_ASSERT(yuv);
    if (!yuv->IsValid()) {
      NS_WARNING("PlanarYCbCrImage is not valid");
      return NS_ERROR_FAILURE;
    }
    const PlanarYCbCrImage::Data *data = yuv->GetData();

    int rv;
    std::string yuvFormat;
    if (isYUV420(data) && data->mCbSkip) {
      // If mCbSkip is set, we assume it's nv12 or nv21.
      if (data->mCbChannel < data->mCrChannel) { // nv12
        rv = libyuv::NV12ToI420(data->mYChannel, data->mYStride,
                                data->mCbChannel, data->mCbCrStride,
                                y, mFrameWidth,
                                cb, halfWidth,
                                cr, halfWidth,
                                mFrameWidth, mFrameHeight);
        yuvFormat = "NV12";
      } else { // nv21
        rv = libyuv::NV21ToI420(data->mYChannel, data->mYStride,
                                data->mCrChannel, data->mCbCrStride,
                                y, mFrameWidth,
                                cb, halfWidth,
                                cr, halfWidth,
                                mFrameWidth, mFrameHeight);
        yuvFormat = "NV21";
      }
    } else if (isYUV444(data) && !data->mCbSkip) {
      rv = libyuv::I444ToI420(data->mYChannel, data->mYStride,
                              data->mCbChannel, data->mCbCrStride,
                              data->mCrChannel, data->mCbCrStride,
                              y, mFrameWidth,
                              cb, halfWidth,
                              cr, halfWidth,
                              mFrameWidth, mFrameHeight);
      yuvFormat = "I444";
    } else if (isYUV422(data) && !data->mCbSkip) {
      rv = libyuv::I422ToI420(data->mYChannel, data->mYStride,
                              data->mCbChannel, data->mCbCrStride,
                              data->mCrChannel, data->mCbCrStride,
                              y, mFrameWidth,
                              cb, halfWidth,
                              cr, halfWidth,
                              mFrameWidth, mFrameHeight);
      yuvFormat = "I422";
    } else {
      VP8LOG("Unsupported planar format\n");
      NS_ASSERTION(false, "Unsupported planar format");
      return NS_ERROR_NOT_IMPLEMENTED;
    }

    if (rv != 0) {
      VP8LOG("Converting an %s frame to I420 failed\n", yuvFormat.c_str());
      return NS_ERROR_FAILURE;
    }

    // BUG FIX: the %s argument was missing here, which is undefined behavior
    // in a printf-style logger (it would read a garbage pointer).
    VP8LOG("Converted an %s frame to I420\n", yuvFormat.c_str());
  } else {
    // Not YCbCr at all. Try to get access to the raw data and convert.
    RefPtr<SourceSurface> surf = img->GetAsSourceSurface();
    if (!surf) {
      VP8LOG("Getting surface from %s image failed\n", Stringify(format).c_str());
      return NS_ERROR_FAILURE;
    }

    RefPtr<DataSourceSurface> data = surf->GetDataSurface();
    if (!data) {
      VP8LOG("Getting data surface from %s image with %s (%s) surface failed\n",
             Stringify(format).c_str(), Stringify(surf->GetType()).c_str(),
             Stringify(surf->GetFormat()).c_str());
      return NS_ERROR_FAILURE;
    }

    DataSourceSurface::ScopedMap map(data, DataSourceSurface::READ);
    if (!map.IsMapped()) {
      VP8LOG("Reading DataSourceSurface from %s image with %s (%s) surface failed\n",
             Stringify(format).c_str(), Stringify(surf->GetType()).c_str(),
             Stringify(surf->GetFormat()).c_str());
      return NS_ERROR_FAILURE;
    }

    int rv;
    switch (surf->GetFormat()) {
      case SurfaceFormat::B8G8R8A8:
      case SurfaceFormat::B8G8R8X8:
        // libyuv's "ARGB" means BGRA in memory order, matching B8G8R8A8/X8.
        rv = libyuv::ARGBToI420(static_cast<uint8*>(map.GetData()),
                                map.GetStride(),
                                y, mFrameWidth,
                                cb, halfWidth,
                                cr, halfWidth,
                                mFrameWidth, mFrameHeight);
        break;
      case SurfaceFormat::R5G6B5_UINT16:
        rv = libyuv::RGB565ToI420(static_cast<uint8*>(map.GetData()),
                                  map.GetStride(),
                                  y, mFrameWidth,
                                  cb, halfWidth,
                                  cr, halfWidth,
                                  mFrameWidth, mFrameHeight);
        break;
      default:
        VP8LOG("Unsupported SourceSurface format %s\n",
               Stringify(surf->GetFormat()).c_str());
        NS_ASSERTION(false, "Unsupported SourceSurface format");
        return NS_ERROR_NOT_IMPLEMENTED;
    }

    if (rv != 0) {
      VP8LOG("%s to I420 conversion failed\n",
             Stringify(surf->GetFormat()).c_str());
      return NS_ERROR_FAILURE;
    }

    VP8LOG("Converted a %s frame to I420\n",
           Stringify(surf->GetFormat()).c_str());
  }

  // Point the encoder wrapper at the freshly converted I420 scratch buffer.
  mVPXImageWrapper->planes[VPX_PLANE_Y] = y;
  mVPXImageWrapper->planes[VPX_PLANE_U] = cb;
  mVPXImageWrapper->planes[VPX_PLANE_V] = cr;
  mVPXImageWrapper->stride[VPX_PLANE_Y] = mFrameWidth;
  mVPXImageWrapper->stride[VPX_PLANE_U] = halfWidth;
  mVPXImageWrapper->stride[VPX_PLANE_V] = halfWidth;

  return NS_OK;
}
// Create a TextureClient holding a copy of |aImage|'s pixel data, suitable
// for forwarding to the compositor via |aForwarder|.
//
// Planar YCbCr images get a dedicated YCbCr texture client; Android
// SurfaceTexture images wrap the existing surface handle; everything else is
// read back as a SourceSurface and blitted into a drawing texture client.
// Returns nullptr on any allocation/lock/copy failure.
/* static */ already_AddRefed<TextureClient>
ImageClient::CreateTextureClientForImage(Image* aImage, KnowsCompositor* aForwarder)
{
  RefPtr<TextureClient> texture;
  if (aImage->GetFormat() == ImageFormat::PLANAR_YCBCR) {
    PlanarYCbCrImage* ycbcr = static_cast<PlanarYCbCrImage*>(aImage);
    const PlanarYCbCrData* data = ycbcr->GetData();
    if (!data) {
      return nullptr;
    }
    texture = TextureClient::CreateForYCbCr(aForwarder,
                                            data->mYSize, data->mYStride,
                                            data->mCbCrSize, data->mCbCrStride,
                                            data->mStereoMode,
                                            data->mColorDepth,
                                            data->mYUVColorSpace,
                                            TextureFlags::DEFAULT);
    if (!texture) {
      return nullptr;
    }

    // RAII lock: released when autoLock goes out of scope.
    TextureClientAutoLock autoLock(texture, OpenMode::OPEN_WRITE_ONLY);
    if (!autoLock.Succeeded()) {
      return nullptr;
    }

    bool status = UpdateYCbCrTextureClient(texture, *data);
    MOZ_ASSERT(status);
    if (!status) {
      return nullptr;
    }
#ifdef MOZ_WIDGET_ANDROID
  } else if (aImage->GetFormat() == ImageFormat::SURFACE_TEXTURE) {
    // Wrap the existing SurfaceTexture handle — no pixel copy.
    gfx::IntSize size = aImage->GetSize();
    SurfaceTextureImage* typedImage = aImage->AsSurfaceTextureImage();
    texture = AndroidSurfaceTextureData::CreateTextureClient(
      typedImage->GetHandle(), size, typedImage->GetContinuous(), typedImage->GetOriginPos(),
      aForwarder->GetTextureForwarder(), TextureFlags::DEFAULT);
#endif
  } else {
    // Generic path: read the image back as a surface and copy it into a
    // drawing-capable texture client.
    RefPtr<gfx::SourceSurface> surface = aImage->GetAsSourceSurface();
    MOZ_ASSERT(surface);
    texture = TextureClient::CreateForDrawing(aForwarder, surface->GetFormat(), aImage->GetSize(),
                                              BackendSelector::Content, TextureFlags::DEFAULT);
    if (!texture) {
      return nullptr;
    }

    MOZ_ASSERT(texture->CanExposeDrawTarget());

    if (!texture->Lock(OpenMode::OPEN_WRITE_ONLY)) {
      return nullptr;
    }

    {
      // We must not keep a reference to the DrawTarget after it has been unlocked.
      DrawTarget* dt = texture->BorrowDrawTarget();
      if (!dt) {
        gfxWarning() << "ImageClientSingle::UpdateImage failed in BorrowDrawTarget";
        // NOTE(review): this early return skips texture->Unlock(); presumably
        // the failed/destroyed TextureClient cleans up its own lock — confirm.
        return nullptr;
      }
      MOZ_ASSERT(surface.get());
      dt->CopySurface(surface, IntRect(IntPoint(), surface->GetSize()), IntPoint());
    }

    texture->Unlock();
  }
  return texture.forget();
}
// Copy |image|'s pixel data into shared memory and wrap it in a SharedImage
// that can be sent over IPC to the compositor process.
//
// Planar YCbCr images are copied row by row into a freshly allocated shmem
// (one buffer holding all three planes). On Gonk, IO-surface and gralloc
// images are wrapped by their existing surface descriptors without copying.
// Returns nullptr on null input or shmem allocation/validation failure.
// Must run on the ImageBridgeChild thread.
SharedImage* ImageContainerChild::CreateSharedImageFromData(Image* image)
{
  NS_ABORT_IF_FALSE(InImageBridgeChildThread(),
                    "Should be in ImageBridgeChild thread.");
  if (!image) {
    return nullptr;
  }
  if (image->GetFormat() == PLANAR_YCBCR ) {
    PlanarYCbCrImage *planarYCbCrImage = static_cast<PlanarYCbCrImage*>(image);
    const PlanarYCbCrImage::Data *data = planarYCbCrImage->GetData();
    NS_ASSERTION(data, "Must be able to retrieve yuv data from image!");
    if (!data) {
      return nullptr;
    }

    SharedMemory::SharedMemoryType shmType = OptimalShmemType();
    size_t size = ShmemYCbCrImage::ComputeMinBufferSize(data->mYSize,
                                                        data->mCbCrSize);
    Shmem shmem;
    if (!AllocUnsafeShmem(size, shmType, &shmem)) {
      return nullptr;
    }

    // Lay out the Y/Cb/Cr plane offsets inside the raw shmem buffer, then
    // wrap it for structured access.
    ShmemYCbCrImage::InitializeBufferInfo(shmem.get<uint8_t>(),
                                          data->mYSize,
                                          data->mCbCrSize);
    ShmemYCbCrImage shmemImage(shmem);

    if (!shmemImage.IsValid() || shmem.Size<uint8_t>() < size) {
      DeallocShmem(shmem);
      return nullptr;
    }

    // Row-by-row copy: source strides may exceed the packed row width.
    for (int i = 0; i < data->mYSize.height; i++) {
      memcpy(shmemImage.GetYData() + i * shmemImage.GetYStride(),
             data->mYChannel + i * data->mYStride,
             data->mYSize.width);
    }
    for (int i = 0; i < data->mCbCrSize.height; i++) {
      memcpy(shmemImage.GetCbData() + i * shmemImage.GetCbCrStride(),
             data->mCbChannel + i * data->mCbCrStride,
             data->mCbCrSize.width);
      memcpy(shmemImage.GetCrData() + i * shmemImage.GetCbCrStride(),
             data->mCrChannel + i * data->mCbCrStride,
             data->mCbCrSize.width);
    }

    // Track outstanding shared images so teardown can wait for them.
    ++mActiveImageCount;
    SharedImage* result = new SharedImage(YCbCrImage(shmem, 0, data->GetPictureRect()));
    return result;
#ifdef MOZ_WIDGET_GONK
  } else if (image->GetFormat() == GONK_IO_SURFACE) {
    // Zero-copy: hand over the existing surface descriptor.
    GonkIOSurfaceImage* gonkImage = static_cast<GonkIOSurfaceImage*>(image);
    SharedImage* result = new SharedImage(gonkImage->GetSurfaceDescriptor());
    return result;
  } else if (image->GetFormat() == GRALLOC_PLANAR_YCBCR) {
    // Zero-copy: hand over the existing gralloc descriptor.
    GrallocPlanarYCbCrImage* GrallocImage = static_cast<GrallocPlanarYCbCrImage*>(image);
    SharedImage* result = new SharedImage(GrallocImage->GetSurfaceDescriptor());
    return result;
#endif
  } else {
    NS_RUNTIMEABORT("TODO: Only YCbCrImage is supported here right now.");
  }
  return nullptr;
}