void
MediaEngineGonkVideoSource::RotateImage(layers::Image* aImage,
                                        uint32_t aWidth, uint32_t aHeight)
{
  layers::GrallocImage* nativeImage = static_cast<layers::GrallocImage*>(aImage);
  android::sp<GraphicBuffer> graphicBuffer = nativeImage->GetGraphicBuffer();
  void* pMem = nullptr;
  // Bug 1109957: size will be wrong if width or height are odd.
  uint32_t size = aWidth * aHeight * 3 / 2;
  MOZ_ASSERT(!(aWidth & 1) && !(aHeight & 1));

  graphicBuffer->lock(GraphicBuffer::USAGE_SW_READ_MASK, &pMem);

  uint8_t* srcPtr = static_cast<uint8_t*>(pMem);

  // Create a video frame and append it to the track.
  RefPtr<layers::PlanarYCbCrImage> image = new GonkCameraImage();

  // A 90- or 270-degree rotation swaps the destination dimensions.
  uint32_t dstWidth;
  uint32_t dstHeight;
  if (mRotation == 90 || mRotation == 270) {
    dstWidth = aHeight;
    dstHeight = aWidth;
  } else {
    dstWidth = aWidth;
    dstHeight = aHeight;
  }

  uint32_t half_width = dstWidth / 2;

  MOZ_ASSERT(mTextureClientAllocator);
  RefPtr<layers::TextureClient> textureClient =
    mTextureClientAllocator->CreateOrRecycle(gfx::SurfaceFormat::YUV,
                                             gfx::IntSize(dstWidth, dstHeight),
                                             layers::BackendSelector::Content,
                                             layers::TextureFlags::DEFAULT,
                                             layers::ALLOC_DISALLOW_BUFFERTEXTURECLIENT);
  if (textureClient) {
    android::sp<android::GraphicBuffer> destBuffer =
      static_cast<layers::GrallocTextureData*>(textureClient->GetInternalData())->GetGraphicBuffer();
    void* destMem = nullptr;
    destBuffer->lock(android::GraphicBuffer::USAGE_SW_WRITE_OFTEN, &destMem);
    uint8_t* dstPtr = static_cast<uint8_t*>(destMem);

    int32_t yStride = destBuffer->getStride();
    // Align the chroma stride to a 16-byte boundary.
    int32_t uvStride = ((yStride / 2) + 15) & ~0x0F;

    // The gralloc destination is laid out like Android YV12: the Cr (V)
    // plane directly follows the Y plane, and the Cb (U) plane follows Cr.
    libyuv::ConvertToI420(srcPtr, size,
                          dstPtr, yStride,
                          dstPtr + (yStride * dstHeight + (uvStride * dstHeight / 2)), uvStride,
                          dstPtr + (yStride * dstHeight), uvStride,
                          0, 0,
                          graphicBuffer->getStride(), aHeight,
                          aWidth, aHeight,
                          static_cast<libyuv::RotationMode>(mRotation),
                          libyuv::FOURCC_NV21);
    destBuffer->unlock();

    image->AsGrallocImage()->AdoptData(textureClient, gfx::IntSize(dstWidth, dstHeight));
  } else {
    // Handle the out-of-gralloc case: fall back to a plain I420 buffer
    // (Y plane, then Cb, then Cr).
    image = mImageContainer->CreatePlanarYCbCrImage();
    uint8_t* dstPtr = image->AsPlanarYCbCrImage()->AllocateAndGetNewBuffer(size);

    libyuv::ConvertToI420(srcPtr, size,
                          dstPtr, dstWidth,
                          dstPtr + (dstWidth * dstHeight), half_width,
                          dstPtr + (dstWidth * dstHeight * 5 / 4), half_width,
                          0, 0,
                          graphicBuffer->getStride(), aHeight,
                          aWidth, aHeight,
                          static_cast<libyuv::RotationMode>(mRotation),
                          ConvertPixelFormatToFOURCC(graphicBuffer->getPixelFormat()));

    const uint8_t lumaBpp = 8;
    const uint8_t chromaBpp = 4;
    layers::PlanarYCbCrData data;
    data.mYChannel = dstPtr;
    data.mYSize = IntSize(dstWidth, dstHeight);
    data.mYStride = dstWidth * lumaBpp / 8;
    data.mCbCrStride = dstWidth * chromaBpp / 8;
    data.mCbChannel = dstPtr + dstHeight * data.mYStride;
    data.mCrChannel = data.mCbChannel + data.mCbCrStride * (dstHeight / 2);
    data.mCbCrSize = IntSize(dstWidth / 2, dstHeight / 2);
    data.mPicX = 0;
    data.mPicY = 0;
    data.mPicSize = IntSize(dstWidth, dstHeight);
    data.mStereoMode = StereoMode::MONO;

    image->AsPlanarYCbCrImage()->AdoptData(data);
  }
  graphicBuffer->unlock();

  // Implicitly releases the last preview image.
  mImage = image.forget();
}
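// ---------------------------------------------------------------------------
// The plane-pointer arithmetic in the two ConvertToI420() calls above is easy
// to misread, so here is a standalone sketch (illustration only, not part of
// the original source) that computes the same byte offsets. In the gralloc
// path the destination is laid out like Android YV12 (Y plane, then Cr, then
// Cb, with the chroma stride rounded up to a 16-byte boundary); in the
// fallback path it is plain I420 (Y, then Cb, then Cr, chroma stride =
// width / 2). The struct and function names below are hypothetical.
#include <cstdint>
#include <cstdio>

struct PlaneLayout {
  size_t yOffset, cbOffset, crOffset;  // byte offsets from the buffer start
  int32_t yStride, cbCrStride;         // bytes per row of each plane
};

// YV12-style layout matching the gralloc destination buffer above.
static PlaneLayout Yv12Layout(int32_t aYStride, uint32_t aHeight)
{
  int32_t uvStride = ((aYStride / 2) + 15) & ~0x0F;  // align to 16 bytes
  size_t ySize = size_t(aYStride) * aHeight;
  size_t crSize = size_t(uvStride) * (aHeight / 2);
  // Cr directly follows Y; Cb follows Cr.
  return { 0, ySize + crSize, ySize, aYStride, uvStride };
}

// Plain I420 layout matching the AllocateAndGetNewBuffer() fallback above.
static PlaneLayout I420Layout(uint32_t aWidth, uint32_t aHeight)
{
  size_t ySize = size_t(aWidth) * aHeight;
  size_t cbSize = size_t(aWidth / 2) * (aHeight / 2);
  // Cb directly follows Y; Cr follows Cb, i.e. at width * height * 5 / 4.
  return { 0, ySize, ySize + cbSize, int32_t(aWidth), int32_t(aWidth / 2) };
}

int main()
{
  PlaneLayout g = Yv12Layout(1280, 720);  // 1280 stands in for getStride()
  PlaneLayout f = I420Layout(1280, 720);
  printf("gralloc (YV12): Cb at %zu, Cr at %zu\n", g.cbOffset, g.crOffset);
  printf("fallback (I420): Cb at %zu, Cr at %zu\n", f.cbOffset, f.crOffset);
  return 0;
}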
void
VideoFrameContainer::SetCurrentFrames(const VideoSegment& aSegment)
{
  TRACE();
  if (aSegment.IsEmpty()) {
    return;
  }

  MutexAutoLock lock(mMutex);
  AutoTimer<Telemetry::VFC_SETVIDEOSEGMENT_LOCK_HOLD_MS> lockHold;

  // Collect any new frames produced in this iteration.
  AutoTArray<ImageContainer::NonOwningImage, 4> newImages;
  PrincipalHandle lastPrincipalHandle = PRINCIPAL_HANDLE_NONE;

  VideoSegment::ConstChunkIterator iter(aSegment);
  while (!iter.IsEnded()) {
    VideoChunk chunk = *iter;

    const VideoFrame* frame = &chunk.mFrame;
    if (*frame == mLastPlayedVideoFrame) {
      iter.Next();
      continue;
    }

    Image* image = frame->GetImage();
    CONTAINER_LOG(LogLevel::Verbose,
                  ("VideoFrameContainer %p writing video frame %p (%d x %d)",
                   this, image, frame->GetIntrinsicSize().width,
                   frame->GetIntrinsicSize().height));

    if (frame->GetForceBlack()) {
      if (!mBlackImage) {
        RefPtr<Image> blackImage = GetImageContainer()->CreatePlanarYCbCrImage();
        if (blackImage) {
          // Sets the image to a single black pixel, which will be scaled to
          // fill the rendered size.
          if (SetImageToBlackPixel(blackImage->AsPlanarYCbCrImage())) {
            mBlackImage = blackImage;
          }
        }
      }
      if (mBlackImage) {
        image = mBlackImage;
      }
    }

    // Don't append a null image to newImages.
    if (!image) {
      iter.Next();
      continue;
    }

    newImages.AppendElement(
        ImageContainer::NonOwningImage(image, chunk.mTimeStamp));

    lastPrincipalHandle = chunk.GetPrincipalHandle();
    mLastPlayedVideoFrame = *frame;
    iter.Next();
  }

  // Don't update if there are no changes.
  if (newImages.IsEmpty()) {
    return;
  }

  AutoTArray<ImageContainer::NonOwningImage, 4> images;

  bool principalHandleChanged =
      lastPrincipalHandle != PRINCIPAL_HANDLE_NONE &&
      lastPrincipalHandle != GetLastPrincipalHandleLocked();

  // Add the frames from this iteration.
  for (auto& image : newImages) {
    image.mFrameID = NewFrameID();
    images.AppendElement(image);
  }

  if (principalHandleChanged) {
    UpdatePrincipalHandleForFrameIDLocked(lastPrincipalHandle,
                                          newImages.LastElement().mFrameID);
  }

  SetCurrentFramesLocked(mLastPlayedVideoFrame.GetIntrinsicSize(), images);

  nsCOMPtr<nsIRunnable> event = new VideoFrameContainerInvalidateRunnable(this);
  mMainThread->Dispatch(event.forget());

  images.ClearAndRetainStorage();
}
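// ---------------------------------------------------------------------------
// SetImageToBlackPixel() is called above but not defined in this excerpt.
// A minimal sketch of what such a helper can look like (hypothetical, not the
// verbatim Gecko implementation): fill a 1x1 planar YCbCr image with
// video-range black, i.e. Y = 0x10 and Cb = Cr = 0x80, which the compositor
// then scales to fill the rendered size. It assumes the same PlanarYCbCrData
// fields used in RotateImage() above and a CopyData() method that copies the
// pixel data into the image.
static bool SetImageToBlackPixel(PlanarYCbCrImage* aImage)
{
  uint8_t blackPixel[] = { 0x10, 0x80, 0x80 };

  PlanarYCbCrData data;
  data.mYChannel = blackPixel;       // Y = 16: black luma (limited range)
  data.mCbChannel = blackPixel + 1;  // Cb = 128: neutral chroma
  data.mCrChannel = blackPixel + 2;  // Cr = 128: neutral chroma
  data.mYStride = data.mCbCrStride = 1;
  data.mPicSize = data.mYSize = data.mCbCrSize = gfx::IntSize(1, 1);
  return aImage->CopyData(data);
}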