/* static */ already_AddRefed<Image>
VideoFrame::CreateBlackImage(const gfx::IntSize& aSize)
{
  // Allocate a PLANAR_YCBCR image via a freshly created container.
  RefPtr<ImageContainer> container = LayerManager::CreateImageContainer();
  RefPtr<Image> image = container->CreateImage(ImageFormat::PLANAR_YCBCR);
  if (!image) {
    MOZ_ASSERT(false);
    return nullptr;
  }

  PlanarYCbCrImage* planar = static_cast<PlanarYCbCrImage*>(image.get());

  // I420-style layout: full-resolution Y plane followed by the Cb/Cr
  // planes, 3/2 bytes per pixel in total.
  int yPlaneSize = aSize.width * aSize.height;
  int totalSize = ((aSize.width * aSize.height) * 3 / 2);
  ScopedDeletePtr<uint8_t> buffer(new uint8_t[totalSize]);

  // Black in YCbCr: luma 0x10, chroma 0x80.
  memset(buffer.rwget(), 0x10, yPlaneSize);
  memset(buffer.rwget() + yPlaneSize, 0x80, (totalSize - yPlaneSize));

  const uint8_t lumaBpp = 8;
  const uint8_t chromaBpp = 4;

  layers::PlanarYCbCrData data;
  data.mYChannel = buffer.rwget();
  data.mYSize = gfx::IntSize(aSize.width, aSize.height);
  data.mYStride = (int32_t) (aSize.width * lumaBpp / 8.0);
  data.mCbCrStride = (int32_t) (aSize.width * chromaBpp / 8.0);
  data.mCbChannel = buffer.rwget() + aSize.height * data.mYStride;
  data.mCrChannel = data.mCbChannel + aSize.height * data.mCbCrStride / 2;
  data.mCbCrSize = gfx::IntSize(aSize.width / 2, aSize.height / 2);
  data.mPicX = 0;
  data.mPicY = 0;
  data.mPicSize = gfx::IntSize(aSize.width, aSize.height);
  data.mStereoMode = StereoMode::MONO;

  // SetData copies the pixel data, so the local buffer may be released
  // when this function returns.
  if (!planar->SetData(data)) {
    MOZ_ASSERT(false);
    return nullptr;
  }

  return image.forget();
}
Image *CreateI420Image()
{
  // Wrap the raw I420 source buffer (layout: Y, then Cb, then Cr) in a
  // recycling planar image.  Ownership of the returned Image passes to
  // the caller.
  PlanarYCbCrImage *image =
    new RecyclingPlanarYCbCrImage(new BufferRecycleBin());

  const uint32_t lumaBytes = mImageSize.width * mImageSize.height;
  // Chroma dimensions round up so odd-sized images are fully covered.
  const uint32_t chromaWidth = (mImageSize.width + 1) / 2;
  const uint32_t chromaHeight = (mImageSize.height + 1) / 2;
  const uint32_t chromaBytes = chromaWidth * chromaHeight;

  uint8_t *base = mSourceBuffer.Elements();

  PlanarYCbCrData data;
  data.mPicSize = mImageSize;

  // Luma plane sits at the start of the buffer.
  data.mYChannel = base;
  data.mYSize.width = mImageSize.width;
  data.mYSize.height = mImageSize.height;
  data.mYStride = mImageSize.width;
  data.mYSkip = 0;

  // Cb plane follows the luma plane; Cr follows Cb.
  data.mCbChannel = base + lumaBytes;
  data.mCbSkip = 0;
  data.mCrChannel = base + lumaBytes + chromaBytes;
  data.mCrSkip = 0;

  // Shared chroma geometry.
  data.mCbCrStride = chromaWidth;
  data.mCbCrSize.width = chromaWidth;
  data.mCbCrSize.height = chromaHeight;

  image->SetData(data);
  return image;
}
// Builds a VideoData holding a planar-YCbCr Image copied from aBuffer.
// Validates the decoded planes and the picture rect (including integer
// overflow of x+width / y+height) before touching any pixel data, and
// returns nullptr on any validation or allocation failure.  With a null
// aContainer it returns a dummy VideoData carrying no image.
VideoData* VideoData::Create(VideoInfo& aInfo,
                             ImageContainer* aContainer,
                             int64_t aOffset,
                             int64_t aTime,
                             int64_t aEndTime,
                             const YCbCrBuffer& aBuffer,
                             bool aKeyframe,
                             int64_t aTimecode,
                             nsIntRect aPicture)
{
  if (!aContainer) {
    // Create a dummy VideoData with no image. This gives us something to
    // send to media streams if necessary.
    nsAutoPtr<VideoData> v(new VideoData(aOffset,
                                         aTime,
                                         aEndTime,
                                         aKeyframe,
                                         aTimecode,
                                         aInfo.mDisplay));
    return v.forget();
  }

  // The following situation should never happen unless there is a bug
  // in the decoder
  if (aBuffer.mPlanes[1].mWidth != aBuffer.mPlanes[2].mWidth ||
      aBuffer.mPlanes[1].mHeight != aBuffer.mPlanes[2].mHeight) {
    NS_ERROR("C planes with different sizes");
    return nullptr;
  }

  // The following situations could be triggered by invalid input
  if (aPicture.width <= 0 || aPicture.height <= 0) {
    NS_WARNING("Empty picture rect");
    return nullptr;
  }
  if (!ValidatePlane(aBuffer.mPlanes[0]) ||
      !ValidatePlane(aBuffer.mPlanes[1]) ||
      !ValidatePlane(aBuffer.mPlanes[2])) {
    NS_WARNING("Invalid plane size");
    return nullptr;
  }

  // Ensure the picture size specified in the headers can be extracted out of
  // the frame we've been supplied without indexing out of bounds.
  // CheckedUint32 catches arithmetic overflow as well as out-of-range sums.
  CheckedUint32 xLimit = aPicture.x + CheckedUint32(aPicture.width);
  CheckedUint32 yLimit = aPicture.y + CheckedUint32(aPicture.height);
  if (!xLimit.isValid() || xLimit.value() > aBuffer.mPlanes[0].mStride ||
      !yLimit.isValid() || yLimit.value() > aBuffer.mPlanes[0].mHeight)
  {
    // The specified picture dimensions can't be contained inside the video
    // frame, we'll stomp memory if we try to copy it. Fail.
    NS_WARNING("Overflowing picture rect");
    return nullptr;
  }

  nsAutoPtr<VideoData> v(new VideoData(aOffset,
                                       aTime,
                                       aEndTime,
                                       aKeyframe,
                                       aTimecode,
                                       aInfo.mDisplay));
  const YCbCrBuffer::Plane &Y = aBuffer.mPlanes[0];
  const YCbCrBuffer::Plane &Cb = aBuffer.mPlanes[1];
  const YCbCrBuffer::Plane &Cr = aBuffer.mPlanes[2];

  // Currently our decoder only knows how to output to PLANAR_YCBCR
  // format.
  ImageFormat format[2] = {PLANAR_YCBCR, GRALLOC_PLANAR_YCBCR};
  if (IsYV12Format(Y, Cb, Cr)) {
    // YV12 frames get both formats offered; the container picks one.
    v->mImage = aContainer->CreateImage(format, 2);
  } else {
    v->mImage = aContainer->CreateImage(format, 1);
  }
  if (!v->mImage) {
    return nullptr;
  }
  NS_ASSERTION(v->mImage->GetFormat() == PLANAR_YCBCR ||
               v->mImage->GetFormat() == GRALLOC_PLANAR_YCBCR,
               "Wrong format?");
  PlanarYCbCrImage* videoImage =
    static_cast<PlanarYCbCrImage*>(v->mImage.get());

  // Describe the source planes; mOffset shifts each channel pointer to the
  // first byte actually belonging to that plane within its backing buffer.
  PlanarYCbCrImage::Data data;
  data.mYChannel = Y.mData + Y.mOffset;
  data.mYSize = gfxIntSize(Y.mWidth, Y.mHeight);
  data.mYStride = Y.mStride;
  data.mYSkip = Y.mSkip;
  data.mCbChannel = Cb.mData + Cb.mOffset;
  data.mCrChannel = Cr.mData + Cr.mOffset;
  data.mCbCrSize = gfxIntSize(Cb.mWidth, Cb.mHeight);
  data.mCbCrStride = Cb.mStride;
  data.mCbSkip = Cb.mSkip;
  data.mCrSkip = Cr.mSkip;
  data.mPicX = aPicture.x;
  data.mPicY = aPicture.y;
  data.mPicSize = gfxIntSize(aPicture.width, aPicture.height);
  data.mStereoMode = aInfo.mStereoMode;

  // NOTE(review): delayed conversion must be set before SetData — keep
  // this ordering.
  videoImage->SetDelayedConversion(true);
  videoImage->SetData(data);
  return v.forget();
}
// NSPR-era (PRInt64/nsnull) factory for a planar-YCbCr video frame.
// Validates plane consistency and the picture rect (overflow-checked via
// AddOverflow32) before copying aBuffer into a new Image.  Unlike the
// CheckedUint32 variant, a null aContainer is a hard failure here.
VideoData* VideoData::Create(nsVideoInfo& aInfo,
                             ImageContainer* aContainer,
                             PRInt64 aOffset,
                             PRInt64 aTime,
                             PRInt64 aEndTime,
                             const YCbCrBuffer& aBuffer,
                             bool aKeyframe,
                             PRInt64 aTimecode,
                             nsIntRect aPicture)
{
  if (!aContainer) {
    return nsnull;
  }

  // The following situation should never happen unless there is a bug
  // in the decoder
  if (aBuffer.mPlanes[1].mWidth != aBuffer.mPlanes[2].mWidth ||
      aBuffer.mPlanes[1].mHeight != aBuffer.mPlanes[2].mHeight) {
    NS_ERROR("C planes with different sizes");
    return nsnull;
  }

  // The following situations could be triggered by invalid input
  if (aPicture.width <= 0 || aPicture.height <= 0) {
    NS_WARNING("Empty picture rect");
    return nsnull;
  }
  if (!ValidatePlane(aBuffer.mPlanes[0]) ||
      !ValidatePlane(aBuffer.mPlanes[1]) ||
      !ValidatePlane(aBuffer.mPlanes[2])) {
    NS_WARNING("Invalid plane size");
    return nsnull;
  }

  // Ensure the picture size specified in the headers can be extracted out of
  // the frame we've been supplied without indexing out of bounds.
  PRUint32 xLimit;
  PRUint32 yLimit;
  if (!AddOverflow32(aPicture.x, aPicture.width, xLimit) ||
      xLimit > aBuffer.mPlanes[0].mStride ||
      !AddOverflow32(aPicture.y, aPicture.height, yLimit) ||
      yLimit > aBuffer.mPlanes[0].mHeight)
  {
    // The specified picture dimensions can't be contained inside the video
    // frame, we'll stomp memory if we try to copy it. Fail.
    NS_WARNING("Overflowing picture rect");
    return nsnull;
  }

  nsAutoPtr<VideoData> v(new VideoData(aOffset,
                                       aTime,
                                       aEndTime,
                                       aKeyframe,
                                       aTimecode,
                                       aInfo.mDisplay));
  // Currently our decoder only knows how to output to PLANAR_YCBCR
  // format.
  Image::Format format = Image::PLANAR_YCBCR;
  v->mImage = aContainer->CreateImage(&format, 1);
  if (!v->mImage) {
    return nsnull;
  }
  NS_ASSERTION(v->mImage->GetFormat() == Image::PLANAR_YCBCR,
               "Wrong format?");
  PlanarYCbCrImage* videoImage =
    static_cast<PlanarYCbCrImage*>(v->mImage.get());

  // Chroma geometry is taken from plane 1 (Cb); the size check above
  // guarantees plane 2 (Cr) matches.
  PlanarYCbCrImage::Data data;
  data.mYChannel = aBuffer.mPlanes[0].mData;
  data.mYSize = gfxIntSize(aBuffer.mPlanes[0].mWidth,
                           aBuffer.mPlanes[0].mHeight);
  data.mYStride = aBuffer.mPlanes[0].mStride;
  data.mCbChannel = aBuffer.mPlanes[1].mData;
  data.mCrChannel = aBuffer.mPlanes[2].mData;
  data.mCbCrSize = gfxIntSize(aBuffer.mPlanes[1].mWidth,
                              aBuffer.mPlanes[1].mHeight);
  data.mCbCrStride = aBuffer.mPlanes[1].mStride;
  data.mPicX = aPicture.x;
  data.mPicY = aPicture.y;
  data.mPicSize = gfxIntSize(aPicture.width, aPicture.height);
  data.mStereoMode = aInfo.mStereoMode;

  videoImage->SetData(data); // Copies buffer
  return v.forget();
}
// Consumes one camera preview frame: converts semi-planar (YUV420SP)
// chroma into separate planes in place when needed, wraps the buffer in a
// planar YCbCr Image, and appends it to the video track.  Frames are
// dropped while the input stream reports it has buffered enough data.
// NOTE(review): aLength is never used — presumably the hardware guarantees
// a full mWidth*mHeight*3/2 buffer; confirm against the caller.
void
GonkCameraPreview::ReceiveFrame(PRUint8 *aData, PRUint32 aLength)
{
  DOM_CAMERA_LOGI("%s:%d : this=%p\n", __func__, __LINE__, this);

  // Back-pressure: while downstream has enough buffered, drop frames and
  // count the run length for logging.
  if (mInput->HaveEnoughBuffered(TRACK_VIDEO)) {
    if (mDiscardedFrameCount == 0) {
      DOM_CAMERA_LOGI("mInput has enough data buffered, starting to discard\n");
    }
    ++mDiscardedFrameCount;
    return;
  } else if (mDiscardedFrameCount) {
    DOM_CAMERA_LOGI("mInput needs more data again; discarded %d frames in a row\n", mDiscardedFrameCount);
    mDiscardedFrameCount = 0;
  }

  switch (mFormat) {
    case GonkCameraHardware::PREVIEW_FORMAT_YUV420SP:
      {
        // de-interlace the u and v planes
        // The interleaved chroma block after the Y plane is split into a
        // temporary buffer (u half, then v half) and copied back in place.
        uint8_t* y = aData;
        uint32_t yN = mWidth * mHeight;
        NS_ASSERTION((yN & 0x3) == 0, "Invalid image dimensions!");
        uint32_t uvN = yN / 4;
        uint32_t* src = (uint32_t*)( y + yN );
        // NOTE(review): raw new[]/delete[] — fine while nothing between
        // them can throw or return early; revisit if this block changes.
        uint32_t* d = new uint32_t[ uvN / 2 ];
        uint32_t* u = d;
        uint32_t* v = u + uvN / 4;

        // we're handling pairs of 32-bit words, so divide by 8
        NS_ASSERTION((uvN & 0x7) == 0, "Invalid image dimensions!");
        uvN /= 8;

        while (uvN--) {
          uint32_t src0 = *src++;
          uint32_t src1 = *src++;
          uint32_t u0;
          uint32_t v0;
          uint32_t u1;
          uint32_t v1;
          // DEINTERLACE (defined elsewhere) separates one pair of source
          // words into a u word and a v word.
          DEINTERLACE( u0, v0, src0, src1 );
          src0 = *src++;
          src1 = *src++;
          DEINTERLACE( u1, v1, src0, src1 );
          *u++ = u0;
          *u++ = u1;
          *v++ = v0;
          *v++ = v1;
        }

        memcpy(y + yN, d, yN / 2);
        delete[] d;
      }
      break;

    case GonkCameraHardware::PREVIEW_FORMAT_YUV420P:
      // no transformating required
      break;

    default:
      // in a format we don't handle, get out of here
      return;
  }

  Image::Format format = Image::PLANAR_YCBCR;
  nsRefPtr<Image> image = mImageContainer->CreateImage(&format, 1);
  // NOTE(review): explicit AddRef on top of the nsRefPtr's reference —
  // presumably balanced by a consumer release after AppendFrame; verify,
  // otherwise this leaks one reference per frame.
  image->AddRef();
  PlanarYCbCrImage* videoImage = static_cast<PlanarYCbCrImage*>(image.get());

  /**
   * If you change either lumaBpp or chromaBpp, make sure the
   * assertions below still hold.
   */
  const PRUint8 lumaBpp = 8;
  const PRUint8 chromaBpp = 4;
  PlanarYCbCrImage::Data data;
  data.mYChannel = aData;
  data.mYSize = gfxIntSize(mWidth, mHeight);
  // Strides are computed in bits and then converted to bytes (/ 8).
  data.mYStride = mWidth * lumaBpp;
  NS_ASSERTION((data.mYStride & 0x7) == 0, "Invalid image dimensions!");
  data.mYStride /= 8;
  data.mCbCrStride = mWidth * chromaBpp;
  NS_ASSERTION((data.mCbCrStride & 0x7) == 0, "Invalid image dimensions!");
  data.mCbCrStride /= 8;
  data.mCbChannel = aData + mHeight * data.mYStride;
  data.mCrChannel = data.mCbChannel + mHeight * data.mCbCrStride / 2;
  data.mCbCrSize = gfxIntSize(mWidth / 2, mHeight / 2);
  data.mPicX = 0;
  data.mPicY = 0;
  data.mPicSize = gfxIntSize(mWidth, mHeight);
  data.mStereoMode = mozilla::layers::STEREO_MODE_MONO;
  videoImage->SetData(data); // copies buffer

  mVideoSegment.AppendFrame(videoImage, 1, gfxIntSize(mWidth, mHeight));
  mInput->AppendToTrack(TRACK_VIDEO, &mVideoSegment);

  mFrameCount += 1;
  if ((mFrameCount % 10) == 0) {
    DOM_CAMERA_LOGI("%s:%d : mFrameCount = %d\n", __func__, __LINE__, mFrameCount);
  }
}