nsresult
NS_NewByteInputStream(nsIInputStream** aStreamResult,
                      const char* aStringToRead, int32_t aLength,
                      nsAssignmentType aAssignment)
{
  NS_PRECONDITION(aStreamResult, "null out ptr");

  RefPtr<nsStringInputStream> stream = new nsStringInputStream();

  nsresult rv;
  switch (aAssignment) {
    case NS_ASSIGNMENT_COPY:
      rv = stream->SetData(aStringToRead, aLength);
      break;
    case NS_ASSIGNMENT_DEPEND:
      rv = stream->ShareData(aStringToRead, aLength);
      break;
    case NS_ASSIGNMENT_ADOPT:
      rv = stream->AdoptData(const_cast<char*>(aStringToRead), aLength);
      break;
    default:
      NS_ERROR("invalid assignment type");
      rv = NS_ERROR_INVALID_ARG;
  }

  if (NS_FAILED(rv)) {
    return rv;
  }

  stream.forget(aStreamResult);
  return NS_OK;
}
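A minimal caller sketch for the factory above (the buffer and variable names are illustrative, not from the original code): wrap a static string in a stream without copying it, using NS_ASSIGNMENT_DEPEND because the data outlives the stream.

nsCOMPtr<nsIInputStream> stream;
static const char kData[] = "hello world";  // illustrative payload
// NS_ASSIGNMENT_DEPEND: the stream borrows kData rather than copying it,
// so kData must outlive the stream.
nsresult rv = NS_NewByteInputStream(getter_AddRefs(stream), kData,
                                    sizeof(kData) - 1, NS_ASSIGNMENT_DEPEND);
if (NS_FAILED(rv)) {
  return rv;
}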
Example #2
/* static */
already_AddRefed<VideoData>
VideoData::Create(const VideoInfo& aInfo,
                  ImageContainer* aContainer,
                  int64_t aOffset,
                  int64_t aTime,
                  int64_t aDuration,
                  mozilla::layers::TextureClient* aBuffer,
                  bool aKeyframe,
                  int64_t aTimecode,
                  const IntRect& aPicture)
{
  if (!aContainer) {
    // Create a dummy VideoData with no image. This gives us something to
    // send to media streams if necessary.
    RefPtr<VideoData> v(new VideoData(aOffset,
                                      aTime,
                                      aDuration,
                                      aKeyframe,
                                      aTimecode,
                                      aInfo.mDisplay,
                                      0));
    return v.forget();
  }

  // The following situations could be triggered by invalid input
  if (aPicture.width <= 0 || aPicture.height <= 0) {
    NS_WARNING("Empty picture rect");
    return nullptr;
  }

  // Ensure the picture size specified in the headers can be extracted out of
  // the frame we've been supplied without indexing out of bounds.
  CheckedUint32 xLimit = aPicture.x + CheckedUint32(aPicture.width);
  CheckedUint32 yLimit = aPicture.y + CheckedUint32(aPicture.height);
  if (!xLimit.isValid() || !yLimit.isValid()) {
    // The specified picture dimensions can't be contained inside the video
    // frame; we'd stomp memory if we tried to copy it. Fail.
    NS_WARNING("Overflowing picture rect");
    return nullptr;
  }

  RefPtr<VideoData> v(new VideoData(aOffset,
                                    aTime,
                                    aDuration,
                                    aKeyframe,
                                    aTimecode,
                                    aInfo.mDisplay,
                                    0));

  RefPtr<layers::GrallocImage> image = new layers::GrallocImage();
  image->AdoptData(aBuffer, aPicture.Size());
  v->mImage = image;

  return v.forget();
}
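A hedged call-site sketch for VideoData::Create(); the decoder-side locals (info, imageContainer, textureClient, the timing values) are assumptions standing in for whatever the real caller has available.

// Hypothetical decoder call site; all local names are illustrative.
IntRect picture(0, 0, frameWidth, frameHeight);
RefPtr<VideoData> video =
    VideoData::Create(info, imageContainer, byteOffset, presentationTimeUs,
                      durationUs, textureClient, isKeyframe, timecodeUs,
                      picture);
if (!video) {
  // Create() has already warned about an empty or overflowing picture rect.
  return nullptr;
}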
static MediaDataEncoder::EncodedData Encode(
    const RefPtr<MediaDataEncoder> aEncoder, const size_t aNumFrames,
    const layers::PlanarYCbCrData& aYCbCrData) {
  MediaDataEncoder::EncodedData output;
  bool succeeded = false;
  for (size_t i = 0; i < aNumFrames; i++) {
    RefPtr<layers::PlanarYCbCrImage> img =
        new layers::RecyclingPlanarYCbCrImage(new layers::BufferRecycleBin());
    img->AdoptData(aYCbCrData);
    RefPtr<MediaData> frame = VideoData::CreateFromImage(
        kImageSize, 0, TimeUnit::FromMicroseconds(i * 30000),
        TimeUnit::FromMicroseconds(30000), img, (i & 0xF) == 0,
        TimeUnit::FromMicroseconds(i * 30000));
    media::Await(GetMediaThreadPool(MediaThreadType::PLAYBACK),
                 aEncoder->Encode(frame),
                 [&output, &succeeded](MediaDataEncoder::EncodedData encoded) {
                   output.AppendElements(std::move(encoded));
                   succeeded = true;
                 },
                 [&succeeded](MediaResult r) { succeeded = false; });
    EXPECT_TRUE(succeeded);
    if (!succeeded) {
      return output;
    }
  }

  size_t pending = 0;
  media::Await(
      GetMediaThreadPool(MediaThreadType::PLAYBACK), aEncoder->Drain(),
      [&pending, &output, &succeeded](MediaDataEncoder::EncodedData encoded) {
        pending = encoded.Length();
        output.AppendElements(std::move(encoded));
        succeeded = true;
      },
      [&succeeded](MediaResult r) { succeeded = false; });
  EXPECT_TRUE(succeeded);
  if (!succeeded) {
    return output;
  }

  if (pending > 0) {
    media::Await(GetMediaThreadPool(MediaThreadType::PLAYBACK),
                 aEncoder->Drain(),
                 [&succeeded](MediaDataEncoder::EncodedData encoded) {
                   EXPECT_EQ(encoded.Length(), 0UL);
                   succeeded = true;
                 },
                 [&succeeded](MediaResult r) { succeeded = false; });
    EXPECT_TRUE(succeeded);
  }

  return output;
}
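A sketch of how a gtest might drive the Encode() helper above; CreateTestEncoder() and CreateTestYCbCrData() are hypothetical fixture helpers, not APIs from the original test.

// Hypothetical test; the two Create* helpers are assumed fixture code.
TEST(MediaDataEncoderSketch, EncodesAndDrains)
{
  RefPtr<MediaDataEncoder> encoder = CreateTestEncoder();
  layers::PlanarYCbCrData yuvData = CreateTestYCbCrData(kImageSize);
  MediaDataEncoder::EncodedData frames = Encode(encoder, 30, yuvData);
  EXPECT_GT(frames.Length(), 0UL);
}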
uint8_t *
AndroidMediaReader::ImageBufferCallback::CreateI420Image(size_t aWidth,
                                                         size_t aHeight)
{
  RefPtr<PlanarYCbCrImage> yuvImage = mImageContainer->CreatePlanarYCbCrImage();
  mImage = yuvImage;

  if (!yuvImage) {
    NS_WARNING("Could not create I420 image");
    return nullptr;
  }

  size_t frameSize = aWidth * aHeight;

  // Allocate enough for one full resolution Y plane
  // and two quarter resolution Cb/Cr planes.
  uint8_t *buffer = yuvImage->AllocateAndGetNewBuffer(frameSize * 3 / 2);
  if (!buffer) {
    NS_WARNING("Could not allocate I420 frame buffer");
    return nullptr;
  }

  mozilla::layers::PlanarYCbCrData frameDesc;

  frameDesc.mYChannel = buffer;
  frameDesc.mCbChannel = buffer + frameSize;
  frameDesc.mCrChannel = buffer + frameSize * 5 / 4;

  frameDesc.mYSize = IntSize(aWidth, aHeight);
  frameDesc.mCbCrSize = IntSize(aWidth / 2, aHeight / 2);

  frameDesc.mYStride = aWidth;
  frameDesc.mCbCrStride = aWidth / 2;

  frameDesc.mYSkip = 0;
  frameDesc.mCbSkip = 0;
  frameDesc.mCrSkip = 0;

  frameDesc.mPicX = 0;
  frameDesc.mPicY = 0;
  frameDesc.mPicSize = IntSize(aWidth, aHeight);

  yuvImage->AdoptData(frameDesc);

  return buffer;
}
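For reference, the I420 plane math above worked through for one concrete (illustrative) frame size:

// Illustrative layout for a 640x480 frame:
//   frameSize       = 640 * 480         = 307200 bytes (Y plane)
//   Cb plane offset = frameSize         = 307200
//   Cr plane offset = frameSize * 5 / 4 = 384000
//   total buffer    = frameSize * 3 / 2 = 460800 bytes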