void
MediaEngineTabVideoSource::NotifyPull(MediaStreamGraph*,
                                      SourceMediaStream* aSource,
                                      mozilla::TrackID aID,
                                      mozilla::StreamTime aDesiredTime,
                                      mozilla::TrackTicks& aLastEndTime)
{
  VideoSegment segment;
  MonitorAutoLock mon(mMonitor);

  // Note: we're not giving up mImage here
  nsRefPtr<layers::CairoImage> image = mImage;
  TrackTicks target = TimeToTicksRoundUp(USECS_PER_S, aDesiredTime);
  TrackTicks delta = target - aLastEndTime;
  if (delta > 0) {
    // nullptr images are allowed
    if (image) {
      gfx::IntSize size = image->GetSize();
      segment.AppendFrame(image.forget(), delta, gfx::ThebesIntSize(size));
    } else {
      segment.AppendFrame(nullptr, delta, gfxIntSize(0,0));
    }
    // This can fail if either a) we haven't added the track yet, or b)
    // we've removed or finished the track.
    if (aSource->AppendToTrack(aID, &segment)) {
      aLastEndTime = target;
    }
  }
}
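A distilled sketch of the pull pattern above (a hypothetical helper, not from the source; the TimeToTicksRoundUp, AppendFrame, and AppendToTrack signatures are assumed from the surrounding snippets):

// Hypothetical helper (not from the source) distilling the TrackTicks-based
// pull pattern used by the snippet above.
static void
ExamplePull(SourceMediaStream* aSource, TrackID aID,
            StreamTime aDesiredTime, TrackTicks& aLastEndTime,
            layers::Image* aImage, const gfxIntSize& aSize)
{
  TrackTicks target = TimeToTicksRoundUp(USECS_PER_S, aDesiredTime);
  TrackTicks delta = target - aLastEndTime;
  if (delta <= 0) {
    return; // We have already appended past aDesiredTime.
  }
  VideoSegment segment;
  // Compute the size before forget() nulls the pointer; nullptr images are
  // allowed and get a zero size.
  gfxIntSize size = aImage ? aSize : gfxIntSize(0, 0);
  nsRefPtr<layers::Image> image = aImage;
  segment.AppendFrame(image.forget(), delta, size);
  // Only advance the caller's bookkeeping when the append succeeds;
  // AppendToTrack fails if the track was never added, or was removed or
  // finished.
  if (aSource->AppendToTrack(aID, &segment)) {
    aLastEndTime = target;
  }
}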
void
MediaEngineDefaultVideoSource::NotifyPull(MediaStreamGraph* aGraph,
                                          SourceMediaStream *aSource,
                                          TrackID aID,
                                          StreamTime aDesiredTime,
                                          TrackTicks &aLastEndTime)
{
  // AppendToTrack takes ownership of the segment's contents
  VideoSegment segment;
  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted) {
    return;
  }

  // Note: we're not giving up mImage here
  nsRefPtr<layers::Image> image = mImage;
  TrackTicks target = TimeToTicksRoundUp(USECS_PER_S, aDesiredTime);
  TrackTicks delta = target - aLastEndTime;

  if (delta > 0) {
    // nullptr images are allowed
    if (image) {
      segment.AppendFrame(image.forget(), delta,
                          IntSize(mOpts.mWidth, mOpts.mHeight));
    } else {
      segment.AppendFrame(nullptr, delta, IntSize(0, 0));
    }
    // This can fail if either a) we haven't added the track yet, or b)
    // we've removed or finished the track.
    if (aSource->AppendToTrack(aID, &segment)) {
      aLastEndTime = target;
    }
  }
}
Example #3
// Test encoding a track that starts with null data
TEST(VP8VideoTrackEncoder, NullFrameFirst)
{
  // Initialize the VP8 encoder
  TestVP8TrackEncoder encoder;
  InitParam param = {true, 640, 480};
  encoder.TestInit(param);
  YUVBufferGenerator generator;
  generator.Init(mozilla::gfx::IntSize(640, 480));
  RefPtr<Image> image = generator.GenerateI420Image();
  TimeStamp now = TimeStamp::Now();
  VideoSegment segment;

  // Pass two 100ms null frames to the encoder.
  for (uint32_t i = 0; i < 2; ++i) {
    segment.AppendFrame(nullptr,
                        mozilla::StreamTime(9000), // 100ms
                        generator.GetSize(),
                        PRINCIPAL_HANDLE_NONE,
                        false,
                        now + TimeDuration::FromSeconds(i * 0.1));
  }

  // Pass a real 100ms frame to the encoder.
  segment.AppendFrame(image.forget(),
                      mozilla::StreamTime(9000), // 100ms
                      generator.GetSize(),
                      PRINCIPAL_HANDLE_NONE,
                      false,
                      now + TimeDuration::FromSeconds(0.3));

  encoder.SetCurrentFrames(segment);

  // End the track.
  segment.Clear();
  encoder.NotifyQueuedTrackChanges(nullptr, 0, 0, TrackEventCommand::TRACK_EVENT_ENDED, segment);

  EncodedFrameContainer container;
  ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

  EXPECT_TRUE(encoder.IsEncodingComplete());

  // Verify that the total duration is 0.3s.
  uint64_t totalDuration = 0;
  for (auto& frame : container.GetEncodedFrames()) {
    totalDuration += frame->GetDuration();
  }
  const uint64_t pointThree = (PR_USEC_PER_SEC / 10) * 3;
  EXPECT_EQ(pointThree, totalDuration);
}
TEST(VP8VideoTrackEncoder, FrameEncode)
{
  // Initialize the VP8 encoder
  TestVP8TrackEncoder encoder;
  InitParam param = {true, 640, 480, 90000};
  encoder.TestInit(param);

  // Create YUV images as source.
  nsTArray<RefPtr<Image>> images;
  YUVBufferGenerator generator;
  generator.Init(mozilla::gfx::IntSize(640, 480));
  generator.Generate(images);

  // Put the generated YUV frames into a video segment.
  // The duration of each frame is 1 second.
  VideoSegment segment;
  for (nsTArray<RefPtr<Image>>::size_type i = 0; i < images.Length(); i++)
  {
    RefPtr<Image> image = images[i];
    segment.AppendFrame(image.forget(), mozilla::StreamTime(90000), generator.GetSize());
  }

  // Deliver the segment to the encoder via a track-change notification.
  encoder.NotifyQueuedTrackChanges(nullptr, 0, 0, 0, segment);

  // Pull Encoded Data back from encoder.
  EncodedFrameContainer container;
  EXPECT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));
}
void
MediaEngineDefaultVideoSource::NotifyPull(MediaStreamGraph* aGraph,
                                          SourceMediaStream *aSource,
                                          TrackID aID,
                                          StreamTime aDesiredTime,
                                          const PrincipalHandle& aPrincipalHandle)
{
  // AppendToTrack takes ownership of the segment's contents
  VideoSegment segment;
  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted) {
    return;
  }

  // Note: we're not giving up mImage here
  RefPtr<layers::Image> image = mImage;
  StreamTime delta = aDesiredTime - aSource->GetEndOfAppendedData(aID);

  if (delta > 0) {
    // nullptr images are allowed
    IntSize size(image ? mOpts.mWidth : 0, image ? mOpts.mHeight : 0);
    segment.AppendFrame(image.forget(), delta, size, aPrincipalHandle);
    // This can fail if either a) we haven't added the track yet, or b)
    // we've removed or finished the track.
    aSource->AppendToTrack(aID, &segment);
  }
}
void
DOMHwMediaStream::Init(MediaStream* stream)
{
  SourceMediaStream* srcStream = stream->AsSourceStream();

  if (srcStream) {
    VideoSegment segment;
#ifdef MOZ_WIDGET_GONK
    const StreamTime delta = STREAM_TIME_MAX; // Because MediaStreamGraph will run out of frames
                                              // in non-autoplay mode, we must give the frame a
                                              // large enough duration to cover this situation.
    mImageData.mOverlayId = DEFAULT_IMAGE_ID;
    mImageData.mSize.width = DEFAULT_IMAGE_WIDTH;
    mImageData.mSize.height = DEFAULT_IMAGE_HEIGHT;
    mOverlayImage->SetData(mImageData);

    RefPtr<Image> image = static_cast<Image*>(mOverlayImage.get());
    mozilla::gfx::IntSize size = image->GetSize();

    segment.AppendFrame(image.forget(), delta, size);
#endif
    srcStream->AddTrack(TRACK_VIDEO_PRIMARY, 0, new VideoSegment());
    srcStream->AppendToTrack(TRACK_VIDEO_PRIMARY, &segment);
    srcStream->FinishAddTracks();
    srcStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
  }
}
void
MediaEngineTabVideoSource::NotifyPull(MediaStreamGraph*,
                                      SourceMediaStream* aSource,
                                      TrackID aID, StreamTime aDesiredTime,
                                      const PrincipalHandle& aPrincipalHandle)
{
  VideoSegment segment;
  MonitorAutoLock mon(mMonitor);
  if (mState != kStarted) {
    return;
  }

  // Note: we're not giving up mImage here
  RefPtr<layers::SourceSurfaceImage> image = mImage;
  StreamTime delta = aDesiredTime - aSource->GetEndOfAppendedData(aID);
  if (delta > 0) {
    // nullptr images are allowed
    gfx::IntSize size = image ? image->GetSize() : IntSize(0, 0);
    segment.AppendFrame(image.forget().downcast<layers::Image>(), delta, size,
                        aPrincipalHandle);
    // This can fail if either a) we haven't added the track yet, or b)
    // we've removed or finished the track.
    aSource->AppendToTrack(aID, &segment);
  }
}
void
MediaEngineDefaultVideoSource::NotifyPull(MediaStreamGraph* aGraph,
                                          SourceMediaStream *aSource,
                                          TrackID aID,
                                          StreamTime aDesiredTime)
{
  // AppendToTrack takes ownership of the segment's contents
  VideoSegment segment;
  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted) {
    return;
  }

  // Note: we're not giving up mImage here
  RefPtr<layers::Image> image = mImage;
  StreamTime delta = aDesiredTime - aSource->GetEndOfAppendedData(aID);

  if (delta > 0) {
    // nullptr images are allowed
    IntSize size(image ? mOpts.mWidth : 0, image ? mOpts.mHeight : 0);
    segment.AppendFrame(image.forget(), delta, size);
    // This can fail if either a) we haven't added the track yet, or b)
    // we've removed or finished the track.
    aSource->AppendToTrack(aID, &segment);
    // Generate null data for fake tracks.
    if (mHasFakeTracks) {
      for (int i = 0; i < kFakeVideoTrackCount; ++i) {
        VideoSegment nullSegment;
        nullSegment.AppendNullData(delta);
        aSource->AppendToTrack(kTrackCount + i, &nullSegment);
      }
    }
  }
}
// Called if the graph thinks it's running out of buffered video; repeat
// the last frame for whatever minimum period it thinks it needs. Note that
// this means that no *real* frame can be inserted during this period.
void
MediaEngineWebRTCVideoSource::NotifyPull(MediaStreamGraph* aGraph,
                                         SourceMediaStream *aSource,
                                         TrackID aID,
                                         StreamTime aDesiredTime,
                                         TrackTicks &aLastEndTime)
{
  VideoSegment segment;

  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted)
    return;

  // Note: we're not giving up mImage here
  nsRefPtr<layers::Image> image = mImage;
  TrackTicks target = TimeToTicksRoundUp(USECS_PER_S, aDesiredTime);
  TrackTicks delta = target - aLastEndTime;
  LOGFRAME(("NotifyPull, desired = %ld, target = %ld, delta = %ld %s",
            (int64_t) aDesiredTime, (int64_t) target, (int64_t) delta,
            image ? "" : "<null>"));

  // Bug 846188: We may want to limit incoming frames to the requested frame
  // rate (mFps) - if you ask for 30FPS and the camera delivers 60FPS, this
  // could cause issues.
  // We may also want to signal when the actual frame rate falls below mMinFPS -
  // cameras often don't return the requested frame rate, especially in low
  // light; we should consider surfacing this so that we can switch to a lower
  // resolution (which may raise the frame rate).

  // Don't append if we've already provided a frame that supposedly goes past
  // the current aDesiredTime; doing so would mean a negative delta and would
  // mess up the graph's handling.
  if (delta > 0) {
    // nullptr images are allowed
    if (image) {
      segment.AppendFrame(image.forget(), delta, gfxIntSize(mWidth, mHeight));
    } else {
      segment.AppendFrame(nullptr, delta, gfxIntSize(0,0));
    }
    // This can fail if either a) we haven't added the track yet, or b)
    // we've removed or finished the track.
    if (aSource->AppendToTrack(aID, &segment)) {
      aLastEndTime = target;
    }
  }
}
NS_IMETHODIMP
MediaEngineDefaultVideoSource::Notify(nsITimer* aTimer)
{
    VideoSegment segment;

    nsRefPtr<layers::PlanarYCbCrImage> image = mImage;
    segment.AppendFrame(image.forget(), USECS_PER_S / FPS, gfxIntSize(WIDTH, HEIGHT));
    mSource->AppendToTrack(mTrackID, &segment);

    return NS_OK;
}
// ViEExternalRenderer Callback. Process every incoming frame here.
int
MediaEngineWebRTCVideoSource::DeliverFrame(
   unsigned char* buffer, int size, uint32_t time_stamp, int64_t render_time)
{
  ReentrantMonitorAutoEnter enter(mMonitor);

  if (mInSnapshotMode) {
    // Set the condition variable to false and notify Snapshot().
    PR_Lock(mSnapshotLock);
    mInSnapshotMode = false;
    PR_NotifyCondVar(mSnapshotCondVar);
    PR_Unlock(mSnapshotLock);
    return 0;
  }

  // Check for proper state.
  if (mState != kStarted) {
    return 0;
  }

  // Create a video frame and append it to the track.
  layers::Image::Format format = layers::Image::PLANAR_YCBCR;
  nsRefPtr<layers::Image> image = mImageContainer->CreateImage(&format, 1);

  layers::PlanarYCbCrImage* videoImage = static_cast<layers::PlanarYCbCrImage*>(image.get());

  PRUint8* frame = static_cast<PRUint8*>(buffer);
  const PRUint8 lumaBpp = 8;
  const PRUint8 chromaBpp = 4;

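  // I420 layout: a full-resolution Y plane, then quarter-resolution Cb and
  // Cr planes. With lumaBpp = 8 and chromaBpp = 4, the strides below work
  // out to mWidth bytes per Y row and mWidth / 2 bytes per chroma row; Cb
  // starts right after the Y plane, and Cr right after the Cb plane.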
  layers::PlanarYCbCrImage::Data data;
  data.mYChannel = frame;
  data.mYSize = gfxIntSize(mWidth, mHeight);
  data.mYStride = mWidth * lumaBpp / 8;
  data.mCbCrStride = mWidth * chromaBpp / 8;
  data.mCbChannel = frame + mHeight * data.mYStride;
  data.mCrChannel = data.mCbChannel + mHeight * data.mCbCrStride / 2;
  data.mCbCrSize = gfxIntSize(mWidth / 2, mHeight / 2);
  data.mPicX = 0;
  data.mPicY = 0;
  data.mPicSize = gfxIntSize(mWidth, mHeight);
  data.mStereoMode = layers::STEREO_MODE_MONO;

  videoImage->SetData(data);

  VideoSegment segment;
  segment.AppendFrame(image.forget(), 1, gfxIntSize(mWidth, mHeight));
  mSource->AppendToTrack(mTrackID, &segment);
  return 0;
}
TEST(VideoSegment, TestAppendFrameNotForceBlack) {
  RefPtr<layers::Image> testImage = nullptr;

  VideoSegment segment;
  segment.AppendFrame(testImage.forget(), mozilla::StreamTime(90000),
                      mozilla::gfx::IntSize(640, 480), PRINCIPAL_HANDLE_NONE);

  VideoSegment::ChunkIterator iter(segment);
  while (!iter.IsEnded()) {
    VideoChunk chunk = *iter;
    EXPECT_FALSE(chunk.mFrame.GetForceBlack());
    iter.Next();
  }
}
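A hypothetical companion test (a sketch, not from the source): it assumes the aForceBlack parameter seen in the encoder tests above, with the trailing timestamp argument defaulted.

TEST(VideoSegment, TestAppendFrameForceBlack) {
  RefPtr<layers::Image> testImage = nullptr;

  VideoSegment segment;
  // Same append as above, but with aForceBlack set to true (assumed
  // parameter, per the other snippets in this listing).
  segment.AppendFrame(testImage.forget(), mozilla::StreamTime(90000),
                      mozilla::gfx::IntSize(640, 480), PRINCIPAL_HANDLE_NONE,
                      true /* aForceBlack */);

  VideoSegment::ChunkIterator iter(segment);
  while (!iter.IsEnded()) {
    VideoChunk chunk = *iter;
    EXPECT_TRUE(chunk.mFrame.GetForceBlack());
    iter.Next();
  }
}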
nsresult
MediaEngineDefaultVideoSource::Start(SourceMediaStream* aStream, TrackID aID)
{
  if (mState != kAllocated) {
    return NS_ERROR_FAILURE;
  }

  mTimer = do_CreateInstance(NS_TIMER_CONTRACTID);
  if (!mTimer) {
    return NS_ERROR_FAILURE;
  }

  mSource = aStream;

  // Allocate a single blank Image
  ImageFormat format = PLANAR_YCBCR;
  mImageContainer = layers::LayerManager::CreateImageContainer();

  nsRefPtr<layers::Image> image = mImageContainer->CreateImage(&format, 1);
  mImage = static_cast<layers::PlanarYCbCrImage*>(image.get());

  layers::PlanarYCbCrImage::Data data;
  // Fill it with a single solid-color frame
  mCb = 16;
  mCr = 16;
  AllocateSolidColorFrame(data, mOpts.mWidth, mOpts.mHeight, 0x80, mCb, mCr);
  // SetData copies data, so we can free the frame
  mImage->SetData(data);
  ReleaseFrame(data);

  // AddTrack takes ownership of segment
  VideoSegment *segment = new VideoSegment();
  segment->AppendFrame(image.forget(), USECS_PER_S / mOpts.mFPS,
                       gfxIntSize(mOpts.mWidth, mOpts.mHeight));
  mSource->AddTrack(aID, VIDEO_RATE, 0, segment);

  // We aren't going to add any more tracks
  mSource->AdvanceKnownTracksTime(STREAM_TIME_MAX);

  // Remember TrackID so we can end it later
  mTrackID = aID;

  // Start timer for subsequent frames
  mTimer->InitWithCallback(this, 1000 / mOpts.mFPS, nsITimer::TYPE_REPEATING_SLACK);
  mState = kStarted;

  return NS_OK;
}
NS_IMETHODIMP
MediaEngineDefaultVideoSource::Notify(nsITimer* aTimer)
{
  // Update the target color
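  // The branches below walk the (mCb, mCr) pair around the perimeter of the
  // [16, 240] chroma square, one step per timer tick, so the solid color
  // cycles smoothly.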
  if (mCr <= 16) {
    if (mCb < 240) {
      mCb++;
    } else {
      mCr++;
    }
  } else if (mCb >= 240) {
    if (mCr < 240) {
      mCr++;
    } else {
      mCb--;
    }
  } else if (mCr >= 240) {
    if (mCb > 16) {
      mCb--;
    } else {
      mCr--;
    }
  } else {
    mCr--;
  }

  // Allocate a single solid color image
  ImageFormat format = PLANAR_YCBCR;
  nsRefPtr<layers::Image> image = mImageContainer->CreateImage(&format, 1);
  nsRefPtr<layers::PlanarYCbCrImage> ycbcr_image =
      static_cast<layers::PlanarYCbCrImage*>(image.get());
  layers::PlanarYCbCrImage::Data data;
  AllocateSolidColorFrame(data, mOpts.mWidth, mOpts.mHeight, 0x80, mCb, mCr);
  ycbcr_image->SetData(data);
  // SetData copies data, so we can free the frame
  ReleaseFrame(data);

  // AddTrack takes ownership of segment
  VideoSegment segment;
  segment.AppendFrame(ycbcr_image.forget(), USECS_PER_S / mOpts.mFPS,
                      gfxIntSize(mOpts.mWidth, mOpts.mHeight));
  mSource->AppendToTrack(mTrackID, &segment);

  return NS_OK;
}
// guts for appending data to the MSG track
bool MediaEngineCameraVideoSource::AppendToTrack(SourceMediaStream* aSource,
                                                 layers::Image* aImage,
                                                 TrackID aID,
                                                 StreamTime delta)
{
  MOZ_ASSERT(aSource);

  VideoSegment segment;
  RefPtr<layers::Image> image = aImage;
  IntSize size(image ? mWidth : 0, image ? mHeight : 0);
  segment.AppendFrame(image.forget(), delta, size);

  // This is safe from any thread, and is safe if the track is Finished
  // or Destroyed.
  // This can fail if either a) we haven't added the track yet, or b)
  // we've removed or finished the track.
  return aSource->AppendToTrack(aID, &segment);
}
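A hypothetical call site for the helper above (a sketch under assumptions, not from the source; ExamplePull, mMonitor, and mImage are assumed here, mirroring the other engine sources in this listing):

void
MediaEngineCameraVideoSource::ExamplePull(SourceMediaStream* aSource,
                                          TrackID aID,
                                          StreamTime aDesiredTime)
{
  MonitorAutoLock lock(mMonitor);
  RefPtr<layers::Image> image = mImage; // Note: we're not giving up mImage here
  StreamTime delta = aDesiredTime - aSource->GetEndOfAppendedData(aID);
  if (delta > 0) {
    // A false return only means the track isn't there yet or is already
    // removed/finished, so it is safe to ignore here.
    AppendToTrack(aSource, image, aID, delta);
  }
}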
Example #16
void
MediaEncoder::NotifyQueuedTrackChanges(MediaStreamGraph* aGraph,
                                       TrackID aID,
                                       StreamTime aTrackOffset,
                                       TrackEventCommand aTrackEvents,
                                       const MediaSegment& aQueuedMedia,
                                       MediaStream* aInputStream,
                                       TrackID aInputTrackID)
{
  if (!mDirectConnected) {
    NotifyRealtimeData(aGraph, aID, aTrackOffset, aTrackEvents, aQueuedMedia);
  } else {
    if (aTrackEvents != TrackEventCommand::TRACK_EVENT_NONE) {
      // forward events (TRACK_EVENT_ENDED) but not the media
      if (aQueuedMedia.GetType() == MediaSegment::VIDEO) {
        VideoSegment segment;
        NotifyRealtimeData(aGraph, aID, aTrackOffset, aTrackEvents, segment);
      } else {
        AudioSegment segment;
        NotifyRealtimeData(aGraph, aID, aTrackOffset, aTrackEvents, segment);
      }
    }
    if (mSuspended == RECORD_RESUMED) {
      if (mVideoEncoder) {
        if (aQueuedMedia.GetType() == MediaSegment::VIDEO) {
          // insert a null frame of duration equal to the first segment passed
          // after Resume(), so it'll get added to one of the DirectListener frames
          VideoSegment segment;
          gfx::IntSize size(0,0);
          segment.AppendFrame(nullptr, aQueuedMedia.GetDuration(), size,
                              PRINCIPAL_HANDLE_NONE);
          mVideoEncoder->NotifyQueuedTrackChanges(aGraph, aID,
                                                  aTrackOffset, aTrackEvents,
                                                  segment);
          mSuspended = RECORD_NOT_SUSPENDED;
        }
      } else {
        mSuspended = RECORD_NOT_SUSPENDED; // no video
      }
    }
  }
}
Example #17
// Test encoding a track that has to skip frames.
TEST(VP8VideoTrackEncoder, SkippedFrames)
{
  // Initialize the VP8 encoder
  TestVP8TrackEncoder encoder;
  InitParam param = {true, 640, 480};
  encoder.TestInit(param);
  YUVBufferGenerator generator;
  generator.Init(mozilla::gfx::IntSize(640, 480));
  TimeStamp now = TimeStamp::Now();
  VideoSegment segment;

  // Pass 100 frames of the shortest possible duration where we don't get
  // rounding errors between the input and output rates.
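  // (At the 90 kHz track rate used here, 90 ticks is exactly 1 ms, so frame
  // durations stay whole in both domains.)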
  for (uint32_t i = 0; i < 100; ++i) {
    segment.AppendFrame(generator.GenerateI420Image(),
                        mozilla::StreamTime(90), // 1ms
                        generator.GetSize(),
                        PRINCIPAL_HANDLE_NONE,
                        false,
                        now + TimeDuration::FromMilliseconds(i));
  }

  encoder.SetCurrentFrames(segment);

  // End the track.
  segment.Clear();
  encoder.NotifyQueuedTrackChanges(nullptr, 0, 0, TrackEventCommand::TRACK_EVENT_ENDED, segment);

  EncodedFrameContainer container;
  ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

  EXPECT_TRUE(encoder.IsEncodingComplete());

  // Verify that the total duration is 100 * 1ms = 100ms.
  uint64_t totalDuration = 0;
  for (auto& frame : container.GetEncodedFrames()) {
    totalDuration += frame->GetDuration();
  }
  const uint64_t hundredMillis = PR_USEC_PER_SEC / 10;
  EXPECT_EQ(hundredMillis, totalDuration);
}
Example #18
// Test that encoding a single frame gives useful output.
TEST(VP8VideoTrackEncoder, SingleFrameEncode)
{
  // Initialize the VP8 encoder
  TestVP8TrackEncoder encoder;
  InitParam param = {true, 640, 480};
  encoder.TestInit(param);

  // Pass a half-second frame to the encoder.
  YUVBufferGenerator generator;
  generator.Init(mozilla::gfx::IntSize(640, 480));
  VideoSegment segment;
  segment.AppendFrame(generator.GenerateI420Image(),
                      mozilla::StreamTime(45000), // 1/2 second
                      generator.GetSize(),
                      PRINCIPAL_HANDLE_NONE);

  encoder.SetCurrentFrames(segment);

  // End the track.
  segment.Clear();
  encoder.NotifyQueuedTrackChanges(nullptr, 0, 0, TrackEventCommand::TRACK_EVENT_ENDED, segment);

  EncodedFrameContainer container;
  ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

  EXPECT_TRUE(encoder.IsEncodingComplete());

  // Read out encoded data, and verify.
  const nsTArray<RefPtr<EncodedFrame>>& frames = container.GetEncodedFrames();
  const size_t oneElement = 1;
  ASSERT_EQ(oneElement, frames.Length());

  EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[0]->GetFrameType()) <<
    "We only have one frame, so it should be a keyframe";

  const uint64_t halfSecond = PR_USEC_PER_SEC / 2;
  EXPECT_EQ(halfSecond, frames[0]->GetDuration());
}
Example #19
// Encode test
TEST(VP8VideoTrackEncoder, FrameEncode)
{
  // Initialize the VP8 encoder
  TestVP8TrackEncoder encoder;
  InitParam param = {true, 640, 480};
  encoder.TestInit(param);

  // Create YUV images as source.
  nsTArray<RefPtr<Image>> images;
  YUVBufferGenerator generator;
  generator.Init(mozilla::gfx::IntSize(640, 480));
  images.AppendElement(generator.GenerateI420Image());
  images.AppendElement(generator.GenerateNV12Image());
  images.AppendElement(generator.GenerateNV21Image());

  // Put the generated YUV frames into a video segment.
  // The duration of each frame is 1 second.
  VideoSegment segment;
  TimeStamp now = TimeStamp::Now();
  for (nsTArray<RefPtr<Image>>::size_type i = 0; i < images.Length(); i++)
  {
    RefPtr<Image> image = images[i];
    segment.AppendFrame(image.forget(),
                        mozilla::StreamTime(90000),
                        generator.GetSize(),
                        PRINCIPAL_HANDLE_NONE,
                        false,
                        now + TimeDuration::FromSeconds(i));
  }

  // Deliver the frames to the encoder.
  encoder.SetCurrentFrames(segment);

  // Pull Encoded Data back from encoder.
  EncodedFrameContainer container;
  EXPECT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));
}
void
DOMHwMediaStream::SetImageSize(uint32_t width, uint32_t height)
{
#ifdef MOZ_WIDGET_GONK
  OverlayImage::Data imgData;

  imgData.mOverlayId = mOverlayImage->GetOverlayId();
  imgData.mSize = IntSize(width, height);
  mOverlayImage->SetData(imgData);
#endif

  SourceMediaStream* srcStream = GetInputStream()->AsSourceStream();
  StreamBuffer::Track* track = srcStream->FindTrack(TRACK_VIDEO_PRIMARY);

  if (!track || !track->GetSegment()) {
    return;
  }

#ifdef MOZ_WIDGET_GONK
  // Clear the old segment.
  // Changing the existing content of a segment is a Very BAD thing, as it
  // will confuse consumers of MediaStreams.
  // It is only acceptable for DOMHwMediaStream because DOMHwMediaStream
  // doesn't currently have consumers of TV streams.
  track->GetSegment()->Clear();

  // Change the image size.
  const StreamTime delta = STREAM_TIME_MAX;
  RefPtr<Image> image = static_cast<Image*>(mOverlayImage.get());
  mozilla::gfx::IntSize size = image->GetSize();
  VideoSegment segment;

  segment.AppendFrame(image.forget(), delta, size);
  srcStream->AppendToTrack(TRACK_VIDEO_PRIMARY, &segment);
#endif
}
nsresult
MediaEngineDefaultVideoSource::Start(SourceMediaStream* aStream, TrackID aID)
{
    if (mState != kAllocated) {
        return NS_ERROR_FAILURE;
    }

    mTimer = do_CreateInstance(NS_TIMER_CONTRACTID);
    if (!mTimer) {
        return NS_ERROR_FAILURE;
    }

    mSource = aStream;

    // Allocate a single blank Image
    layers::Image::Format format = layers::Image::PLANAR_YCBCR;
    mImageContainer = layers::LayerManager::CreateImageContainer();

    nsRefPtr<layers::Image> image = mImageContainer->CreateImage(&format, 1);

    int len = ((WIDTH * HEIGHT) * 3 / 2);
    mImage = static_cast<layers::PlanarYCbCrImage*>(image.get());
    PRUint8* frame = (PRUint8*) PR_Malloc(len);
    memset(frame, 0x80, len); // Gray

    const PRUint8 lumaBpp = 8;
    const PRUint8 chromaBpp = 4;

    layers::PlanarYCbCrImage::Data data;
    data.mYChannel = frame;
    data.mYSize = gfxIntSize(WIDTH, HEIGHT);
    data.mYStride = WIDTH * lumaBpp / 8;
    data.mCbCrStride = WIDTH * chromaBpp / 8;
    data.mCbChannel = frame + HEIGHT * data.mYStride;
    data.mCrChannel = data.mCbChannel + HEIGHT * data.mCbCrStride / 2;
    data.mCbCrSize = gfxIntSize(WIDTH / 2, HEIGHT / 2);
    data.mPicX = 0;
    data.mPicY = 0;
    data.mPicSize = gfxIntSize(WIDTH, HEIGHT);
    data.mStereoMode = layers::STEREO_MODE_MONO;

    // SetData copies data, so we can free the frame
    mImage->SetData(data);
    PR_Free(frame);


    // AddTrack takes ownership of segment
    VideoSegment *segment = new VideoSegment();
    segment->AppendFrame(image.forget(), USECS_PER_S / FPS, gfxIntSize(WIDTH, HEIGHT));
    mSource->AddTrack(aID, RATE, 0, segment);

    // We aren't going to add any more tracks
    mSource->AdvanceKnownTracksTime(STREAM_TIME_MAX);

    // Remember TrackID so we can end it later
    mTrackID = aID;

    // Start timer for subsequent frames
    mTimer->InitWithCallback(this, 1000 / FPS, nsITimer::TYPE_REPEATING_SLACK);
    mState = kStarted;

    return NS_OK;
}