Example #1
HRESULT
WMFVideoMFTManager::CreateD3DVideoFrame(IMFSample* aSample,
                                        int64_t aStreamOffset,
                                        VideoData** aOutVideoData)
{
  NS_ENSURE_TRUE(aSample, E_POINTER);
  NS_ENSURE_TRUE(aOutVideoData, E_POINTER);
  NS_ENSURE_TRUE(mDXVA2Manager, E_ABORT);
  NS_ENSURE_TRUE(mUseHwAccel, E_ABORT);

  *aOutVideoData = nullptr;
  HRESULT hr;

  nsRefPtr<Image> image;
  hr = mDXVA2Manager->CopyToImage(aSample,
                                  mPictureRegion,
                                  mImageContainer,
                                  getter_AddRefs(image));
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
  NS_ENSURE_TRUE(image, E_FAIL);

  Microseconds pts = GetSampleTime(aSample);
  Microseconds duration = GetSampleDuration(aSample);
  nsRefPtr<VideoData> v = VideoData::CreateFromImage(mVideoInfo,
                                                     mImageContainer,
                                                     aStreamOffset,
                                                     pts,
                                                     duration,
                                                     image.forget(),
                                                     false,
                                                     -1,
                                                     ToIntRect(mPictureRegion));

  NS_ENSURE_TRUE(v, E_FAIL);
  v.forget(aOutVideoData);

  return S_OK;
}
Example #2
HRESULT
WMFReader::CreateD3DVideoFrame(IMFSample* aSample,
                               int64_t aTimestampUsecs,
                               int64_t aDurationUsecs,
                               int64_t aOffsetBytes,
                               VideoData** aOutVideoData)
{
  NS_ENSURE_TRUE(aSample, E_POINTER);
  NS_ENSURE_TRUE(aOutVideoData, E_POINTER);
  NS_ENSURE_TRUE(mDXVA2Manager, E_ABORT);
  NS_ENSURE_TRUE(mUseHwAccel, E_ABORT);

  *aOutVideoData = nullptr;
  HRESULT hr;

  nsRefPtr<Image> image;
  hr = mDXVA2Manager->CopyToImage(aSample,
                                  mPictureRegion,
                                  mDecoder->GetImageContainer(),
                                  getter_AddRefs(image));
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
  NS_ENSURE_TRUE(image, E_FAIL);

  nsRefPtr<VideoData> v = VideoData::CreateFromImage(mInfo.mVideo,
                                                     mDecoder->GetImageContainer(),
                                                     aOffsetBytes,
                                                     aTimestampUsecs,
                                                     aDurationUsecs,
                                                     image.forget(),
                                                     false,
                                                     -1,
                                                     ToIntRect(mPictureRegion));

  NS_ENSURE_TRUE(v, E_FAIL);
  v.forget(aOutVideoData);

  return S_OK;
}
Example #3
IntRect XMLElement::GetIntRect(const ea::string& name) const
{
    return ToIntRect(GetAttribute(name));
}
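A minimal usage sketch (the element and attribute names are hypothetical, assuming an Urho3D-style XML API):

// Given <sprite rect="0 0 64 64" /> in a loaded XML file:
XMLElement sprite = root.GetChild("sprite");
IntRect region = sprite.GetIntRect("rect"); // ToIntRect parses "0 0 64 64"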
Example #4
nsresult
GonkVideoDecoderManager::CreateVideoData(int64_t aStreamOffset, VideoData **v)
{
  *v = nullptr;
  nsRefPtr<VideoData> data;
  int64_t timeUs;
  int32_t keyFrame;

  if (mVideoBuffer == nullptr) {
    GVDM_LOG("Video Buffer is not valid!");
    return NS_ERROR_UNEXPECTED;
  }

  if (!mVideoBuffer->meta_data()->findInt64(kKeyTime, &timeUs)) {
    GVDM_LOG("Decoder did not return frame time");
    return NS_ERROR_UNEXPECTED;
  }

  int64_t duration;
  nsresult rv = QueueFrameTimeOut(timeUs, duration);
  NS_ENSURE_SUCCESS(rv, rv);

  if (mVideoBuffer->range_length() == 0) {
    // Some decoders may return spurious empty buffers that we just want to
    // ignore (comment quoted from Android's AwesomePlayer.cpp).
    ReleaseVideoBuffer();
    return NS_ERROR_NOT_AVAILABLE;
  }

  if (!mVideoBuffer->meta_data()->findInt32(kKeyIsSyncFrame, &keyFrame)) {
    keyFrame = 0;
  }

  gfx::IntRect picture = ToIntRect(mPicture);
  if (mFrameInfo.mWidth != mInitialFrame.width ||
      mFrameInfo.mHeight != mInitialFrame.height) {

    // Frame size is different from what the container reports. This is legal,
    // and we will preserve the ratio of the crop rectangle as it
    // was reported relative to the picture size reported by the container.
    picture.x = (mPicture.x * mFrameInfo.mWidth) / mInitialFrame.width;
    picture.y = (mPicture.y * mFrameInfo.mHeight) / mInitialFrame.height;
    picture.width = (mFrameInfo.mWidth * mPicture.width) / mInitialFrame.width;
    picture.height = (mFrameInfo.mHeight * mPicture.height) / mInitialFrame.height;
  }
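  // Worked sketch of the rescaling above (illustrative numbers): if the
  // container reported a 320x240 frame with crop (x=8, y=8, w=304, h=224)
  // and the decoder now outputs 640x480, the crop becomes:
  //   picture.x      = (8   * 640) / 320 = 16
  //   picture.y      = (8   * 480) / 240 = 16
  //   picture.width  = (640 * 304) / 320 = 608
  //   picture.height = (480 * 224) / 240 = 448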

  RefPtr<mozilla::layers::TextureClient> textureClient;

  if (mVideoBuffer->graphicBuffer().get()) {
    textureClient = mNativeWindow->getTextureClientFromBuffer(mVideoBuffer->graphicBuffer().get());
  }

  if (textureClient) {
    GrallocTextureClientOGL* grallocClient = static_cast<GrallocTextureClientOGL*>(textureClient.get());
    grallocClient->SetMediaBuffer(mVideoBuffer);
    textureClient->SetRecycleCallback(GonkVideoDecoderManager::RecycleCallback, this);

    data = VideoData::Create(mInfo.mVideo,
                             mImageContainer,
                             aStreamOffset,
                             timeUs,
                             duration,
                             textureClient,
                             keyFrame,
                             -1,
                             picture);
  } else {
    if (!mVideoBuffer->data()) {
      GVDM_LOG("No data in Video Buffer!");
      return NS_ERROR_UNEXPECTED;
    }
    uint8_t *yuv420p_buffer = (uint8_t *)mVideoBuffer->data();
    int32_t stride = mFrameInfo.mStride;
    int32_t slice_height = mFrameInfo.mSliceHeight;

    // Converts to OMX_COLOR_FormatYUV420Planar
    if (mFrameInfo.mColorFormat != OMX_COLOR_FormatYUV420Planar) {
      ARect crop;
      crop.top = 0;
      crop.bottom = mFrameInfo.mHeight;
      crop.left = 0;
      crop.right = mFrameInfo.mWidth;
      yuv420p_buffer = GetColorConverterBuffer(mFrameInfo.mWidth, mFrameInfo.mHeight);
      if (mColorConverter.convertDecoderOutputToI420(mVideoBuffer->data(),
          mFrameInfo.mWidth, mFrameInfo.mHeight, crop, yuv420p_buffer) != OK) {
        ReleaseVideoBuffer();
        GVDM_LOG("Color conversion failed!");
        return NS_ERROR_UNEXPECTED;
      }
      stride = mFrameInfo.mWidth;
      slice_height = mFrameInfo.mHeight;
    }

    size_t yuv420p_y_size = stride * slice_height;
    size_t yuv420p_u_size = ((stride + 1) / 2) * ((slice_height + 1) / 2);
    uint8_t *yuv420p_y = yuv420p_buffer;
    uint8_t *yuv420p_u = yuv420p_y + yuv420p_y_size;
    uint8_t *yuv420p_v = yuv420p_u + yuv420p_u_size;

    // This is the approximate byte position in the stream.
    int64_t pos = aStreamOffset;

    VideoData::YCbCrBuffer b;
    b.mPlanes[0].mData = yuv420p_y;
    b.mPlanes[0].mWidth = mFrameInfo.mWidth;
    b.mPlanes[0].mHeight = mFrameInfo.mHeight;
    b.mPlanes[0].mStride = stride;
    b.mPlanes[0].mOffset = 0;
    b.mPlanes[0].mSkip = 0;

    b.mPlanes[1].mData = yuv420p_u;
    b.mPlanes[1].mWidth = (mFrameInfo.mWidth + 1) / 2;
    b.mPlanes[1].mHeight = (mFrameInfo.mHeight + 1) / 2;
    b.mPlanes[1].mStride = (stride + 1) / 2;
    b.mPlanes[1].mOffset = 0;
    b.mPlanes[1].mSkip = 0;

    b.mPlanes[2].mData = yuv420p_v;
    b.mPlanes[2].mWidth = (mFrameInfo.mWidth + 1) / 2;
    b.mPlanes[2].mHeight = (mFrameInfo.mHeight + 1) / 2;
    b.mPlanes[2].mStride = (stride + 1) / 2;
    b.mPlanes[2].mOffset = 0;
    b.mPlanes[2].mSkip = 0;

    data = VideoData::Create(
        mInfo.mVideo,
        mImageContainer,
        pos,
        timeUs,
        1, // We don't know the duration.
        b,
        keyFrame,
        -1,
        picture);
    ReleaseVideoBuffer();
  }

  data.forget(v);
  return NS_OK;
}
Example #5
HRESULT
WMFVideoMFTManager::CreateBasicVideoFrame(IMFSample* aSample,
                                          int64_t aStreamOffset,
                                          VideoData** aOutVideoData)
{
  NS_ENSURE_TRUE(aSample, E_POINTER);
  NS_ENSURE_TRUE(aOutVideoData, E_POINTER);

  *aOutVideoData = nullptr;

  HRESULT hr;
  RefPtr<IMFMediaBuffer> buffer;

  // Must convert to contiguous buffer to use the IMF2DBuffer interface.
  hr = aSample->ConvertToContiguousBuffer(byRef(buffer));
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  // Try to use the IMF2DBuffer interface if available, otherwise fall back
  // to the IMFMediaBuffer interface. Apparently IMF2DBuffer is more efficient,
  // but only some systems (Windows 8?) support it.
  BYTE* data = nullptr;
  LONG stride = 0;
  RefPtr<IMF2DBuffer> twoDBuffer;
  hr = buffer->QueryInterface(static_cast<IMF2DBuffer**>(byRef(twoDBuffer)));
  if (SUCCEEDED(hr)) {
    hr = twoDBuffer->Lock2D(&data, &stride);
    NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
  } else {
    hr = buffer->Lock(&data, nullptr, nullptr);
    NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
    stride = mVideoStride;
  }

  // YV12, planar format: [YYYY....][VVVV....][UUUU....]
  // i.e., Y, then V, then U.
  VideoData::YCbCrBuffer b;

  // Y (Y') plane
  b.mPlanes[0].mData = data;
  b.mPlanes[0].mStride = stride;
  b.mPlanes[0].mHeight = mVideoHeight;
  b.mPlanes[0].mWidth = mVideoWidth;
  b.mPlanes[0].mOffset = 0;
  b.mPlanes[0].mSkip = 0;

  // The V and U planes are stored 16-row-aligned, so we need to add padding
  // to the row heights to ensure the Y'CbCr planes are referenced properly.
  uint32_t padding = 0;
  if (mVideoHeight % 16 != 0) {
    padding = 16 - (mVideoHeight % 16);
  }
  uint32_t y_size = stride * (mVideoHeight + padding);
  uint32_t v_size = stride * (mVideoHeight + padding) / 4;
  uint32_t halfStride = (stride + 1) / 2;
  uint32_t halfHeight = (mVideoHeight + 1) / 2;
  uint32_t halfWidth = (mVideoWidth + 1) / 2;
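  // Worked sketch (illustrative numbers): for a 640x360 frame with
  // stride == 640, padding = 16 - (360 % 16) = 8, so
  // y_size = 640 * 368 = 235520 and v_size = 235520 / 4 = 58880;
  // the V plane then starts at data + y_size and the U plane at
  // data + y_size + v_size, matching the assignments below.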

  // U plane (Cb)
  b.mPlanes[1].mData = data + y_size + v_size;
  b.mPlanes[1].mStride = halfStride;
  b.mPlanes[1].mHeight = halfHeight;
  b.mPlanes[1].mWidth = halfWidth;
  b.mPlanes[1].mOffset = 0;
  b.mPlanes[1].mSkip = 0;

  // V plane (Cr)
  b.mPlanes[2].mData = data + y_size;
  b.mPlanes[2].mStride = halfStride;
  b.mPlanes[2].mHeight = halfHeight;
  b.mPlanes[2].mWidth = halfWidth;
  b.mPlanes[2].mOffset = 0;
  b.mPlanes[2].mSkip = 0;

  Microseconds pts = GetSampleTime(aSample);
  Microseconds duration = GetSampleDuration(aSample);
  nsRefPtr<VideoData> v = VideoData::Create(mVideoInfo,
                                            mImageContainer,
                                            aStreamOffset,
                                            std::max(0LL, pts),
                                            duration,
                                            b,
                                            false,
                                            -1,
                                            ToIntRect(mPictureRegion));
  if (twoDBuffer) {
    twoDBuffer->Unlock2D();
  } else {
    buffer->Unlock();
  }

  v.forget(aOutVideoData);
  return S_OK;
}
Example #6
IntRect ToIntRect(const String& source)
{
    return ToIntRect(source.CString());
}
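For reference, this overload just forwards to the char* parser, which expects four space-separated integers (left, top, right, bottom in Urho3D's convention). A minimal sketch:

IntRect r = ToIntRect("10 20 110 220");
// r.left_ == 10, r.top_ == 20, r.right_ == 110, r.bottom_ == 220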
Example #7
void Variant::FromString(VariantType type, const char* value)
{
    switch (type)
    {
    case VAR_INT:
        *this = ToInt(value);
        break;

    case VAR_INT64:
        *this = ToInt64(value);
        break;

    case VAR_BOOL:
        *this = ToBool(value);
        break;

    case VAR_FLOAT:
        *this = ToFloat(value);
        break;

    case VAR_VECTOR2:
        *this = ToVector2(value);
        break;

    case VAR_VECTOR3:
        *this = ToVector3(value);
        break;

    case VAR_VECTOR4:
        *this = ToVector4(value);
        break;

    case VAR_QUATERNION:
        *this = ToQuaternion(value);
        break;

    case VAR_COLOR:
        *this = ToColor(value);
        break;

    case VAR_STRING:
        *this = value;
        break;

    case VAR_BUFFER:
    {
        SetType(VAR_BUFFER);
        PODVector<unsigned char>& buffer = *(reinterpret_cast<PODVector<unsigned char>*>(&value_));
        StringToBuffer(buffer, value);
    }
        break;

    case VAR_VOIDPTR:
        // From string to void pointer not supported, set to null
        *this = (void*)0;
        break;

    case VAR_RESOURCEREF:
    {
        StringVector values = String::Split(value, ';');
        if (values.Size() == 2)
        {
            SetType(VAR_RESOURCEREF);
            ResourceRef& ref = *(reinterpret_cast<ResourceRef*>(&value_));
            ref.type_ = values[0];
            ref.name_ = values[1];
        }
    }
        break;

    case VAR_RESOURCEREFLIST:
    {
        StringVector values = String::Split(value, ';', true);
        if (values.Size() >= 1)
        {
            SetType(VAR_RESOURCEREFLIST);
            ResourceRefList& refList = *(reinterpret_cast<ResourceRefList*>(&value_));
            refList.type_ = values[0];
            refList.names_.Resize(values.Size() - 1);
            for (unsigned i = 1; i < values.Size(); ++i)
                refList.names_[i - 1] = values[i];
        }
    }
        break;

    case VAR_INTRECT:
        *this = ToIntRect(value);
        break;

    case VAR_INTVECTOR2:
        *this = ToIntVector2(value);
        break;

    case VAR_INTVECTOR3:
        *this = ToIntVector3(value);
        break;

    case VAR_PTR:
        // From string to RefCounted pointer not supported, set to null
        *this = (RefCounted*)0;
        break;

    case VAR_MATRIX3:
        *this = ToMatrix3(value);
        break;

    case VAR_MATRIX3X4:
        *this = ToMatrix3x4(value);
        break;

    case VAR_MATRIX4:
        *this = ToMatrix4(value);
        break;

    case VAR_DOUBLE:
        *this = ToDouble(value);
        break;

    case VAR_RECT:
        *this = ToRect(value);
        break;

    default:
        SetType(VAR_NONE);
    }
}
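Combined with the ToIntRect overloads above, a minimal round-trip sketch (assuming Urho3D's Variant accessors):

Variant v;
v.FromString(VAR_INTRECT, "0 0 640 480"); // dispatches to ToIntRect
IntRect r = v.GetIntRect();               // IntRect(0, 0, 640, 480)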
Example #8
bool MediaOmxReader::DecodeVideoFrame(bool &aKeyframeSkip,
                                      int64_t aTimeThreshold)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
  EnsureActive();

  // Record number of frames decoded and parsed. Automatically update the
  // stats counters using the AutoNotifyDecoded stack-based class.
  uint32_t parsed = 0, decoded = 0;
  AbstractMediaDecoder::AutoNotifyDecoded autoNotify(mDecoder, parsed, decoded);

  bool doSeek = mVideoSeekTimeUs != -1;
  if (doSeek) {
    aTimeThreshold = mVideoSeekTimeUs;
  }

  TimeStamp start = TimeStamp::Now();

  // Read next frame. Don't let this loop run for too long.
  while ((TimeStamp::Now() - start) < TimeDuration::FromSeconds(MAX_VIDEO_DECODE_SECONDS)) {
    MPAPI::VideoFrame frame;
    frame.mGraphicBuffer = nullptr;
    frame.mShouldSkip = false;
    if (!mOmxDecoder->ReadVideo(&frame, aTimeThreshold, aKeyframeSkip, doSeek)) {
      return false;
    }
    doSeek = false;
    mVideoSeekTimeUs = -1;

    // Ignore empty buffers, which stagefright's media read will sporadically return.
    if (frame.mSize == 0 && !frame.mGraphicBuffer) {
      continue;
    }

    parsed++;
    if (frame.mShouldSkip && mSkipCount < MAX_DROPPED_FRAMES) {
      mSkipCount++;
      continue;
    }

    mSkipCount = 0;

    aKeyframeSkip = false;

    IntRect picture = ToIntRect(mPicture);
    if (frame.Y.mWidth != mInitialFrame.width ||
        frame.Y.mHeight != mInitialFrame.height) {

      // Frame size is different from what the container reports. This is legal,
      // and we will preserve the ratio of the crop rectangle as it
      // was reported relative to the picture size reported by the container.
      picture.x = (mPicture.x * frame.Y.mWidth) / mInitialFrame.width;
      picture.y = (mPicture.y * frame.Y.mHeight) / mInitialFrame.height;
      picture.width = (frame.Y.mWidth * mPicture.width) / mInitialFrame.width;
      picture.height = (frame.Y.mHeight * mPicture.height) / mInitialFrame.height;
    }

    // This is the approximate byte position in the stream.
    int64_t pos = mDecoder->GetResource()->Tell();

    nsRefPtr<VideoData> v;
    if (!frame.mGraphicBuffer) {

      VideoData::YCbCrBuffer b;
      b.mPlanes[0].mData = static_cast<uint8_t *>(frame.Y.mData);
      b.mPlanes[0].mStride = frame.Y.mStride;
      b.mPlanes[0].mHeight = frame.Y.mHeight;
      b.mPlanes[0].mWidth = frame.Y.mWidth;
      b.mPlanes[0].mOffset = frame.Y.mOffset;
      b.mPlanes[0].mSkip = frame.Y.mSkip;

      b.mPlanes[1].mData = static_cast<uint8_t *>(frame.Cb.mData);
      b.mPlanes[1].mStride = frame.Cb.mStride;
      b.mPlanes[1].mHeight = frame.Cb.mHeight;
      b.mPlanes[1].mWidth = frame.Cb.mWidth;
      b.mPlanes[1].mOffset = frame.Cb.mOffset;
      b.mPlanes[1].mSkip = frame.Cb.mSkip;

      b.mPlanes[2].mData = static_cast<uint8_t *>(frame.Cr.mData);
      b.mPlanes[2].mStride = frame.Cr.mStride;
      b.mPlanes[2].mHeight = frame.Cr.mHeight;
      b.mPlanes[2].mWidth = frame.Cr.mWidth;
      b.mPlanes[2].mOffset = frame.Cr.mOffset;
      b.mPlanes[2].mSkip = frame.Cr.mSkip;

      v = VideoData::Create(mInfo.mVideo,
                            mDecoder->GetImageContainer(),
                            pos,
                            frame.mTimeUs,
                            1, // We don't know the duration.
                            b,
                            frame.mKeyFrame,
                            -1,
                            picture);
    } else {
      v = VideoData::Create(mInfo.mVideo,
                            mDecoder->GetImageContainer(),
                            pos,
                            frame.mTimeUs,
                            1, // We don't know the duration.
                            frame.mGraphicBuffer,
                            frame.mKeyFrame,
                            -1,
                            picture);
    }

    if (!v) {
      NS_WARNING("Unable to create VideoData");
      return false;
    }

    decoded++;
    NS_ASSERTION(decoded <= parsed, "Expect to decode fewer frames than parsed in OMX decoder...");

    mVideoQueue.Push(v);

    break;
  }

  return true;
}
Example #9
bool
SoftwareWebMVideoDecoder::DecodeVideoFrame(bool &aKeyframeSkip,
                                           int64_t aTimeThreshold)
{
  MOZ_ASSERT(mReader->OnTaskQueue());

  // Record number of frames decoded and parsed. Automatically update the
  // stats counters using the AutoNotifyDecoded stack-based class.
  AbstractMediaDecoder::AutoNotifyDecoded a(mReader->GetDecoder());

  nsRefPtr<NesteggPacketHolder> holder(mReader->NextPacket(WebMReader::VIDEO));
  if (!holder) {
    return false;
  }

  nestegg_packet* packet = holder->Packet();
  unsigned int track = 0;
  int r = nestegg_packet_track(packet, &track);
  if (r == -1) {
    return false;
  }

  unsigned int count = 0;
  r = nestegg_packet_count(packet, &count);
  if (r == -1) {
    return false;
  }

  if (count > 1) {
    NS_WARNING("Packet contains more than one video frame");
    return false;
  }

  int64_t tstamp = holder->Timestamp();

  // The end time of this frame is the start time of the next frame.  Fetch
  // the timestamp of the next packet for this track.  If we've reached the
  // end of the resource, use the file's duration as the end time of this
  // video frame.
  int64_t next_tstamp = 0;
  nsRefPtr<NesteggPacketHolder> next_holder(mReader->NextPacket(WebMReader::VIDEO));
  if (next_holder) {
    next_tstamp = next_holder->Timestamp();
    mReader->PushVideoPacket(next_holder);
  } else {
    next_tstamp = tstamp;
    next_tstamp += tstamp - mReader->GetLastVideoFrameTime();
  }
  mReader->SetLastVideoFrameTime(tstamp);

  unsigned char* data;
  size_t length;
  r = nestegg_packet_data(packet, 0, &data, &length);
  if (r == -1) {
    return false;
  }

  vpx_codec_stream_info_t si;
  memset(&si, 0, sizeof(si));
  si.sz = sizeof(si);
  if (mReader->GetVideoCodec() == NESTEGG_CODEC_VP8) {
    vpx_codec_peek_stream_info(vpx_codec_vp8_dx(), data, length, &si);
  } else if (mReader->GetVideoCodec() == NESTEGG_CODEC_VP9) {
    vpx_codec_peek_stream_info(vpx_codec_vp9_dx(), data, length, &si);
  }
  if (aKeyframeSkip && (!si.is_kf || tstamp < aTimeThreshold)) {
    // Skipping to next keyframe...
    a.mParsed++;
    a.mDropped++;
    return true;
  }

  if (aKeyframeSkip && si.is_kf) {
    aKeyframeSkip = false;
  }

  if (vpx_codec_decode(&mVPX, data, length, nullptr, 0)) {
    return false;
  }

  // If the timestamp of the video frame is less than
  // the time threshold required then it is not added
  // to the video queue and won't be displayed.
  if (tstamp < aTimeThreshold) {
    a.mParsed++;
    a.mDropped++;
    return true;
  }

  vpx_codec_iter_t iter = nullptr;
  vpx_image_t* img;

  while ((img = vpx_codec_get_frame(&mVPX, &iter))) {
    NS_ASSERTION(img->fmt == VPX_IMG_FMT_I420, "WebM image format not I420");

    // Chroma shifts are rounded down as per the decoding examples in the SDK
    VideoData::YCbCrBuffer b;
    b.mPlanes[0].mData = img->planes[0];
    b.mPlanes[0].mStride = img->stride[0];
    b.mPlanes[0].mHeight = img->d_h;
    b.mPlanes[0].mWidth = img->d_w;
    b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0;

    b.mPlanes[1].mData = img->planes[1];
    b.mPlanes[1].mStride = img->stride[1];
    b.mPlanes[1].mHeight = (img->d_h + 1) >> img->y_chroma_shift;
    b.mPlanes[1].mWidth = (img->d_w + 1) >> img->x_chroma_shift;
    b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0;

    b.mPlanes[2].mData = img->planes[2];
    b.mPlanes[2].mStride = img->stride[2];
    b.mPlanes[2].mHeight = (img->d_h + 1) >> img->y_chroma_shift;
    b.mPlanes[2].mWidth = (img->d_w + 1) >> img->x_chroma_shift;
    b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0;

    nsIntRect pictureRect = mReader->GetPicture();
    IntRect picture = ToIntRect(pictureRect);
    nsIntSize initFrame = mReader->GetInitialFrame();
    if (img->d_w != static_cast<uint32_t>(initFrame.width) ||
        img->d_h != static_cast<uint32_t>(initFrame.height)) {
      // Frame size is different from what the container reports. This is
      // legal in WebM, and we will preserve the ratio of the crop rectangle
      // as it was reported relative to the picture size reported by the
      // container.
      picture.x = (pictureRect.x * img->d_w) / initFrame.width;
      picture.y = (pictureRect.y * img->d_h) / initFrame.height;
      picture.width = (img->d_w * pictureRect.width) / initFrame.width;
      picture.height = (img->d_h * pictureRect.height) / initFrame.height;
    }

    VideoInfo videoInfo = mReader->GetMediaInfo().mVideo;
    nsRefPtr<VideoData> v = VideoData::Create(videoInfo,
                                              mReader->GetDecoder()->GetImageContainer(),
                                              holder->Offset(),
                                              tstamp,
                                              next_tstamp - tstamp,
                                              b,
                                              si.is_kf,
                                              -1,
                                              picture);
    if (!v) {
      return false;
    }
    a.mParsed++;
    a.mDecoded++;
    NS_ASSERTION(a.mDecoded <= a.mParsed,
                 "Expect only 1 frame per chunk per packet in WebM...");
    mReader->VideoQueue().Push(v);
  }

  return true;
}
Example #10
void Variant::FromString(VariantType type, const char* value)
{
    switch (type)
    {
    case VAR_INT:
        *this = ToInt(value);
        break;
        
    case VAR_BOOL:
        *this = ToBool(value);
        break;
        
    case VAR_FLOAT:
        *this = ToFloat(value);
        break;
        
    case VAR_VECTOR2:
        *this = ToVector2(value);
        break;
        
    case VAR_VECTOR3:
        *this = ToVector3(value);
        break;
        
    case VAR_VECTOR4:
        *this = ToVector4(value);
        break;
        
    case VAR_QUATERNION:
        *this = ToQuaternion(value);
        break;
        
    case VAR_COLOR:
        *this = ToColor(value);
        break;
        
    case VAR_STRING:
        *this = value;
        break;
        
    case VAR_BUFFER:
        {
            SetType(VAR_BUFFER);
            PODVector<unsigned char>& buffer = *(reinterpret_cast<PODVector<unsigned char>*>(&value_));
            Vector<String> values = String::Split(value, ' ');
            buffer.Resize(values.Size());
            for (unsigned i = 0; i < values.Size(); ++i)
                buffer[i] = ToInt(values[i]);
        }
        break;
        
    case VAR_PTR:
        *this = (void*)0;
        break;
        
    case VAR_RESOURCEREF:
        {
            Vector<String> values = String::Split(value, ';');
            if (values.Size() == 2)
            {
                SetType(VAR_RESOURCEREF);
                ResourceRef& ref = *(reinterpret_cast<ResourceRef*>(&value_));
                ref.type_ = ShortStringHash(values[0]);
                ref.id_ = StringHash(values[1]);
            }
        }
        break;
        
    case VAR_RESOURCEREFLIST:
        {
            Vector<String> values = String::Split(value, ';');
            if (values.Size() >= 1)
            {
                SetType(VAR_RESOURCEREFLIST);
                ResourceRefList& refList = *(reinterpret_cast<ResourceRefList*>(&value_));
                refList.type_ = ShortStringHash(values[0]);
                refList.ids_.Resize(values.Size() - 1);
                for (unsigned i = 1; i < values.Size(); ++i)
                    refList.ids_[i - 1] = StringHash(values[i]);
            }
        }
        break;
        
    case VAR_INTRECT:
        *this = ToIntRect(value);
        break;
        
    case VAR_INTVECTOR2:
        *this = ToIntVector2(value);
        break;
        
    default:
        SetType(VAR_NONE);
    }
}
Example #11
IntRect JSONValue::GetIntRect(unsigned index) const
{
    return ToIntRect(GetCString(index));
}
Example #12
IntRect JSONValue::GetIntRect(const String& name) const
{
    return ToIntRect(GetCString(name));
}
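A minimal usage sketch (the JSON key is hypothetical; this assumes the rectangle was stored as the usual space-separated string):

// Given JSON like { "viewport": "0 0 1280 720" }:
IntRect viewport = json.GetIntRect("viewport");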