bool
SoftwareWebMVideoDecoder::DecodeVideoFrame(bool &aKeyframeSkip,
                                           int64_t aTimeThreshold)
{
  MOZ_ASSERT(mReader->OnTaskQueue());

  // Record the number of frames decoded and parsed. The stack-based
  // AutoNotifyDecoded class updates the stats counters automatically.
  AbstractMediaDecoder::AutoNotifyDecoded a(mReader->GetDecoder());

  nsAutoRef<NesteggPacketHolder> holder(mReader->NextPacket(WebMReader::VIDEO));
  if (!holder) {
    return false;
  }

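  // Extract the packet's track index, the number of data chunks it holds,
  // and its presentation timestamp; the nestegg accessors return -1 on
  // failure.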
  nestegg_packet* packet = holder->mPacket;
  unsigned int track = 0;
  int r = nestegg_packet_track(packet, &track);
  if (r == -1) {
    return false;
  }

  unsigned int count = 0;
  r = nestegg_packet_count(packet, &count);
  if (r == -1) {
    return false;
  }

  uint64_t tstamp = 0;
  r = nestegg_packet_tstamp(packet, &tstamp);
  if (r == -1) {
    return false;
  }

  // The end time of this frame is the start time of the next frame.  Fetch
  // the timestamp of the next packet for this track.  If we've reached the
  // end of the resource, estimate this frame's end time from the duration
  // of the previous frame.
  uint64_t next_tstamp = 0;
  nsAutoRef<NesteggPacketHolder> next_holder(mReader->NextPacket(WebMReader::VIDEO));
  if (next_holder) {
    r = nestegg_packet_tstamp(next_holder->mPacket, &next_tstamp);
    if (r == -1) {
      return false;
    }
    mReader->PushVideoPacket(next_holder.disown());
  } else {
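    // No next packet: reuse the previous frame's duration as an estimate.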
    next_tstamp = tstamp;
    next_tstamp += tstamp - mReader->GetLastVideoFrameTime();
  }
  mReader->SetLastVideoFrameTime(tstamp);

  int64_t tstamp_usecs = tstamp / NS_PER_USEC;
  for (uint32_t i = 0; i < count; ++i) {
    unsigned char* data;
    size_t length;
    r = nestegg_packet_data(packet, i, &data, &length);
    if (r == -1) {
      return false;
    }

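    // Peek at the chunk header to learn whether it contains a keyframe,
    // without running a full decode.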
    vpx_codec_stream_info_t si;
    memset(&si, 0, sizeof(si));
    si.sz = sizeof(si);
    if (mReader->GetVideoCodec() == NESTEGG_CODEC_VP8) {
      vpx_codec_peek_stream_info(vpx_codec_vp8_dx(), data, length, &si);
    } else if (mReader->GetVideoCodec() == NESTEGG_CODEC_VP9) {
      vpx_codec_peek_stream_info(vpx_codec_vp9_dx(), data, length, &si);
    }
    if (aKeyframeSkip && (!si.is_kf || tstamp_usecs < aTimeThreshold)) {
      // Skipping to next keyframe...
      a.mParsed++; // Assume 1 frame per chunk.
      a.mDropped++;
      continue;
    }

    if (aKeyframeSkip && si.is_kf) {
      aKeyframeSkip = false;
    }

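    // Hand the chunk to libvpx; vpx_codec_decode() returns a nonzero error
    // code on failure.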
    if (vpx_codec_decode(&mVPX, data, length, nullptr, 0)) {
      return false;
    }

    // If the frame's timestamp is below the required time threshold, it is
    // not added to the video queue and will never be displayed.
    if (tstamp_usecs < aTimeThreshold) {
      a.mParsed++; // Assume 1 frame per chunk.
      a.mDropped++;
      continue;
    }

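    // Drain every frame the decoder produced for this chunk.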
    vpx_codec_iter_t  iter = nullptr;
    vpx_image_t      *img;

    while ((img = vpx_codec_get_frame(&mVPX, &iter))) {
      NS_ASSERTION(img->fmt == VPX_IMG_FMT_I420, "WebM image format not I420");

      // Chroma shifts are rounded down as per the decoding examples in the SDK
      VideoData::YCbCrBuffer b;
      b.mPlanes[0].mData = img->planes[0];
      b.mPlanes[0].mStride = img->stride[0];
      b.mPlanes[0].mHeight = img->d_h;
      b.mPlanes[0].mWidth = img->d_w;
      b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0;

      b.mPlanes[1].mData = img->planes[1];
      b.mPlanes[1].mStride = img->stride[1];
      b.mPlanes[1].mHeight = (img->d_h + 1) >> img->y_chroma_shift;
      b.mPlanes[1].mWidth = (img->d_w + 1) >> img->x_chroma_shift;
      b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0;

      b.mPlanes[2].mData = img->planes[2];
      b.mPlanes[2].mStride = img->stride[2];
      b.mPlanes[2].mHeight = (img->d_h + 1) >> img->y_chroma_shift;
      b.mPlanes[2].mWidth = (img->d_w + 1) >> img->x_chroma_shift;
      b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0;

      nsIntRect pictureRect = mReader->GetPicture();
      IntRect picture = pictureRect;
      nsIntSize initFrame = mReader->GetInitialFrame();
      if (img->d_w != static_cast<uint32_t>(initFrame.width) ||
          img->d_h != static_cast<uint32_t>(initFrame.height)) {
        // Frame size is different from what the container reports. This is
        // legal in WebM, and we will preserve the ratio of the crop rectangle
        // as it was reported relative to the picture size reported by the
        // container.
        picture.x = (pictureRect.x * img->d_w) / initFrame.width;
        picture.y = (pictureRect.y * img->d_h) / initFrame.height;
        picture.width = (img->d_w * pictureRect.width) / initFrame.width;
        picture.height = (img->d_h * pictureRect.height) / initFrame.height;
      }

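      // Wrap the decoded planes in a VideoData object. The duration is the
      // gap between this packet's timestamp and the next one's.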
      VideoInfo videoInfo = mReader->GetMediaInfo().mVideo;
      nsRefPtr<VideoData> v = VideoData::Create(videoInfo,
                                                mReader->GetDecoder()->GetImageContainer(),
                                                holder->mOffset,
                                                tstamp_usecs,
                                                (next_tstamp / NS_PER_USEC) - tstamp_usecs,
                                                b,
                                                si.is_kf,
                                                -1,
                                                picture);
      if (!v) {
        return false;
      }
      a.mParsed++;
      a.mDecoded++;
      NS_ASSERTION(a.mDecoded <= a.mParsed,
        "Expect only 1 frame per chunk per packet in WebM...");
      mReader->VideoQueue().Push(v);
    }
  }

  return true;
}
Example 2
static GstFlowReturn
open_codec (GstVP9Dec * dec, GstVideoCodecFrame * frame)
{
  int flags = 0;
  vpx_codec_stream_info_t stream_info;
  vpx_codec_caps_t caps;
  vpx_codec_dec_cfg_t cfg;
  vpx_codec_err_t status;
  GstMapInfo minfo;

  memset (&stream_info, 0, sizeof (stream_info));
  memset (&cfg, 0, sizeof (cfg));
  stream_info.sz = sizeof (stream_info);

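  /* Map the frame's input buffer for reading so we can peek at the
   * bitstream header. */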
  if (!gst_buffer_map (frame->input_buffer, &minfo, GST_MAP_READ)) {
    GST_ERROR_OBJECT (dec, "Failed to map input buffer");
    return GST_FLOW_ERROR;
  }

  status = vpx_codec_peek_stream_info (&vpx_codec_vp9_dx_algo,
      minfo.data, minfo.size, &stream_info);

  gst_buffer_unmap (frame->input_buffer, &minfo);

  if (status != VPX_CODEC_OK) {
    GST_WARNING_OBJECT (dec, "VPX preprocessing error: %s",
        gst_vpx_error_name (status));
    gst_video_decoder_drop_frame (GST_VIDEO_DECODER (dec), frame);
    return GST_FLOW_CUSTOM_SUCCESS_1;
  }
  if (!stream_info.is_kf) {
    GST_WARNING_OBJECT (dec, "No keyframe, skipping");
    gst_video_decoder_drop_frame (GST_VIDEO_DECODER (dec), frame);
    return GST_FLOW_CUSTOM_SUCCESS_1;
  }

  /* FIXME: peek_stream_info() does not return valid values, take input caps */
  stream_info.w = dec->input_state->info.width;
  stream_info.h = dec->input_state->info.height;

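  /* Configure the decoder with the stream dimensions and the requested
   * number of threads. */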
  cfg.w = stream_info.w;
  cfg.h = stream_info.h;
  cfg.threads = dec->threads;

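  /* Query the algorithm's capabilities to see whether post-processing is
   * supported. */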
  caps = vpx_codec_get_caps (&vpx_codec_vp9_dx_algo);

  if (dec->post_processing) {
    if (!(caps & VPX_CODEC_CAP_POSTPROC)) {
      GST_WARNING_OBJECT (dec, "Decoder does not support post processing");
    } else {
      flags |= VPX_CODEC_USE_POSTPROC;
    }
  }

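  /* Create the decoder context; failure here is fatal for the element. */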
  status =
      vpx_codec_dec_init (&dec->decoder, &vpx_codec_vp9_dx_algo, &cfg, flags);
  if (status != VPX_CODEC_OK) {
    GST_ELEMENT_ERROR (dec, LIBRARY, INIT,
        ("Failed to initialize VP9 decoder"), ("%s",
            gst_vpx_error_name (status)));
    return GST_FLOW_ERROR;
  }

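  /* Now that the decoder exists, push the element's post-processing
   * properties down to it. */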
  if ((caps & VPX_CODEC_CAP_POSTPROC) && dec->post_processing) {
    vp8_postproc_cfg_t pp_cfg = { 0, };

    pp_cfg.post_proc_flag = dec->post_processing_flags;
    pp_cfg.deblocking_level = dec->deblocking_level;
    pp_cfg.noise_level = dec->noise_level;

    status = vpx_codec_control (&dec->decoder, VP8_SET_POSTPROC, &pp_cfg);
    if (status != VPX_CODEC_OK) {
      GST_WARNING_OBJECT (dec, "Couldn't set postprocessing settings: %s",
          gst_vpx_error_name (status));
    }
  }

  dec->decoder_inited = TRUE;

  return GST_FLOW_OK;
}
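
Both examples lean on vpx_codec_peek_stream_info() to detect keyframes before committing to a decode. A minimal standalone sketch of that pattern follows; the helper name and its arguments are illustrative, not taken from either codebase.

#include <string.h>
#include <vpx/vpx_decoder.h>
#include <vpx/vp8dx.h>

/* Illustrative helper: returns nonzero if the chunk at `data` begins with
 * a VP9 keyframe. Peeking inspects only the chunk header, so no decoder
 * context is required. */
static int
chunk_is_vp9_keyframe (const unsigned char *data, unsigned int size)
{
  vpx_codec_stream_info_t si;

  memset (&si, 0, sizeof (si));
  si.sz = sizeof (si);

  if (vpx_codec_peek_stream_info (vpx_codec_vp9_dx (), data, size, &si)
      != VPX_CODEC_OK)
    return 0;

  return si.is_kf;
}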