Example #1
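/* Not shown in this example: avc is a file-scope AVCodecContext*, and
   get_buffer/release_buffer/reget_buffer, ALIGN and struct frame_format
   are defined elsewhere in the original source. */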
static int lavc_open(const char *name, AVCodecContext *params,
                     struct frame_format *ff)
{
    int x_off, y_off;
    int edge_width;
    AVCodec *codec;
    int err;

    codec = avcodec_find_decoder(params->codec_id);
    if (!codec) {
        fprintf(stderr, "Can't find codec %x\n", params->codec_id);
        return -1;
    }

    avc = avcodec_alloc_context();

    avc->width          = params->width;
    avc->height         = params->height;
    avc->time_base      = params->time_base;
    avc->extradata      = params->extradata;
    avc->extradata_size = params->extradata_size;

    avc->get_buffer     = get_buffer;
    avc->release_buffer = release_buffer;
    avc->reget_buffer   = reget_buffer;

    err = avcodec_open(avc, codec);
    if (err) {
        fprintf(stderr, "avcodec_open: %d\n", err);
        return err;
    }

    edge_width = avcodec_get_edge_width();
    x_off      = ALIGN(edge_width, 32);
    y_off      = edge_width;

    ff->width  = ALIGN(params->width  + 2 * x_off, 32);
    ff->height = ALIGN(params->height + 2 * y_off, 32);
    ff->disp_x = x_off;
    ff->disp_y = y_off;
    ff->disp_w = params->width;
    ff->disp_h = params->height;
    ff->pixfmt = params->pix_fmt;

    return 0;
}
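The ALIGN helper is not shown above. A typical definition (an assumption here, not necessarily the project's actual macro) rounds up to the next multiple of a power-of-two alignment:

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

With the usual 16-pixel edge returned by avcodec_get_edge_width() in that era, x_off = ALIGN(16, 32) = 32, so every row of the displayed area starts 32 pixels into the padded buffer.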
Example #2
int
FFmpegH264Decoder<LIBAV_VER>::AllocateYUV420PVideoBuffer(
  AVCodecContext* aCodecContext, AVFrame* aFrame)
{
  bool needAlign = aCodecContext->codec->capabilities & CODEC_CAP_DR1;
  bool needEdge = !(aCodecContext->flags & CODEC_FLAG_EMU_EDGE);
  int edgeWidth = needEdge ? avcodec_get_edge_width() : 0;

  int decodeWidth = aCodecContext->width + edgeWidth * 2;
  int decodeHeight = aCodecContext->height + edgeWidth * 2;

  if (needAlign) {
    // Align width and height to account for CODEC_CAP_DR1.
    // Make sure decodeWidth is a multiple of 64 so that each UV plane stride
    // is a multiple of 32; FFmpeg uses SSE3-accelerated code to copy a frame
    // line by line, and the VP9 decoder uses MOVAPS/VEX.256 instructions,
    // which require 32-byte-aligned memory.
    decodeWidth = (decodeWidth + 63) & ~63;
    decodeHeight = (decodeHeight + 63) & ~63;
  }
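  // Example: a 1920x1080 stream with a 16-pixel edge gives
  // decodeWidth = 1952 -> 1984 and decodeHeight = 1112 -> 1152.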

  PodZero(&aFrame->data[0], AV_NUM_DATA_POINTERS);
  PodZero(&aFrame->linesize[0], AV_NUM_DATA_POINTERS);

  int pitch = decodeWidth;
  int chroma_pitch = (pitch + 1) / 2;
  int chroma_height = (decodeHeight + 1) / 2;

  // Get strides for each plane.
  aFrame->linesize[0] = pitch;
  aFrame->linesize[1] = aFrame->linesize[2] = chroma_pitch;

  size_t allocSize = pitch * decodeHeight + (chroma_pitch * chroma_height) * 2;

  RefPtr<Image> image =
    mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
  PlanarYCbCrImage* ycbcr = static_cast<PlanarYCbCrImage*>(image.get());
  uint8_t* buffer = ycbcr->AllocateAndGetNewBuffer(allocSize + 64);
  if (!buffer) {
    NS_WARNING("Failed to allocate buffer for FFmpeg video decoding");
    return -1;
  }

  // FFmpeg requires a 16/32-byte-aligned buffer; align to 64 to be safe.
  buffer = reinterpret_cast<uint8_t*>(
    (reinterpret_cast<uintptr_t>(buffer) + 63) & ~63);

  int offsets[3] = {
    0,
    pitch * decodeHeight,
    pitch * decodeHeight + chroma_pitch * chroma_height };
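  // The three planes live in one contiguous allocation: Y first, then Cb,
  // then Cr, which is what the offsets above encode.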

  // Offset each plane pointer past the edge: |edgeWidth| rows at the top of
  // the frame plus |edgeWidth| pixels from the left (halved for the
  // subsampled chroma planes).
  int planesEdgeWidth[3] = {
    edgeWidth * aFrame->linesize[0] + edgeWidth,
    edgeWidth / 2 * aFrame->linesize[1] + edgeWidth / 2,
    edgeWidth / 2 * aFrame->linesize[2] + edgeWidth / 2 };

  for (uint32_t i = 0; i < 3; i++) {
    aFrame->data[i] = buffer + offsets[i] + planesEdgeWidth[i];
  }

  aFrame->extended_data = aFrame->data;
  aFrame->width = aCodecContext->width;
  aFrame->height = aCodecContext->height;

  aFrame->opaque = static_cast<void*>(image.forget().take());

  aFrame->type = FF_BUFFER_TYPE_USER;
  aFrame->reordered_opaque = aCodecContext->reordered_opaque;
#if LIBAVCODEC_VERSION_MAJOR == 53
  if (aCodecContext->pkt) {
    aFrame->pkt_pts = aCodecContext->pkt->pts;
  }
#endif
  return 0;
}
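How an allocator like this is hooked into libav is not shown on this page. A minimal sketch, assuming the pre-lavc55 get_buffer callback API; the trampoline name AllocateBufferCb and the use of the context's opaque pointer are illustrative, not taken from the original source:

// Hypothetical wiring for the allocator above.
static int AllocateBufferCb(AVCodecContext* aCodecContext, AVFrame* aFrame)
{
  // Recover the decoder instance stashed in the context's opaque field.
  auto* decoder =
    static_cast<FFmpegH264Decoder<LIBAV_VER>*>(aCodecContext->opaque);
  return decoder->AllocateYUV420PVideoBuffer(aCodecContext, aFrame);
}

// During decoder setup (sketch):
//   mCodecContext->opaque = this;
//   mCodecContext->get_buffer = AllocateBufferCb;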
Example #3
int
FFmpegH264Decoder<LIBAV_VER>::AllocateYUV420PVideoBuffer(
  AVCodecContext* aCodecContext, AVFrame* aFrame)
{
  bool needAlign = aCodecContext->codec->capabilities & CODEC_CAP_DR1;
  int edgeWidth = needAlign ? avcodec_get_edge_width() : 0;
  int decodeWidth = aCodecContext->width + edgeWidth * 2;
  // Make sure decodeWidth is a multiple of 32 so that each UV plane stride is
  // a multiple of 16; FFmpeg uses SSE2-accelerated code to copy a frame line
  // by line.
  decodeWidth = (decodeWidth + 31) & ~31;
  int decodeHeight = aCodecContext->height + edgeWidth * 2;

  if (needAlign) {
    // Align width and height to account for CODEC_CAP_DR1.
    int stride_align[AV_NUM_DATA_POINTERS];
    avcodec_align_dimensions2(aCodecContext, &decodeWidth, &decodeHeight,
                              stride_align);
  }

  // Get strides for each plane.
  av_image_fill_linesizes(aFrame->linesize, aCodecContext->pix_fmt,
                          decodeWidth);
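  // For YUV420P this sets linesize[0] = decodeWidth and
  // linesize[1] = linesize[2] = decodeWidth / 2.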

  // Let FFmpeg set up its YUV plane pointers and tell us how much memory we
  // need.
  // Note that we're passing |nullptr| here as the base address as we haven't
  // allocated our image yet. We will adjust |aFrame->data| below.
  size_t allocSize =
    av_image_fill_pointers(aFrame->data, aCodecContext->pix_fmt, decodeHeight,
                           nullptr /* base address */, aFrame->linesize);

  nsRefPtr<Image> image =
    mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
  PlanarYCbCrImage* ycbcr = static_cast<PlanarYCbCrImage*>(image.get());
  uint8_t* buffer = ycbcr->AllocateAndGetNewBuffer(allocSize + 64);
  if (!buffer) {
    NS_WARNING("Failed to allocate buffer for FFmpeg video decoding");
    return -1;
  }

  // FFmpeg requires a 16/32-byte-aligned buffer; align to 64 to be safe.
  buffer = reinterpret_cast<uint8_t*>(
    (reinterpret_cast<uintptr_t>(buffer) + 63) & ~63);

  // Now that we've allocated our image, we can add its address to the offsets
  // set by |av_image_fill_pointers| above. We also have to add |edgeWidth|
  // pixels of padding here.
  for (uint32_t i = 0; i < AV_NUM_DATA_POINTERS; i++) {
    // The C planes are half the resolution of the Y plane, so we need to halve
    // the edge width here.
    uint32_t planeEdgeWidth = edgeWidth / (i ? 2 : 1);

    // Add buffer offset, plus a horizontal bar |edgeWidth| pixels high at the
    // top of the frame, plus |edgeWidth| pixels from the left of the frame.
    aFrame->data[i] += reinterpret_cast<ptrdiff_t>(
      buffer + planeEdgeWidth * aFrame->linesize[i] + planeEdgeWidth);
  }

  // Unused, but needs to be non-zero to keep ffmpeg happy.
  aFrame->type = GECKO_FRAME_TYPE;

  aFrame->extended_data = aFrame->data;
  aFrame->width = aCodecContext->width;
  aFrame->height = aCodecContext->height;

  aFrame->opaque = static_cast<void*>(image.forget().take());

  return 0;
}
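Compared with the previous version, which computes strides, plane offsets and the allocation size by hand, this variant delegates the bookkeeping to FFmpeg's own helpers: avcodec_align_dimensions2 picks suitably aligned dimensions, av_image_fill_linesizes fills in the per-plane strides, and av_image_fill_pointers, called with a null base address, returns the required buffer size while leaving relative plane offsets in aFrame->data.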
Example #4
int
FFmpegH264Decoder::AllocateYUV420PVideoBuffer(AVCodecContext* aCodecContext,
                                              AVFrame* aFrame)
{
  // Older versions of ffmpeg require that edges be allocated *around* the
  // actual image.
  int edgeWidth = avcodec_get_edge_width();
  int decodeWidth = aCodecContext->width + edgeWidth * 2;
  int decodeHeight = aCodecContext->height + edgeWidth * 2;

  // Align width and height to possibly speed up decode.
  int stride_align[AV_NUM_DATA_POINTERS];
  avcodec_align_dimensions2(aCodecContext, &decodeWidth, &decodeHeight,
                            stride_align);

  // Get strides for each plane.
  av_image_fill_linesizes(aFrame->linesize, aCodecContext->pix_fmt,
                          decodeWidth);

  // Let FFmpeg set up its YUV plane pointers and tell us how much memory we
  // need.
  // Note that we're passing |nullptr| here as the base address as we haven't
  // allocated our image yet. We will adjust |aFrame->data| below.
  size_t allocSize =
    av_image_fill_pointers(aFrame->data, aCodecContext->pix_fmt, decodeHeight,
                           nullptr /* base address */, aFrame->linesize);

  nsRefPtr<Image> image =
    mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
  PlanarYCbCrImage* ycbcr = static_cast<PlanarYCbCrImage*>(image.get());
  uint8_t* buffer = ycbcr->AllocateAndGetNewBuffer(allocSize);

  if (!buffer) {
    NS_WARNING("Failed to allocate buffer for FFmpeg video decoding");
    return -1;
  }

  // Now that we've allocated our image, we can add its address to the offsets
  // set by |av_image_fill_pointers| above. We also have to add |edgeWidth|
  // pixels of padding here.
  for (uint32_t i = 0; i < AV_NUM_DATA_POINTERS; i++) {
    // The C planes are half the resolution of the Y plane, so we need to halve
    // the edge width here.
    uint32_t planeEdgeWidth = edgeWidth / (i ? 2 : 1);

    // Add buffer offset, plus a horizontal bar |edgeWidth| pixels high at the
    // top of the frame, plus |edgeWidth| pixels from the left of the frame.
    aFrame->data[i] += reinterpret_cast<ptrdiff_t>(
      buffer + planeEdgeWidth * aFrame->linesize[i] + planeEdgeWidth);
  }

  // Unused, but needs to be non-zero to keep ffmpeg happy.
  aFrame->type = GECKO_FRAME_TYPE;

  aFrame->extended_data = aFrame->data;
  aFrame->width = aCodecContext->width;
  aFrame->height = aCodecContext->height;

  mozilla::layers::PlanarYCbCrData data;
  PlanarYCbCrDataFromAVFrame(data, aFrame);
  ycbcr->SetDataNoCopy(data);

  mCurrentImage.swap(image);

  return 0;
}
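The PlanarYCbCrDataFromAVFrame helper used above is not shown on this page. A minimal sketch of what it plausibly does, using the field names of mozilla::layers::PlanarYCbCrData as assumptions rather than the decoder's actual implementation:

// Illustrative sketch, not the original helper.
static void
PlanarYCbCrDataFromAVFrame(mozilla::layers::PlanarYCbCrData& aData,
                           AVFrame* aFrame)
{
  aData.mPicX = 0;
  aData.mPicY = 0;
  aData.mPicSize = mozilla::gfx::IntSize(aFrame->width, aFrame->height);

  // Luma plane.
  aData.mYChannel = aFrame->data[0];
  aData.mYStride = aFrame->linesize[0];
  aData.mYSize = mozilla::gfx::IntSize(aFrame->width, aFrame->height);

  // Chroma planes are subsampled 2x2 in YUV420P.
  aData.mCbChannel = aFrame->data[1];
  aData.mCrChannel = aFrame->data[2];
  aData.mCbCrStride = aFrame->linesize[1];
  aData.mCbCrSize = mozilla::gfx::IntSize((aFrame->width + 1) / 2,
                                          (aFrame->height + 1) / 2);
}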