Example #1
0
// Convert a decoded AVFrame into the pixel format of the given LAVFrame,
// allocating the destination buffers and scaling with swscale.
// Returns S_OK on success, the allocation HRESULT on buffer failure,
// or E_FAIL if no swscale context could be created.
STDMETHODIMP CDecAvcodec::ConvertPixFmt(AVFrame *pFrame, LAVFrame *pOutFrame)
{
    // Allocate the buffers to write into; bail out before scaling if this
    // fails, otherwise sws_scale2 would write into unallocated planes.
    HRESULT hr = AllocLAVFrameBuffers(pOutFrame);
    if (FAILED(hr))
        return hr;

    // Map the LAV pixel format to a swscale compatible FFmpeg format
    AVPixelFormat dstFormat = getFFPixelFormatFromLAV(pOutFrame->format, pOutFrame->bpp);

    // Get a (cached) swscale context; same dimensions in and out, only the
    // pixel format changes. Can return nullptr on OOM / unsupported formats.
    m_pSwsContext = sws_getCachedContext(m_pSwsContext, pFrame->width, pFrame->height, (AVPixelFormat)pFrame->format, pFrame->width, pFrame->height, dstFormat, SWS_BILINEAR | SWS_PRINT_INFO, nullptr, nullptr, nullptr);
    if (m_pSwsContext == nullptr)
        return E_FAIL;

    // sws_scale2 expects ptrdiff_t strides, AVFrame stores them as int
    ptrdiff_t linesize[4];
    for (int i = 0; i < 4; i++)
        linesize[i] = pFrame->linesize[i];

    // Perform conversion
    sws_scale2(m_pSwsContext, pFrame->data, linesize, 0, pFrame->height, pOutFrame->data, pOutFrame->stride);

    return S_OK;
}
Example #2
0
// Blend one RGBA subtitle bitmap onto the video frame.
// If the subtitle does not match the video size, or the video is not an RGB
// format, the bitmap is first scaled/converted with swscale into temporary
// planes, then handed to the pixel-format specific blend function.
// Returns S_OK on success, E_FAIL if no swscale context could be created,
// or E_OUTOFMEMORY if a temporary buffer allocation failed.
STDMETHODIMP CLAVSubtitleConsumer::ProcessSubtitleBitmap(LAVPixelFormat pixFmt, int bpp, RECT videoRect, BYTE *videoData[4], ptrdiff_t videoStride[4], RECT subRect, POINT subPosition, SIZE subSize, const uint8_t *rgbData, ptrdiff_t pitch)
{
  if (subRect.left != 0 || subRect.top != 0) {
    DbgLog((LOG_ERROR, 10, L"ProcessSubtitleBitmap(): Left/Top in SubRect non-zero"));
  }

  BOOL bNeedScaling = FALSE;

  // We need scaling if the width is not the same, or the subtitle rect is higher then the video rect
  if (subRect.right != videoRect.right || subRect.bottom > videoRect.bottom) {
    bNeedScaling = TRUE;
  }

  // Non-RGB targets always go through swscale for the pixel format conversion
  if (pixFmt != LAVPixFmt_RGB32 && pixFmt != LAVPixFmt_RGB24) {
    bNeedScaling = TRUE;
  }

  // Re-select the blend function whenever the video pixel format changes
  if (m_PixFmt != pixFmt) {
    m_PixFmt = pixFmt;
    SelectBlendFunction();
  }

  // P010 is handled like its 16 bpp to compensate for having the data in the high bits
  if (pixFmt == LAVPixFmt_P010)
    bpp = 16;

  BYTE *subData[4] = { nullptr, nullptr, nullptr, nullptr };
  ptrdiff_t subStride[4] = { 0, 0, 0, 0 };

  // If we need scaling (either scaling or pixel conversion), do it here before starting the blend process
  if (bNeedScaling) {
    uint8_t *tmpBuf = nullptr;
    const AVPixelFormat avPixFmt = getFFPixFmtForSubtitle(pixFmt);

    // Calculate scaled size
    // We must ensure that the scaled subs still fit into the video

    // HACK: Scale to video size. In the future, we should take AR and the likes into account
    RECT newRect = videoRect;
    /*
    float subAR = (float)subRect.right / (float)subRect.bottom;
    if (newRect.right != videoRect.right) {
      newRect.right = videoRect.right;
      newRect.bottom = (LONG)(newRect.right / subAR);
    }
    if (newRect.bottom > videoRect.bottom) {
      newRect.bottom = videoRect.bottom;
      newRect.right = (LONG)(newRect.bottom * subAR);
    }*/

    SIZE newSize;
    newSize.cx = (LONG)av_rescale(subSize.cx, newRect.right, subRect.right);
    newSize.cy = (LONG)av_rescale(subSize.cy, newRect.bottom, subRect.bottom);

    // And scaled position
    subPosition.x = (LONG)av_rescale(subPosition.x, newSize.cx, subSize.cx);
    subPosition.y = (LONG)av_rescale(subPosition.y, newSize.cy, subSize.cy);

    // Source is always pre-multiplied BGRA; can return nullptr on OOM or
    // unsupported conversions, so check before using it.
    m_pSwsContext = sws_getCachedContext(m_pSwsContext, subSize.cx, subSize.cy, AV_PIX_FMT_BGRA, newSize.cx, newSize.cy, avPixFmt, SWS_BILINEAR|SWS_FULL_CHR_H_INP, nullptr, nullptr, nullptr);
    if (m_pSwsContext == nullptr)
      return E_FAIL;

    const uint8_t *src[4] = { (const uint8_t *)rgbData, nullptr, nullptr, nullptr };
    const ptrdiff_t srcStride[4] = { pitch, 0, 0, 0 };

    const LAVPixFmtDesc desc = getFFSubPixelFormatDesc(avPixFmt);
    const ptrdiff_t stride = FFALIGN(newSize.cx, 64) * desc.codedbytes;

    // Allocate one zeroed buffer per destination plane; on failure, release
    // whatever was already allocated and report out-of-memory.
    for (int plane = 0; plane < desc.planes; plane++) {
      subStride[plane]  = stride / desc.planeWidth[plane];
      const size_t size = subStride[plane] * FFALIGN(newSize.cy, 2) / desc.planeHeight[plane];
      subData[plane]    = (BYTE *)av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
      if (subData[plane] == nullptr) {
        for (int i = 0; i < 4; i++)
          av_freep(&subData[i]);
        return E_OUTOFMEMORY;
      }
    }

    // Un-pre-multiply alpha for YUV formats
    // TODO: Can we SIMD this? See ARGBUnattenuateRow_C/SSE2 in libyuv
    if (avPixFmt != AV_PIX_FMT_BGRA) {
      tmpBuf = (uint8_t *)av_malloc(pitch * subSize.cy);
      if (tmpBuf == nullptr) {
        for (int i = 0; i < 4; i++)
          av_freep(&subData[i]);
        return E_OUTOFMEMORY;
      }
      memcpy(tmpBuf, rgbData, pitch * subSize.cy);
      for (int line = 0; line < subSize.cy; line++) {
        uint8_t *p = tmpBuf + line * pitch;
        for (int col = 0; col < subSize.cx; col++) {
          // Skip fully transparent (division is pointless) and fully opaque
          // (already un-multiplied) pixels
          if (p[3] != 0 && p[3] != 255) {
            p[0] = av_clip_uint8(p[0] * 255 / p[3]);
            p[1] = av_clip_uint8(p[1] * 255 / p[3]);
            p[2] = av_clip_uint8(p[2] * 255 / p[3]);
          }
          p += 4;
        }
      }
      src[0] = tmpBuf;
    }

    sws_scale2(m_pSwsContext, src, srcStride, 0, subSize.cy, subData, subStride);
    subSize = newSize;

    if (tmpBuf)
      av_free(tmpBuf);
  } else {
    // No conversion needed, blend straight from the source bitmap
    subData[0] = (BYTE *)rgbData;
    subStride[0] = pitch;
  }

  ASSERT((subPosition.x + subSize.cx) <= videoRect.right);
  ASSERT((subPosition.y + subSize.cy) <= videoRect.bottom);

  // Blend through the member function pointer selected by SelectBlendFunction()
  if (blend)
    (this->*blend)(videoData, videoStride, videoRect, subData, subStride, subPosition, subSize, pixFmt, bpp);

  // Free the temporary planes if we allocated them above
  if (bNeedScaling) {
    for (int i = 0; i < 4; i++) {
      av_freep(&subData[i]);
    }
  }

  return S_OK;
}