Example 1
CRenderPicture *CProcessorHD::Convert(DVDVideoPicture* picture)
{
  // RENDER_FMT_YUV420P -> DXGI_FORMAT_NV12
  // RENDER_FMT_YUV420P10 -> DXGI_FORMAT_P010
  // RENDER_FMT_YUV420P16 -> DXGI_FORMAT_P016
  if ( picture->format != RENDER_FMT_YUV420P
    && picture->format != RENDER_FMT_YUV420P10
    && picture->format != RENDER_FMT_YUV420P16)
  {
    CLog::Log(LOGERROR, "%s - colorspace not supported by processor, skipping frame.", __FUNCTION__);
    return nullptr;
  }

  ID3D11View* pView = m_context->GetFree(nullptr);
  if (!pView)
  {
    CLog::Log(LOGERROR, "%s - no free video surface", __FUNCTION__);
    return nullptr;
  }

  ID3D11VideoProcessorInputView* view = reinterpret_cast<ID3D11VideoProcessorInputView*>(pView);

  ID3D11Resource* pResource = nullptr;
  view->GetResource(&pResource);

  D3D11_VIDEO_PROCESSOR_INPUT_VIEW_DESC vpivd;
  view->GetDesc(&vpivd);
  UINT subresource = D3D11CalcSubresource(0, vpivd.Texture2D.ArraySlice, 1);

  D3D11_MAPPED_SUBRESOURCE rectangle;
  ID3D11DeviceContext* pContext = g_Windowing.GetImmediateContext();
  if (FAILED(pContext->Map(pResource, subresource, D3D11_MAP_WRITE_DISCARD, 0, &rectangle)))
  {
    CLog::Log(LOGERROR, "%s - could not lock rect", __FUNCTION__);
    m_context->ClearReference(view);
    return nullptr;
  }

  uint8_t*  pData = static_cast<uint8_t*>(rectangle.pData);
  uint8_t*  dst[] = { pData, pData + m_texDesc.Height * rectangle.RowPitch };
  int dstStride[] = { rectangle.RowPitch, rectangle.RowPitch };

  if (picture->format == RENDER_FMT_YUV420P)
  {
    convert_yuv420_nv12(picture->data, picture->iLineSize, picture->iHeight, picture->iWidth, dst, dstStride);
  }
  else
  {
    convert_yuv420_p01x(picture->data, picture->iLineSize, picture->iHeight, picture->iWidth, dst, dstStride
                      , picture->format == RENDER_FMT_YUV420P10 ? 10 : 16);
  }
  pContext->Unmap(pResource, subresource);
  SAFE_RELEASE(pResource);

  m_context->ClearReference(view);
  m_context->MarkRender(view);
  CRenderPicture *pic = new CRenderPicture(m_context);
  pic->view           = view;
  return pic;
}
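
The convert_yuv420_nv12 helper itself is not shown here; in Kodi it is an optimized routine. For orientation, below is a minimal plain-C++ sketch of what a call with the dst/dstStride arrays prepared in Convert is expected to do. The name convert_yuv420_nv12_sketch and the plain loops are illustrative assumptions, not the actual implementation.

#include <cstdint>
#include <cstring>

// Illustrative sketch only (not the Kodi helper): copy planar YUV420 into the
// NV12 buffer described by dst[2]/dstStride[2] above (luma plane, then the
// interleaved UV plane that starts after Height rows of luma).
static void convert_yuv420_nv12_sketch(uint8_t* const src[3], const int srcStride[3],
                                       unsigned height, unsigned width,
                                       uint8_t* const dst[2], const int dstStride[2])
{
  // Luma: copy line by line, honoring both strides (which may exceed the width).
  for (unsigned y = 0; y < height; ++y)
    memcpy(dst[0] + y * dstStride[0], src[0] + y * srcStride[0], width);

  // Chroma: interleave the U and V planes into the single NV12 UV plane.
  const unsigned chromaWidth  = (width + 1) >> 1;
  const unsigned chromaHeight = (height + 1) >> 1;
  for (unsigned y = 0; y < chromaHeight; ++y)
  {
    const uint8_t* u = src[1] + y * srcStride[1];
    const uint8_t* v = src[2] + y * srcStride[2];
    uint8_t* d = dst[1] + y * dstStride[1];
    for (unsigned x = 0; x < chromaWidth; ++x)
    {
      *d++ = *u++;
      *d++ = *v++;
    }
  }
}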
Example 2
ID3D11VideoProcessorInputView* CProcessorHD::GetInputView(ID3D11View* view) 
{
  ID3D11VideoProcessorInputView* inputView = nullptr;
  if (m_context) // we have our own context, so the view is already a processor input view
  {
    inputView = reinterpret_cast<ID3D11VideoProcessorInputView*>(view);
    inputView->AddRef(); // it will be released in Render method

    return inputView;
  }

  // the view came from the decoder
  ID3D11VideoDecoderOutputView* decoderView = reinterpret_cast<ID3D11VideoDecoderOutputView*>(view);
  if (!decoderView) 
  {
    CLog::Log(LOGERROR, __FUNCTION__" - cannot get view.");
    return nullptr;
  }

  ID3D11Resource* resource = nullptr;
  D3D11_VIDEO_DECODER_OUTPUT_VIEW_DESC vdovd;
  decoderView->GetDesc(&vdovd);
  decoderView->GetResource(&resource);

  D3D11_VIDEO_PROCESSOR_INPUT_VIEW_DESC vpivd = { 0 };
  vpivd.FourCC = 0; // if zero, the driver uses the DXGI format; must be 0 on level 9.x
  vpivd.ViewDimension = D3D11_VPIV_DIMENSION_TEXTURE2D;
  vpivd.Texture2D.ArraySlice = vdovd.Texture2D.ArraySlice;
  vpivd.Texture2D.MipSlice = 0;

  if (FAILED(m_pVideoDevice->CreateVideoProcessorInputView(resource, m_pEnumerator, &vpivd, &inputView)))
  {
    CLog::Log(LOGERROR, __FUNCTION__" - cannot create processor view.");
  }
  resource->Release();

  return inputView;
}
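
GetInputView only covers the input side. For context, the output view that the video processor blits into is created with the matching ID3D11VideoDevice call; a minimal sketch follows. The CreateOutputView name and its parameters are hypothetical, and in Kodi the target resource presumably comes from the renderer.

#include <d3d11.h>

// Hypothetical helper (not Kodi code): create the output view the video processor
// renders into, the counterpart of the input view returned by GetInputView.
ID3D11VideoProcessorOutputView* CreateOutputView(ID3D11VideoDevice* videoDevice,
                                                 ID3D11VideoProcessorEnumerator* enumerator,
                                                 ID3D11Resource* target)
{
  D3D11_VIDEO_PROCESSOR_OUTPUT_VIEW_DESC ovd = {};
  ovd.ViewDimension = D3D11_VPOV_DIMENSION_TEXTURE2D;
  ovd.Texture2D.MipSlice = 0;

  ID3D11VideoProcessorOutputView* outputView = nullptr;
  if (FAILED(videoDevice->CreateVideoProcessorOutputView(target, enumerator, &ovd, &outputView)))
    return nullptr; // creation fails if the target format is not supported by the processor

  return outputView;
}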
Example 3
ID3D11VideoProcessorInputView* CProcessorHD::GetInputView(ID3D11View* view) 
{
  ID3D11VideoProcessorInputView* inputView = nullptr;
  if (m_eViewType == PROCESSOR_VIEW_TYPE_PROCESSOR)
  {
    inputView = reinterpret_cast<ID3D11VideoProcessorInputView*>(view);
    inputView->AddRef(); // it will be released later
  }
  else if (m_eViewType == PROCESSOR_VIEW_TYPE_DECODER)
  {
    // the view comes from the decoder
    ID3D11VideoDecoderOutputView* decoderView = reinterpret_cast<ID3D11VideoDecoderOutputView*>(view);
    if (!decoderView)
    {
      CLog::Log(LOGERROR, "%s - cannot get view.", __FUNCTION__);
      return nullptr;
    }

    ID3D11Resource* resource = nullptr;
    D3D11_VIDEO_DECODER_OUTPUT_VIEW_DESC vdovd;
    decoderView->GetDesc(&vdovd);
    decoderView->GetResource(&resource);

    D3D11_VIDEO_PROCESSOR_INPUT_VIEW_DESC vpivd = { { 0 } };
    vpivd.FourCC = 0;
    vpivd.ViewDimension = D3D11_VPIV_DIMENSION_TEXTURE2D;
    vpivd.Texture2D.ArraySlice = vdovd.Texture2D.ArraySlice;
    vpivd.Texture2D.MipSlice = 0;

    if (FAILED(m_pVideoDevice->CreateVideoProcessorInputView(resource, m_pEnumerator, &vpivd, &inputView)))
      CLog::Log(LOGERROR, "%s - cannot create processor view.", __FUNCTION__);

    resource->Release();
  }
  return inputView;
}
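
Both variants of GetInputView hand the view back with one outstanding reference: the AddRef in the processor path, or the newly created view in the decoder path. Below is a hedged sketch of how such a view is typically consumed through ID3D11VideoContext::VideoProcessorBlt. The BlitOneFrame name and parameters are hypothetical, and Kodi's actual Render() also configures color space, deinterlacing and filter state before the blit.

#include <d3d11.h>

// Hypothetical consumption path (not Kodi code): wrap the input view in a
// D3D11_VIDEO_PROCESSOR_STREAM and blit it to the output view in one call.
HRESULT BlitOneFrame(ID3D11VideoContext* videoContext,
                     ID3D11VideoProcessor* videoProcessor,
                     ID3D11VideoProcessorOutputView* outputView,
                     ID3D11VideoProcessorInputView* inputView)
{
  D3D11_VIDEO_PROCESSOR_STREAM stream = {};
  stream.Enable = TRUE;
  stream.OutputIndex = 0;        // single output
  stream.InputFrameOrField = 0;  // progressive frame, no past/future references
  stream.pInputSurface = inputView;

  HRESULT hr = videoContext->VideoProcessorBlt(videoProcessor, outputView,
                                               0 /* output frame */, 1 /* stream count */, &stream);

  inputView->Release(); // drop the reference handed out by GetInputView
  return hr;
}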
Example 4
CRenderPicture *CProcessorHD::Convert(DVDVideoPicture* picture)
{
  // RENDER_FMT_YUV420P -> DXGI_FORMAT_NV12
  // RENDER_FMT_YUV420P10 -> DXGI_FORMAT_P010
  // RENDER_FMT_YUV420P16 -> DXGI_FORMAT_P016
  if ( picture->format != RENDER_FMT_YUV420P
    && picture->format != RENDER_FMT_YUV420P10
    && picture->format != RENDER_FMT_YUV420P16)
  {
    CLog::Log(LOGERROR, "%s - colorspace not supported by processor, skipping frame.", __FUNCTION__);
    return nullptr;
  }

  ID3D11View* pView = m_context->GetFree(nullptr);
  if (!pView)
  {
    CLog::Log(LOGERROR, "%s - no free video surface", __FUNCTION__);
    return nullptr;
  }

  ID3D11VideoProcessorInputView* view = reinterpret_cast<ID3D11VideoProcessorInputView*>(pView);

  ID3D11Resource* pResource = nullptr;
  view->GetResource(&pResource);

  D3D11_VIDEO_PROCESSOR_INPUT_VIEW_DESC vpivd;
  view->GetDesc(&vpivd);
  UINT subresource = D3D11CalcSubresource(0, vpivd.Texture2D.ArraySlice, 1);

  D3D11_MAPPED_SUBRESOURCE rectangle;
  ID3D11DeviceContext* pContext = g_Windowing.GetImmediateContext();
  if (FAILED(pContext->Map(pResource, subresource, D3D11_MAP_WRITE_DISCARD, 0, &rectangle)))
  {
    CLog::Log(LOGERROR, "%s - could not lock rect", __FUNCTION__);
    m_context->ClearReference(view);
    return nullptr;
  }

  if (picture->format == RENDER_FMT_YUV420P)
  {
    uint8_t*  pData = static_cast<uint8_t*>(rectangle.pData);
    uint8_t*  dst[] = { pData, pData + m_texDesc.Height * rectangle.RowPitch };
    int dstStride[] = { rectangle.RowPitch, rectangle.RowPitch };
    convert_yuv420_nv12(picture->data, picture->iLineSize, picture->iHeight, picture->iWidth, dst, dstStride);
  }
  else
  {
    // TODO: Optimize this later using sse2/sse4
    uint16_t * d_y = static_cast<uint16_t*>(rectangle.pData);
    // RowPitch is a byte count; halve it for 16-bit pointer arithmetic
    uint16_t * d_uv = d_y + m_texDesc.Height * (rectangle.RowPitch >> 1);
    // Convert to NV12 - Luma
    for (size_t line = 0; line < picture->iHeight; ++line)
    {
      uint16_t * y = (uint16_t*)(picture->data[0] + picture->iLineSize[0] * line);
      uint16_t * d = d_y + (rectangle.RowPitch >> 1) * line;
      memcpy(d, y, picture->iLineSize[0]); // iLineSize[0] is already in bytes
    }
    // Convert to NV12 - Chroma
    size_t chromaWidth = (picture->iWidth + 1) >> 1;
    size_t chromaHeight = picture->iHeight >> 1;
    for (size_t line = 0; line < chromaHeight; ++line)
    {
      // iLineSize and RowPitch are byte counts: offset as bytes, then reinterpret as 16-bit samples
      uint16_t * u = (uint16_t*)(picture->data[1] + line * picture->iLineSize[1]);
      uint16_t * v = (uint16_t*)(picture->data[2] + line * picture->iLineSize[2]);
      uint16_t * d = d_uv + line * (rectangle.RowPitch >> 1);
      for (size_t x = 0; x < chromaWidth; x++)
      {
        *d++ = *u++; 
        *d++ = *v++;
      }
    }
  }
  pContext->Unmap(pResource, subresource);
  SAFE_RELEASE(pResource);

  m_context->ClearReference(view);
  m_context->MarkRender(view);
  CRenderPicture *pic = new CRenderPicture(m_context);
  pic->view           = view;
  return pic;
}
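
Example 1 factors the loops above into a convert_yuv420_p01x helper that also takes the source bit depth. That matters because DXGI_FORMAT_P010 stores each 10-bit sample in the most significant bits of a 16-bit word, so a straight copy only suffices for 16-bit input; presumably the helper shifts accordingly. A minimal illustrative sketch follows; convert_yuv420_p01x_sketch and its plain loops are assumptions, not Kodi's optimized routine.

#include <cstdint>

// Illustrative sketch only: pack planar 10/16-bit YUV420 into a P010/P016 buffer.
// P010/P016 keep samples in the upper bits of each 16-bit word, hence the shift;
// all strides are byte counts, so plane offsets are computed on uint8_t pointers.
static void convert_yuv420_p01x_sketch(uint8_t* const src[3], const int srcStride[3],
                                       unsigned height, unsigned width,
                                       uint8_t* const dst[2], const int dstStride[2],
                                       int bits)
{
  const int shift = 16 - bits; // 6 for 10-bit (P010), 0 for 16-bit (P016)

  // Luma plane
  for (unsigned y = 0; y < height; ++y)
  {
    const uint16_t* s = reinterpret_cast<const uint16_t*>(src[0] + y * srcStride[0]);
    uint16_t* d = reinterpret_cast<uint16_t*>(dst[0] + y * dstStride[0]);
    for (unsigned x = 0; x < width; ++x)
      d[x] = static_cast<uint16_t>(s[x] << shift);
  }

  // Interleaved chroma plane
  const unsigned chromaWidth  = (width + 1) >> 1;
  const unsigned chromaHeight = (height + 1) >> 1;
  for (unsigned y = 0; y < chromaHeight; ++y)
  {
    const uint16_t* u = reinterpret_cast<const uint16_t*>(src[1] + y * srcStride[1]);
    const uint16_t* v = reinterpret_cast<const uint16_t*>(src[2] + y * srcStride[2]);
    uint16_t* d = reinterpret_cast<uint16_t*>(dst[1] + y * dstStride[1]);
    for (unsigned x = 0; x < chromaWidth; ++x)
    {
      *d++ = static_cast<uint16_t>(*u++ << shift);
      *d++ = static_cast<uint16_t>(*v++ << shift);
    }
  }
}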