mfxStatus QSV_Encoder_Internal::Drain()
{
    mfxStatus sts = MFX_ERR_NONE;

    //
    // Drain the buffered encoded frames
    //
    while (MFX_ERR_NONE <= sts) {
        int nTaskIdx = GetFreeTaskIndex(m_pTaskPool, m_nTaskPool);
        if (MFX_ERR_NOT_FOUND == nTaskIdx) {
            // No more free tasks, need to sync
            sts = m_session.SyncOperation(m_pTaskPool[m_nFirstSyncTask].syncp, 60000);
            MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

            m_pTaskPool[m_nFirstSyncTask].syncp = NULL;
            m_nFirstSyncTask = (m_nFirstSyncTask + 1) % m_nTaskPool;
        } else {
            for (;;) {
                // Encode a frame asynchronously (returns immediately)
                sts = m_pmfxENC->EncodeFrameAsync(NULL, NULL,
                                                  &m_pTaskPool[nTaskIdx].mfxBS,
                                                  &m_pTaskPool[nTaskIdx].syncp);

                if (MFX_ERR_NONE < sts && !m_pTaskPool[nTaskIdx].syncp) {
                    // Repeat the call if warning and no output
                    if (MFX_WRN_DEVICE_BUSY == sts)
                        MSDK_SLEEP(1); // Wait if device is busy, then repeat the same call
                } else if (MFX_ERR_NONE < sts && m_pTaskPool[nTaskIdx].syncp) {
                    sts = MFX_ERR_NONE; // Ignore warnings if output is available
                    break;
                } else {
                    break;
                }
            }
        }
    }

    // MFX_ERR_MORE_DATA indicates that there are no more buffered frames; exit on any other error
    MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_DATA);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    //
    // Sync all remaining tasks in task pool
    //
    while (m_pTaskPool[m_nFirstSyncTask].syncp) {
        sts = m_session.SyncOperation(m_pTaskPool[m_nFirstSyncTask].syncp, 60000);
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

        m_pTaskPool[m_nFirstSyncTask].syncp = NULL;
        m_nFirstSyncTask = (m_nFirstSyncTask + 1) % m_nTaskPool;
    }

    return sts;
}
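// Drain() above relies on a GetFreeTaskIndex() helper that is not shown in this listing.
// The following is a minimal, hedged sketch of such a helper. The Task layout is inferred
// from the call sites (an output mfxBitstream plus a sync point); the project's real Task
// struct and helper are not shown here and may differ.
struct TaskSketch {
    mfxBitstream mfxBS;  // encoded output for this task slot
    mfxSyncPoint syncp;  // non-NULL while the task is still in flight
};

static int GetFreeTaskIndexSketch(TaskSketch *pTaskPool, mfxU16 nPoolSize)
{
    if (pTaskPool)
        for (int i = 0; i < nPoolSize; i++)
            if (!pTaskPool[i].syncp)
                return i; // a slot whose sync point has been cleared is free for reuse
    return MFX_ERR_NOT_FOUND; // caller syncs the oldest task and retries
}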
mfxStatus QSV_Encoder_Internal::AllocateSurfaces()
{
    // Query number of required surfaces for encoder
    mfxFrameAllocRequest EncRequest;
    memset(&EncRequest, 0, sizeof(EncRequest));
    mfxStatus sts = m_pmfxENC->QueryIOSurf(&m_mfxEncParams, &EncRequest);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    EncRequest.NumFrameSuggested = EncRequest.NumFrameSuggested + m_mfxEncParams.AsyncDepth;
    EncRequest.Type |= WILL_WRITE;

    // Allocate required surfaces
    sts = m_mfxAllocator.Alloc(m_mfxAllocator.pthis, &EncRequest, &m_mfxResponse);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    m_nSurfNum = m_mfxResponse.NumFrameActual;

    m_pmfxSurfaces = new mfxFrameSurface1 *[m_nSurfNum];
    MSDK_CHECK_POINTER(m_pmfxSurfaces, MFX_ERR_MEMORY_ALLOC);

    for (int i = 0; i < m_nSurfNum; i++) {
        m_pmfxSurfaces[i] = new mfxFrameSurface1;
        memset(m_pmfxSurfaces[i], 0, sizeof(mfxFrameSurface1));
        memcpy(&(m_pmfxSurfaces[i]->Info), &(m_mfxEncParams.mfx.FrameInfo), sizeof(mfxFrameInfo));
        m_pmfxSurfaces[i]->Data.MemId = m_mfxResponse.mids[i];
    }

    return sts;
}
mfxStatus IntelDecoder::QueryAndAllocRequiredSurfacesForHW()
{
    mfxStatus sts = MFX_ERR_NONE;

    // Query number of required surfaces for decoder
    mfxFrameAllocRequest Request;
    memset(&Request, 0, sizeof(Request));
    sts = mfxDEC->QueryIOSurf(&mfxVideoParams, &Request);
    MSDK_IGNORE_MFX_STS(sts, MFX_WRN_PARTIAL_ACCELERATION);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    numSurfaces = Request.NumFrameSuggested;

    Request.Type |= WILL_READ; // Only required for Windows DirectX11 to ensure that surfaces can be retrieved by the application

    // Allocate surfaces for decoder
    //mfxFrameAllocResponse mfxResponse;
    sts = pMfxAllocator->Alloc(pMfxAllocator->pthis, &Request, &mfxResponse);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    // Allocate surface headers (mfxFrameSurface1) for decoder
    pmfxSurfaces = new mfxFrameSurface1 *[numSurfaces];
    MSDK_CHECK_POINTER(pmfxSurfaces, MFX_ERR_MEMORY_ALLOC);
    for (int i = 0; i < numSurfaces; i++) {
        pmfxSurfaces[i] = new mfxFrameSurface1;
        memset(pmfxSurfaces[i], 0, sizeof(mfxFrameSurface1));
        memcpy(&(pmfxSurfaces[i]->Info), &(mfxVideoParams.mfx.FrameInfo), sizeof(mfxFrameInfo));
        pmfxSurfaces[i]->Data.MemId = mfxResponse.mids[i]; // MID (memory id) represents one video NV12 surface
    }

    return sts;
}
mfxStatus QSV_Encoder_Internal::AllocateSurfaces()
{
    // Query number of required surfaces for encoder
    mfxFrameAllocRequest EncRequest;
    memset(&EncRequest, 0, sizeof(EncRequest));
    mfxStatus sts = m_pmfxENC->QueryIOSurf(&m_mfxEncParams, &EncRequest);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    EncRequest.Type |= WILL_WRITE;

    // SNB hack. On some SNB, it seems to require more surfaces
    EncRequest.NumFrameSuggested += m_mfxEncParams.AsyncDepth;

    // Allocate required surfaces
    if (m_bUseD3D11) {
        sts = m_mfxAllocator.Alloc(m_mfxAllocator.pthis, &EncRequest, &m_mfxResponse);
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

        m_nSurfNum = m_mfxResponse.NumFrameActual;

        m_pmfxSurfaces = new mfxFrameSurface1 *[m_nSurfNum];
        MSDK_CHECK_POINTER(m_pmfxSurfaces, MFX_ERR_MEMORY_ALLOC);

        for (int i = 0; i < m_nSurfNum; i++) {
            m_pmfxSurfaces[i] = new mfxFrameSurface1;
            memset(m_pmfxSurfaces[i], 0, sizeof(mfxFrameSurface1));
            memcpy(&(m_pmfxSurfaces[i]->Info), &(m_mfxEncParams.mfx.FrameInfo), sizeof(mfxFrameInfo));
            m_pmfxSurfaces[i]->Data.MemId = m_mfxResponse.mids[i];
        }
    } else {
        mfxU16 width = (mfxU16)MSDK_ALIGN32(EncRequest.Info.Width);
        mfxU16 height = (mfxU16)MSDK_ALIGN32(EncRequest.Info.Height);
        mfxU8 bitsPerPixel = 12;
        mfxU32 surfaceSize = width * height * bitsPerPixel / 8;
        m_nSurfNum = EncRequest.NumFrameSuggested;

        m_pmfxSurfaces = new mfxFrameSurface1 *[m_nSurfNum];
        for (int i = 0; i < m_nSurfNum; i++) {
            m_pmfxSurfaces[i] = new mfxFrameSurface1;
            memset(m_pmfxSurfaces[i], 0, sizeof(mfxFrameSurface1));
            memcpy(&(m_pmfxSurfaces[i]->Info), &(m_mfxEncParams.mfx.FrameInfo), sizeof(mfxFrameInfo));

            mfxU8 *pSurface = (mfxU8 *)new mfxU8[surfaceSize];
            m_pmfxSurfaces[i]->Data.Y = pSurface;
            m_pmfxSurfaces[i]->Data.U = pSurface + width * height;
            m_pmfxSurfaces[i]->Data.V = pSurface + width * height + 1;
            m_pmfxSurfaces[i]->Data.Pitch = width;
        }
    }

    blog(LOG_INFO, "\tm_nSurfNum: %d", m_nSurfNum);

    return sts;
}
mfxStatus GeneralAllocator::Init(mfxAllocatorParams *pParams)
{
    mfxStatus sts = MFX_ERR_NONE;

#if defined(_WIN32) || defined(_WIN64)
    D3DAllocatorParams *d3dAllocParams = dynamic_cast<D3DAllocatorParams *>(pParams);
    if (d3dAllocParams)
        m_D3DAllocator.reset(new D3DFrameAllocator);
#if MFX_D3D11_SUPPORT
    D3D11AllocatorParams *d3d11AllocParams = dynamic_cast<D3D11AllocatorParams *>(pParams);
    if (d3d11AllocParams)
        m_D3DAllocator.reset(new D3D11FrameAllocator);
#endif
#endif

#ifdef LIBVA_SUPPORT
    vaapiAllocatorParams *vaapiAllocParams = dynamic_cast<vaapiAllocatorParams *>(pParams);
    if (vaapiAllocParams)
        m_D3DAllocator.reset(new vaapiFrameAllocator);
#endif

    if (m_D3DAllocator.get()) {
        sts = m_D3DAllocator.get()->Init(pParams);
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
    }

    m_SYSAllocator.reset(new SysMemFrameAllocator);
    sts = m_SYSAllocator.get()->Init(0);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    return sts;
}
mfxStatus CRendererPipeline::CreateHWDevice()
{
    mfxStatus sts = MFX_ERR_NONE;

#if D3D_SURFACES_SUPPORT
    POINT point = {0, 0};
    HWND window = m_hParentWnd; // WindowFromPoint(point);

#if MFX_D3D11_SUPPORT
    if (D3D11_MEMORY == m_memType)
        m_hwdev = new CD3D11Device();
    else
#endif // #if MFX_D3D11_SUPPORT
        m_hwdev = new CD3D9Device();

    if (NULL == m_hwdev)
        return MFX_ERR_MEMORY_ALLOC;

    sts = m_hwdev->Init(window, 1,
                        MSDKAdapter::GetNumber(m_mfxSession) /* MSDKAdapter::GetNumber(GetFirstSession()) */);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

#elif LIBVA_SUPPORT
    m_hwdev = CreateVAAPIDevice();
    if (NULL == m_hwdev) {
        return MFX_ERR_MEMORY_ALLOC;
    }
    sts = m_hwdev->Init(NULL, 0, MSDKAdapter::GetNumber(GetFirstSession()));
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
#endif

    return MFX_ERR_NONE;
}
mfxStatus Initialize(mfxIMPL impl, mfxVersion ver, mfxSession *pSession, mfxFrameAllocator *pmfxAllocator, bool bCreateSharedHandles)
{
    mfxStatus sts = MFX_ERR_NONE;

    // Initialize Intel Media SDK Session
    sts = MFXInit(impl, &ver, pSession);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    // Create VA display
    mfxHDL displayHandle = {0};
    sts = CreateVAEnvDRM(&displayHandle);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    // Provide VA display handle to Media SDK
    sts = MFXVideoCORE_SetHandle(*pSession, MFX_HANDLE_VA_DISPLAY, displayHandle);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    // If mfxFrameAllocator is provided it means we need to setup memory allocator
    if (pmfxAllocator) {
        pmfxAllocator->pthis = *pSession; // We use Media SDK session ID as the allocation identifier
        pmfxAllocator->Alloc = simple_alloc;
        pmfxAllocator->Free = simple_free;
        pmfxAllocator->Lock = simple_lock;
        pmfxAllocator->Unlock = simple_unlock;
        pmfxAllocator->GetHDL = simple_gethdl;

        // Since we are using video memory we must provide Media SDK with an external allocator
        sts = MFXVideoCORE_SetFrameAllocator(*pSession, pmfxAllocator);
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
    }

    return sts;
}
mfxStatus QSV_Encoder_Internal::Reset(qsv_param_t *pParams)
{
    mfxStatus sts = ClearData();
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    sts = Open(pParams);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    return sts;
}
int main(int argc, char *argv[])
#endif
{
    sInputParams Params;         // input parameters from command line
    CDecodingPipeline Pipeline;  // pipeline for decoding: includes input file reader, decoder and output file writer

    mfxStatus sts = MFX_ERR_NONE; // return value check

    sts = ParseInputString(argv, (mfxU8)argc, &Params);
    MSDK_CHECK_PARSE_RESULT(sts, MFX_ERR_NONE, 1);

    if (Params.bIsMVC)
        Pipeline.SetMultiView();

    sts = Pipeline.Init(&Params);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, 1);

    // print stream info
    Pipeline.PrintInfo();

    msdk_printf(MSDK_STRING("Decoding started\n"));

    for (;;) {
        sts = Pipeline.RunDecoding();

        if (MFX_ERR_INCOMPATIBLE_VIDEO_PARAM == sts || MFX_ERR_DEVICE_LOST == sts || MFX_ERR_DEVICE_FAILED == sts) {
            if (MFX_ERR_INCOMPATIBLE_VIDEO_PARAM == sts) {
                msdk_printf(MSDK_STRING("\nERROR: Incompatible video parameters detected. Recovering...\n"));
            } else {
                msdk_printf(MSDK_STRING("\nERROR: Hardware device was lost or returned unexpected error. Recovering...\n"));
                sts = Pipeline.ResetDevice();
                MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, 1);
            }

            sts = Pipeline.ResetDecoder(&Params);
            MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, 1);
            continue;
        } else {
            MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, 1);
            break;
        }
    }

    msdk_printf(MSDK_STRING("\nDecoding finished\n"));

    return 0;
}
mfxStatus GeneralAllocator::Close()
{
    mfxStatus sts = MFX_ERR_NONE;

    if (m_D3DAllocator.get()) {
        sts = m_D3DAllocator.get()->Close();
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
    }

    sts = m_SYSAllocator.get()->Close();
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    return sts;
}
mfxStatus CD3D11Device::Reset()
{
    // Changing video mode back to the original state
    if (2 == m_nViews && !m_bDefaultStereoEnabled)
        m_pDisplayControl->SetStereoEnabled(FALSE);

    MSDK_CHECK_POINTER(m_pDXGIFactory.p, MFX_ERR_NULL_PTR);

    DXGI_SWAP_CHAIN_DESC1 swapChainDesc = {0};
    mfxStatus sts = FillSCD1(swapChainDesc);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    HRESULT hres = S_OK;
    hres = m_pDXGIFactory->CreateSwapChainForHwnd(m_pD3D11Device,
                                                  (HWND)m_HandleWindow,
                                                  &swapChainDesc,
                                                  NULL,
                                                  NULL,
                                                  reinterpret_cast<IDXGISwapChain1 **>(&m_pSwapChain));
    if (FAILED(hres))
        return MFX_ERR_DEVICE_FAILED;

    return MFX_ERR_NONE;
}
mfxStatus IntelDecoder::FlushDecoderAndRender()
{
    mfxStatus sts = MFX_ERR_NONE;
    mfxGetTime(&tStart);

    //
    // Stage 2: Retrieve the buffered decoded frames
    //
    while (MFX_ERR_NONE <= sts || MFX_ERR_MORE_SURFACE == sts) {
        if (MFX_WRN_DEVICE_BUSY == sts)
            MSDK_SLEEP(1); // Wait if device is busy, then repeat the same call to DecodeFrameAsync

        nIndex = GetFreeSurfaceIndex(pmfxSurfaces, numSurfaces); // Find free frame surface
        MSDK_CHECK_ERROR(MFX_ERR_NOT_FOUND, nIndex, MFX_ERR_MEMORY_ALLOC);

        // Decode a frame asynchronously (returns immediately)
        sts = mfxDEC->DecodeFrameAsync(NULL, pmfxSurfaces[nIndex], &pmfxOutSurface, &syncp);

        // Ignore warnings if output is available;
        // if there is no output and no action is required, just repeat the DecodeFrameAsync call
        if (MFX_ERR_NONE < sts && syncp)
            sts = MFX_ERR_NONE;

        if (MFX_ERR_NONE == sts)
            sts = pSession->SyncOperation(syncp, 60000); // Synchronize. Wait until decoded frame is ready

        if (MFX_ERR_NONE == sts) {
            ++nFrame;
            if (impl_type == MFX_IMPL_SOFTWARE) {
                outMan.Render(pmfxOutSurface);
            } else {
                // Surface locking is required when reading/writing D3D surfaces
                sts = pMfxAllocator->Lock(pMfxAllocator->pthis, pmfxOutSurface->Data.MemId, &(pmfxOutSurface->Data));
                MSDK_BREAK_ON_ERROR(sts);

                outMan.Render(pmfxOutSurface);

                sts = pMfxAllocator->Unlock(pMfxAllocator->pthis, pmfxOutSurface->Data.MemId, &(pmfxOutSurface->Data));
            }
            printf("Frame number: %d\r", nFrame);
            fflush(stdout);
        }
    }

    // MFX_ERR_MORE_DATA indicates that all buffered frames have been fetched; exit on any other error
    MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_DATA);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    mfxGetTime(&tEnd);
    elapsed += TimeDiffMsec(tEnd, tStart) / 1000;
    double fps = ((double)nFrame / elapsed);
    printf("\nExecution time: %3.2f s (%3.2f fps)\n", elapsed, fps);

    return sts;
}
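// The decode loops in FlushDecoderAndRender() above and RunDecodeAndRender() below pick an
// unlocked surface via GetFreeSurfaceIndex(), which is not shown in this listing. This is a
// minimal sketch of such a helper, assuming the surface pool layout built by the
// QueryAndAllocRequiredSurfaces routines in this listing; the real helper in the Media SDK
// sample utilities may differ.
static int GetFreeSurfaceIndexSketch(mfxFrameSurface1 **pSurfacesPool, mfxU16 nPoolSize)
{
    if (pSurfacesPool)
        for (mfxU16 i = 0; i < nPoolSize; i++)
            if (0 == pSurfacesPool[i]->Data.Locked)
                return i; // surface is not locked by the decoder, safe to reuse
    return MFX_ERR_NOT_FOUND;
}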
mfxStatus QSV_Encoder_Internal::GetVideoParam()
{
    memset(&m_parameter, 0, sizeof(m_parameter));

    mfxExtCodingOptionSPSPPS opt;
    memset(&opt, 0, sizeof(opt));
    opt.Header.BufferId = MFX_EXTBUFF_CODING_OPTION_SPSPPS;
    opt.Header.BufferSz = sizeof(mfxExtCodingOptionSPSPPS);

    static mfxExtBuffer *extendedBuffers[1];
    extendedBuffers[0] = (mfxExtBuffer *)&opt;

    m_parameter.ExtParam = extendedBuffers;
    m_parameter.NumExtParam = 1;

    opt.SPSBuffer = m_SPSBuffer;
    opt.PPSBuffer = m_PPSBuffer;
    opt.SPSBufSize = 100; // m_nSPSBufferSize;
    opt.PPSBufSize = 100; // m_nPPSBufferSize;

    mfxStatus sts = m_pmfxENC->GetVideoParam(&m_parameter);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    m_nSPSBufferSize = opt.SPSBufSize;
    m_nPPSBufferSize = opt.PPSBufSize;

    return sts;
}
mfxStatus Rotate::PluginClose()
{
    if (!m_bInited)
        return MFX_ERR_NONE;

    memset(&m_Param, 0, sizeof(RotateParam));

    MSDK_SAFE_DELETE_ARRAY(m_pChunks);
    MSDK_SAFE_DELETE_ARRAY(m_pTasks);

    mfxStatus sts = MFX_ERR_NONE;
    mfxExtOpaqueSurfaceAlloc *pluginOpaqueAlloc = NULL;

    if (m_bIsInOpaque || m_bIsOutOpaque) {
        pluginOpaqueAlloc = (mfxExtOpaqueSurfaceAlloc *)GetExtBuffer(m_VideoParam.ExtParam,
                                                                     m_VideoParam.NumExtParam,
                                                                     MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION);
        MSDK_CHECK_POINTER(pluginOpaqueAlloc, MFX_ERR_INVALID_VIDEO_PARAM);
    }

    // check existence of corresponding allocs
    if ((m_bIsInOpaque && !pluginOpaqueAlloc->In.Surfaces) || (m_bIsOutOpaque && !pluginOpaqueAlloc->Out.Surfaces))
        return MFX_ERR_INVALID_VIDEO_PARAM;

    MSDK_CHECK_POINTER(m_pmfxCore, MFX_ERR_NULL_PTR);

    if (m_bIsInOpaque) {
        sts = m_pmfxCore->UnmapOpaqueSurface(m_pmfxCore->pthis, pluginOpaqueAlloc->In.NumSurface,
                                             pluginOpaqueAlloc->In.Type, pluginOpaqueAlloc->In.Surfaces);
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, MFX_ERR_MEMORY_ALLOC);
    }

    if (m_bIsOutOpaque) {
        sts = m_pmfxCore->UnmapOpaqueSurface(m_pmfxCore->pthis, pluginOpaqueAlloc->Out.NumSurface,
                                             pluginOpaqueAlloc->Out.Type, pluginOpaqueAlloc->Out.Surfaces);
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, MFX_ERR_MEMORY_ALLOC);
    }

    MSDK_SAFE_DELETE(m_pmfxCore);

    m_bInited = false;

    return MFX_ERR_NONE;
}
mfxStatus Rotate::SetAuxParams(void *auxParam, int auxParamSize)
{
    RotateParam *pRotatePar = (RotateParam *)auxParam;
    MSDK_CHECK_POINTER(pRotatePar, MFX_ERR_NULL_PTR);

    // check validity of parameters
    mfxStatus sts = CheckParam(&m_VideoParam, pRotatePar);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    m_Param = *pRotatePar;
    return MFX_ERR_NONE;
}
mfxStatus CD3D9Device::Init(mfxHDL hWindow, mfxU16 nViews, mfxU32 nAdapterNum)
{
    mfxStatus sts = MFX_ERR_NONE;

    if (2 < nViews)
        return MFX_ERR_UNSUPPORTED;

    m_nViews = nViews;

    HRESULT hr = Direct3DCreate9Ex(D3D_SDK_VERSION, &m_pD3D9);
    if (!m_pD3D9 || FAILED(hr))
        return MFX_ERR_DEVICE_FAILED;

    ZeroMemory(&m_D3DPP, sizeof(m_D3DPP));
    sts = FillD3DPP(hWindow, nViews, m_D3DPP);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    hr = m_pD3D9->CreateDeviceEx(nAdapterNum,
                                 D3DDEVTYPE_HAL,
                                 (HWND)hWindow,
                                 D3DCREATE_SOFTWARE_VERTEXPROCESSING | D3DCREATE_MULTITHREADED | D3DCREATE_FPU_PRESERVE,
                                 &m_D3DPP,
                                 NULL,
                                 &m_pD3DD9);
    if (FAILED(hr))
        return MFX_ERR_NULL_PTR;

    if (hWindow) {
        hr = m_pD3DD9->ResetEx(&m_D3DPP, NULL);
        if (FAILED(hr))
            return MFX_ERR_UNDEFINED_BEHAVIOR;

        hr = m_pD3DD9->Clear(0, NULL, D3DCLEAR_TARGET, D3DCOLOR_XRGB(0, 0, 0), 1.0f, 0);
        if (FAILED(hr))
            return MFX_ERR_UNDEFINED_BEHAVIOR;
    }

    UINT resetToken = 0;

    hr = DXVA2CreateDirect3DDeviceManager9(&resetToken, &m_pDeviceManager9);
    if (FAILED(hr))
        return MFX_ERR_NULL_PTR;

    hr = m_pDeviceManager9->ResetDevice(m_pD3DD9, resetToken);
    if (FAILED(hr))
        return MFX_ERR_UNDEFINED_BEHAVIOR;

    m_resetToken = resetToken;

    return sts;
}
mfxStatus Initialize(mfxIMPL impl, mfxVersion ver, MFXVideoSession *pSession, mfxFrameAllocator *pmfxAllocator, bool bCreateSharedHandles)
{
    bCreateSharedHandles; // (Hugh) Currently unused
    pmfxAllocator;        // (Hugh) Currently unused

    mfxStatus sts = MFX_ERR_NONE;

    // If mfxFrameAllocator is provided it means we need to setup DirectX device and memory allocator
    if (pmfxAllocator) {
        // Initialize Intel Media SDK Session
        sts = pSession->Init(impl, &ver);
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

        // Create DirectX device context
        mfxHDL deviceHandle;
        sts = CreateHWDevice(*pSession, &deviceHandle, NULL, bCreateSharedHandles);
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

        // Provide device manager to Media SDK
        sts = pSession->SetHandle(DEVICE_MGR_TYPE, deviceHandle);
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

        pmfxAllocator->pthis = *pSession; // We use Media SDK session ID as the allocation identifier
        pmfxAllocator->Alloc = simple_alloc;
        pmfxAllocator->Free = simple_free;
        pmfxAllocator->Lock = simple_lock;
        pmfxAllocator->Unlock = simple_unlock;
        pmfxAllocator->GetHDL = simple_gethdl;

        // Since we are using video memory we must provide Media SDK with an external allocator
        sts = pSession->SetFrameAllocator(pmfxAllocator);
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
    } else {
        // Initialize Intel Media SDK Session
        sts = pSession->Init(impl, &ver);
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
    }

    return sts;
}
mfxStatus QSV_Encoder_Internal::Open(qsv_param_t *pParams)
{
    mfxStatus sts = MFX_ERR_NONE;

    if (m_bUseD3D11)
        // Use D3D11 surface
        sts = Initialize(m_impl, m_ver, &m_session, &m_mfxAllocator);
    else
        // Use system memory
        sts = Initialize(m_impl, m_ver, &m_session, NULL);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    m_pmfxENC = new MFXVideoENCODE(m_session);

    InitParams(pParams);

    sts = m_pmfxENC->Query(&m_mfxEncParams, &m_mfxEncParams);
    MSDK_IGNORE_MFX_STS(sts, MFX_WRN_INCOMPATIBLE_VIDEO_PARAM);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    sts = AllocateSurfaces();
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    sts = m_pmfxENC->Init(&m_mfxEncParams);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    sts = GetVideoParam();
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    sts = InitBitstream();
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    return sts;
}
/* Methods required for integration with Media SDK */
mfxStatus Rotate::PluginInit(mfxCoreInterface *core)
{
    MSDK_CHECK_POINTER(core, MFX_ERR_NULL_PTR);
    mfxStatus sts = MFX_ERR_NONE;

    MSDK_SAFE_DELETE(m_pmfxCore);

    m_pmfxCore = new mfxCoreInterface;
    MSDK_CHECK_POINTER(m_pmfxCore, MFX_ERR_MEMORY_ALLOC);
    *m_pmfxCore = *core;

    mfxCoreParam par = {0};
    sts = m_pmfxCore->GetCoreParam(m_pmfxCore->pthis, &par);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    m_impl = par.Impl;

    mfxHDL hdl = 0;
#if defined(_WIN32) || defined(_WIN64)
    if (MFX_IMPL_VIA_MASK(m_impl) == MFX_IMPL_VIA_D3D9) {
        sts = m_pmfxCore->GetHandle(m_pmfxCore->pthis, MFX_HANDLE_D3D9_DEVICE_MANAGER, &m_device);
    } else if (MFX_IMPL_VIA_MASK(m_impl) == MFX_IMPL_VIA_D3D11) {
        sts = m_pmfxCore->GetHandle(m_pmfxCore->pthis, MFX_HANDLE_D3D11_DEVICE, &m_device);
    } else {
        hdl = 0;
    }
#else
    sts = m_pmfxCore->GetHandle(m_pmfxCore->pthis, MFX_HANDLE_VA_DISPLAY, &m_device);
#endif

    // SW lib is used if GetHandle returns MFX_ERR_NOT_FOUND
    MSDK_IGNORE_MFX_STS(sts, MFX_ERR_NOT_FOUND);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    // if an external allocator is not set, use the one from the core interface
    if (!m_pAlloc && m_pmfxCore->FrameAllocator.pthis)
        m_pAlloc = &m_pmfxCore->FrameAllocator;

    return MFX_ERR_NONE;
}
mfxStatus CRendererPipeline::Init(mfxU16 nWidth, mfxU16 nHeight, MemType memType, HWND hParentWnd)
{
    mfxStatus sts = MFX_ERR_NONE;

    mfxVersion min_version;
    mfxVersion version; // real API version with which the library is initialized

    // we set the minimum version to 1.0 and later query the actual version of the library that gets loaded
    min_version.Major = 1;
    min_version.Minor = 0;

    //sts = m_mfxSession.Init(MFX_IMPL_SOFTWARE, &min_version);
    //sts = m_mfxSession.Init(MFX_IMPL_HARDWARE, &min_version);
    sts = m_mfxSession.Init(MFX_IMPL_RUNTIME, &min_version);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    sts = MFXQueryVersion(m_mfxSession, &version); // get real API version of the loaded library
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    // set frame geometry and memory type
    m_nHeight = nHeight;
    m_nWidth = nWidth;
    m_memType = memType;
    m_hParentWnd = hParentWnd;
    m_nY = m_nWidth * m_nHeight;
    m_nUV = (m_nY / 4);

    // create and init frame allocator
    sts = CreateAllocator();
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    sts = AllocFrames();
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    return MFX_ERR_NONE;
}
mfxStatus QSV_Encoder_Internal::Drain()
{
    mfxStatus sts = MFX_ERR_NONE;

    while (m_pTaskPool[m_nFirstSyncTask].syncp) {
        sts = m_session.SyncOperation(m_pTaskPool[m_nFirstSyncTask].syncp, 60000);
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

        m_pTaskPool[m_nFirstSyncTask].syncp = NULL;
        m_nFirstSyncTask = (m_nFirstSyncTask + 1) % m_nTaskPool;
    }

    return sts;
}
mfxStatus CRendererPipeline::Run(unsigned char *pData, int nLen)
{
    mfxStatus sts = MFX_ERR_NONE;

    mfxFrameSurface1 *pSurf = NULL; // dispatching pointer
    mfxU16 nEncSurfIdx = 0;         // index of free surface for encoder input (vpp output)

    sts = MFX_ERR_NONE;

    nEncSurfIdx = GetFreeSurface(m_pEncSurfaces, m_EncResponse.NumFrameActual);
    MSDK_CHECK_ERROR(nEncSurfIdx, MSDK_INVALID_SURF_IDX, MFX_ERR_MEMORY_ALLOC);

    // point pSurf to encoder surface
    pSurf = &m_pEncSurfaces[nEncSurfIdx];

    {
        // get YUV pointers
        sts = m_pMFXAllocator->Lock(m_pMFXAllocator->pthis, pSurf->Data.MemId, &(pSurf->Data));
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
    }

    pSurf->Info.FrameId.ViewId = 0;
    sts = LoadNextFrame(pSurf, pData, nLen);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    // ... after we're done, call Unlock
    {
        sts = m_pMFXAllocator->Unlock(m_pMFXAllocator->pthis, pSurf->Data.MemId, &(pSurf->Data));
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
    }

    if (D3D11_MEMORY != m_memType) {
        RenderFrame(pSurf, m_pMFXAllocator);
    }

    return sts;
}
mfxStatus QSV_Encoder_Internal::Open(qsv_param_t *pParams)
{
    mfxStatus sts = MFX_ERR_NONE;

    InitParams(pParams);

    sts = m_pmfxENC->Query(&m_mfxEncParams, &m_mfxEncParams);
    MSDK_IGNORE_MFX_STS(sts, MFX_WRN_INCOMPATIBLE_VIDEO_PARAM);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    sts = AllocateSurfaces();
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    sts = m_pmfxENC->Init(&m_mfxEncParams);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    sts = GetVideoParam();
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    sts = InitBitstream();
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    return sts;
}
mfxStatus CRendererPipeline::RenderFrame(mfxFrameSurface1 *pSurface, mfxFrameAllocator *pmfxAlloc)
{
    if (m_hwdev == NULL) {
        return MFX_ERR_UNKNOWN;
    }

    RECT rect;
    GetClientRect(m_hParentWnd, &rect);
    if (IsRectEmpty(&rect))
        return MFX_ERR_UNKNOWN;

    mfxStatus sts = m_hwdev->RenderFrame(pSurface, pmfxAlloc);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    return sts;
}
mfxStatus Rotate::Execute(mfxThreadTask task, mfxU32 uid_p, mfxU32 uid_a)
{
    MSDK_CHECK_ERROR(m_bInited, false, MFX_ERR_NOT_INITIALIZED);
    MSDK_CHECK_POINTER(m_pmfxCore, MFX_ERR_NOT_INITIALIZED);

    mfxStatus sts = MFX_ERR_NONE;

    RotateTask *current_task = (RotateTask *)task;

    if (uid_a < 1) {
        // there's data to process
        sts = current_task->pProcessor->Process(&m_pChunks[uid_a]);
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
    }

    return MFX_TASK_DONE;
}
mfxStatus IntelDecoder::QueryAndAllocRequiredSurfacesForSW()
{
    mfxStatus sts = MFX_ERR_NONE;

    // Query number of required surfaces for decoder
    mfxFrameAllocRequest DecRequest;
    memset(&DecRequest, 0, sizeof(DecRequest));
    sts = mfxDEC->QueryIOSurf(&mfxVideoParams, &DecRequest);
    MSDK_IGNORE_MFX_STS(sts, MFX_WRN_PARTIAL_ACCELERATION);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    numSurfaces = DecRequest.NumFrameSuggested;

    //VPPRequest[0].Type |= WILL_WRITE; // Only required for Windows DirectX11 to ensure that surfaces can be written to by the application
    //DecRequest.Type |= WILL_READ;     // Only required for Windows DirectX11 to ensure that surfaces can be retrieved by the application

    // Allocate surfaces for decoder
    // - Width and height of buffer must be aligned
    // - Frame surface array keeps pointers to all surface planes and general frame info
    mfxU16 width = (mfxU16)MSDK_ALIGN32(DecRequest.Info.Width);
    mfxU16 height = (mfxU16)MSDK_ALIGN16(DecRequest.Info.Height);
    mfxU8 bitsPerPixel = 12; // NV12 format is a 12 bits per pixel format
    mfxU32 surfaceSize = width * height * bitsPerPixel / 8;
    mfxU8 *surfaceBuffers = (mfxU8 *)new mfxU8[surfaceSize * numSurfaces];

    // Allocate surface headers (mfxFrameSurface1) for decoder
    pmfxSurfaces = new mfxFrameSurface1 *[numSurfaces];
    MSDK_CHECK_POINTER(pmfxSurfaces, MFX_ERR_MEMORY_ALLOC);
    for (int i = 0; i < numSurfaces; i++) {
        pmfxSurfaces[i] = new mfxFrameSurface1;
        memset(pmfxSurfaces[i], 0, sizeof(mfxFrameSurface1));
        memcpy(&(pmfxSurfaces[i]->Info), &(mfxVideoParams.mfx.FrameInfo), sizeof(mfxFrameInfo));
        pmfxSurfaces[i]->Data.Y = &surfaceBuffers[surfaceSize * i];
        pmfxSurfaces[i]->Data.U = pmfxSurfaces[i]->Data.Y + width * height;
        pmfxSurfaces[i]->Data.V = pmfxSurfaces[i]->Data.U + 1;
        pmfxSurfaces[i]->Data.Pitch = width;
    }

    return sts;
}
mfxStatus IntelDecoder::InitializeX(HWND hWnd)
{
    if (SetDecodeOptions() == MFX_ERR_NULL_PTR) {
        fprintf_s(stdout, "Source file couldn't be found.");
        return MFX_ERR_NULL_PTR;
    }

    // Open input H.264 elementary stream (ES) file
    MSDK_FOPEN(fSource, options.SourceName, "rb");
    MSDK_CHECK_POINTER(fSource, MFX_ERR_NULL_PTR);

    mfxIMPL impl = options.impl;
    // Version 1.3 is selected for Video Conference Mode compatibility.
    mfxVersion ver = { { 3, 1 } };

    pSession = new MFXVideoSession();
    pMfxAllocator = (mfxFrameAllocator *)malloc(sizeof(mfxFrameAllocator));
    memset(pMfxAllocator, 0, sizeof(mfxFrameAllocator));

    mfxStatus sts = Initialize(impl, ver, pSession, pMfxAllocator);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    // Query selected implementation
    sts = pSession->QueryIMPL(&impl_type);
    if (impl_type == MFX_IMPL_SOFTWARE) {
        printf("Implementation type is : SOFTWARE\n");
    } else {
        printf("Implementation type is : HARDWARE\n");
    }
    //impl_type = 2;

    // Create Media SDK decoder
    mfxDEC = new MFXVideoDECODE(*pSession);

    SetDecParameters();

    // Prepare Media SDK bit stream buffer
    memset(&mfxBS, 0, sizeof(mfxBS));
    mfxBS.DataFlag = MFX_BITSTREAM_COMPLETE_FRAME;
    mfxBS.MaxLength = 1024 * 1024;
    mfxBS.Data = new mfxU8[mfxBS.MaxLength];
    MSDK_CHECK_POINTER(mfxBS.Data, MFX_ERR_MEMORY_ALLOC);

    // Read a chunk of data from stream file into bit stream buffer
    // - Parse bit stream, searching for header and fill video parameters structure
    // - Abort if bit stream header is not found in the first bit stream buffer chunk
    sts = ReadBitStreamData(&mfxBS, fSource);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    sts = mfxDEC->DecodeHeader(&mfxBS, &mfxVideoParams);
    MSDK_IGNORE_MFX_STS(sts, MFX_WRN_PARTIAL_ACCELERATION);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    mfxVideoParams.AsyncDepth = 1;

    outMan.InitD3D(hWnd, mfxVideoParams.mfx.FrameInfo.CropW, mfxVideoParams.mfx.FrameInfo.CropH);

    // Allocate surfaces according to the selected implementation
    if (impl_type == MFX_IMPL_SOFTWARE) {
        sts = QueryAndAllocRequiredSurfacesForSW();
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
    } else {
        sts = QueryAndAllocRequiredSurfacesForHW();
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
    }

    // Initialize the Media SDK decoder
    sts = mfxDEC->Init(&mfxVideoParams);
    MSDK_IGNORE_MFX_STS(sts, MFX_WRN_PARTIAL_ACCELERATION);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    return sts;
}
mfxStatus CD3D11Device::Init(mfxHDL hWindow, mfxU16 nViews, mfxU32 nAdapterNum)
{
    mfxStatus sts = MFX_ERR_NONE;
    HRESULT hres = S_OK;

    m_nViews = nViews;
    if (2 < nViews)
        return MFX_ERR_UNSUPPORTED;

    m_bDefaultStereoEnabled = FALSE;

    static D3D_FEATURE_LEVEL FeatureLevels[] = {
        D3D_FEATURE_LEVEL_11_1,
        D3D_FEATURE_LEVEL_11_0,
        D3D_FEATURE_LEVEL_10_1,
        D3D_FEATURE_LEVEL_10_0
    };
    D3D_FEATURE_LEVEL pFeatureLevelsOut;

    hres = CreateDXGIFactory(__uuidof(IDXGIFactory2), (void **)(m_pDXGIFactory.Assign()));
    if (FAILED(hres))
        return MFX_ERR_DEVICE_FAILED;

    if (m_nViews == 2 && hWindow) {
        hres = m_pDXGIFactory->QueryInterface(__uuidof(IDXGIDisplayControl), (void **)m_pDisplayControl.Assign());
        if (FAILED(hres))
            return MFX_ERR_DEVICE_FAILED;

        m_bDefaultStereoEnabled = m_pDisplayControl->IsStereoEnabled();
        if (!m_bDefaultStereoEnabled)
            m_pDisplayControl->SetStereoEnabled(TRUE);
    }

    hres = m_pDXGIFactory->EnumAdapters(nAdapterNum, m_pAdapter.Assign());
    if (FAILED(hres))
        return MFX_ERR_DEVICE_FAILED;

    hres = D3D11CreateDevice(m_pAdapter,
                             D3D_DRIVER_TYPE_UNKNOWN,
                             NULL,
                             0,
                             FeatureLevels,
                             MSDK_ARRAY_LEN(FeatureLevels),
                             D3D11_SDK_VERSION,
                             m_pD3D11Device.Assign(),
                             &pFeatureLevelsOut,
                             m_pD3D11Ctx.Assign());
    if (FAILED(hres))
        return MFX_ERR_DEVICE_FAILED;

    m_pDXGIDev = m_pD3D11Device;
    m_pDX11VideoDevice = m_pD3D11Device;
    m_pVideoContext = m_pD3D11Ctx;

    MSDK_CHECK_POINTER(!!m_pDXGIDev, MFX_ERR_NULL_PTR);
    MSDK_CHECK_POINTER(!!m_pDX11VideoDevice, MFX_ERR_NULL_PTR);
    MSDK_CHECK_POINTER(!!m_pVideoContext, MFX_ERR_NULL_PTR);

    // turn on multithreading for the Context
    ComPtr<ID3D10Multithread> p_mt;
    p_mt = m_pVideoContext;
    if (p_mt)
        p_mt->SetMultithreadProtected(true);
    else
        return MFX_ERR_DEVICE_FAILED;

    // create swap chain only for rendering use case (hWindow != 0)
    DXGI_SWAP_CHAIN_DESC scd;
    if (hWindow) {
        ZeroMemory(&scd, sizeof(scd));
        sts = FillSCD(hWindow, scd);
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

        MSDK_CHECK_POINTER(!!m_pDXGIFactory, MFX_ERR_NULL_PTR);

        DXGI_SWAP_CHAIN_DESC1 swapChainDesc = {0};
        swapChainDesc.Width = 0;  // Use automatic sizing.
        swapChainDesc.Height = 0;
        swapChainDesc.Format = DXGI_FORMAT_B8G8R8A8_UNORM; // This is the most common swap chain format.
        swapChainDesc.Stereo = m_nViews == 2 ? TRUE : FALSE;
        swapChainDesc.SampleDesc.Count = 1; // Don't use multi-sampling.
        swapChainDesc.SampleDesc.Quality = 0;
        swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
        swapChainDesc.BufferCount = 2; // Use double buffering to minimize latency.
        swapChainDesc.Scaling = DXGI_SCALING_STRETCH;
        swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL;
        swapChainDesc.Flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH;

        hres = m_pDXGIFactory->CreateSwapChainForHwnd(m_pD3D11Device,
                                                      (HWND)hWindow,
                                                      &swapChainDesc,
                                                      NULL,
                                                      NULL,
                                                      reinterpret_cast<IDXGISwapChain1 **>(&m_pSwapChain));
        if (FAILED(hres))
            return MFX_ERR_DEVICE_FAILED;
    }

    return sts;
}
mfxStatus CD3D11Device::RenderFrame(mfxFrameSurface1 *pSrf, mfxFrameAllocator *pAlloc)
{
    HRESULT hres = S_OK;
    mfxStatus sts;

    sts = CreateVideoProcessor(pSrf);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    hres = m_pSwapChain->GetBuffer(0, __uuidof(ID3D11Texture2D), (void **)m_pDXGIBackBuffer.Assign());
    if (FAILED(hres))
        return MFX_ERR_DEVICE_FAILED;

    D3D11_VIDEO_PROCESSOR_OUTPUT_VIEW_DESC OutputViewDesc;
    if (2 == m_nViews) {
        m_pVideoContext->VideoProcessorSetStreamStereoFormat(m_pVideoProcessor, 0, TRUE,
                                                             D3D11_VIDEO_PROCESSOR_STEREO_FORMAT_SEPARATE,
                                                             TRUE, TRUE,
                                                             D3D11_VIDEO_PROCESSOR_STEREO_FLIP_NONE, NULL);
        m_pVideoContext->VideoProcessorSetOutputStereoMode(m_pVideoProcessor, TRUE);

        OutputViewDesc.ViewDimension = D3D11_VPOV_DIMENSION_TEXTURE2DARRAY;
        OutputViewDesc.Texture2DArray.ArraySize = 2;
        OutputViewDesc.Texture2DArray.MipSlice = 0;
        OutputViewDesc.Texture2DArray.FirstArraySlice = 0;
    } else {
        OutputViewDesc.ViewDimension = D3D11_VPOV_DIMENSION_TEXTURE2D;
        OutputViewDesc.Texture2D.MipSlice = 0;
    }

    if (1 == m_nViews || 0 == pSrf->Info.FrameId.ViewId) {
        hres = m_pDX11VideoDevice->CreateVideoProcessorOutputView(m_pDXGIBackBuffer,
                                                                  m_VideoProcessorEnum,
                                                                  &OutputViewDesc,
                                                                  m_pOutputView.Assign());
        if (FAILED(hres))
            return MFX_ERR_DEVICE_FAILED;
    }

    D3D11_VIDEO_PROCESSOR_INPUT_VIEW_DESC InputViewDesc;
    InputViewDesc.FourCC = 0;
    InputViewDesc.ViewDimension = D3D11_VPIV_DIMENSION_TEXTURE2D;
    InputViewDesc.Texture2D.MipSlice = 0;
    InputViewDesc.Texture2D.ArraySlice = 0;

    mfxHDLPair pair = {NULL};
    sts = pAlloc->GetHDL(pAlloc->pthis, pSrf->Data.MemId, (mfxHDL *)&pair);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    ID3D11Texture2D *pRTTexture2D = reinterpret_cast<ID3D11Texture2D *>(pair.first);
    D3D11_TEXTURE2D_DESC RTTexture2DDesc;

    if (!m_pTempTexture && m_nViews == 2) {
        pRTTexture2D->GetDesc(&RTTexture2DDesc);
        hres = m_pD3D11Device->CreateTexture2D(&RTTexture2DDesc, NULL, m_pTempTexture.Assign());
        if (FAILED(hres))
            return MFX_ERR_DEVICE_FAILED;
    }

    // Creating input views for left and right eyes
    if (1 == m_nViews) {
        hres = m_pDX11VideoDevice->CreateVideoProcessorInputView(pRTTexture2D,
                                                                 m_VideoProcessorEnum,
                                                                 &InputViewDesc,
                                                                 m_pInputViewLeft.Assign());
    } else if (2 == m_nViews && 0 == pSrf->Info.FrameId.ViewId) {
        m_pD3D11Ctx->CopyResource(m_pTempTexture, pRTTexture2D);
        hres = m_pDX11VideoDevice->CreateVideoProcessorInputView(m_pTempTexture,
                                                                 m_VideoProcessorEnum,
                                                                 &InputViewDesc,
                                                                 m_pInputViewLeft.Assign());
    } else {
        hres = m_pDX11VideoDevice->CreateVideoProcessorInputView(pRTTexture2D,
                                                                 m_VideoProcessorEnum,
                                                                 &InputViewDesc,
                                                                 m_pInputViewRight.Assign());
    }
    if (FAILED(hres))
        return MFX_ERR_DEVICE_FAILED;

    // NV12 surface to RGB backbuffer
    RECT rect = {0};
    rect.right = pSrf->Info.CropW;
    rect.bottom = pSrf->Info.CropH;

    D3D11_VIDEO_PROCESSOR_STREAM StreamData;

    if (1 == m_nViews || pSrf->Info.FrameId.ViewId == 1) {
        StreamData.Enable = TRUE;
        StreamData.OutputIndex = 0;
        StreamData.InputFrameOrField = 0;
        StreamData.PastFrames = 0;
        StreamData.FutureFrames = 0;
        StreamData.ppPastSurfaces = NULL;
        StreamData.ppFutureSurfaces = NULL;
        StreamData.pInputSurface = m_pInputViewLeft;
        StreamData.ppPastSurfacesRight = NULL;
        StreamData.ppFutureSurfacesRight = NULL;
        StreamData.pInputSurfaceRight = m_nViews == 2 ? m_pInputViewRight : NULL;

        m_pVideoContext->VideoProcessorSetStreamSourceRect(m_pVideoProcessor, 0, true, &rect);
        m_pVideoContext->VideoProcessorSetStreamFrameFormat(m_pVideoProcessor, 0, D3D11_VIDEO_FRAME_FORMAT_PROGRESSIVE);

        hres = m_pVideoContext->VideoProcessorBlt(m_pVideoProcessor, m_pOutputView, 0, 1, &StreamData);
        if (FAILED(hres))
            return MFX_ERR_DEVICE_FAILED;
    }

    if (1 == m_nViews || 1 == pSrf->Info.FrameId.ViewId) {
        DXGI_PRESENT_PARAMETERS parameters = {0};
        hres = m_pSwapChain->Present1(0, 0, &parameters);
        if (FAILED(hres))
            return MFX_ERR_DEVICE_FAILED;
    }

    return MFX_ERR_NONE;
}
mfxStatus IntelDecoder::RunDecodeAndRender()
{
    mfxStatus sts = MFX_ERR_NONE;

    // ===============================================================
    // Start decoding the frames from the stream
    //
    mfxGetTime(&tStart);

    pmfxOutSurface = NULL;
    pmfxOutSurface_sw = NULL;
    nIndex = 0;
    nIndex2 = 0;
    nFrame = 0;

    //
    // Stage 1: Main decoding loop
    //
    while (MFX_ERR_NONE <= sts || MFX_ERR_MORE_DATA == sts || MFX_ERR_MORE_SURFACE == sts) {
        if (MFX_WRN_DEVICE_BUSY == sts)
            MSDK_SLEEP(1); // Wait if device is busy, then repeat the same call to DecodeFrameAsync

        if (MFX_ERR_MORE_DATA == sts) {
            sts = ReadBitStreamData(&mfxBS, fSource); // Read more data into input bit stream
            MSDK_BREAK_ON_ERROR(sts);
        }

        if (MFX_ERR_MORE_SURFACE == sts || MFX_ERR_NONE == sts) {
            nIndex = GetFreeSurfaceIndex(pmfxSurfaces, numSurfaces); // Find free frame surface
            MSDK_CHECK_ERROR(MFX_ERR_NOT_FOUND, nIndex, MFX_ERR_MEMORY_ALLOC);
        }

        // Decode a frame asynchronously (returns immediately)
        // - If the input bitstream contains multiple frames, DecodeFrameAsync will start decoding them and remove them from the bitstream
        sts = mfxDEC->DecodeFrameAsync(&mfxBS, pmfxSurfaces[nIndex], &pmfxOutSurface, &syncp);

        // Ignore warnings if output is available;
        // if there is no output and no action is required, just repeat the DecodeFrameAsync call
        if (MFX_ERR_NONE < sts && syncp)
            sts = MFX_ERR_NONE;

        if (MFX_ERR_NONE == sts)
            sts = pSession->SyncOperation(syncp, 60000); // Synchronize. Wait until decoded frame is ready

        if (MFX_ERR_NONE == sts) {
            ++nFrame;
            if (impl_type == MFX_IMPL_SOFTWARE) {
                outMan.Render(pmfxOutSurface);
            } else {
                // Surface locking is required when reading/writing video surfaces
                sts = pMfxAllocator->Lock(pMfxAllocator->pthis, pmfxOutSurface->Data.MemId, &(pmfxOutSurface->Data));
                MSDK_BREAK_ON_ERROR(sts);

                outMan.Render(pmfxOutSurface);

                sts = pMfxAllocator->Unlock(pMfxAllocator->pthis, pmfxOutSurface->Data.MemId, &(pmfxOutSurface->Data));
                MSDK_BREAK_ON_ERROR(sts);
            }
            printf("Frame number: %d\r", nFrame);
            fflush(stdout);
        }
    }

    // MFX_ERR_MORE_DATA means the file has ended; proceed to the buffering loop, exit on any other error
    MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_DATA);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    mfxGetTime(&tEnd);
    elapsed = TimeDiffMsec(tEnd, tStart) / 1000;
    double fps = ((double)nFrame / elapsed);
    printf("\nExecution time: %3.2f s (%3.2f fps)\n", elapsed, fps);

    return sts;
}
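// RunDecodeAndRender() above refills mfxBS through ReadBitStreamData(), which is not shown
// in this listing. The following is a hedged sketch of such a refill helper, modeled only on
// how the loop consumes the bitstream (shift the unread tail to the front, then append fresh
// bytes from the elementary stream file); the real helper in the sample utilities may differ.
// Requires <cstdio> and <cstring> plus the Media SDK headers.
static mfxStatus ReadBitStreamDataSketch(mfxBitstream *pBS, FILE *fSource)
{
    // Move the unconsumed tail of the buffer to the beginning
    memmove(pBS->Data, pBS->Data + pBS->DataOffset, pBS->DataLength);
    pBS->DataOffset = 0;

    // Append new data from the file into the free space
    mfxU32 nBytesRead = (mfxU32)fread(pBS->Data + pBS->DataLength, 1,
                                      pBS->MaxLength - pBS->DataLength, fSource);
    if (0 == nBytesRead)
        return MFX_ERR_MORE_DATA; // end of stream: the caller switches to the draining stage

    pBS->DataLength += nBytesRead;
    return MFX_ERR_NONE;
}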