Example #1
mfxStatus ParseInputString(msdk_char* strInput[], mfxU8 nArgNum, sInputParams* pParams)
{
    if (1 == nArgNum)
    {
        PrintHelp(strInput[0], NULL);
        return MFX_ERR_UNSUPPORTED;
    }

    sResetParams resPar;

    MSDK_CHECK_POINTER(pParams, MFX_ERR_NULL_PTR);

    for (mfxU8 i = 1; i < nArgNum; i++)
    {
        // multi-character options
        if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-plugin_version")))
        {
            msdk_opt_read(strInput[++i], pParams->CameraPluginVersion);
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-accel")))
        {
            if(i + 1 >= nArgNum)
            {
                PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -accel key"));
                return MFX_ERR_UNSUPPORTED;
            }

            if (0 == msdk_strcmp(strInput[i+1], MSDK_STRING("d3d9")))
            {
                pParams->accelType = D3D9;
            }
            else if (0 == msdk_strcmp(strInput[i+1], MSDK_STRING("d3d11")))
            {
                pParams->accelType = D3D11;
            }
            else
            {
                PrintHelp(strInput[0], MSDK_STRING("Unsupported value for -accel key"));
                return MFX_ERR_UNSUPPORTED;
            }
            i++;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-imem")))
        {
            if(i + 1 >= nArgNum)
            {
                PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -imem key"));
                return MFX_ERR_UNSUPPORTED;
            }
            if (0 == msdk_strcmp(strInput[i+1], MSDK_STRING("system")))
            {
                pParams->memTypeIn = SYSTEM;
            }
            else if (0 == msdk_strcmp(strInput[i+1], MSDK_STRING("video")))
            {
                pParams->memTypeIn = VIDEO;
            }
            else
            {
                PrintHelp(strInput[0], MSDK_STRING("Unsupported value for -imem key"));
                return MFX_ERR_UNSUPPORTED;
            }
            i++;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-omem")))
        {
            if(i + 1 >= nArgNum)
            {
                PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -omem key"));
                return MFX_ERR_UNSUPPORTED;
            }
            if (0 == msdk_strcmp(strInput[i+1], MSDK_STRING("system")))
            {
                pParams->memTypeOut = SYSTEM;
            }
            else if (0 == msdk_strcmp(strInput[i+1], MSDK_STRING("video")))
            {
                pParams->memTypeOut = VIDEO;
            }
            else
            {
                PrintHelp(strInput[0], MSDK_STRING("Unsupported value for -omem key"));
                return MFX_ERR_UNSUPPORTED;
            }
            i++;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-r")) || 0 == msdk_strcmp(strInput[i], MSDK_STRING("-render")))
        {
            pParams->bRendering = true;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-a")) || 0 == msdk_strcmp(strInput[i], MSDK_STRING("-asyncDepth")))
        {
            msdk_opt_read(strInput[++i], pParams->asyncDepth);
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-n")) || 0 == msdk_strcmp(strInput[i], MSDK_STRING("-numFramesToProcess")))
        {
            msdk_opt_read(strInput[++i], pParams->nFramesToProceed);
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-ng")) || 0 == msdk_strcmp(strInput[i], MSDK_STRING("-noGamma")))
        {
            pParams->bGamma = false;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-3DLUT_gamma")))
        {
            pParams->b3DLUTGamma = false;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-bdn")) || 0 == msdk_strcmp(strInput[i], MSDK_STRING("-bayerDenoise")))
        {
            if(i + 1 >= nArgNum)
            {
                PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -bdn key"));
                return MFX_ERR_UNSUPPORTED;
            }
            pParams->bBayerDenoise = true;
            msdk_opt_read(strInput[++i], pParams->denoiseThreshold);
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-bbl")) || 0 == msdk_strcmp(strInput[i], MSDK_STRING("-bayerBlackLevel")))
        {
            if(i + 4 >= nArgNum)
            {
                PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -bbl key"));
                return MFX_ERR_UNSUPPORTED;
            }
            pParams->bBlackLevel = true;
            msdk_opt_read(strInput[++i], pParams->black_level_B);
            msdk_opt_read(strInput[++i], pParams->black_level_G0);
            msdk_opt_read(strInput[++i], pParams->black_level_G1);
            msdk_opt_read(strInput[++i], pParams->black_level_R);
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-hot_pixel")))
        {
            if(i + 2 >= nArgNum)
            {
                PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -hot_pixel key"));
                return MFX_ERR_UNSUPPORTED;
            }
            pParams->bHP = true;
            msdk_opt_read(strInput[++i], pParams->hp_diff);
            msdk_opt_read(strInput[++i], pParams->hp_num);
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-bwb")) || 0 == msdk_strcmp(strInput[i], MSDK_STRING("-bayerWhiteBalance")))
        {
            if(i + 4 >= nArgNum)
            {
                PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -bwb key"));
                return MFX_ERR_UNSUPPORTED;
            }
            pParams->bWhiteBalance = true;
            msdk_opt_read(strInput[++i], pParams->white_balance_B);
            msdk_opt_read(strInput[++i], pParams->white_balance_G0);
            msdk_opt_read(strInput[++i], pParams->white_balance_G1);
            msdk_opt_read(strInput[++i], pParams->white_balance_R);
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-lens")) )
        {
            if(i + 4 >= nArgNum)
            {
                PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -lens key"));
                return MFX_ERR_UNSUPPORTED;
            }
            pParams->bLens = true;
            msdk_opt_read(strInput[++i], pParams->lens_aR);
            msdk_opt_read(strInput[++i], pParams->lens_bR);
            msdk_opt_read(strInput[++i], pParams->lens_cR);
            msdk_opt_read(strInput[++i], pParams->lens_dR);
            pParams->lens_aB = pParams->lens_aG = pParams->lens_aR;
            pParams->lens_bB = pParams->lens_bG = pParams->lens_bR;
            pParams->lens_cB = pParams->lens_cG = pParams->lens_cR;
            pParams->lens_dB = pParams->lens_dG = pParams->lens_dR;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-chroma_aberration")) )
        {
            if(i + 12 >= nArgNum)
            {
                PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -chroma_aberration key"));
                return MFX_ERR_UNSUPPORTED;
            }
            pParams->bLens = true;
            msdk_opt_read(strInput[++i], pParams->lens_aR);
            msdk_opt_read(strInput[++i], pParams->lens_bR);
            msdk_opt_read(strInput[++i], pParams->lens_cR);
            msdk_opt_read(strInput[++i], pParams->lens_dR);
            msdk_opt_read(strInput[++i], pParams->lens_aB);
            msdk_opt_read(strInput[++i], pParams->lens_bB);
            msdk_opt_read(strInput[++i], pParams->lens_cB);
            msdk_opt_read(strInput[++i], pParams->lens_dB);
            msdk_opt_read(strInput[++i], pParams->lens_aG);
            msdk_opt_read(strInput[++i], pParams->lens_bG);
            msdk_opt_read(strInput[++i], pParams->lens_cG);
            msdk_opt_read(strInput[++i], pParams->lens_dG);
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-ccm")))
        {
            if(i + 9 >= nArgNum)
            {
                PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -ccm key."));
                return MFX_ERR_UNSUPPORTED;
            }
            pParams->bCCM = true;
            for(int k = 0; k < 3; k++)
                for (int z = 0; z < 3; z++)
                    msdk_opt_read(strInput[++i], pParams->CCM[k][z]);
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-gamma_points")))
        {
            if(i + 64 >= nArgNum)
            {
                PrintHelp(strInput[0], MSDK_STRING("There should be 64 points provided."));
                return MFX_ERR_UNSUPPORTED;
            }
            for(int k = 0; k < 64; k++)
                msdk_opt_read(strInput[++i],  pParams->gamma_point[k]);

            pParams->bExternalGammaLUT = true;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-gamma_corrected")))
        {
            if(i + 64 >= nArgNum)
            {
                PrintHelp(strInput[0], MSDK_STRING("There should be 64 points provided."));
                return MFX_ERR_UNSUPPORTED;
            }
            for(int k = 0; k < 64; k++)
                msdk_opt_read(strInput[++i],  pParams->gamma_corrected[k]);

            pParams->bExternalGammaLUT = true;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-pd")) || 0 == msdk_strcmp(strInput[i], MSDK_STRING("-padding")))
        {
            pParams->bDoPadding = true;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-vignette")))
        {
            pParams->bVignette = true;
            msdk_strcopy(pParams->strVignetteMaskFile, strInput[++i]);
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-i")))
        {
            msdk_strcopy(pParams->strSrcFile, strInput[++i]);
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-o")))
        {
            msdk_strcopy(pParams->strDstFile, strInput[++i]);
            pParams->bOutput = true;
            if (i + 1 < nArgNum)  {
                int n;
                // consume the optional bmp-count argument only if it actually parses as a number
                if (msdk_opt_read(strInput[i + 1], n) == MFX_ERR_NONE) {
                    pParams->maxNumBmpFiles = n;
                    i++;
                }
            }
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-f")) || 0 == msdk_strcmp(strInput[i], MSDK_STRING("-format")))
        {
            i++;
            if (0 == msdk_strcmp(strInput[i], MSDK_STRING("bggr")))
                pParams->bayerType     = MFX_CAM_BAYER_BGGR;
            else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("rggb")))
                pParams->bayerType     = MFX_CAM_BAYER_RGGB;
            else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("grbg")))
                pParams->bayerType     = MFX_CAM_BAYER_GRBG;
            else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("gbrg")))
                pParams->bayerType     = MFX_CAM_BAYER_GBRG;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-b")) || 0 == msdk_strcmp(strInput[i], MSDK_STRING("-bitDepth")))
        {
            msdk_opt_read(strInput[++i], pParams->bitDepth);
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-of")) || 0 == msdk_strcmp(strInput[i], MSDK_STRING("-outFormat")))
        {
            i++;
            if (0 == msdk_strcmp(strInput[i], MSDK_STRING("argb16")) || 0 == msdk_strcmp(strInput[i], MSDK_STRING("16")))
                pParams->frameInfo[VPP_OUT].FourCC = MFX_FOURCC_ARGB16;
            else
                pParams->frameInfo[VPP_OUT].FourCC = MFX_FOURCC_RGB4;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-w")) || 0 == msdk_strcmp(strInput[i], MSDK_STRING("-width")))
        {
            msdk_opt_read(strInput[++i], pParams->frameInfo[VPP_IN].nWidth);
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-3dlut")))
        {
            pParams->b3DLUT = true;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-h")) || 0 == msdk_strcmp(strInput[i], MSDK_STRING("-height")))
        {
            msdk_opt_read(strInput[++i], pParams->frameInfo[VPP_IN].nHeight);
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-cropW")))
        {
            msdk_opt_read(strInput[++i], pParams->frameInfo[VPP_IN].CropW);
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-cropH")))
        {
            msdk_opt_read(strInput[++i], pParams->frameInfo[VPP_IN].CropH);
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-cropX")))
        {
            msdk_opt_read(strInput[++i], pParams->frameInfo[VPP_IN].CropX);
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-cropY")))
        {
            msdk_opt_read(strInput[++i], pParams->frameInfo[VPP_IN].CropY);
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-alpha")))
        {
            msdk_opt_read(strInput[++i], pParams->alphaValue);
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-reset")))
        {
            resPar.bayerType  = pParams->bayerType;
            msdk_strcopy(resPar.strSrcFile, pParams->strSrcFile);
            msdk_strcopy(resPar.strDstFile, pParams->strDstFile);
            resPar.width = pParams->frameInfo[VPP_IN].nWidth;
            resPar.height = pParams->frameInfo[VPP_IN].nHeight;
            resPar.bHP     = pParams->bHP;
            resPar.hp_diff = pParams->hp_diff;
            resPar.hp_num  = pParams->hp_num;

            resPar.bBlackLevel    = pParams->bBlackLevel;
            resPar.black_level_B  = pParams->black_level_B;
            resPar.black_level_G0 = pParams->black_level_G0;
            resPar.black_level_G1 = pParams->black_level_G1;
            resPar.black_level_R  = pParams->black_level_R;
            i++;
            for (;i < nArgNum; i++)
            {
                if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-f")))
                {
                    i++;
                    if (0 == msdk_strcmp(strInput[i], MSDK_STRING("bggr")))
                        resPar.bayerType     = MFX_CAM_BAYER_BGGR;
                    else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("rggb")))
                        resPar.bayerType     = MFX_CAM_BAYER_RGGB;
                    else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("grbg")))
                        resPar.bayerType     = MFX_CAM_BAYER_GRBG;
                    else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("gbrg")))
                        resPar.bayerType     = MFX_CAM_BAYER_GBRG;
                }
                else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-bbl")))
                {
                    if(i + 4 >= nArgNum)
                    {
                        PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -bbl key"));
                        return MFX_ERR_UNSUPPORTED;
                    }
                    resPar.bBlackLevel = true;
                    msdk_opt_read(strInput[++i], resPar.black_level_B);
                    msdk_opt_read(strInput[++i], resPar.black_level_G0);
                    msdk_opt_read(strInput[++i], resPar.black_level_G1);
                    msdk_opt_read(strInput[++i], resPar.black_level_R);
                }
                else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-hot_pixel")))
                {
                    if(i + 2 >= nArgNum)
                    {
                        PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -hot_pixel key"));
                        return MFX_ERR_UNSUPPORTED;
                    }
                    resPar.bHP = true;
                    msdk_opt_read(strInput[++i], resPar.hp_diff);
                    msdk_opt_read(strInput[++i], resPar.hp_num);
                }
                else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-bdn")))
                {
                    if(i + 1 >= nArgNum)
                    {
                        PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -bdn key"));
                        return MFX_ERR_UNSUPPORTED;
                    }
                    resPar.bDenoise = true;
                    msdk_opt_read(strInput[++i], resPar.denoiseThreshold);
                }
                else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-bwb")))
                {
                    if(i + 4 >= nArgNum)
                    {
                        PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -bwb key"));
                        return MFX_ERR_UNSUPPORTED;
                    }
                    resPar.bWhiteBalance = true;
                    msdk_opt_read(strInput[++i], resPar.white_balance_B);
                    msdk_opt_read(strInput[++i], resPar.white_balance_G0);
                    msdk_opt_read(strInput[++i], resPar.white_balance_G1);
                    msdk_opt_read(strInput[++i], resPar.white_balance_R);
                }
                else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-ccm")))
                {
                    if(i + 9 >= nArgNum)
                    {
                        PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -ccm key."));
                        return MFX_ERR_UNSUPPORTED;
                    }
                    resPar.bCCM = true;
                    for(int k = 0; k < 3; k++)
                        for (int z = 0; z < 3; z++)
                            msdk_opt_read(strInput[++i], resPar.CCM[k][z]);
                }
                else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-w")))
                {
                    msdk_opt_read(strInput[++i], resPar.width);
                }
                else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-h")))
                {
                    msdk_opt_read(strInput[++i], resPar.height);
                }
                else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-cropW")))
                {
                    msdk_opt_read(strInput[++i], resPar.cropW);
                }
                else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-cropH")))
                {
                    msdk_opt_read(strInput[++i], resPar.cropH);
                }
                else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-cropX")))
                {
                    msdk_opt_read(strInput[++i], resPar.cropX);
                }
                else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-cropY")))
                {
                    msdk_opt_read(strInput[++i], resPar.cropY);
                }
                else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-vignette")))
                {
                    resPar.bVignette = true;
                    msdk_strcopy(resPar.strVignetteMaskFile, strInput[++i]);
                }
                else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-i")))
                {
                    msdk_strcopy(resPar.strSrcFile, strInput[++i]);
                }
                else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-o")))
                {
                    msdk_strcopy(resPar.strDstFile, strInput[++i]);
                }
                else
                {
                    i--;
                    break;
                }
            }
            pParams->resetParams.push_back(resPar);
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-resetInterval")))
        {
            msdk_opt_read(strInput[++i], pParams->resetInterval);
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-?")))
        {
            PrintHelp(strInput[0], NULL);
            return MFX_ERR_UNSUPPORTED;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-wall")))
        {
            if(i + 7 >= nArgNum)
            {
                PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -wall key"));
                return MFX_ERR_UNSUPPORTED;
            }
            pParams->bRendering = true;

            msdk_opt_read(strInput[++i], pParams->nWallW);
            msdk_opt_read(strInput[++i], pParams->nWallH);
            msdk_opt_read(strInput[++i], pParams->nWallCell);
            msdk_opt_read(strInput[++i], pParams->nWallMonitor);
            msdk_opt_read(strInput[++i], pParams->nWallFPS);

            int nTitle;
            msdk_opt_read(strInput[++i], nTitle);

            pParams->bWallNoTitle = 0 == nTitle;

            msdk_opt_read(strInput[++i], pParams->nWallTimeout);
        }
        else // 1-character options
        {
            std::basic_stringstream<msdk_char> stream;
            stream << MSDK_STRING("Unknown option: ") << strInput[i];
            PrintHelp(strInput[0], stream.str().c_str());
            return MFX_ERR_UNSUPPORTED;
        }
    }

    if (0 == msdk_strlen(pParams->strSrcFile))
    {
        PrintHelp(strInput[0], MSDK_STRING("Source file name not found"));
        return MFX_ERR_UNSUPPORTED;
    }

    if (0 == msdk_strlen(pParams->strDstFile))
    {
        pParams->bOutput = false;
    }

    return MFX_ERR_NONE;
}
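A minimal sketch of how ParseInputString above might be driven from the sample's entry point. The entry-point shape and the assumption that sInputParams default-initializes its members are illustrative only and not taken from this listing; it is shown for a build where msdk_char is plain char (a Unicode Windows build would use a _tmain-style entry instead).

// Hypothetical driver; assumes msdk_char is plain char so argv can be passed straight through.
int main(int argc, char* argv[])
{
    sInputParams Params;                         // assumed to default-initialize its members

    mfxStatus sts = ParseInputString(argv, (mfxU8)argc, &Params);
    if (MFX_ERR_NONE != sts)
        return 1;                                // help text was already printed by the parser

    // ... hand Params over to the camera pipeline setup ...
    return 0;
}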
mfxStatus Rotate::Submit(const mfxHDL *in, mfxU32 in_num, const mfxHDL *out, mfxU32 out_num, mfxThreadTask *task)
{
    MSDK_CHECK_POINTER(in, MFX_ERR_NULL_PTR);
    MSDK_CHECK_POINTER(out, MFX_ERR_NULL_PTR);
    MSDK_CHECK_POINTER(*in, MFX_ERR_NULL_PTR);
    MSDK_CHECK_POINTER(*out, MFX_ERR_NULL_PTR);
    MSDK_CHECK_POINTER(task, MFX_ERR_NULL_PTR);
    MSDK_CHECK_NOT_EQUAL(in_num, 1, MFX_ERR_UNSUPPORTED);
    MSDK_CHECK_NOT_EQUAL(out_num, 1, MFX_ERR_UNSUPPORTED);
    MSDK_CHECK_POINTER(m_pmfxCore, MFX_ERR_NOT_INITIALIZED);
    MSDK_CHECK_ERROR(m_bInited, false, MFX_ERR_NOT_INITIALIZED);

    mfxFrameSurface1 *surface_in = (mfxFrameSurface1 *)in[0];
    mfxFrameSurface1 *surface_out = (mfxFrameSurface1 *)out[0];
    mfxFrameSurface1 *real_surface_in = surface_in;
    mfxFrameSurface1 *real_surface_out = surface_out;

    mfxStatus sts = MFX_ERR_NONE;

    if (m_bIsInOpaque)
    {
        sts = m_pmfxCore->GetRealSurface(m_pmfxCore->pthis, surface_in, &real_surface_in);
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, MFX_ERR_MEMORY_ALLOC);
    }

    if (m_bIsOutOpaque)
    {
        sts = m_pmfxCore->GetRealSurface(m_pmfxCore->pthis, surface_out, &real_surface_out);
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, MFX_ERR_MEMORY_ALLOC);
    }

    // check validity of parameters
    sts = CheckInOutFrameInfo(&real_surface_in->Info, &real_surface_out->Info);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    mfxU32 ind = FindFreeTaskIdx();

    if (ind >= m_MaxNumTasks)
    {
        return MFX_WRN_DEVICE_BUSY; // currently there are no free tasks available
    }

    m_pmfxCore->IncreaseReference(m_pmfxCore->pthis, &(real_surface_in->Data));
    m_pmfxCore->IncreaseReference(m_pmfxCore->pthis, &(real_surface_out->Data));

    m_pTasks[ind].In = real_surface_in;
    m_pTasks[ind].Out = real_surface_out;
    m_pTasks[ind].bBusy = true;

    switch (m_Param.Angle)
    {
    case 180:
        if (m_bOpenCLSurfaceSharing)
        {
            m_pTasks[ind].pProcessor = new OpenCLFilterRotator180(m_OpenCLFilter.get());
        }
        else
        {
            m_pTasks[ind].pProcessor = new OpenCLRotator180(m_pOpenCLRotator180Context.get());
        }
        MSDK_CHECK_POINTER(m_pTasks[ind].pProcessor, MFX_ERR_MEMORY_ALLOC);
        break;
    default:
        return MFX_ERR_UNSUPPORTED;
    }

    m_pTasks[ind].pProcessor->SetAllocator(m_pAlloc);
    m_pTasks[ind].pProcessor->Init(real_surface_in, real_surface_out);

    *task = (mfxThreadTask)&m_pTasks[ind];

    return MFX_ERR_NONE;
}
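FindFreeTaskIdx() is called in Submit above but is not part of this excerpt. A minimal sketch of such a helper follows, using only the members already visible in Submit (m_pTasks, m_MaxNumTasks, bBusy); it is an approximation, not the sample's actual implementation.

// Hypothetical helper, sketched from the members referenced in Submit().
mfxU32 Rotate::FindFreeTaskIdx()
{
    mfxU32 i;
    for (i = 0; i < m_MaxNumTasks; i++)
    {
        if (false == m_pTasks[i].bBusy)
            break;                     // first task slot not currently in flight
    }
    return i;                          // equals m_MaxNumTasks when the pool is full,
                                       // which Submit maps to MFX_WRN_DEVICE_BUSY
}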
DWORD WINAPI TranscodeThread(LPVOID arg)
{
    ThreadData *pData = (ThreadData *)arg;
    int id = pData->id;

    mfxStatus sts = MFX_ERR_NONE;

    // =====================================================================
    // Intel Media SDK transcode opaque pipeline setup
    // - Transcode H.264 to H.264, resizing the encoded stream to half the resolution using VPP
    // - Multiple streams are transcoded concurrently
    // - Same input stream is used for all concurrent transcoding threads
    //

    // Open input H.264 elementary stream (ES) file
    FILE* fSource;
    char inFile[100] = "bbb640x480.264";
    fopen_s(&fSource, inFile, "rb");
    MSDK_CHECK_POINTER(fSource, MFX_ERR_NULL_PTR);

    // Create output elementary stream (ES) H.264 file
    FILE* fSink;
    char outFile[100] = "bbb320x240_xx.264";
    outFile[11] = '0' + (char)(id/10);
    outFile[12] = '0' + (char)(id%10);
    fopen_s(&fSink, outFile, "wb");
    MSDK_CHECK_POINTER(fSink, MFX_ERR_NULL_PTR);

    MFXVideoSession* pmfxSession = NULL;

    // Initialize Media SDK session
    // - MFX_IMPL_AUTO_ANY selects HW acceleration if available (on any adapter)
    // - Version 1.3 is selected since the opaque memory feature was added in this API release
    //   If more recent API features are needed, change the version accordingly
    mfxIMPL impl = MFX_IMPL_AUTO_ANY;
    mfxVersion ver = {3, 1}; // Note: API 1.3 !
    pmfxSession = new MFXVideoSession;
    MSDK_CHECK_POINTER(pmfxSession, MFX_ERR_NULL_PTR);
    sts = pmfxSession->Init(impl, &ver);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    // Create Media SDK decoder & encoder & VPP
    MFXVideoDECODE* pmfxDEC = new MFXVideoDECODE(*pmfxSession);
    MSDK_CHECK_POINTER(pmfxDEC, MFX_ERR_NULL_PTR);
    MFXVideoENCODE* pmfxENC = new MFXVideoENCODE(*pmfxSession); 
    MSDK_CHECK_POINTER(pmfxENC, MFX_ERR_NULL_PTR);
    MFXVideoVPP* pmfxVPP = new MFXVideoVPP(*pmfxSession); 
    MSDK_CHECK_POINTER(pmfxVPP, MFX_ERR_NULL_PTR);

    // Set required video parameters for decode
    mfxVideoParam mfxDecParams;
    memset(&mfxDecParams, 0, sizeof(mfxDecParams));
    mfxDecParams.mfx.CodecId = MFX_CODEC_AVC;
    mfxDecParams.IOPattern = MFX_IOPATTERN_OUT_OPAQUE_MEMORY;

    // Configure Media SDK to keep more operations in flight
    // - AsyncDepth represents the number of tasks that can be submitted, before synchronizing is required
    // - The choice of AsyncDepth = 3 is quite arbitrary but has proven to result in good performance
    mfxDecParams.AsyncDepth = 3;

    // Prepare Media SDK bit stream buffer for decoder
    // - Arbitrary buffer size for this example
    mfxBitstream mfxBS; 
    memset(&mfxBS, 0, sizeof(mfxBS));
    mfxBS.MaxLength = 1024 * 1024;
    mfxBS.Data = new mfxU8[mfxBS.MaxLength];
    MSDK_CHECK_POINTER(mfxBS.Data, MFX_ERR_MEMORY_ALLOC);

    // Read a chunk of data from stream file into bit stream buffer
    // - Parse bit stream, searching for header and fill video parameters structure
    // - Abort if bit stream header is not found in the first bit stream buffer chunk
    sts = ReadBitStreamData(&mfxBS, fSource);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
    
    sts = pmfxDEC->DecodeHeader(&mfxBS, &mfxDecParams);
    MSDK_IGNORE_MFX_STS(sts, MFX_WRN_PARTIAL_ACCELERATION);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    
    // Initialize VPP parameters
    mfxVideoParam VPPParams;
    memset(&VPPParams, 0, sizeof(VPPParams));
    // Input data
    VPPParams.vpp.In.FourCC         = MFX_FOURCC_NV12;
    VPPParams.vpp.In.ChromaFormat   = MFX_CHROMAFORMAT_YUV420;  
    VPPParams.vpp.In.CropX          = 0;
    VPPParams.vpp.In.CropY          = 0; 
    VPPParams.vpp.In.CropW          = mfxDecParams.mfx.FrameInfo.CropW;
    VPPParams.vpp.In.CropH          = mfxDecParams.mfx.FrameInfo.CropH;
    VPPParams.vpp.In.PicStruct      = MFX_PICSTRUCT_PROGRESSIVE;
    VPPParams.vpp.In.FrameRateExtN  = 30;
    VPPParams.vpp.In.FrameRateExtD  = 1;
    // width must be a multiple of 16 
    // height must be a multiple of 16 in case of frame picture and a multiple of 32 in case of field picture  
    VPPParams.vpp.In.Width  = MSDK_ALIGN16(VPPParams.vpp.In.CropW);
    VPPParams.vpp.In.Height = (MFX_PICSTRUCT_PROGRESSIVE == VPPParams.vpp.In.PicStruct)?
                                 MSDK_ALIGN16(VPPParams.vpp.In.CropH) : MSDK_ALIGN32(VPPParams.vpp.In.CropH);
    // Output data
    VPPParams.vpp.Out.FourCC        = MFX_FOURCC_NV12;     
    VPPParams.vpp.Out.ChromaFormat  = MFX_CHROMAFORMAT_YUV420;             
    VPPParams.vpp.Out.CropX         = 0;
    VPPParams.vpp.Out.CropY         = 0; 
    VPPParams.vpp.Out.CropW         = VPPParams.vpp.In.CropW/2;  // Half the resolution of decode stream
    VPPParams.vpp.Out.CropH         = VPPParams.vpp.In.CropH/2;
    VPPParams.vpp.Out.PicStruct     = MFX_PICSTRUCT_PROGRESSIVE;
    VPPParams.vpp.Out.FrameRateExtN = 30;
    VPPParams.vpp.Out.FrameRateExtD = 1;
    // width must be a multiple of 16 
    // height must be a multiple of 16 in case of frame picture and a multiple of 32 in case of field picture  
    VPPParams.vpp.Out.Width  = MSDK_ALIGN16(VPPParams.vpp.Out.CropW); 
    VPPParams.vpp.Out.Height = (MFX_PICSTRUCT_PROGRESSIVE == VPPParams.vpp.Out.PicStruct)?
                                    MSDK_ALIGN16(VPPParams.vpp.Out.CropH) : MSDK_ALIGN32(VPPParams.vpp.Out.CropH);

    VPPParams.IOPattern = MFX_IOPATTERN_IN_OPAQUE_MEMORY | MFX_IOPATTERN_OUT_OPAQUE_MEMORY;

    // Configure Media SDK to keep more operations in flight
    // - AsyncDepth represents the number of tasks that can be submitted, before synchronizing is required
    VPPParams.AsyncDepth = mfxDecParams.AsyncDepth;


    // Initialize encoder parameters
    mfxVideoParam mfxEncParams;
    memset(&mfxEncParams, 0, sizeof(mfxEncParams));
    mfxEncParams.mfx.CodecId                    = MFX_CODEC_AVC;
    mfxEncParams.mfx.TargetUsage                = MFX_TARGETUSAGE_BALANCED;
    mfxEncParams.mfx.TargetKbps                 = 500;
    mfxEncParams.mfx.RateControlMethod          = MFX_RATECONTROL_VBR; 
    mfxEncParams.mfx.FrameInfo.FrameRateExtN    = 30;
    mfxEncParams.mfx.FrameInfo.FrameRateExtD    = 1;
    mfxEncParams.mfx.FrameInfo.FourCC           = MFX_FOURCC_NV12;
    mfxEncParams.mfx.FrameInfo.ChromaFormat     = MFX_CHROMAFORMAT_YUV420;
    mfxEncParams.mfx.FrameInfo.PicStruct        = MFX_PICSTRUCT_PROGRESSIVE;
    mfxEncParams.mfx.FrameInfo.CropX            = 0; 
    mfxEncParams.mfx.FrameInfo.CropY            = 0;
    mfxEncParams.mfx.FrameInfo.CropW            = VPPParams.vpp.Out.CropW; // Half the resolution of decode stream
    mfxEncParams.mfx.FrameInfo.CropH            = VPPParams.vpp.Out.CropH;
    // width must be a multiple of 16 
    // height must be a multiple of 16 in case of frame picture and a multiple of 32 in case of field picture
    mfxEncParams.mfx.FrameInfo.Width = MSDK_ALIGN16(mfxEncParams.mfx.FrameInfo.CropW);
    mfxEncParams.mfx.FrameInfo.Height = (MFX_PICSTRUCT_PROGRESSIVE == mfxEncParams.mfx.FrameInfo.PicStruct)?
        MSDK_ALIGN16(mfxEncParams.mfx.FrameInfo.CropH) : MSDK_ALIGN32(mfxEncParams.mfx.FrameInfo.CropH);
    
    mfxEncParams.IOPattern = MFX_IOPATTERN_IN_OPAQUE_MEMORY;

    // Configure Media SDK to keep more operations in flight
    // - AsyncDepth represents the number of tasks that can be submitted, before synchronizing is required
    mfxEncParams.AsyncDepth = mfxDecParams.AsyncDepth;


    // Query number required surfaces for decoder
    mfxFrameAllocRequest DecRequest;
    memset(&DecRequest, 0, sizeof(DecRequest));
    sts = pmfxDEC->QueryIOSurf(&mfxDecParams, &DecRequest);
    MSDK_IGNORE_MFX_STS(sts, MFX_WRN_PARTIAL_ACCELERATION);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    // Query number required surfaces for encoder
    mfxFrameAllocRequest EncRequest;
    memset(&EncRequest, 0, sizeof(EncRequest));
    sts = pmfxENC->QueryIOSurf(&mfxEncParams, &EncRequest);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);            

    // Query number of required surfaces for VPP
    mfxFrameAllocRequest VPPRequest[2];// [0] - in, [1] - out
    memset(&VPPRequest, 0, sizeof(mfxFrameAllocRequest)*2);
    sts = pmfxVPP->QueryIOSurf(&VPPParams, VPPRequest);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);     


    // Determine the required number of surfaces for decoder output (VPP input) and for VPP output (encoder input)
    mfxU16 nSurfNumDecVPP = DecRequest.NumFrameSuggested + VPPRequest[0].NumFrameSuggested + VPPParams.AsyncDepth;
    mfxU16 nSurfNumVPPEnc = EncRequest.NumFrameSuggested + VPPRequest[1].NumFrameSuggested + VPPParams.AsyncDepth;


    // Initialize shared surfaces for decoder, VPP and encode 
    // - Note that no buffer memory is allocated, for opaque memory this is handled by Media SDK internally
    // - Frame surface array keeps reference to all surfaces
    // - Opaque memory is configured with the mfxExtOpaqueSurfaceAlloc extended buffers 
    mfxFrameSurface1** pSurfaces = new mfxFrameSurface1*[nSurfNumDecVPP];
    MSDK_CHECK_POINTER(pSurfaces, MFX_ERR_MEMORY_ALLOC);
    for (int i = 0; i < nSurfNumDecVPP; i++)
    {       
        pSurfaces[i] = new mfxFrameSurface1;
        MSDK_CHECK_POINTER(pSurfaces[i], MFX_ERR_MEMORY_ALLOC);
        memset(pSurfaces[i], 0, sizeof(mfxFrameSurface1));
        memcpy(&(pSurfaces[i]->Info), &(DecRequest.Info), sizeof(mfxFrameInfo));
    }

    mfxFrameSurface1** pSurfaces2 = new mfxFrameSurface1*[nSurfNumVPPEnc];
    MSDK_CHECK_POINTER(pSurfaces2, MFX_ERR_MEMORY_ALLOC);
    for (int i = 0; i < nSurfNumVPPEnc; i++)
    {       
        pSurfaces2[i] = new mfxFrameSurface1;
        MSDK_CHECK_POINTER(pSurfaces2[i], MFX_ERR_MEMORY_ALLOC);
        memset(pSurfaces2[i], 0, sizeof(mfxFrameSurface1));
        memcpy(&(pSurfaces2[i]->Info), &(EncRequest.Info), sizeof(mfxFrameInfo));
    }

    
    mfxExtOpaqueSurfaceAlloc extOpaqueAllocDec;
    memset(&extOpaqueAllocDec, 0, sizeof(extOpaqueAllocDec));
    extOpaqueAllocDec.Header.BufferId = MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION;
    extOpaqueAllocDec.Header.BufferSz = sizeof(mfxExtOpaqueSurfaceAlloc);
    mfxExtBuffer* pExtParamsDec = (mfxExtBuffer*)&extOpaqueAllocDec;

    mfxExtOpaqueSurfaceAlloc extOpaqueAllocVPP;
    memset(&extOpaqueAllocVPP, 0, sizeof(extOpaqueAllocVPP));
    extOpaqueAllocVPP.Header.BufferId = MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION;
    extOpaqueAllocVPP.Header.BufferSz = sizeof(mfxExtOpaqueSurfaceAlloc);
    mfxExtBuffer* pExtParamsVPP = (mfxExtBuffer*)&extOpaqueAllocVPP;

    mfxExtOpaqueSurfaceAlloc extOpaqueAllocEnc;
    memset(&extOpaqueAllocEnc, 0, sizeof(extOpaqueAllocEnc));
    extOpaqueAllocEnc.Header.BufferId = MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION;
    extOpaqueAllocEnc.Header.BufferSz = sizeof(mfxExtOpaqueSurfaceAlloc);
    mfxExtBuffer* pExtParamsENC = (mfxExtBuffer*)&extOpaqueAllocEnc;

    extOpaqueAllocDec.Out.Surfaces = pSurfaces;
    extOpaqueAllocDec.Out.NumSurface = nSurfNumDecVPP;
    extOpaqueAllocDec.Out.Type = DecRequest.Type;
    
    memcpy(&extOpaqueAllocVPP.In, &extOpaqueAllocDec.Out, sizeof(extOpaqueAllocDec.Out));
    extOpaqueAllocVPP.Out.Surfaces = pSurfaces2;
    extOpaqueAllocVPP.Out.NumSurface = nSurfNumVPPEnc;
    extOpaqueAllocVPP.Out.Type = EncRequest.Type;

    memcpy(&extOpaqueAllocEnc.In, &extOpaqueAllocVPP.Out, sizeof(extOpaqueAllocVPP.Out));

    mfxDecParams.ExtParam = &pExtParamsDec;
    mfxDecParams.NumExtParam = 1;
    VPPParams.ExtParam = &pExtParamsVPP;
    VPPParams.NumExtParam = 1;
    mfxEncParams.ExtParam = &pExtParamsENC;
    mfxEncParams.NumExtParam = 1;

    // Initialize the Media SDK decoder
    sts = pmfxDEC->Init(&mfxDecParams);
    MSDK_IGNORE_MFX_STS(sts, MFX_WRN_PARTIAL_ACCELERATION);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    // Initialize the Media SDK encoder
    sts = pmfxENC->Init(&mfxEncParams);
    MSDK_IGNORE_MFX_STS(sts, MFX_WRN_PARTIAL_ACCELERATION);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);    

    // Initialize Media SDK VPP
    sts = pmfxVPP->Init(&VPPParams);
    MSDK_IGNORE_MFX_STS(sts, MFX_WRN_PARTIAL_ACCELERATION);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);    

    // Retrieve video parameters selected by encoder.
    // - BufferSizeInKB parameter is required to set bit stream buffer size
    mfxVideoParam par;
    memset(&par, 0, sizeof(par));
    sts = pmfxENC->GetVideoParam(&par);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts); 

    // Create task pool to improve asynchronous performance (greater GPU utilization)
    mfxU16 taskPoolSize = mfxEncParams.AsyncDepth;  // number of tasks that can be submitted, before synchronizing is required
    Task* pTasks = new Task[taskPoolSize];
    memset(pTasks, 0, sizeof(Task) * taskPoolSize);
    for(int i=0;i<taskPoolSize;i++)
    {
        // Prepare Media SDK bit stream buffer
        pTasks[i].mfxBS.MaxLength = par.mfx.BufferSizeInKB * 1000;
        pTasks[i].mfxBS.Data = new mfxU8[pTasks[i].mfxBS.MaxLength];
        MSDK_CHECK_POINTER(pTasks[i].mfxBS.Data, MFX_ERR_MEMORY_ALLOC);
    }


    // ===================================
    // Start transcoding the frames
    //

#ifdef ENABLE_BENCHMARK
    LARGE_INTEGER tStart, tEnd;
    QueryPerformanceFrequency(&tStart);
    double freq = (double)tStart.QuadPart;
    QueryPerformanceCounter(&tStart);
#endif

    mfxSyncPoint syncpD, syncpV;
    mfxFrameSurface1* pmfxOutSurface = NULL;
    mfxU32 nFrame		= 0;
    int nIndex			= 0; 
    int nIndex2			= 0; 
    int nFirstSyncTask	= 0;
    int nTaskIdx		= 0;

    //
    // Stage 1: Main transcoding loop
    //
    while (MFX_ERR_NONE <= sts || MFX_ERR_MORE_DATA == sts || MFX_ERR_MORE_SURFACE == sts)          
    {
        nTaskIdx = GetFreeTaskIndex(pTasks, taskPoolSize); // Find free task
        if(MFX_ERR_NOT_FOUND == nTaskIdx)
        {
            // No more free tasks, need to sync
            sts = pmfxSession->SyncOperation(pTasks[nFirstSyncTask].syncp, 60000);
            MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

            sts = WriteBitStreamFrame(&pTasks[nFirstSyncTask].mfxBS, fSink);
            MSDK_BREAK_ON_ERROR(sts);

            pTasks[nFirstSyncTask].syncp = NULL;
            pTasks[nFirstSyncTask].mfxBS.DataLength = 0;
            pTasks[nFirstSyncTask].mfxBS.DataOffset = 0;
            nFirstSyncTask = (nFirstSyncTask + 1) % taskPoolSize;

            ++nFrame;
#ifdef ENABLE_OUTPUT
            if((nFrame % 100) == 0)
                printf("(%d) Frame number: %d\n", id, nFrame);
#endif
        }
        else
        {
            if (MFX_WRN_DEVICE_BUSY == sts)
                Sleep(1); // just wait and then repeat the same call to DecodeFrameAsync

            if (MFX_ERR_MORE_DATA == sts)
            {
                sts = ReadBitStreamData(&mfxBS, fSource); // Read more data to input bit stream
                MSDK_BREAK_ON_ERROR(sts);            
            }

            if (MFX_ERR_MORE_SURFACE == sts || MFX_ERR_NONE == sts)
            {
                nIndex = GetFreeSurfaceIndex(pSurfaces, nSurfNumDecVPP); // Find free frame surface 
                if (MFX_ERR_NOT_FOUND == nIndex)
                    return MFX_ERR_MEMORY_ALLOC;
            }
        
            // Decode a frame asynchronously (returns immediately)
            sts = pmfxDEC->DecodeFrameAsync(&mfxBS, pSurfaces[nIndex], &pmfxOutSurface, &syncpD);

            // Ignore warnings if output is available, 
            // if no output and no action required just repeat the DecodeFrameAsync call
            if (MFX_ERR_NONE < sts && syncpD) 
                sts = MFX_ERR_NONE;               
        
            if (MFX_ERR_NONE == sts)
            {         
                nIndex2 = GetFreeSurfaceIndex(pSurfaces2, nSurfNumVPPEnc); // Find free frame surface 
                if (MFX_ERR_NOT_FOUND == nIndex2)
                    return MFX_ERR_MEMORY_ALLOC;

                for (;;)
                {
                    // Process a frame asynchronously (returns immediately)
                    sts = pmfxVPP->RunFrameVPPAsync(pmfxOutSurface, pSurfaces2[nIndex2], NULL, &syncpV);

                    if (MFX_ERR_NONE < sts && !syncpV) // repeat the call if warning and no output
                    {
                        if (MFX_WRN_DEVICE_BUSY == sts)
                            Sleep(1); // wait if device is busy
                    }
                    else if (MFX_ERR_NONE < sts && syncpV)                 
                    {
                        sts = MFX_ERR_NONE; // ignore warnings if output is available                                    
                        break;
                    }
                    else 
                        break; // not a warning               
                } 

                // VPP needs more data, let decoder decode another frame as input   
                if (MFX_ERR_MORE_DATA == sts)
                {
                    continue;
                }
                else if (MFX_ERR_MORE_SURFACE == sts)
                {
                    // Not relevant for the illustrated workload! Therefore not handled.
                    // Relevant for cases when VPP produces more frames at output than consumes at input. E.g. framerate conversion 30 fps -> 60 fps
                    break;
                }
                else
                    MSDK_BREAK_ON_ERROR(sts); 

                for (;;)
                {    
                    // Encode a frame asynchronously (returns immediately)
                    sts = pmfxENC->EncodeFrameAsync(NULL, pSurfaces2[nIndex2], &pTasks[nTaskIdx].mfxBS, &pTasks[nTaskIdx].syncp); 		
            
                    if (MFX_ERR_NONE < sts && !pTasks[nTaskIdx].syncp) // repeat the call if warning and no output
                    {
                        if (MFX_WRN_DEVICE_BUSY == sts)                
                            Sleep(1); // wait if device is busy                
                    }
                    else if (MFX_ERR_NONE < sts && pTasks[nTaskIdx].syncp)                 
                    {
                        sts = MFX_ERR_NONE; // ignore warnings if output is available                                    
                        break;
                    }
                    else if (MFX_ERR_NOT_ENOUGH_BUFFER == sts)
                    {
                        // Allocate more bitstream buffer memory here if needed...
                        break;                
                    }
                    else
                        break;
                }  
            }
        }     
    }   

    // MFX_ERR_MORE_DATA means that the file has ended; continue with the buffering loops, exit in case of other errors
    MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_DATA);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);          
      
    //
    // Stage 2: Retrieve the buffered decoded frames
    //
    while (MFX_ERR_NONE <= sts || MFX_ERR_MORE_SURFACE == sts)        
    {        
        nTaskIdx = GetFreeTaskIndex(pTasks, taskPoolSize); // Find free task
        if(MFX_ERR_NOT_FOUND == nTaskIdx)
        {
            // No more free tasks, need to sync
            sts = pmfxSession->SyncOperation(pTasks[nFirstSyncTask].syncp, 60000);
            MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

            sts = WriteBitStreamFrame(&pTasks[nFirstSyncTask].mfxBS, fSink);
            MSDK_BREAK_ON_ERROR(sts);

            pTasks[nFirstSyncTask].syncp = NULL;
            pTasks[nFirstSyncTask].mfxBS.DataLength = 0;
            pTasks[nFirstSyncTask].mfxBS.DataOffset = 0;
            nFirstSyncTask = (nFirstSyncTask + 1) % taskPoolSize;

            ++nFrame;
#ifdef ENABLE_OUTPUT
            if((nFrame % 100) == 0)
                printf("(%d) Frame number: %d\n", id, nFrame);
#endif
        }
        else
        {
            if (MFX_WRN_DEVICE_BUSY == sts)
                Sleep(1);

            nIndex = GetFreeSurfaceIndex(pSurfaces, nSurfNumDecVPP); // Find free frame surface
            if (MFX_ERR_NOT_FOUND == nIndex)
                return MFX_ERR_MEMORY_ALLOC;            

            // Decode a frame asynchronously (returns immediately)
            sts = pmfxDEC->DecodeFrameAsync(NULL, pSurfaces[nIndex], &pmfxOutSurface, &syncpD);

            // Ignore warnings if output is available, 
            // if no output and no action required just repeat the DecodeFrameAsync call       
            if (MFX_ERR_NONE < sts && syncpD) 
                sts = MFX_ERR_NONE;

            if (MFX_ERR_NONE == sts)
            {
                nIndex2 = GetFreeSurfaceIndex(pSurfaces2, nSurfNumVPPEnc); // Find free frame surface 
                if (MFX_ERR_NOT_FOUND == nIndex2)
                    return MFX_ERR_MEMORY_ALLOC;

                for (;;)
                {
                    // Process a frame asynchronously (returns immediately)
                    sts = pmfxVPP->RunFrameVPPAsync(pmfxOutSurface, pSurfaces2[nIndex2], NULL, &syncpV);

                    if (MFX_ERR_NONE < sts && !syncpV) // repeat the call if warning and no output
                    {
                        if (MFX_WRN_DEVICE_BUSY == sts)
                            Sleep(1); // wait if device is busy
                    }
                    else if (MFX_ERR_NONE < sts && syncpV)                 
                    {
                        sts = MFX_ERR_NONE; // ignore warnings if output is available                                    
                        break;
                    }
                    else 
                        break; // not a warning               
                } 

                // VPP needs more data, let decoder decode another frame as input   
                if (MFX_ERR_MORE_DATA == sts)
                {
                    continue;
                }
                else if (MFX_ERR_MORE_SURFACE == sts)
                {
                    // Not relevant for the illustrated workload! Therefore not handled.
                    // Relevant for cases when VPP produces more frames at output than consumes at input. E.g. framerate conversion 30 fps -> 60 fps
                    break;
                }
                else
                    MSDK_BREAK_ON_ERROR(sts); 

                for (;;)
                {    
                    // Encode a frame asynchronously (returns immediately)
                    sts = pmfxENC->EncodeFrameAsync(NULL, pSurfaces2[nIndex2], &pTasks[nTaskIdx].mfxBS, &pTasks[nTaskIdx].syncp); 		
            
                    if (MFX_ERR_NONE < sts && !pTasks[nTaskIdx].syncp) // repeat the call if warning and no output
                    {
                        if (MFX_WRN_DEVICE_BUSY == sts)                
                            Sleep(1); // wait if device is busy                
                    }
                    else if (MFX_ERR_NONE < sts && pTasks[nTaskIdx].syncp)                 
                    {
                        sts = MFX_ERR_NONE; // ignore warnings if output is available                                    
                        break;
                    }
                    else if (MFX_ERR_NOT_ENOUGH_BUFFER == sts)
                    {
                        // Allocate more bitstream buffer memory here if needed...
                        break;                
                    }
                    else
                        break;
                }     
            }
        }
    }

    // MFX_ERR_MORE_DATA indicates that all decode buffers have been fetched, exit in case of other errors
    MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_DATA);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    //
    // Stage 3: Retrieve buffered frames from VPP
    //
    while (MFX_ERR_NONE <= sts || MFX_ERR_MORE_DATA == sts || MFX_ERR_MORE_SURFACE == sts)
    {
        nTaskIdx = GetFreeTaskIndex(pTasks, taskPoolSize); // Find free task
        if(MFX_ERR_NOT_FOUND == nTaskIdx)
        {
            // No more free tasks, need to sync
            sts = pmfxSession->SyncOperation(pTasks[nFirstSyncTask].syncp, 60000);
            MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

            sts = WriteBitStreamFrame(&pTasks[nFirstSyncTask].mfxBS, fSink);
            MSDK_BREAK_ON_ERROR(sts);

            pTasks[nFirstSyncTask].syncp = NULL;
            pTasks[nFirstSyncTask].mfxBS.DataLength = 0;
            pTasks[nFirstSyncTask].mfxBS.DataOffset = 0;
            nFirstSyncTask = (nFirstSyncTask + 1) % taskPoolSize;

            ++nFrame;
#ifdef ENABLE_OUTPUT
            if((nFrame % 100) == 0)
                printf("(%d) Frame number: %d\n", id, nFrame);
#endif
        }
        else
        {
            nIndex2 = GetFreeSurfaceIndex(pSurfaces2, nSurfNumVPPEnc); // Find free frame surface 
            if (MFX_ERR_NOT_FOUND == nIndex2)
                return MFX_ERR_MEMORY_ALLOC;

            for (;;)
            {
                // Process a frame asynchronously (returns immediately)
                sts = pmfxVPP->RunFrameVPPAsync(NULL, pSurfaces2[nIndex2], NULL, &syncpV);

                if (MFX_ERR_NONE < sts && !syncpV) // repeat the call if warning and no output
                {
                    if (MFX_WRN_DEVICE_BUSY == sts)
                        Sleep(1); // wait if device is busy
                }
                else if (MFX_ERR_NONE < sts && syncpV)                 
                {
                    sts = MFX_ERR_NONE; // ignore warnings if output is available                                    
                    break;
                }
                else 
                    break; // not a warning               
            } 

            if (MFX_ERR_MORE_SURFACE == sts)
            {
                // Not relevant for the illustrated workload! Therefore not handled.
                // Relevant for cases when VPP produces more frames at output than consumes at input. E.g. framerate conversion 30 fps -> 60 fps
                break;
            }
            else
                MSDK_BREAK_ON_ERROR(sts); 

            for (;;)
            {    
                // Encode a frame asynchronously (returns immediately)
                sts = pmfxENC->EncodeFrameAsync(NULL, pSurfaces2[nIndex2], &pTasks[nTaskIdx].mfxBS, &pTasks[nTaskIdx].syncp); 		
            
                if (MFX_ERR_NONE < sts && !pTasks[nTaskIdx].syncp) // repeat the call if warning and no output
                {
                    if (MFX_WRN_DEVICE_BUSY == sts)                
                        Sleep(1); // wait if device is busy                
                }
                else if (MFX_ERR_NONE < sts && pTasks[nTaskIdx].syncp)                 
                {
                    sts = MFX_ERR_NONE; // ignore warnings if output is available                                    
                    break;
                }
                else if (MFX_ERR_NOT_ENOUGH_BUFFER == sts)
                {
                    // Allocate more bitstream buffer memory here if needed...
                    break;                
                }
                else
                    break;
            }     
            
        }
    }

    // MFX_ERR_MORE_DATA indicates that all VPP buffers have been fetched, exit in case of other errors
    MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_DATA);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    //
    // Stage 4: Retrieve the buffered encoded frames
    //
    while (MFX_ERR_NONE <= sts)
    {       
        nTaskIdx = GetFreeTaskIndex(pTasks, taskPoolSize); // Find free task
        if(MFX_ERR_NOT_FOUND == nTaskIdx)
        {
            // No more free tasks, need to sync
            sts = pmfxSession->SyncOperation(pTasks[nFirstSyncTask].syncp, 60000);
            MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

            sts = WriteBitStreamFrame(&pTasks[nFirstSyncTask].mfxBS, fSink);
            MSDK_BREAK_ON_ERROR(sts);

            pTasks[nFirstSyncTask].syncp = NULL;
            pTasks[nFirstSyncTask].mfxBS.DataLength = 0;
            pTasks[nFirstSyncTask].mfxBS.DataOffset = 0;
            nFirstSyncTask = (nFirstSyncTask + 1) % taskPoolSize;

            ++nFrame;
#ifdef ENABLE_OUTPUT
            if((nFrame % 100) == 0)
                printf("(%d) Frame number: %d\n", id, nFrame);
#endif
        }
        else
        {
            for (;;)
            {                
                // Encode a frame asynchronously (returns immediately)
                sts = pmfxENC->EncodeFrameAsync(NULL, NULL, &pTasks[nTaskIdx].mfxBS, &pTasks[nTaskIdx].syncp); 	

                if (MFX_ERR_NONE < sts && !pTasks[nTaskIdx].syncp) // repeat the call if warning and no output
                {
                    if (MFX_WRN_DEVICE_BUSY == sts)                
                        Sleep(1); // wait if device is busy                
                }
                else if (MFX_ERR_NONE < sts && pTasks[nTaskIdx].syncp)                 
                {
                    sts = MFX_ERR_NONE; // ignore warnings if output is available                                    
                    break;
                }
                else
                    break;
            }   
        }
    }    

    // MFX_ERR_MORE_DATA indicates that there are no more buffered frames, exit in case of other errors
    MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_DATA);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    //
    // Stage 5: Sync all remaining tasks in task pool
    //
    while(pTasks[nFirstSyncTask].syncp)
    {
        sts = pmfxSession->SyncOperation(pTasks[nFirstSyncTask].syncp, 60000);
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

        sts = WriteBitStreamFrame(&pTasks[nFirstSyncTask].mfxBS, fSink);
        MSDK_BREAK_ON_ERROR(sts);

        pTasks[nFirstSyncTask].syncp = NULL;
        pTasks[nFirstSyncTask].mfxBS.DataLength = 0;
        pTasks[nFirstSyncTask].mfxBS.DataOffset = 0;
        nFirstSyncTask = (nFirstSyncTask + 1) % taskPoolSize;

        ++nFrame;
#ifdef ENABLE_OUTPUT
        if((nFrame % 100) == 0)
            printf("(%d) Frame number: %d\n", id, nFrame);
#endif
    }

#ifdef ENABLE_BENCHMARK
    QueryPerformanceCounter(&tEnd);
    double duration = ((double)tEnd.QuadPart - (double)tStart.QuadPart)  / freq;
    printf("\n[%d] Execution time: %3.2fs (%3.2ffps)\n", pData->id, duration, nFrame/duration);
#endif

    // ===================================================================
    // Clean up resources
    //  - It is recommended to close Media SDK components first, before releasing allocated surfaces, since
    //    some surfaces may still be locked by internal Media SDK resources.
    
    pmfxENC->Close();
    pmfxDEC->Close();
    pmfxVPP->Close();

    delete pmfxENC;
    delete pmfxDEC;
    delete pmfxVPP;

    pmfxSession->Close();
    delete pmfxSession;

    for (int i = 0; i < nSurfNumDecVPP; i++)
        delete pSurfaces[i];
    for (int i = 0; i < nSurfNumVPPEnc; i++)
        delete pSurfaces2[i];
    MSDK_SAFE_DELETE_ARRAY(pSurfaces);
    MSDK_SAFE_DELETE_ARRAY(pSurfaces2);
    MSDK_SAFE_DELETE_ARRAY(mfxBS.Data);
    for(int i=0;i<taskPoolSize;i++)
        MSDK_SAFE_DELETE_ARRAY(pTasks[i].mfxBS.Data);
    MSDK_SAFE_DELETE_ARRAY(pTasks);

    fclose(fSource);
    fclose(fSink);

    return 0;
}
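TranscodeThread above is written as a Win32 thread procedure; a hedged sketch of launching several concurrent transcoding threads from the process entry point is shown below. Only the id field of ThreadData is visible in the listing, so the rest of the structure and the thread count here are assumptions.

// Sketch only: nThreads is an arbitrary example value; ThreadData is assumed to
// carry at least the integer id read at the top of TranscodeThread().
int main()
{
    const int nThreads = 4;
    ThreadData data[nThreads];
    HANDLE hThreads[nThreads];

    for (int i = 0; i < nThreads; i++)
    {
        data[i].id = i;                          // used to derive the per-thread output file name
        hThreads[i] = CreateThread(NULL, 0, TranscodeThread, &data[i], 0, NULL);
    }

    // Wait for all concurrent transcoding sessions to finish, then release the handles
    WaitForMultipleObjects(nThreads, hThreads, TRUE, INFINITE);
    for (int i = 0; i < nThreads; i++)
        CloseHandle(hThreads[i]);

    return 0;
}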
Example #4
mfxStatus D3DFrameAllocator::AllocImpl(mfxFrameAllocRequest *request, mfxFrameAllocResponse *response)
{
    HRESULT hr;

    MSDK_CHECK_POINTER(request, MFX_ERR_NULL_PTR);
    if (request->NumFrameSuggested == 0)
        return MFX_ERR_UNKNOWN;

    D3DFORMAT format = ConvertMfxFourccToD3dFormat(request->Info.FourCC);

    if (format == D3DFMT_UNKNOWN)
    {
        msdk_printf(MSDK_STRING("D3D Allocator: invalid fourcc is provided (%#X), exiting\n"), request->Info.FourCC);
        return MFX_ERR_UNSUPPORTED;
    }

    DWORD   target;

    if (MFX_MEMTYPE_DXVA2_DECODER_TARGET & request->Type)
    {
        target = DXVA2_VideoDecoderRenderTarget;
    }
    else if (MFX_MEMTYPE_DXVA2_PROCESSOR_TARGET & request->Type)
    {
        target = DXVA2_VideoProcessorRenderTarget;
    }
    else
        return MFX_ERR_UNSUPPORTED;

    IDirectXVideoAccelerationService* videoService = NULL;

    if (target == DXVA2_VideoProcessorRenderTarget) {
        if (!m_hProcessor) {
            hr = m_manager->OpenDeviceHandle(&m_hProcessor);
            if (FAILED(hr))
                return MFX_ERR_MEMORY_ALLOC;

            hr = m_manager->GetVideoService(m_hProcessor, IID_IDirectXVideoProcessorService, (void**)&m_processorService);
            if (FAILED(hr))
                return MFX_ERR_MEMORY_ALLOC;
        }
        videoService = m_processorService;
    }
    else {
        if (!m_hDecoder)
        {
            hr = m_manager->OpenDeviceHandle(&m_hDecoder);
            if (FAILED(hr))
                return MFX_ERR_MEMORY_ALLOC;

            hr = m_manager->GetVideoService(m_hDecoder, IID_IDirectXVideoDecoderService, (void**)&m_decoderService);
            if (FAILED(hr))
                return MFX_ERR_MEMORY_ALLOC;
        }
        videoService = m_decoderService;
    }

    mfxHDLPair *dxMids = NULL, **dxMidPtrs = NULL;
    dxMids = (mfxHDLPair*)calloc(request->NumFrameSuggested, sizeof(mfxHDLPair));
    dxMidPtrs = (mfxHDLPair**)calloc(request->NumFrameSuggested, sizeof(mfxHDLPair*));

    if (!dxMids || !dxMidPtrs) {
        MSDK_SAFE_FREE(dxMids);
        MSDK_SAFE_FREE(dxMidPtrs);
        return MFX_ERR_MEMORY_ALLOC;
    }

    response->mids = (mfxMemId*)dxMidPtrs;
    response->NumFrameActual = request->NumFrameSuggested;

    if (request->Type & MFX_MEMTYPE_EXTERNAL_FRAME) {
        for (int i = 0; i < request->NumFrameSuggested; i++) {
            hr = videoService->CreateSurface(request->Info.Width, request->Info.Height, 0,  format,
                                             D3DPOOL_DEFAULT, m_surfaceUsage, target, (IDirect3DSurface9**)&dxMids[i].first, &dxMids[i].second);
            if (FAILED(hr)) {
                ReleaseResponse(response);
                MSDK_SAFE_FREE(dxMids);
                return MFX_ERR_MEMORY_ALLOC;
            }
            dxMidPtrs[i] = &dxMids[i];
        }
    } else {
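        // Internal (non-external) frames: a single CreateSurface call allocates the whole set at once;
        // its third argument is the number of additional back buffers, so passing NumFrameSuggested - 1
        // yields NumFrameSuggested surfaces in the dxSrf array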
        safe_array<IDirect3DSurface9*> dxSrf(new IDirect3DSurface9*[request->NumFrameSuggested]);
        if (!dxSrf.get())
        {
            MSDK_SAFE_FREE(dxMids);
            return MFX_ERR_MEMORY_ALLOC;
        }
        hr = videoService->CreateSurface(request->Info.Width, request->Info.Height, request->NumFrameSuggested - 1,  format,
                                         D3DPOOL_DEFAULT, m_surfaceUsage, target, dxSrf.get(), NULL);
        if (FAILED(hr))
        {
            MSDK_SAFE_FREE(dxMids);
            return MFX_ERR_MEMORY_ALLOC;
        }


        for (int i = 0; i < request->NumFrameSuggested; i++) {
            dxMids[i].first = dxSrf.get()[i];
            dxMidPtrs[i] = &dxMids[i];
        }
    }
    return MFX_ERR_NONE;
}
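
A note on the release path: AllocImpl above hands the surfaces back to Media SDK as an array of mfxHDLPair pointers and tears them down again through ReleaseResponse (called in the error path). That method is not part of this listing; the sketch below only illustrates what a release matching the mid layout built above could look like. ReleaseResponseSketch is a hypothetical name, not the sample's actual method.

mfxStatus ReleaseResponseSketch(mfxFrameAllocResponse *response)
{
    if (!response || !response->mids)
        return MFX_ERR_NONE;

    mfxHDLPair **pairs = (mfxHDLPair**)response->mids;
    for (mfxU16 i = 0; i < response->NumFrameActual; i++) {
        if (pairs[i] && pairs[i]->first)
            ((IDirect3DSurface9*)pairs[i]->first)->Release();   // drop the D3D9 surface
    }
    if (response->NumFrameActual > 0)
        free(pairs[0]);          // the contiguous mfxHDLPair block (dxMids)
    free(pairs);                 // the pointer array (dxMidPtrs)
    response->mids = NULL;
    response->NumFrameActual = 0;
    return MFX_ERR_NONE;
}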
Exemplo n.º 5
0
/* Custom methods */
mfxStatus Rotate::Init(mfxVideoParam *mfxParam)
{
    MSDK_CHECK_POINTER(mfxParam, MFX_ERR_NULL_PTR);
    MSDK_CHECK_POINTER(m_pmfxCore, MFX_ERR_NULL_PTR);

    mfxStatus sts = MFX_ERR_NONE;
    // whether d3d (video) memory is used on input/output
    int bd3d[2] = { 0,       // input
                    0};      // output

    m_VideoParam = *mfxParam;

    // map opaque surfaces array in case of opaque surfaces
    m_bIsInOpaque = (m_VideoParam.IOPattern & MFX_IOPATTERN_IN_OPAQUE_MEMORY) ? true : false;
    m_bIsOutOpaque = (m_VideoParam.IOPattern & MFX_IOPATTERN_OUT_OPAQUE_MEMORY) ? true : false;
    mfxExtOpaqueSurfaceAlloc* pluginOpaqueAlloc = NULL;

    if (m_bIsInOpaque || m_bIsOutOpaque)
    {
        pluginOpaqueAlloc = (mfxExtOpaqueSurfaceAlloc*)GetExtBuffer(m_VideoParam.ExtParam,
            m_VideoParam.NumExtParam, MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION);
        MSDK_CHECK_POINTER(pluginOpaqueAlloc, MFX_ERR_INVALID_VIDEO_PARAM);
    }

    // check existence of corresponding allocs
    if ((m_bIsInOpaque && ! pluginOpaqueAlloc->In.Surfaces) || (m_bIsOutOpaque && !pluginOpaqueAlloc->Out.Surfaces))
       return MFX_ERR_INVALID_VIDEO_PARAM;

    if (m_bIsInOpaque)
    {
        sts = m_pmfxCore->MapOpaqueSurface(m_pmfxCore->pthis, pluginOpaqueAlloc->In.NumSurface,
            pluginOpaqueAlloc->In.Type, pluginOpaqueAlloc->In.Surfaces);
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, MFX_ERR_MEMORY_ALLOC);

        bd3d[0] = pluginOpaqueAlloc->In.Type &
            (MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET | MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET);
    }
    else
    {
        bd3d[0] = m_VideoParam.IOPattern & MFX_IOPATTERN_IN_VIDEO_MEMORY;
    }

    if (m_bIsOutOpaque)
    {
        sts = m_pmfxCore->MapOpaqueSurface(m_pmfxCore->pthis, pluginOpaqueAlloc->Out.NumSurface,
            pluginOpaqueAlloc->Out.Type, pluginOpaqueAlloc->Out.Surfaces);
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, MFX_ERR_MEMORY_ALLOC);

        bd3d[1] = pluginOpaqueAlloc->Out.Type &
            (MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET | MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET);
    }
    else
    {
        bd3d[1] = m_VideoParam.IOPattern & MFX_IOPATTERN_OUT_VIDEO_MEMORY;
    }

    m_MaxNumTasks = 1;

    m_pTasks = new RotateTask [m_MaxNumTasks];
    MSDK_CHECK_POINTER(m_pTasks, MFX_ERR_MEMORY_ALLOC);
    memset(m_pTasks, 0, sizeof(RotateTask) * m_MaxNumTasks);

    m_NumChunks = 1;
    m_pChunks = new DataChunk [m_NumChunks];
    MSDK_CHECK_POINTER(m_pChunks, MFX_ERR_MEMORY_ALLOC);
    memset(m_pChunks, 0, sizeof(DataChunk) * m_NumChunks);

    // divide frame into data chunks
    mfxU32 num_lines_in_chunk = mfxParam->vpp.In.CropH / m_NumChunks; // integer division
    mfxU32 remainder_lines = mfxParam->vpp.In.CropH % m_NumChunks; // get remainder
    // remaining lines are distributed among first chunks (+ extra 1 line each)
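    //   e.g. CropH = 1080 with m_NumChunks = 1 gives a single chunk covering lines 0..1079;
    //   with 4 chunks each chunk would cover 270 lines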
    for (mfxU32 i = 0; i < m_NumChunks; i++)
    {
        m_pChunks[i].StartLine = (i == 0) ? 0 : m_pChunks[i-1].EndLine + 1;
        m_pChunks[i].EndLine = (i < remainder_lines) ? (i + 1) * num_lines_in_chunk : (i + 1) * num_lines_in_chunk - 1;
    }

    // enable surface sharing in case both input and output are d3d surfaces
    if (bd3d[0] && bd3d[1])
        m_bOpenCLSurfaceSharing = true;

    if (m_bOpenCLSurfaceSharing)
    {
        // init OpenCLFilter
        cl_int error = CL_SUCCESS;

#if defined(_WIN32) || defined(_WIN64)
        if (MFX_IMPL_VIA_MASK(m_impl) == MFX_IMPL_VIA_D3D11) {
             m_OpenCLFilter.reset(new OpenCLFilterDX11());
        } else {
            m_OpenCLFilter.reset(new OpenCLFilterDX9());
        }
        error = m_OpenCLFilter.get()->AddKernel(readFile("ocl_rotate.cl").c_str(), "rotate_Y", "rotate_UV");
        if (error) return MFX_ERR_DEVICE_FAILED;

        error = m_OpenCLFilter.get()->OCLInit(m_device);

#else
        m_OpenCLFilter.reset(new OpenCLFilterVA());
        error = m_OpenCLFilter.get()->AddKernel(readFile("ocl_rotate.cl").c_str(), "rotate_Y", "rotate_UV");
        if (error) return MFX_ERR_DEVICE_FAILED;
        error = m_OpenCLFilter.get()->OCLInit(m_device);
#endif
        if (error)
        {
            error = CL_SUCCESS;
            std::cout << "\nWARNING: Initializing plugin with media sharing failed" << std::endl;
            m_bOpenCLSurfaceSharing = false; // try init plugin without sharing
        }
        else
        {
            error = m_OpenCLFilter->SelectKernel(0);
            if (error) return MFX_ERR_DEVICE_FAILED;
        }
    }

    if (!m_bOpenCLSurfaceSharing)
    {
        try
        {
            m_pOpenCLRotator180Context.reset(new OpenCLRotator180Context(readFile("ocl_rotate.cl").c_str()));
        }
        catch (const std::exception &err)
        {
            std::cout << "Error: The readFile method throws an exception: " << err.what() << std::endl;
            return MFX_ERR_DEVICE_FAILED;
        }
    }

    if (m_bOpenCLSurfaceSharing)
    {
        msdk_printf(MSDK_STRING("info: using GPU OpenCL device with media sharing extension\n"));
    }
    else
    {
        msdk_printf(MSDK_STRING("info: using CPU OpenCL device without media sharing extension\n"));
    }

    m_bInited = true;

    return MFX_ERR_NONE;
}
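
Rotate::Init above uses a GetExtBuffer helper (defined outside this listing) to locate the opaque surface allocation buffer among the attached extended parameters. Below is only a minimal sketch of such a lookup, relying on nothing beyond the standard mfxExtBuffer header fields; FindExtBufferSketch is a hypothetical name.

static mfxExtBuffer* FindExtBufferSketch(mfxExtBuffer** extParam, mfxU16 numExtParam, mfxU32 bufferId)
{
    if (!extParam)
        return NULL;
    for (mfxU16 i = 0; i < numExtParam; i++)
    {
        if (extParam[i] && extParam[i]->BufferId == bufferId)
            return extParam[i];   // first buffer with a matching id
    }
    return NULL;                  // buffer of this type is not attached
}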
Exemplo n.º 6
0
int main()
{  
    mfxStatus sts = MFX_ERR_NONE;

    mfxU16 inputWidth = 1920;
    mfxU16 inputHeight = 1080;

    // =====================================================================
    // Intel Media SDK Video Pre/Post Processing (VPP) pipeline setup
    // - Showcasing two VPP features
    //   - Resize (frame width and height is halved)
    //   - ProcAmp: Increase brightness
    // - Video memory surfaces are used
    //

    // Open input YV12 YUV file
    FILE* fSource;
    fopen_s(&fSource, "bbb1920x1080.yuv", "rb");
    MSDK_CHECK_POINTER(fSource, MFX_ERR_NULL_PTR);

    // Create output YUV file
    FILE* fSink;
    fopen_s(&fSink, "bbb960x540_vpp_bright_d3d.yuv", "wb");
    MSDK_CHECK_POINTER(fSink, MFX_ERR_NULL_PTR);

    // Initialize Media SDK session
    // - MFX_IMPL_AUTO_ANY selects HW acceleration if available (on any adapter)
    // - Version 1.0 is selected for greatest backwards compatibility.
    //   If more recent API features are needed, change the version accordingly
    mfxIMPL impl = MFX_IMPL_AUTO_ANY;
#ifdef DX11_D3D
    impl |= MFX_IMPL_VIA_D3D11;
#endif
    mfxVersion ver = {0, 1};
    MFXVideoSession mfxSession;
    sts = mfxSession.Init(impl, &ver);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    // Initialize VPP parameters
    mfxVideoParam VPPParams;
    memset(&VPPParams, 0, sizeof(VPPParams));
    // Input data
    VPPParams.vpp.In.FourCC         = MFX_FOURCC_NV12;
    VPPParams.vpp.In.ChromaFormat   = MFX_CHROMAFORMAT_YUV420;  
    VPPParams.vpp.In.CropX          = 0;
    VPPParams.vpp.In.CropY          = 0; 
    VPPParams.vpp.In.CropW          = inputWidth;
    VPPParams.vpp.In.CropH          = inputHeight;
    VPPParams.vpp.In.PicStruct      = MFX_PICSTRUCT_PROGRESSIVE;
    VPPParams.vpp.In.FrameRateExtN  = 30;
    VPPParams.vpp.In.FrameRateExtD  = 1;
    // width must be a multiple of 16 
    // height must be a multiple of 16 in case of frame picture and a multiple of 32 in case of field picture  
    VPPParams.vpp.In.Width  = MSDK_ALIGN16(inputWidth);
    VPPParams.vpp.In.Height = (MFX_PICSTRUCT_PROGRESSIVE == VPPParams.vpp.In.PicStruct)?
                                 MSDK_ALIGN16(inputHeight) : MSDK_ALIGN32(inputHeight);
    // Output data
    VPPParams.vpp.Out.FourCC        = MFX_FOURCC_NV12;     
    VPPParams.vpp.Out.ChromaFormat  = MFX_CHROMAFORMAT_YUV420;             
    VPPParams.vpp.Out.CropX         = 0;
    VPPParams.vpp.Out.CropY         = 0; 
    VPPParams.vpp.Out.CropW         = inputWidth/2;
    VPPParams.vpp.Out.CropH         = inputHeight/2;
    VPPParams.vpp.Out.PicStruct     = MFX_PICSTRUCT_PROGRESSIVE;
    VPPParams.vpp.Out.FrameRateExtN = 30;
    VPPParams.vpp.Out.FrameRateExtD = 1;
    // width must be a multiple of 16 
    // height must be a multiple of 16 in case of frame picture and a multiple of 32 in case of field picture  
    VPPParams.vpp.Out.Width  = MSDK_ALIGN16(VPPParams.vpp.Out.CropW); 
    VPPParams.vpp.Out.Height = (MFX_PICSTRUCT_PROGRESSIVE == VPPParams.vpp.Out.PicStruct)?
                                    MSDK_ALIGN16(VPPParams.vpp.Out.CropH) : MSDK_ALIGN32(VPPParams.vpp.Out.CropH);
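    // e.g. with the 960x540 output crop above, Width stays 960 (already 16-aligned)
    // and Height is rounded up to 544 by MSDK_ALIGN16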

    VPPParams.IOPattern = MFX_IOPATTERN_IN_VIDEO_MEMORY | MFX_IOPATTERN_OUT_VIDEO_MEMORY;

    
    // Create Media SDK VPP component
    MFXVideoVPP mfxVPP(mfxSession); 


    // Create DirectX device context
    mfxHDL deviceHandle;
    sts = CreateHWDevice(mfxSession, &deviceHandle, NULL);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);   

    // Provide device manager to Media SDK
    sts = mfxSession.SetHandle(DEVICE_MGR_TYPE, deviceHandle);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);    

    mfxFrameAllocator mfxAllocator;
    mfxAllocator.Alloc	= simple_alloc;
    mfxAllocator.Free	= simple_free;
    mfxAllocator.Lock	= simple_lock;
    mfxAllocator.Unlock = simple_unlock;
    mfxAllocator.GetHDL = simple_gethdl;
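    // The simple_* callbacks (defined outside this listing) implement the mfxFrameAllocator interface, e.g.
    //   mfxStatus simple_alloc(mfxHDL pthis, mfxFrameAllocRequest* request, mfxFrameAllocResponse* response);
    //   mfxStatus simple_lock (mfxHDL pthis, mfxMemId mid, mfxFrameData* ptr);
    // so Media SDK can request, lock/unlock and query D3D surfaces through them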

    // When using video memory we must provide Media SDK with an external allocator 
    sts = mfxSession.SetFrameAllocator(&mfxAllocator);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);


    // Query number of required surfaces for VPP
    mfxFrameAllocRequest VPPRequest[2];// [0] - in, [1] - out
    memset(&VPPRequest, 0, sizeof(mfxFrameAllocRequest)*2);
    sts = mfxVPP.QueryIOSurf(&VPPParams, VPPRequest);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);       

#ifdef DX11_D3D
    VPPRequest[0].Type |= WILL_WRITE; // Hint to DX11 memory handler that application will write data to input surfaces
    VPPRequest[1].Type |= WILL_READ; // Hint to DX11 memory handler that application will read data from output surfaces
#endif
    
    // Allocate required surfaces
    mfxFrameAllocResponse mfxResponseIn;
    mfxFrameAllocResponse mfxResponseOut;
    sts = mfxAllocator.Alloc(mfxAllocator.pthis, &VPPRequest[0], &mfxResponseIn);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
    sts = mfxAllocator.Alloc(mfxAllocator.pthis, &VPPRequest[1], &mfxResponseOut);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    mfxU16 nVPPSurfNumIn = mfxResponseIn.NumFrameActual;
    mfxU16 nVPPSurfNumOut = mfxResponseOut.NumFrameActual;

    // Allocate surface headers (mfxFrameSurface1) for VPP
    mfxFrameSurface1** pVPPSurfacesIn = new mfxFrameSurface1*[nVPPSurfNumIn];
    MSDK_CHECK_POINTER(pVPPSurfacesIn, MFX_ERR_MEMORY_ALLOC);       
    for (int i = 0; i < nVPPSurfNumIn; i++)
    {       
        pVPPSurfacesIn[i] = new mfxFrameSurface1;
        memset(pVPPSurfacesIn[i], 0, sizeof(mfxFrameSurface1));
        memcpy(&(pVPPSurfacesIn[i]->Info), &(VPPParams.vpp.In), sizeof(mfxFrameInfo));
        pVPPSurfacesIn[i]->Data.MemId = mfxResponseIn.mids[i]; // MID (memory id) represent one D3D NV12 surface

#ifndef ENABLE_INPUT
        // When simulating direct access to frames we initialize the allocated surfaces with a default pattern
        // - For fair benchmark comparisons against async workloads all surfaces must contain the same data
#ifndef DX11_D3D
        IDirect3DSurface9 *pSurface;
        D3DSURFACE_DESC desc;
        D3DLOCKED_RECT locked;
        pSurface = (IDirect3DSurface9 *)mfxResponseIn.mids[i];
        pSurface->GetDesc(&desc);
        pSurface->LockRect(&locked, 0, D3DLOCK_NOSYSLOCK);
        memset((mfxU8 *)locked.pBits, 100, desc.Height*locked.Pitch);  // Y plane
        memset((mfxU8 *)locked.pBits + desc.Height * locked.Pitch, 50, (desc.Height*locked.Pitch)/2);  // UV plane
        pSurface->UnlockRect();
#else
        // For now, just leave D3D11 surface data uninitialized
#endif
#endif
    }  

    mfxFrameSurface1** pVPPSurfacesOut = new mfxFrameSurface1*[nVPPSurfNumOut];
    MSDK_CHECK_POINTER(pVPPSurfacesOut, MFX_ERR_MEMORY_ALLOC);       
    for (int i = 0; i < nVPPSurfNumOut; i++)
    {       
        pVPPSurfacesOut[i] = new mfxFrameSurface1;
        memset(pVPPSurfacesOut[i], 0, sizeof(mfxFrameSurface1));
        memcpy(&(pVPPSurfacesOut[i]->Info), &(VPPParams.vpp.Out), sizeof(mfxFrameInfo));
        pVPPSurfacesOut[i]->Data.MemId = mfxResponseOut.mids[i]; // MID (memory id) represent one D3D NV12 surface
    }  


    // Initialize extended buffer for frame processing
    // - Process amplifier (ProcAmp) used to control brightness 
    // - mfxExtVPPDoUse:   Define the processing algorithm to be used
    // - mfxExtVPPProcAmp: ProcAmp configuration
    // - mfxExtBuffer:     Add extended buffers to VPP parameter configuration
    mfxExtVPPDoUse extDoUse;
    mfxU32 tabDoUseAlg[1]; 
    extDoUse.Header.BufferId = MFX_EXTBUFF_VPP_DOUSE;
    extDoUse.Header.BufferSz = sizeof(mfxExtVPPDoUse);
    extDoUse.NumAlg  = 1;
    extDoUse.AlgList = tabDoUseAlg;
    tabDoUseAlg[0] = MFX_EXTBUFF_VPP_PROCAMP;

    mfxExtVPPProcAmp procampConfig;
    procampConfig.Header.BufferId = MFX_EXTBUFF_VPP_PROCAMP;
    procampConfig.Header.BufferSz = sizeof(mfxExtVPPProcAmp);
    procampConfig.Hue        = 0.0f;  // Default
    procampConfig.Saturation = 1.0f;  // Default
    procampConfig.Contrast   = 1.0;   // Default
    procampConfig.Brightness = 40.0;  // Adjust brightness

    mfxExtBuffer* ExtBuffer[2];
    ExtBuffer[0] = (mfxExtBuffer*)&extDoUse;
    ExtBuffer[1] = (mfxExtBuffer*)&procampConfig;
    VPPParams.NumExtParam = 2;
    VPPParams.ExtParam = (mfxExtBuffer**)&ExtBuffer[0];


    // Initialize Media SDK VPP
    sts = mfxVPP.Init(&VPPParams);
    MSDK_IGNORE_MFX_STS(sts, MFX_WRN_PARTIAL_ACCELERATION);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);    


    // ===================================
    // Start processing the frames
    //
 
#ifdef ENABLE_BENCHMARK
    LARGE_INTEGER tStart, tEnd;
    QueryPerformanceFrequency(&tStart);
    double freq = (double)tStart.QuadPart;
    QueryPerformanceCounter(&tStart);
#endif

    int nSurfIdxIn = 0, nSurfIdxOut = 0; 
    mfxSyncPoint syncp;
    mfxU32 nFrame = 0;

    //
    // Stage 1: Main processing loop
    //
    while (MFX_ERR_NONE <= sts || MFX_ERR_MORE_DATA == sts)        
    {        
        nSurfIdxIn = GetFreeSurfaceIndex(pVPPSurfacesIn, nVPPSurfNumIn); // Find free input frame surface
        if (MFX_ERR_NOT_FOUND == nSurfIdxIn)
            return MFX_ERR_MEMORY_ALLOC;

        // Surface locking is required when reading/writing D3D surfaces
        sts = mfxAllocator.Lock(mfxAllocator.pthis, pVPPSurfacesIn[nSurfIdxIn]->Data.MemId, &(pVPPSurfacesIn[nSurfIdxIn]->Data));
        MSDK_BREAK_ON_ERROR(sts);

        sts = LoadRawFrame(pVPPSurfacesIn[nSurfIdxIn], fSource); // Load frame from file into surface
        MSDK_BREAK_ON_ERROR(sts);
           
        sts = mfxAllocator.Unlock(mfxAllocator.pthis, pVPPSurfacesIn[nSurfIdxIn]->Data.MemId, &(pVPPSurfacesIn[nSurfIdxIn]->Data));
        MSDK_BREAK_ON_ERROR(sts);

        nSurfIdxOut = GetFreeSurfaceIndex(pVPPSurfacesOut, nVPPSurfNumOut); // Find free output frame surface
        if (MFX_ERR_NOT_FOUND == nSurfIdxOut)
            return MFX_ERR_MEMORY_ALLOC;

        // Process a frame asynchronously (returns immediately)
        sts = mfxVPP.RunFrameVPPAsync(pVPPSurfacesIn[nSurfIdxIn], pVPPSurfacesOut[nSurfIdxOut], NULL, &syncp);
        if (MFX_ERR_MORE_DATA == sts)
            continue;

        // MFX_ERR_MORE_SURFACE means the output is ready but more output surfaces are needed (example: frame rate conversion 30->60)
        // * Not handled in this example!

        MSDK_BREAK_ON_ERROR(sts);

        sts = mfxSession.SyncOperation(syncp, 60000); // Synchronize. Wait until frame processing is ready
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

        ++nFrame;
#ifdef ENABLE_OUTPUT
        // Surface locking is required when reading/writing D3D surfaces
        sts = mfxAllocator.Lock(mfxAllocator.pthis, pVPPSurfacesOut[nSurfIdxOut]->Data.MemId, &(pVPPSurfacesOut[nSurfIdxOut]->Data));
        MSDK_BREAK_ON_ERROR(sts);

        sts = WriteRawFrame(pVPPSurfacesOut[nSurfIdxOut], fSink);
        MSDK_BREAK_ON_ERROR(sts);

        sts = mfxAllocator.Unlock(mfxAllocator.pthis, pVPPSurfacesOut[nSurfIdxOut]->Data.MemId, &(pVPPSurfacesOut[nSurfIdxOut]->Data));
        MSDK_BREAK_ON_ERROR(sts);

        printf("Frame number: %d\r", nFrame);
#endif
    }

    // MFX_ERR_MORE_DATA means that the input file has ended, need to go to buffering loop, exit in case of other errors
    MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_DATA);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
    
    //
    // Stage 2: Retrieve the buffered VPP frames
    //
    while (MFX_ERR_NONE <= sts)
    {       
        nSurfIdxOut = GetFreeSurfaceIndex(pVPPSurfacesOut, nVPPSurfNumOut); // Find free frame surface
        if (MFX_ERR_NOT_FOUND == nSurfIdxOut)
            return MFX_ERR_MEMORY_ALLOC;

        // Process a frame asynchronously (returns immediately)
        sts = mfxVPP.RunFrameVPPAsync(NULL, pVPPSurfacesOut[nSurfIdxOut], NULL, &syncp);
        MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_SURFACE);
        MSDK_BREAK_ON_ERROR(sts);
        
        sts = mfxSession.SyncOperation(syncp, 60000); // Synchronize. Wait until frame processing is ready
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

        ++nFrame;
#ifdef ENABLE_OUTPUT
        // Surface locking is required when reading/writing D3D surfaces
        sts = mfxAllocator.Lock(mfxAllocator.pthis, pVPPSurfacesOut[nSurfIdxOut]->Data.MemId, &(pVPPSurfacesOut[nSurfIdxOut]->Data));
        MSDK_BREAK_ON_ERROR(sts);

        sts = WriteRawFrame(pVPPSurfacesOut[nSurfIdxOut], fSink);
        MSDK_BREAK_ON_ERROR(sts);

        sts = mfxAllocator.Unlock(mfxAllocator.pthis, pVPPSurfacesOut[nSurfIdxOut]->Data.MemId, &(pVPPSurfacesOut[nSurfIdxOut]->Data));
        MSDK_BREAK_ON_ERROR(sts);

        printf("Frame number: %d\r", nFrame);
#endif
    }    

    // MFX_ERR_MORE_DATA indicates that there are no more buffered frames, exit in case of other errors
    MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_DATA);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

#ifdef ENABLE_BENCHMARK
    QueryPerformanceCounter(&tEnd);
    double duration = ((double)tEnd.QuadPart - (double)tStart.QuadPart)  / freq;
    printf("\nExecution time: %3.2fs (%3.2ffps)\n", duration, nFrame/duration);
#endif

    // ===================================================================
    // Clean up resources
    //  - It is recommended to close Media SDK components first, before releasing allocated surfaces, since
    //    some surfaces may still be locked by internal Media SDK resources.

    mfxVPP.Close();
    //mfxSession closed automatically on destruction

    for (int i = 0; i < nVPPSurfNumIn; i++)
        delete pVPPSurfacesIn[i];
    MSDK_SAFE_DELETE_ARRAY(pVPPSurfacesIn);
    for (int i = 0; i < nVPPSurfNumOut; i++)
        delete pVPPSurfacesOut[i];
    MSDK_SAFE_DELETE_ARRAY(pVPPSurfacesOut);

    fclose(fSource);
    fclose(fSink);

    CleanupHWDevice();

    return 0;
}
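
Both processing loops above pick surfaces with GetFreeSurfaceIndex, which is defined outside this listing. The following is only a minimal sketch consistent with how the loops use it: scan the pool for a surface that Media SDK is not currently holding (Data.Locked == 0) and return its index, or MFX_ERR_NOT_FOUND otherwise.

int GetFreeSurfaceIndex(mfxFrameSurface1** pSurfacesPool, mfxU16 nPoolSize)
{
    if (pSurfacesPool)
        for (mfxU16 i = 0; i < nPoolSize; i++)
            if (0 == pSurfacesPool[i]->Data.Locked)
                return i;                 // surface not in use by Media SDK
    return MFX_ERR_NOT_FOUND;             // no free surface in the pool
}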
Exemplo n.º 7
0
mfxStatus FFmpeg_Writer_Init(	const char *strFileName,
                                mfxU32 videoType,
                                mfxU16 nBitRate,
                                mfxU16 nDstWidth,
                                mfxU16 nDstHeight,
                                mfxU16 GopRefDist,
                                mfxU8* SPSbuf,
                                int SPSbufsize,
                                mfxU8* PPSbuf,
                                int PPSbufsize)
{
    MSDK_CHECK_POINTER(strFileName, MFX_ERR_NULL_PTR);

    g_GopRefDist = GopRefDist;

    // Initialize libavcodec, and register all codecs and formats
    avcodec_register_all();
    av_register_all();
    avformat_network_init();  //not necessary for file-only transcode

    // Get default output format config based on selected container type
    g_pFmt = av_guess_format(FORMAT_SHORT_NAME, FORMAT_FILENAME, NULL);
    if (!g_pFmt) {
        printf("FFMPEG: Could not find suitable output format\n");
        return MFX_ERR_UNSUPPORTED;
    }

    // Subtitle processing ignored
    g_pFmt->subtitle_codec = AV_CODEC_ID_NONE;

    switch (videoType)
    {
        case MFX_CODEC_AVC:
            g_pFmt->video_codec = AV_CODEC_ID_H264;
            break;
        case MFX_CODEC_MPEG2:
            g_pFmt->video_codec = AV_CODEC_ID_MPEG2VIDEO;
            break;
        default:
            printf("Unsupported video format\n");
            return MFX_ERR_UNSUPPORTED;
    }

    // Allocate the output media context
    g_pFormatCtxMux = avformat_alloc_context();
    if (!g_pFormatCtxMux) {
        printf("FFMPEG: avformat_alloc_context error\n");
        return MFX_ERR_UNSUPPORTED;
    }

    g_pFormatCtxMux->oformat = g_pFmt;

    sprintf_s(g_pFormatCtxMux->filename, "%s", strFileName);
    
    if (g_pFmt->video_codec == CODEC_ID_NONE) 
        return MFX_ERR_UNSUPPORTED;

    g_pVideoStream = avformat_new_stream(g_pFormatCtxMux, NULL);
    if (!g_pVideoStream) {
        printf("FFMPEG: Could not alloc video stream\n");
        return MFX_ERR_UNKNOWN;
    }

    g_videoStreamMuxIdx = g_pVideoStream->index;

    AVCodecContext *c = g_pVideoStream->codec;
    c->codec_id		= g_pFmt->video_codec;
    c->codec_type	= AVMEDIA_TYPE_VIDEO;
    c->bit_rate		= nBitRate*1000;
    c->width		= nDstWidth; 
    c->height		= nDstHeight;

    // time base: this is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented. for fixed-fps content,
    //            timebase should be 1/framerate and timestamp increments should be identically 1.
    c->time_base.den = g_pFormatCtx->streams[g_videoStreamIdx]->r_frame_rate.num;
    c->time_base.num = g_pFormatCtx->streams[g_videoStreamIdx]->r_frame_rate.den;

    // Some formats want stream headers to be separate
    if(g_pFormatCtxMux->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

#ifdef PROCESS_AUDIO
    g_pFmt->audio_codec = g_pAudioStream->codec->codec_id;

    // Create new audio stream for the container
    g_pAudioStreamMux = avformat_new_stream(g_pFormatCtxMux, NULL);
    if (!g_pAudioStreamMux) {
        printf("FFMPEG: Could not alloc audio stream\n");
        return MFX_ERR_UNKNOWN;
    }

    g_audioStreamMuxIdx = g_pAudioStreamMux->index;
 
    // Copy audio codec config from input stream to output stream
    AVCodecContext *ca			= g_pAudioStreamMux->codec;
    ca->codec_id				= g_pAudioStream->codec->codec_id;
    ca->codec_type				= AVMEDIA_TYPE_AUDIO;
    ca->sample_rate				= g_pAudioStream->codec->sample_rate; 
    ca->channels				= g_pAudioStream->codec->channels;
    ca->bit_rate				= g_pAudioStream->codec->bit_rate; 
    ca->sample_fmt				= g_pAudioStream->codec->sample_fmt;
    ca->frame_size				= g_pAudioStream->codec->frame_size;
    ca->bits_per_coded_sample	= g_pAudioStream->codec->bits_per_coded_sample;
    ca->channel_layout			= g_pAudioStream->codec->channel_layout;

    ca->time_base = g_pAudioStream->codec->time_base;
    g_pAudioStreamMux->time_base = g_pAudioStream->codec->time_base;
    

    // Extra data apparently contains essential channel config info (must be copied!)
    ca->extradata_size = g_pAudioStream->codec->extradata_size;
    g_audioExtraData = (uint8_t*)av_malloc(ca->extradata_size);
    ca->extradata = g_audioExtraData;
    memcpy(ca->extradata, g_pAudioStream->codec->extradata, ca->extradata_size);


    // Some formats want stream headers to be separate
    if(g_pFormatCtxMux->oformat->flags & AVFMT_GLOBALHEADER)
        ca->flags |= CODEC_FLAG_GLOBAL_HEADER;
#endif

    // Open the output container file
    if (avio_open(&g_pFormatCtxMux->pb, g_pFormatCtxMux->filename, AVIO_FLAG_WRITE) < 0)
    {
        printf("FFMPEG: Could not open '%s'\n", g_pFormatCtxMux->filename);
        return MFX_ERR_UNKNOWN;
    }
 
    g_pExtDataBuffer = (mfxU8*)av_malloc(SPSbufsize + PPSbufsize);
    if(!g_pExtDataBuffer) {
        printf("FFMPEG: could not allocate required buffer\n");
        return MFX_ERR_UNKNOWN;
    }

    memcpy(g_pExtDataBuffer, SPSbuf, SPSbufsize);
    memcpy(g_pExtDataBuffer + SPSbufsize, PPSbuf, PPSbufsize);

    // Codec "extradata" conveys the H.264 stream SPS and PPS info (MPEG2: sequence header is housed in SPS buffer, PPS buffer is empty)
    c->extradata		= g_pExtDataBuffer;
    c->extradata_size	= SPSbufsize + PPSbufsize;

    // Write container header
    if(avformat_write_header(g_pFormatCtxMux, NULL)) {
        printf("FFMPEG: avformat_write_header error!\n");
        return MFX_ERR_UNKNOWN;
    }

    return MFX_ERR_NONE;
}
Exemplo n.º 8
0
mfxStatus FFmpeg_Reader_Init(const char *strFileName, mfxU32 videoType)
{
    MSDK_CHECK_POINTER(strFileName, MFX_ERR_NULL_PTR);

    int res;

    g_videoType = videoType;

    // Initialize libavcodec, and register all codecs and formats
    av_register_all();

    // Open input container
    res = avformat_open_input(&g_pFormatCtx, strFileName, NULL, NULL);
    if(res) {
        printf("FFMPEG: Could not open input container\n");
        return MFX_ERR_UNKNOWN;
    }

    // Retrieve stream information
    res = avformat_find_stream_info(g_pFormatCtx, NULL);
    if(res < 0) {
        printf("FFMPEG: Couldn't find stream information\n");
        return MFX_ERR_UNKNOWN;
    }
    

    // Dump container info to console
    av_dump_format(g_pFormatCtx, 0, strFileName, 0);

    // Find the streams in the container
    g_videoStreamIdx = -1;
    for(unsigned int i=0; i<g_pFormatCtx->nb_streams; i++)
    {
        if(g_pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && g_videoStreamIdx == -1)
        {
            g_videoStreamIdx = i;
 
            // save decoded stream timestamp time base
            g_dec_time_base = g_pFormatCtx->streams[i]->time_base;

            if(videoType == MFX_CODEC_AVC)
            {
                // Retrieve required h264_mp4toannexb filter
                g_pBsfc = av_bitstream_filter_init("h264_mp4toannexb");
                if (!g_pBsfc) {
                    printf("FFMPEG: Could not aquire h264_mp4toannexb filter\n");
                    return MFX_ERR_UNKNOWN;
                }
            }
        }
#ifdef PROCESS_AUDIO
        else if(g_pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
        {
            g_audioStreamIdx = i;
            g_pAudioStream = g_pFormatCtx->streams[i];
            g_audio_dec_time_base = g_pAudioStream->time_base;
        }
#endif
    }
    if(g_videoStreamIdx == -1)
        return MFX_ERR_UNKNOWN; // Didn't find any video streams in container

    return MFX_ERR_NONE;
}
Exemplo n.º 9
0
mfxStatus ParseInputString(msdk_char* strInput[], mfxU8 nArgNum, sInputParams* pParams)
{
    if (1 == nArgNum)
    {
        PrintHelp(strInput[0], NULL);
        return MFX_ERR_UNSUPPORTED;
    }

    MSDK_CHECK_POINTER(pParams, MFX_ERR_NULL_PTR);

    // set default implementation
    pParams->bUseHWLib = true;
    pParams->bUseFullColorRange = false;
#if defined(LIBVA_SUPPORT)
    pParams->libvaBackend = MFX_LIBVA_DRM;
#endif

    for (mfxU8 i = 1; i < nArgNum; i++)
    {
        if (MSDK_CHAR('-') != strInput[i][0])
        {
            mfxStatus sts = StrFormatToCodecFormatFourCC(strInput[i], pParams->videoType);
            if (sts != MFX_ERR_NONE)
            {
                PrintHelp(strInput[0], MSDK_STRING("Unknown codec"));
                return MFX_ERR_UNSUPPORTED;
            }
            if (!IsDecodeCodecSupported(pParams->videoType))
            {
                PrintHelp(strInput[0], MSDK_STRING("Unsupported codec"));
                return MFX_ERR_UNSUPPORTED;
            }
            if (pParams->videoType == CODEC_MVC)
            {
                pParams->videoType = MFX_CODEC_AVC;
                pParams->bIsMVC = true;
            }
            continue;
        }

        if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-sw")))
        {
            pParams->bUseHWLib = false;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-hw")))
        {
            pParams->bUseHWLib = true;
        }
#if D3D_SURFACES_SUPPORT
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-d3d")))
        {
            pParams->memType = D3D9_MEMORY;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-d3d11")))
        {
            pParams->memType = D3D11_MEMORY;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-r")))
        {
            pParams->mode = MODE_RENDERING;
            // use d3d9 rendering by default
            if (SYSTEM_MEMORY == pParams->memType)
                pParams->memType = D3D9_MEMORY;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-wall")))
        {
            if(i + 6 >= nArgNum)
            {
                PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -wall key"));
                return MFX_ERR_UNSUPPORTED;
            }
            // use d3d9 rendering by default
            if (SYSTEM_MEMORY == pParams->memType)
                pParams->memType = D3D9_MEMORY;

            pParams->mode = MODE_RENDERING;

            msdk_opt_read(strInput[++i], pParams->nWallW);
            msdk_opt_read(strInput[++i], pParams->nWallH);
            msdk_opt_read(strInput[++i], pParams->nWallCell);
            msdk_opt_read(strInput[++i], pParams->nWallMonitor);

            mfxU32 nTitle;
            msdk_opt_read(strInput[++i], nTitle);

            pParams->bWallNoTitle = 0 == nTitle;

            msdk_opt_read(strInput[++i], pParams->nWallTimeout);
        }
#endif
#if defined(LIBVA_SUPPORT)
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-vaapi")))
        {
            pParams->memType = D3D9_MEMORY;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-r")))
        {
            pParams->memType = D3D9_MEMORY;
            pParams->mode = MODE_RENDERING;
            pParams->libvaBackend = MFX_LIBVA_X11;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-rwld")))
        {
            pParams->memType = D3D9_MEMORY;
            pParams->mode = MODE_RENDERING;
            pParams->libvaBackend = MFX_LIBVA_WAYLAND;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-perf")))
        {
            pParams->bPerfMode = true;
        }
        else if (0 == msdk_strncmp(strInput[i], MSDK_STRING("-rdrm"), 5))
        {
            pParams->memType = D3D9_MEMORY;
            pParams->mode = MODE_RENDERING;
            pParams->libvaBackend = MFX_LIBVA_DRM_MODESET;
            if (strInput[i][5]) {
                if (strInput[i][5] != '-') {
                    PrintHelp(strInput[0], MSDK_STRING("unsupported monitor type"));
                    return MFX_ERR_UNSUPPORTED;
                }
                pParams->monitorType = getMonitorType(&strInput[i][6]);
                if (pParams->monitorType >= MFX_MONITOR_MAXNUMBER) {
                    PrintHelp(strInput[0], MSDK_STRING("unsupported monitor type"));
                    return MFX_ERR_UNSUPPORTED;
                }
            } else {
                pParams->monitorType = MFX_MONITOR_AUTO; // that's case of "-rdrm" pure option
            }
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-window")))
        {
            if(i +4 >= nArgNum)
            {
                PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -window key"));
                return MFX_ERR_UNSUPPORTED;
            }
            msdk_opt_read(strInput[++i], pParams->nRenderWinX);
            msdk_opt_read(strInput[++i], pParams->nRenderWinY);
            msdk_opt_read(strInput[++i], pParams->Width);
            msdk_opt_read(strInput[++i], pParams->Height);

            if (0 == pParams->Width)
                pParams->Width = 320;
            if (0 == pParams->Height)
                pParams->Height = 240;

            pParams->bRenderWin = true;
        }
#endif
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-low_latency")))
        {
            switch (pParams->videoType)
            {
                case MFX_CODEC_HEVC:
                case MFX_CODEC_AVC:
                case MFX_CODEC_JPEG:
                {
                    pParams->bLowLat = true;
                    if (!pParams->bIsMVC)
                        break;
                }
                default:
                {
                     PrintHelp(strInput[0], MSDK_STRING("-low_latency mode is suppoted only for H.264 and JPEG codecs"));
                     return MFX_ERR_UNSUPPORTED;
                }
            }
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-jpeg_rotate")))
        {
            if(MFX_CODEC_JPEG != pParams->videoType)
                return MFX_ERR_UNSUPPORTED;

            if(i + 1 >= nArgNum)
            {
                PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -jpeg_rotate key"));
                return MFX_ERR_UNSUPPORTED;
            }

            msdk_opt_read(strInput[++i], pParams->nRotation);
            if((pParams->nRotation != 90)&&(pParams->nRotation != 180)&&(pParams->nRotation != 270))
            {
                PrintHelp(strInput[0], MSDK_STRING("-jpeg_rotate is supported only for 90, 180 and 270 angles"));
                return MFX_ERR_UNSUPPORTED;
            }
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-calc_latency")))
        {
            switch (pParams->videoType)
            {
                case MFX_CODEC_HEVC:
                case MFX_CODEC_AVC:
                case MFX_CODEC_JPEG:
                {
                    pParams->bCalLat = true;
                    if (!pParams->bIsMVC)
                        break;
                }
                default:
                {
                     PrintHelp(strInput[0], MSDK_STRING("-calc_latency mode is suppoted only for H.264 and JPEG codecs"));
                     return MFX_ERR_UNSUPPORTED;
                }
            }
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-async")))
        {
            if(i + 1 >= nArgNum)
            {
                PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -async key"));
                return MFX_ERR_UNSUPPORTED;
            }
            if (MFX_ERR_NONE != msdk_opt_read(strInput[++i], pParams->nAsyncDepth))
            {
                PrintHelp(strInput[0], MSDK_STRING("async is invalid"));
                return MFX_ERR_UNSUPPORTED;
            }
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-di")))
        {
            if(i + 1 >= nArgNum)
            {
                PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -di key"));
                return MFX_ERR_UNSUPPORTED;
            }
            msdk_char diMode[4] = {};
            if (MFX_ERR_NONE != msdk_opt_read(strInput[++i], diMode))
            {
                PrintHelp(strInput[0], MSDK_STRING("deinterlace value is not set"));
                return MFX_ERR_UNSUPPORTED;
            }

            if (0 == msdk_strcmp(diMode, MSDK_CHAR("bob")))
            {
                pParams->eDeinterlace = MFX_DEINTERLACING_BOB;
            }
            else if (0 == msdk_strcmp(diMode, MSDK_CHAR("adi")))
            {
                pParams->eDeinterlace = MFX_DEINTERLACING_ADVANCED;
            }
            else
            {
                PrintHelp(strInput[0], MSDK_STRING("deinterlace value is invalid"));
                return MFX_ERR_UNSUPPORTED;
            }
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-gpucopy::on")))
        {
            pParams->gpuCopy = MFX_GPUCOPY_ON;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-gpucopy::off")))
        {
            pParams->gpuCopy = MFX_GPUCOPY_OFF;
        }
#if !defined(_WIN32) && !defined(_WIN64)
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-threads_num")))
        {
            if(i + 1 >= nArgNum)
            {
                PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -threads_num key"));
                return MFX_ERR_UNSUPPORTED;
            }
            if (MFX_ERR_NONE != msdk_opt_read(strInput[++i], pParams->nThreadsNum))
            {
                PrintHelp(strInput[0], MSDK_STRING("threads_num is invalid"));
                return MFX_ERR_UNSUPPORTED;
            }
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-threads_schedtype")))
        {
            if(i + 1 >= nArgNum)
            {
                PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -threads_schedtype key"));
                return MFX_ERR_UNSUPPORTED;
            }
            if (MFX_ERR_NONE != msdk_thread_get_schedtype(strInput[++i], pParams->SchedulingType))
            {
                PrintHelp(strInput[0], MSDK_STRING("threads_schedtype is invalid"));
                return MFX_ERR_UNSUPPORTED;
            }
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-threads_priority")))
        {
            if(i + 1 >= nArgNum)
            {
                PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -threads_priority key"));
                return MFX_ERR_UNSUPPORTED;
            }
            if (MFX_ERR_NONE != msdk_opt_read(strInput[++i], pParams->Priority))
            {
                PrintHelp(strInput[0], MSDK_STRING("threads_priority is invalid"));
                return MFX_ERR_UNSUPPORTED;
            }
        }
#endif // #if !defined(_WIN32) && !defined(_WIN64)
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-f")))
        {
            if(i + 1 >= nArgNum)
            {
                PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -f key"));
                return MFX_ERR_UNSUPPORTED;
            }
            if (MFX_ERR_NONE != msdk_opt_read(strInput[++i], pParams->nMaxFPS))
            {
                PrintHelp(strInput[0], MSDK_STRING("rendering frame rate is invalid"));
                return MFX_ERR_UNSUPPORTED;
            }
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-scr:w")))
        {
            if (i + 1 >= nArgNum)
            {
                PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -scr:w key"));
                return MFX_ERR_UNSUPPORTED;
            }
            if (MFX_ERR_NONE != msdk_opt_read(strInput[++i], pParams->scrWidth))
            {
                PrintHelp(strInput[0], MSDK_STRING("screen width rate is invalid"));
                return MFX_ERR_UNSUPPORTED;
            }
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-scr:h")))
        {
            if (i + 1 >= nArgNum)
            {
                PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -scr:h key"));
                return MFX_ERR_UNSUPPORTED;
            }
            if (MFX_ERR_NONE != msdk_opt_read(strInput[++i], pParams->scrHeight))
            {
                PrintHelp(strInput[0], MSDK_STRING("screen height is invalid"));
                return MFX_ERR_UNSUPPORTED;
            }
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-w")))
        {
            if (i + 1 >= nArgNum)
            {
                PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -w key"));
                return MFX_ERR_UNSUPPORTED;
            }
            if (MFX_ERR_NONE != msdk_opt_read(strInput[++i], pParams->Width))
            {
                PrintHelp(strInput[0], MSDK_STRING("width is invalid"));
                return MFX_ERR_UNSUPPORTED;
            }
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-h")))
        {
            if (i + 1 >= nArgNum)
            {
                PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -h key"));
                return MFX_ERR_UNSUPPORTED;
            }
            if (MFX_ERR_NONE != msdk_opt_read(strInput[++i], pParams->Height))
            {
                PrintHelp(strInput[0], MSDK_STRING("height is invalid"));
                return MFX_ERR_UNSUPPORTED;
            }
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-n")))
        {
            if(i + 1 >= nArgNum)
            {
                PrintHelp(strInput[0], MSDK_STRING("Not enough parameters for -n key"));
                return MFX_ERR_UNSUPPORTED;
            }
            if (MFX_ERR_NONE != msdk_opt_read(strInput[++i], pParams->nFrames))
            {
                PrintHelp(strInput[0], MSDK_STRING("rendering frame rate is invalid"));
                return MFX_ERR_UNSUPPORTED;
            }
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-jpeg_rgb")))
        {
            if(MFX_CODEC_JPEG == pParams->videoType)
            {
               pParams->chromaType = MFX_JPEG_COLORFORMAT_RGB;
            }
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-i420")))
        {
            pParams->fourcc = MFX_FOURCC_NV12;
            pParams->outI420 = true;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-nv12")))
        {
            pParams->fourcc = MFX_FOURCC_NV12;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-rgb4")))
        {
            pParams->fourcc = MFX_FOURCC_RGB4;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-rgb4_fcr")))
        {
            pParams->fourcc = MFX_FOURCC_RGB4;
            pParams->bUseFullColorRange = true;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-p010")))
        {
            pParams->fourcc = MFX_FOURCC_P010;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-a2rgb10")))
        {
            pParams->fourcc = MFX_FOURCC_A2RGB10;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-path")))
        {
            i++;
#if defined(_WIN32) || defined(_WIN64)
            msdk_char wchar[MSDK_MAX_FILENAME_LEN];
            msdk_opt_read(strInput[i], wchar);
            std::wstring wstr(wchar);
            std::string str(wstr.begin(), wstr.end());

            strcpy_s(pParams->pluginParams.strPluginPath, str.c_str());
#else
            msdk_opt_read(strInput[i], pParams->pluginParams.strPluginPath);
#endif
            pParams->pluginParams.type = MFX_PLUGINLOAD_TYPE_FILE;
        }
        else if (0 == msdk_strcmp(strInput[i], MSDK_STRING("-i:null")))
        {
            ;
        }
        else // 1-character options
        {
            switch (strInput[i][1])
            {
            case MSDK_CHAR('p'):
                if (++i < nArgNum) {
                   if (MFX_ERR_NONE == ConvertStringToGuid(strInput[i], pParams->pluginParams.pluginGuid))
                    {
                        pParams->pluginParams.type = MFX_PLUGINLOAD_TYPE_GUID;
                    }
                    else
                    {
                        PrintHelp(strInput[0], MSDK_STRING("Unknown options"));
                    }
                 }
                else {
                    msdk_printf(MSDK_STRING("error: option '-p' expects an argument\n"));
                }
                break;
            case MSDK_CHAR('i'):
                if (++i < nArgNum) {
                    msdk_opt_read(strInput[i], pParams->strSrcFile);
                }
                else {
                    msdk_printf(MSDK_STRING("error: option '-i' expects an argument\n"));
                }
                break;
            case MSDK_CHAR('o'):
                if (++i < nArgNum) {
                    pParams->mode = MODE_FILE_DUMP;
                    msdk_opt_read(strInput[i], pParams->strDstFile);
                }
                else {
                    msdk_printf(MSDK_STRING("error: option '-o' expects an argument\n"));
                }
                break;
            case MSDK_CHAR('?'):
                PrintHelp(strInput[0], NULL);
                return MFX_ERR_UNSUPPORTED;
            default:
                {
                    std::basic_stringstream<msdk_char> stream;
                    stream << MSDK_STRING("Unknown option: ") << strInput[i];
                    PrintHelp(strInput[0], stream.str().c_str());
                    return MFX_ERR_UNSUPPORTED;
                }
            }
        }
    }

    if (0 == msdk_strlen(pParams->strSrcFile) && MFX_CODEC_CAPTURE != pParams->videoType)
    {
        msdk_printf(MSDK_STRING("error: source file name not found"));
        return MFX_ERR_UNSUPPORTED;
    }

    if (MFX_CODEC_CAPTURE == pParams->videoType)
    {
        if (!pParams->scrWidth || !pParams->scrHeight)
        {
            msdk_printf(MSDK_STRING("error: for screen capture, width and height must be specified manually (-scr:w and -scr:h)"));
            return MFX_ERR_UNSUPPORTED;
        }
    }
    else if (pParams->scrWidth || pParams->scrHeight)
    {
        msdk_printf(MSDK_STRING("error: width and height parameters are supported only by screen capture decoder"));
        return MFX_ERR_UNSUPPORTED;
    }

    if ((pParams->mode == MODE_FILE_DUMP) && (0 == msdk_strlen(pParams->strDstFile)))
    {
        msdk_printf(MSDK_STRING("error: destination file name not found"));
        return MFX_ERR_UNSUPPORTED;
    }

    if (MFX_CODEC_MPEG2   != pParams->videoType &&
        MFX_CODEC_AVC     != pParams->videoType &&
        MFX_CODEC_HEVC    != pParams->videoType &&
        MFX_CODEC_VC1     != pParams->videoType &&
        MFX_CODEC_JPEG    != pParams->videoType &&
        MFX_CODEC_CAPTURE != pParams->videoType &&
        CODEC_VP8         != pParams->videoType)
    {
        PrintHelp(strInput[0], MSDK_STRING("Unknown codec"));
        return MFX_ERR_UNSUPPORTED;
    }

    if (pParams->nAsyncDepth == 0)
    {
        pParams->nAsyncDepth = 4; //set by default;
    }

    return MFX_ERR_NONE;
}
Exemplo n.º 10
0
mfxStatus FFmpeg_Reader_ReadNextFrame(mfxBitstream *pBS)
{
    MSDK_CHECK_POINTER(pBS, MFX_ERR_NULL_PTR);

    AVPacket packet;
    bool videoFrameFound = false;

    // Read until video frame is found or no more packets (audio or video) in container.
    while(!videoFrameFound)
    {
        if(!av_read_frame(g_pFormatCtx, &packet))
        {
            if(packet.stream_index == g_videoStreamIdx)
            {
                if(g_videoType == MFX_CODEC_AVC)
                {
                    //
                    // Apply MP4 to H264 Annex B filter on buffer
                    //
                    uint8_t *pOutBuf;
                    int outBufSize;
                    int isKeyFrame = packet.flags & AV_PKT_FLAG_KEY;
                    av_bitstream_filter_filter(g_pBsfc, g_pFormatCtx->streams[g_videoStreamIdx]->codec, NULL, &pOutBuf, &outBufSize, packet.data, packet.size, isKeyFrame);

                    // Note: this approach duplicates the SPS and PPS in the stream, but that does not appear to cause any issues

                    //
                    // Copy filtered buffer to bitstream
                    //
                    memmove(pBS->Data, pBS->Data + pBS->DataOffset, pBS->DataLength);
                    pBS->DataOffset = 0;
                    memcpy(pBS->Data + pBS->DataLength, pOutBuf, outBufSize);
                    pBS->DataLength += outBufSize; 

                    av_free(pOutBuf);
                }
                else  // MPEG2
                {
                    memmove(pBS->Data, pBS->Data + pBS->DataOffset, pBS->DataLength);
                    pBS->DataOffset = 0;
                    memcpy(pBS->Data + pBS->DataLength, packet.data, packet.size);
                    pBS->DataLength += packet.size; 
                }

                // We are required to tell MSDK that a complete frame is in the bitstream!
                pBS->DataFlag = MFX_BITSTREAM_COMPLETE_FRAME;

                // Save PTS timestamp in stream and in PTS window vector
                //   - DTS is discarded since it is not needed here
                pBS->TimeStamp = packet.pts;
                g_ptsStack.push_back(pBS->TimeStamp);

                videoFrameFound = true;
            }
#ifdef PROCESS_AUDIO
            else if(packet.stream_index == g_audioStreamIdx)
            {
                // Write the unmodified compressed frame to the media file
                //   - Since this function is also called while retrieving the video stream header, before the
                //     muxer context (g_pFormatCtxMux) is available, any audio frame preceding the first video frame is dropped
                //     (this limitation should be addressed in a future code revision...)
                if(g_pFormatCtxMux) {
                    // Rescale audio time base (likely not needed...)
                    packet.pts			= av_rescale_q(packet.pts, g_audio_dec_time_base, g_pAudioStreamMux->time_base);
                    packet.dts			= packet.pts;
                    packet.stream_index	= g_audioStreamMuxIdx;

                    //double realPTS = av_q2d(g_pAudioStreamMux->time_base) * packet.pts;
                    //printf("PTS A: %6lld (%.3f), DTS: %6lld\n", packet.pts, realPTS, packet.dts);

                    // Write unmodified compressed sample to destination container
                    if (av_interleaved_write_frame(g_pFormatCtxMux, &packet)) {
                        printf("FFMPEG: Error while writing audio frame\n");
                        return MFX_ERR_UNKNOWN;
                    }
                }
            }
#endif

            // Free the packet that was allocated by av_read_frame
            av_free_packet(&packet);
        }
        else
        {
            return MFX_ERR_MORE_DATA;  // Indicates that we reached end of container and to stop video decode
        }
    }

    return MFX_ERR_NONE;
}
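
The memmove/memcpy sequence above is the usual pattern for appending a compressed frame to an mfxBitstream: shift any unconsumed data to the front of the buffer, then copy the new payload behind it. Below is only a minimal helper sketch of that pattern with an explicit capacity check added; AppendToBitstream is a hypothetical name and assumes pBS->MaxLength reflects the allocated size of pBS->Data.

static mfxStatus AppendToBitstream(mfxBitstream *pBS, const mfxU8 *data, mfxU32 size)
{
    if (!pBS || !data)
        return MFX_ERR_NULL_PTR;

    // Move any unconsumed data to the start of the buffer
    memmove(pBS->Data, pBS->Data + pBS->DataOffset, pBS->DataLength);
    pBS->DataOffset = 0;

    if (pBS->DataLength + size > pBS->MaxLength)
        return MFX_ERR_NOT_ENOUGH_BUFFER;    // caller must grow pBS->Data first

    memcpy(pBS->Data + pBS->DataLength, data, size);
    pBS->DataLength += size;
    return MFX_ERR_NONE;
}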
Exemplo n.º 11
0
mfxStatus CQuickSyncDecoder::UnlockFrame(mfxFrameSurface1* pSurface, mfxFrameData* pFrameData)
{
    MSDK_CHECK_POINTER(pSurface, MFX_ERR_NULL_PTR);
    MSDK_CHECK_POINTER(pFrameData, MFX_ERR_NULL_PTR);
    return m_pFrameAllocator->Unlock(m_pFrameAllocator, pSurface->Data.MemId, pFrameData);
}
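
UnlockFrame above has a natural locking counterpart. Assuming the same allocator member, the matching LockFrame would presumably look like this (a sketch, not copied from the source):

mfxStatus CQuickSyncDecoder::LockFrame(mfxFrameSurface1* pSurface, mfxFrameData* pFrameData)
{
    MSDK_CHECK_POINTER(pSurface, MFX_ERR_NULL_PTR);
    MSDK_CHECK_POINTER(pFrameData, MFX_ERR_NULL_PTR);
    return m_pFrameAllocator->Lock(m_pFrameAllocator, pSurface->Data.MemId, pFrameData);
}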
Exemplo n.º 12
0
mfxStatus CQuickSyncDecoder::CreateAllocator()
{
    if (m_pFrameAllocator != NULL)
        return MFX_ERR_NONE;

    MSDK_TRACE("QsDecoder: CreateAllocator\n");

    ASSERT(m_pVideoParams != NULL);
    if (NULL == m_pVideoParams)
        return MFX_ERR_NOT_INITIALIZED;

    std::auto_ptr<mfxAllocatorParams> pParam(NULL);
    mfxStatus sts = MFX_ERR_NONE;

    // Setup allocator - HW acceleration
    if (m_bUseD3DAlloc)
    {
        m_pVideoParams->IOPattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY | MFX_IOPATTERN_IN_VIDEO_MEMORY;
        int nAdapterID = GetMSDKAdapterNumber(*m_mfxVideoSession);
        // D3D11 HW device
        if (m_bUseD3D11Alloc)
        {
#if MFX_D3D11_SUPPORT
            // HW device must be initialized early - within session init.
            // If DecodeHeader is called before session->SetHandle, SetHandle will fail.
            ASSERT(m_HwDevice);
            MSDK_CHECK_POINTER(m_HwDevice, MFX_ERR_NULL_PTR);

            D3D11AllocatorParams* p = new D3D11AllocatorParams;
            p->pDevice = (ID3D11Device*)m_HwDevice->GetHandle(MFX_HANDLE_D3D11_DEVICE);
            pParam.reset(p);
            m_pFrameAllocator = new D3D11FrameAllocator();
#endif
        }
        // D3D9 HW device
        else
        {
            // Having the D3D9 device manager from the renderer allows working in full screen exclusive mode
            // This parameter can be NULL for other usages
            m_HwDevice = new CD3D9Device(m_pRendererD3dDeviceManager);
            if (MSDK_FAILED(sts = m_HwDevice->Init(nAdapterID)))
            {
                MSDK_TRACE("QsDecoder: D3D9 init have failed!\n");
                MSDK_SAFE_DELETE(m_HwDevice);
                return sts;
            }

            // Set the pointer to the HW device (or device manager) to the session
            mfxHDL h = m_HwDevice->GetHandle(MFX_HANDLE_D3D9_DEVICE_MANAGER);
            sts = m_mfxVideoSession->SetHandle(MFX_HANDLE_D3D9_DEVICE_MANAGER, h);
            MSDK_CHECK_NOT_EQUAL(sts, MFX_ERR_NONE, sts);

            D3DAllocatorParams* p = new D3DAllocatorParams;
            p->pManager = (IDirect3DDeviceManager9*)h;
            pParam.reset(p);
            m_pFrameAllocator = new D3DFrameAllocator();
        }
    }
    // Setup allocator - No HW acceleration
    else
    {
        m_bUseD3DAlloc = false;
        m_pVideoParams->IOPattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY | MFX_IOPATTERN_IN_SYSTEM_MEMORY;
        m_pFrameAllocator = new SysMemFrameAllocator();
    }

    sts = m_pFrameAllocator->Init(pParam.get());
    if (MSDK_SUCCEEDED(sts))
    {
        // Note - setting the session allocator can be done only once per session!
        sts = m_mfxVideoSession->SetFrameAllocator(m_pFrameAllocator);
        if (MSDK_FAILED(sts))
        {
            MSDK_TRACE("QsDecoder: Session SetFrameAllocator failed!\n");    
        }
    }
    else
    // Allocator failed to initialize
    {
        MSDK_TRACE("QsDecoder: Allocator Init failed!\n");

        MSDK_SAFE_DELETE(m_pFrameAllocator);
        ASSERT(false);
    }

    return sts;
}
Exemplo n.º 13
0
mfxStatus CQuickSyncDecoder::InternalReset(mfxVideoParam* pVideoParams, mfxU32 nPitch, bool bInited)
{
    MSDK_CHECK_POINTER(pVideoParams, MFX_ERR_NULL_PTR);
    MSDK_CHECK_POINTER(m_pmfxDEC, MFX_ERR_NOT_INITIALIZED);
    
    mfxStatus sts = MFX_ERR_NONE;
    m_pVideoParams = pVideoParams;

    if (NULL == m_pFrameAllocator)
    {
        bInited = false;
    }

    // Reset decoder
    if (bInited)
    {
        sts = m_pmfxDEC->Reset(pVideoParams);
        // Need to reset the frame allocator
        if (MSDK_FAILED(sts))
        {
            m_pmfxDEC->Close();
            FreeFrameAllocator();
            bInited = false;
        }
        
        if (m_pFrameSurfaces != NULL)
        {
            // Another VC1 decoder + VPP bug workaround
            for (int i = 0; i < m_nRequiredFramesNum; ++i)
            {
                m_pFrameSurfaces[i].Data.Locked = 0;
                m_LockedSurfaces[i] = 0;
            }
        }
    }

    // Full init
    if (!bInited)
    {
        // Setup allocator - will initialize D3D if needed
        sts = InitFrameAllocator(pVideoParams, nPitch);
        MSDK_CHECK_RESULT_P_RET(sts, MFX_ERR_NONE);

        // Init MSDK decoder
        sts = m_pmfxDEC->Init(pVideoParams);
        switch (sts)
        {
        case MFX_ERR_NONE:
            MSDK_TRACE("QsDecoder: decoder Init is successful\n");
            break;
        case MFX_WRN_PARTIAL_ACCELERATION:
            MSDK_TRACE("QsDecoder: decoder Init is successful w/o HW acceleration\n");
            m_bHwAcceleration = false;
            break;
        case MFX_WRN_INCOMPATIBLE_VIDEO_PARAM:
            MSDK_TRACE("QsDecoder: decoder Init is successful - wrong video parameters\n");
            break;
        default:
            MSDK_TRACE("QsDecoder: decoder Init has failed!\n");
            break;
        }
    }

    MSDK_IGNORE_MFX_STS(sts, MFX_WRN_INCOMPATIBLE_VIDEO_PARAM);
    MSDK_IGNORE_MFX_STS(sts, MFX_WRN_PARTIAL_ACCELERATION);
    return sts;
}
Exemplo n.º 14
0
int main()
{  
    mfxStatus sts = MFX_ERR_NONE;

    mfxU16 inputWidth = 1920;
    mfxU16 inputHeight = 1080;

    // =====================================================================
    // Intel Media SDK encode pipeline setup
    // - In this example we are encoding an AVC (H.264) stream
    // - Video memory surfaces are used
    //

    // Open input YV12 YUV file
    FILE* fSource;
    fopen_s(&fSource, "bbb1920x1080.yuv", "rb");
    MSDK_CHECK_POINTER(fSource, MFX_ERR_NULL_PTR);

    // Create output elementary stream (ES) H.264 file
    FILE* fSink;
    fopen_s(&fSink, "test_d3d.264", "wb");
    MSDK_CHECK_POINTER(fSink, MFX_ERR_NULL_PTR);

    // Initialize Media SDK session
    // - MFX_IMPL_AUTO_ANY selects HW acceleration if available (on any adapter)
    // - Version 1.0 is selected for greatest backwards compatibility.
    //   If more recent API features are needed, change the version accordingly
    mfxIMPL impl = MFX_IMPL_AUTO_ANY;
#ifdef DX11_D3D
    impl |= MFX_IMPL_VIA_D3D11;
#endif
    mfxVersion ver = {0, 1};
    MFXVideoSession mfxSession;
    sts = mfxSession.Init(impl, &ver);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);


    // Create DirectX device context
    mfxHDL deviceHandle;
    sts = CreateHWDevice(mfxSession, &deviceHandle, NULL);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);   

    // Provide device manager to Media SDK
    sts = mfxSession.SetHandle(DEVICE_MGR_TYPE, deviceHandle);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);   

    mfxFrameAllocator mfxAllocator;
    mfxAllocator.Alloc	= simple_alloc;
    mfxAllocator.Free	= simple_free;
    mfxAllocator.Lock	= simple_lock;
    mfxAllocator.Unlock = simple_unlock;
    mfxAllocator.GetHDL = simple_gethdl;

    // When using video memory we must provide Media SDK with an external allocator 
    sts = mfxSession.SetFrameAllocator(&mfxAllocator);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);


    // Initialize encoder parameters
    mfxVideoParam mfxEncParams;
    memset(&mfxEncParams, 0, sizeof(mfxEncParams));
    mfxEncParams.mfx.CodecId                    = MFX_CODEC_AVC;
    mfxEncParams.mfx.TargetUsage                = MFX_TARGETUSAGE_BALANCED;
    mfxEncParams.mfx.TargetKbps                 = 2000;
    mfxEncParams.mfx.RateControlMethod          = MFX_RATECONTROL_VBR; 
    mfxEncParams.mfx.FrameInfo.FrameRateExtN    = 30;
    mfxEncParams.mfx.FrameInfo.FrameRateExtD    = 1;
    mfxEncParams.mfx.FrameInfo.FourCC           = MFX_FOURCC_NV12;
    mfxEncParams.mfx.FrameInfo.ChromaFormat     = MFX_CHROMAFORMAT_YUV420;
    mfxEncParams.mfx.FrameInfo.PicStruct        = MFX_PICSTRUCT_PROGRESSIVE;
    mfxEncParams.mfx.FrameInfo.CropX            = 0; 
    mfxEncParams.mfx.FrameInfo.CropY            = 0;
    mfxEncParams.mfx.FrameInfo.CropW            = inputWidth;
    mfxEncParams.mfx.FrameInfo.CropH            = inputHeight;
    // Width must be a multiple of 16 
    // Height must be a multiple of 16 in case of frame picture and a multiple of 32 in case of field picture
    mfxEncParams.mfx.FrameInfo.Width  = MSDK_ALIGN16(inputWidth);
    mfxEncParams.mfx.FrameInfo.Height = (MFX_PICSTRUCT_PROGRESSIVE == mfxEncParams.mfx.FrameInfo.PicStruct)?
        MSDK_ALIGN16(inputHeight) : MSDK_ALIGN32(inputHeight);
    
    mfxEncParams.IOPattern = MFX_IOPATTERN_IN_VIDEO_MEMORY;
    
    // Create Media SDK encoder
    MFXVideoENCODE mfxENC(mfxSession); 

    // Validate video encode parameters (optional)
    // - In this example the validation result is written to the same structure
    // - MFX_WRN_INCOMPATIBLE_VIDEO_PARAM is returned if some of the video parameters are not supported;
    //   in that case the encoder will select suitable parameters closest to the requested configuration
    sts = mfxENC.Query(&mfxEncParams, &mfxEncParams);
    MSDK_IGNORE_MFX_STS(sts, MFX_WRN_INCOMPATIBLE_VIDEO_PARAM); 
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    // Query number of required surfaces for encoder
    mfxFrameAllocRequest EncRequest;
    memset(&EncRequest, 0, sizeof(EncRequest));
    sts = mfxENC.QueryIOSurf(&mfxEncParams, &EncRequest);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);             

#ifdef DX11_D3D
    EncRequest.Type |= WILL_WRITE; // Hint to DX11 memory handler that application will write data to input surfaces
#endif

    // Allocate required surfaces
    mfxFrameAllocResponse mfxResponse;
    sts = mfxAllocator.Alloc(mfxAllocator.pthis, &EncRequest, &mfxResponse);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    mfxU16 nEncSurfNum = mfxResponse.NumFrameActual;

    // Allocate surface headers (mfxFrameSurface1) for encoder
    mfxFrameSurface1** pmfxSurfaces = new mfxFrameSurface1*[nEncSurfNum];
    MSDK_CHECK_POINTER(pmfxSurfaces, MFX_ERR_MEMORY_ALLOC);       
    for (int i = 0; i < nEncSurfNum; i++)
    {
        pmfxSurfaces[i] = new mfxFrameSurface1;
        memset(pmfxSurfaces[i], 0, sizeof(mfxFrameSurface1));
        memcpy(&(pmfxSurfaces[i]->Info), &(mfxEncParams.mfx.FrameInfo), sizeof(mfxFrameInfo));
        pmfxSurfaces[i]->Data.MemId = mfxResponse.mids[i]; // MID (memory id) represents one D3D NV12 surface

#ifndef ENABLE_INPUT
        // When simulating direct access to frames, we initialize the allocated surfaces with a default pattern
        // - For true benchmark comparisons to async workloads, all surfaces must have the same data
#ifndef DX11_D3D
        IDirect3DSurface9 *pSurface;
        D3DSURFACE_DESC desc;
        D3DLOCKED_RECT locked;
        pSurface = (IDirect3DSurface9 *)mfxResponse.mids[i];
        pSurface->GetDesc(&desc);
        pSurface->LockRect(&locked, 0, D3DLOCK_NOSYSLOCK);
        memset((mfxU8 *)locked.pBits, 100, desc.Height*locked.Pitch);  // Y plane
        memset((mfxU8 *)locked.pBits + desc.Height * locked.Pitch, 50, (desc.Height*locked.Pitch)/2);  // UV plane
        pSurface->UnlockRect();
#else
        // For now, just leave D3D11 surface data uninitialized
#endif
#endif
    }  


    // Initialize the Media SDK encoder
    sts = mfxENC.Init(&mfxEncParams);
    MSDK_IGNORE_MFX_STS(sts, MFX_WRN_PARTIAL_ACCELERATION);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);    

    // Retrieve video parameters selected by encoder.
    // - BufferSizeInKB parameter is required to set bit stream buffer size
    mfxVideoParam par;
    memset(&par, 0, sizeof(par));
    sts = mfxENC.GetVideoParam(&par);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts); 

    // Prepare Media SDK bit stream buffer
    mfxBitstream mfxBS; 
    memset(&mfxBS, 0, sizeof(mfxBS));
    mfxBS.MaxLength = par.mfx.BufferSizeInKB * 1000;
    mfxBS.Data = new mfxU8[mfxBS.MaxLength];
    MSDK_CHECK_POINTER(mfxBS.Data, MFX_ERR_MEMORY_ALLOC);


    // ===================================
    // Start encoding the frames
    //
 
#ifdef ENABLE_BENCHMARK
    LARGE_INTEGER tStart, tEnd;
    QueryPerformanceFrequency(&tStart);
    double freq = (double)tStart.QuadPart;
    QueryPerformanceCounter(&tStart);
#endif

    int nEncSurfIdx = 0;
    mfxSyncPoint syncp;
    mfxU32 nFrame = 0;

    //
    // Stage 1: Main encoding loop
    //
    while (MFX_ERR_NONE <= sts || MFX_ERR_MORE_DATA == sts)        
    {        
        nEncSurfIdx = GetFreeSurfaceIndex(pmfxSurfaces, nEncSurfNum); // Find free frame surface  
        if (MFX_ERR_NOT_FOUND == nEncSurfIdx)
            return MFX_ERR_MEMORY_ALLOC;

        // Surface locking is required when reading/writing D3D surfaces
        sts = mfxAllocator.Lock(mfxAllocator.pthis, pmfxSurfaces[nEncSurfIdx]->Data.MemId, &(pmfxSurfaces[nEncSurfIdx]->Data));
        MSDK_BREAK_ON_ERROR(sts);

        sts = LoadRawFrame(pmfxSurfaces[nEncSurfIdx], fSource);
        MSDK_BREAK_ON_ERROR(sts);

        sts = mfxAllocator.Unlock(mfxAllocator.pthis, pmfxSurfaces[nEncSurfIdx]->Data.MemId, &(pmfxSurfaces[nEncSurfIdx]->Data));
        MSDK_BREAK_ON_ERROR(sts);
                   
        for (;;)
        {    
            // Encode a frame asynchronously (returns immediately)
            sts = mfxENC.EncodeFrameAsync(NULL, pmfxSurfaces[nEncSurfIdx], &mfxBS, &syncp); 
           
            if (MFX_ERR_NONE < sts && !syncp) // Repeat the call if warning and no output
            {
                if (MFX_WRN_DEVICE_BUSY == sts)                
                    Sleep(1); // Wait if device is busy, then repeat the same call            
            }
            else if (MFX_ERR_NONE < sts && syncp)                 
            {
                sts = MFX_ERR_NONE; // Ignore warnings if output is available  
                break;
            }
            else if (MFX_ERR_NOT_ENOUGH_BUFFER == sts)
            {
                // Allocate more bitstream buffer memory here if needed...
                break;                
            }
            else
                break;
        }  

        if(MFX_ERR_NONE == sts)
        {
            sts = mfxSession.SyncOperation(syncp, 60000); // Synchronize. Wait until encoded frame is ready
            MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

            sts = WriteBitStreamFrame(&mfxBS, fSink);
            MSDK_BREAK_ON_ERROR(sts);

            ++nFrame;
#ifdef ENABLE_OUTPUT
            printf("Frame number: %d\r", nFrame);
#endif
        }
    }

    // MFX_ERR_MORE_DATA means that the input file has ended; proceed to the buffering loop. Exit in case of other errors
    MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_DATA);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
    
    //
    // Stage 2: Retrieve the buffered encoded frames
    //
    while (MFX_ERR_NONE <= sts)
    {       
        for (;;)
        {                
            // Encode a frame asynchronously (returns immediately)
            sts = mfxENC.EncodeFrameAsync(NULL, NULL, &mfxBS, &syncp);  

            if (MFX_ERR_NONE < sts && !syncp) // Repeat the call if warning and no output
            {
                if (MFX_WRN_DEVICE_BUSY == sts)                
                    Sleep(1); // Wait if device is busy, then repeat the same call                 
            }
            else if (MFX_ERR_NONE < sts && syncp)                 
            {
                sts = MFX_ERR_NONE; // Ignore warnings if output is available 
                break;
            }
            else
                break;
        }            

        if(MFX_ERR_NONE == sts)
        {
            sts = mfxSession.SyncOperation(syncp, 60000); // Synchronize. Wait until encoded frame is ready
            MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

            sts = WriteBitStreamFrame(&mfxBS, fSink);
            MSDK_BREAK_ON_ERROR(sts);

            ++nFrame;
#ifdef ENABLE_OUTPUT
            printf("Frame number: %d\r", nFrame);
#endif
        }
    }    

    // MFX_ERR_MORE_DATA indicates that there are no more buffered frames, exit in case of other errors
    MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_DATA);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

#ifdef ENABLE_BENCHMARK
    QueryPerformanceCounter(&tEnd);
    double duration = ((double)tEnd.QuadPart - (double)tStart.QuadPart)  / freq;
    printf("\nExecution time: %3.2fs (%3.2ffps)\n", duration, nFrame/duration);
#endif

    // ===================================================================
    // Clean up resources
    //  - It is recommended to close Media SDK components first, before releasing allocated surfaces, since
    //    some surfaces may still be locked by internal Media SDK resources.
    
    mfxENC.Close();
    // mfxSession closed automatically on destruction

    for (int i = 0; i < nEncSurfNum; i++)
        delete pmfxSurfaces[i];
    MSDK_SAFE_DELETE_ARRAY(pmfxSurfaces);
    MSDK_SAFE_DELETE_ARRAY(mfxBS.Data);

    fclose(fSource);
    fclose(fSink);

    CleanupHWDevice();

    return 0;
}
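
The encode example above calls LoadRawFrame and WriteBitStreamFrame without showing them. The sketches below are hypothetical reconstructions (the tutorial's actual common utilities may differ): the raw reader assumes planar 4:2:0 input with the U plane before the V plane and interleaves the chroma into the NV12 surface while honoring the surface pitch, and the bitstream writer simply flushes the encoded payload to disk.

#include <cstdio>
#include <vector>
#include "mfxvideo.h"

// Hypothetical helper sketch: copy one planar 4:2:0 frame from file into an NV12 surface.
mfxStatus LoadRawFrame(mfxFrameSurface1* pSurface, FILE* fSource)
{
    mfxFrameInfo& info = pSurface->Info;
    mfxFrameData& data = pSurface->Data;
    mfxU16 w = info.CropW, h = info.CropH, pitch = data.Pitch;

    // Luma plane: one row at a time (the surface pitch is usually wider than the frame)
    for (mfxU16 y = 0; y < h; y++)
        if (fread(data.Y + y * pitch, 1, w, fSource) != w)
            return MFX_ERR_MORE_DATA;                           // end of input file

    // Chroma: the file stores planar U and V (I420-style order assumed here),
    // while NV12 expects them interleaved as UVUV... in a single half-height plane
    std::vector<mfxU8> u((w / 2) * (h / 2)), v(u.size());
    if (fread(u.data(), 1, u.size(), fSource) != u.size()) return MFX_ERR_MORE_DATA;
    if (fread(v.data(), 1, v.size(), fSource) != v.size()) return MFX_ERR_MORE_DATA;
    for (mfxU16 y = 0; y < h / 2; y++)
        for (mfxU16 x = 0; x < w / 2; x++)
        {
            data.U[y * pitch + 2 * x]     = u[y * (w / 2) + x]; // U sample
            data.U[y * pitch + 2 * x + 1] = v[y * (w / 2) + x]; // V sample (interleaved)
        }
    return MFX_ERR_NONE;
}

// Hypothetical helper sketch: write the encoded payload and mark the buffer as consumed.
mfxStatus WriteBitStreamFrame(mfxBitstream* pBS, FILE* fSink)
{
    if (fwrite(pBS->Data + pBS->DataOffset, 1, pBS->DataLength, fSink) != pBS->DataLength)
        return MFX_ERR_UNDEFINED_BEHAVIOR;
    pBS->DataLength = 0;
    return MFX_ERR_NONE;
}
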
Exemplo n.º 15
-1
int main()
{
    mfxStatus sts = MFX_ERR_NONE;

    // =====================================================================
    // Intel Media SDK decode pipeline setup
    // - In this example we are decoding an AVC (H.264) stream
    // - For simplicity, system memory surfaces are used to store the decoded frames
    //   (Note that when using HW acceleration D3D surfaces are preferred, for better performance)
    //
    //  - VPP is used to post-process (resize) the frames
    //

    // Open input H.264 elementary stream (ES) file
    FILE* fSource;
    fopen_s(&fSource, "bbb1920x1080.264", "rb");
    MSDK_CHECK_POINTER(fSource, MFX_ERR_NULL_PTR);

    // Create output YUV file
    FILE* fSink;
    fopen_s(&fSink, "dectest_960x540.yuv", "wb");
    MSDK_CHECK_POINTER(fSink, MFX_ERR_NULL_PTR);

    // Initialize Media SDK session
    // - MFX_IMPL_AUTO_ANY selects HW acceleration if available (on any adapter)
    // - Version 1.0 is selected for greatest backwards compatibility.
    //   If more recent API features are needed, change the version accordingly
    mfxIMPL impl = MFX_IMPL_AUTO_ANY;
    mfxVersion ver = {0, 1};
    MFXVideoSession mfxSession;
    sts = mfxSession.Init(impl, &ver);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    // Create Media SDK decoder
    MFXVideoDECODE mfxDEC(mfxSession);
    // Create Media SDK VPP component
    MFXVideoVPP mfxVPP(mfxSession);

    // Set required video parameters for decode
    // - In this example we are decoding an AVC (H.264) stream
    // - For simplicity, system memory surfaces are used to store the decoded frames
    //   (Note that when using HW acceleration D3D surfaces are preferred, for better performance)
    mfxVideoParam mfxVideoParams;
    memset(&mfxVideoParams, 0, sizeof(mfxVideoParams));
    mfxVideoParams.mfx.CodecId = MFX_CODEC_AVC;
    mfxVideoParams.IOPattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;

    // Prepare Media SDK bit stream buffer
    // - Arbitrary buffer size for this example
    mfxBitstream mfxBS;
    memset(&mfxBS, 0, sizeof(mfxBS));
    mfxBS.MaxLength = 1024 * 1024;
    mfxBS.Data = new mfxU8[mfxBS.MaxLength];
    MSDK_CHECK_POINTER(mfxBS.Data, MFX_ERR_MEMORY_ALLOC);

    // Read a chunk of data from stream file into bit stream buffer
    // - Parse bit stream, searching for header and fill video parameters structure
    // - Abort if bit stream header is not found in the first bit stream buffer chunk
    sts = ReadBitStreamData(&mfxBS, fSource);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    sts = mfxDEC.DecodeHeader(&mfxBS, &mfxVideoParams);
    MSDK_IGNORE_MFX_STS(sts, MFX_WRN_PARTIAL_ACCELERATION);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);


    // Initialize VPP parameters
    // - For simplicity, system memory surfaces are used to store the raw frames
    //   (Note that when using HW acceleration D3D surfaces are preferred, for better performance)
    mfxVideoParam VPPParams;
    memset(&VPPParams, 0, sizeof(VPPParams));
    // Input data
    VPPParams.vpp.In.FourCC         = MFX_FOURCC_NV12;
    VPPParams.vpp.In.ChromaFormat   = MFX_CHROMAFORMAT_YUV420;
    VPPParams.vpp.In.CropX          = 0;
    VPPParams.vpp.In.CropY          = 0;
    VPPParams.vpp.In.CropW          = mfxVideoParams.mfx.FrameInfo.CropW;
    VPPParams.vpp.In.CropH          = mfxVideoParams.mfx.FrameInfo.CropH;
    VPPParams.vpp.In.PicStruct      = MFX_PICSTRUCT_PROGRESSIVE;
    VPPParams.vpp.In.FrameRateExtN  = 30;
    VPPParams.vpp.In.FrameRateExtD  = 1;
    // width must be a multiple of 16
    // height must be a multiple of 16 in case of frame picture and a multiple of 32 in case of field picture
    VPPParams.vpp.In.Width  = MSDK_ALIGN16(VPPParams.vpp.In.CropW);
    VPPParams.vpp.In.Height = (MFX_PICSTRUCT_PROGRESSIVE == VPPParams.vpp.In.PicStruct)?
                              MSDK_ALIGN16(VPPParams.vpp.In.CropH) : MSDK_ALIGN32(VPPParams.vpp.In.CropH);
    // Output data
    VPPParams.vpp.Out.FourCC        = MFX_FOURCC_NV12;
    VPPParams.vpp.Out.ChromaFormat  = MFX_CHROMAFORMAT_YUV420;
    VPPParams.vpp.Out.CropX         = 0;
    VPPParams.vpp.Out.CropY         = 0;
    VPPParams.vpp.Out.CropW         = VPPParams.vpp.In.CropW/2;  // Resize to half size resolution
    VPPParams.vpp.Out.CropH         = VPPParams.vpp.In.CropH/2;
    VPPParams.vpp.Out.PicStruct     = MFX_PICSTRUCT_PROGRESSIVE;
    VPPParams.vpp.Out.FrameRateExtN = 30;
    VPPParams.vpp.Out.FrameRateExtD = 1;
    // width must be a multiple of 16
    // height must be a multiple of 16 in case of frame picture and a multiple of 32 in case of field picture
    VPPParams.vpp.Out.Width  = MSDK_ALIGN16(VPPParams.vpp.Out.CropW);
    VPPParams.vpp.Out.Height = (MFX_PICSTRUCT_PROGRESSIVE == VPPParams.vpp.Out.PicStruct)?
                               MSDK_ALIGN16(VPPParams.vpp.Out.CropH) : MSDK_ALIGN32(VPPParams.vpp.Out.CropH);

    VPPParams.IOPattern = MFX_IOPATTERN_IN_SYSTEM_MEMORY | MFX_IOPATTERN_OUT_SYSTEM_MEMORY;

    // Query number of required surfaces for decoder
    mfxFrameAllocRequest DecRequest;
    memset(&DecRequest, 0, sizeof(DecRequest));
    sts = mfxDEC.QueryIOSurf(&mfxVideoParams, &DecRequest);
    MSDK_IGNORE_MFX_STS(sts, MFX_WRN_PARTIAL_ACCELERATION);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    // Query number of required surfaces for VPP
    mfxFrameAllocRequest VPPRequest[2];// [0] - in, [1] - out
    memset(&VPPRequest, 0, sizeof(mfxFrameAllocRequest)*2);
    sts = mfxVPP.QueryIOSurf(&VPPParams, VPPRequest);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);


    // Determine the required number of surfaces for decoder output (VPP input) and for VPP output
    mfxU16 nSurfNumDecVPP = DecRequest.NumFrameSuggested + VPPRequest[0].NumFrameSuggested;
    mfxU16 nSurfNumVPPOut = VPPRequest[1].NumFrameSuggested;


    // Allocate surfaces for decoder and VPP In
    // - Width and height of buffer must be aligned, a multiple of 32
    // - Frame surface array keeps pointers to all surface planes and general frame info
    mfxU16 width = (mfxU16)MSDK_ALIGN32(DecRequest.Info.Width);
    mfxU16 height = (mfxU16)MSDK_ALIGN32(DecRequest.Info.Height);
    mfxU8  bitsPerPixel = 12;  // NV12 format is a 12 bits per pixel format
    mfxU32 surfaceSize = width * height * bitsPerPixel / 8;
    mfxU8* surfaceBuffers = (mfxU8 *)new mfxU8[surfaceSize * nSurfNumDecVPP];

    mfxFrameSurface1** pmfxSurfaces = new mfxFrameSurface1*[nSurfNumDecVPP];
    MSDK_CHECK_POINTER(pmfxSurfaces, MFX_ERR_MEMORY_ALLOC);
    for (int i = 0; i < nSurfNumDecVPP; i++)
    {
        pmfxSurfaces[i] = new mfxFrameSurface1;
        memset(pmfxSurfaces[i], 0, sizeof(mfxFrameSurface1));
        memcpy(&(pmfxSurfaces[i]->Info), &(mfxVideoParams.mfx.FrameInfo), sizeof(mfxFrameInfo));
        pmfxSurfaces[i]->Data.Y = &surfaceBuffers[surfaceSize * i];
        pmfxSurfaces[i]->Data.U = pmfxSurfaces[i]->Data.Y + width * height;
        pmfxSurfaces[i]->Data.V = pmfxSurfaces[i]->Data.U + 1;
        pmfxSurfaces[i]->Data.Pitch = width;
    }

    // Allocate surfaces for VPP Out
    // - Width and height of buffer must be aligned, a multiple of 32
    // - Frame surface array keeps pointers to all surface planes and general frame info
    width = (mfxU16)MSDK_ALIGN32(VPPRequest[1].Info.Width);
    height = (mfxU16)MSDK_ALIGN32(VPPRequest[1].Info.Height);
    bitsPerPixel = 12;  // NV12 format is a 12 bits per pixel format
    surfaceSize = width * height * bitsPerPixel / 8;
    mfxU8* surfaceBuffers2 = (mfxU8 *)new mfxU8[surfaceSize * nSurfNumVPPOut];

    mfxFrameSurface1** pmfxSurfaces2 = new mfxFrameSurface1*[nSurfNumVPPOut];
    MSDK_CHECK_POINTER(pmfxSurfaces2, MFX_ERR_MEMORY_ALLOC);
    for (int i = 0; i < nSurfNumVPPOut; i++)
    {
        pmfxSurfaces2[i] = new mfxFrameSurface1;
        memset(pmfxSurfaces2[i], 0, sizeof(mfxFrameSurface1));
        memcpy(&(pmfxSurfaces2[i]->Info), &(VPPParams.vpp.Out), sizeof(mfxFrameInfo));
        pmfxSurfaces2[i]->Data.Y = &surfaceBuffers2[surfaceSize * i];
        pmfxSurfaces2[i]->Data.U = pmfxSurfaces2[i]->Data.Y + width * height;
        pmfxSurfaces2[i]->Data.V = pmfxSurfaces2[i]->Data.U + 1;
        pmfxSurfaces2[i]->Data.Pitch = width;
    }

    // Initialize the Media SDK decoder
    sts = mfxDEC.Init(&mfxVideoParams);
    MSDK_IGNORE_MFX_STS(sts, MFX_WRN_PARTIAL_ACCELERATION);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    // Initialize Media SDK VPP
    sts = mfxVPP.Init(&VPPParams);
    MSDK_IGNORE_MFX_STS(sts, MFX_WRN_PARTIAL_ACCELERATION);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);


    // ===============================================================
    // Start decoding the frames from the stream
    //

#ifdef ENABLE_BENCHMARK
    LARGE_INTEGER tStart, tEnd;
    QueryPerformanceFrequency(&tStart);
    double freq = (double)tStart.QuadPart;
    QueryPerformanceCounter(&tStart);
#endif

    mfxSyncPoint syncpD;
    mfxSyncPoint syncpV;
    mfxFrameSurface1* pmfxOutSurface = NULL;
    int nIndex = 0;
    int nIndex2 = 0;
    mfxU32 nFrame = 0;

    //
    // Stage 1: Main decoding loop
    //
    while (MFX_ERR_NONE <= sts || MFX_ERR_MORE_DATA == sts || MFX_ERR_MORE_SURFACE == sts)
    {
        if (MFX_WRN_DEVICE_BUSY == sts)
            Sleep(1); // Wait if device is busy, then repeat the same call to DecodeFrameAsync

        if (MFX_ERR_MORE_DATA == sts)
        {
            sts = ReadBitStreamData(&mfxBS, fSource); // Read more data into input bit stream
            MSDK_BREAK_ON_ERROR(sts);
        }

        if (MFX_ERR_MORE_SURFACE == sts || MFX_ERR_NONE == sts)
        {
            nIndex = GetFreeSurfaceIndex(pmfxSurfaces, nSurfNumDecVPP); // Find free frame surface
            if (MFX_ERR_NOT_FOUND == nIndex)
                return MFX_ERR_MEMORY_ALLOC;
        }

        // Decode a frame asynchronously (returns immediately)
        sts = mfxDEC.DecodeFrameAsync(&mfxBS, pmfxSurfaces[nIndex], &pmfxOutSurface, &syncpD);

        // Ignore warnings if output is available,
        // if no output and no action required just repeat the DecodeFrameAsync call
        if (MFX_ERR_NONE < sts && syncpD)
            sts = MFX_ERR_NONE;


        if (MFX_ERR_NONE == sts)
        {
            nIndex2 = GetFreeSurfaceIndex(pmfxSurfaces2, nSurfNumVPPOut); // Find free frame surface
            if (MFX_ERR_NOT_FOUND == nIndex2)
                return MFX_ERR_MEMORY_ALLOC;

            for (;;)
            {
                // Process a frame asynchronously (returns immediately)
                sts = mfxVPP.RunFrameVPPAsync(pmfxOutSurface, pmfxSurfaces2[nIndex2], NULL, &syncpV);

                if (MFX_ERR_NONE < sts && !syncpV) // repeat the call if warning and no output
                {
                    if (MFX_WRN_DEVICE_BUSY == sts)
                        Sleep(1); // wait if device is busy
                }
                else if (MFX_ERR_NONE < sts && syncpV)
                {
                    sts = MFX_ERR_NONE; // ignore warnings if output is available
                    break;
                }
                else
                    break; // not a warning
            }

            // VPP needs more data, let decoder decode another frame as input
            if (MFX_ERR_MORE_DATA == sts)
            {
                continue;
            }
            else if (MFX_ERR_MORE_SURFACE == sts)
            {
                // Not relevant for the illustrated workload! Therefore not handled.
                // Relevant for cases when VPP produces more frames at output than consumes at input. E.g. framerate conversion 30 fps -> 60 fps
                break;
            }
            else
                MSDK_BREAK_ON_ERROR(sts);
        }


        if (MFX_ERR_NONE == sts)
            sts = mfxSession.SyncOperation(syncpV, 60000); // Synchronize. Wait until the decoded and processed frame is ready

        if (MFX_ERR_NONE == sts)
        {
            ++nFrame;
#ifdef ENABLE_OUTPUT
            sts = WriteRawFrame(pmfxSurfaces2[nIndex2], fSink);
            MSDK_BREAK_ON_ERROR(sts);

            printf("Frame number: %d\r", nFrame);
#endif
        }
    }

    // MFX_ERR_MORE_DATA means that the file has ended; proceed to the buffering loop. Exit in case of other errors
    MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_DATA);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    //
    // Stage 2: Retrieve the buffered decoded frames
    //
    while (MFX_ERR_NONE <= sts || MFX_ERR_MORE_SURFACE == sts)
    {
        if (MFX_WRN_DEVICE_BUSY == sts)
            Sleep(1); // Wait if device is busy, then repeat the same call to DecodeFrameAsync

        nIndex = GetFreeSurfaceIndex(pmfxSurfaces, nSurfNumDecVPP); // Find free frame surface
        if (MFX_ERR_NOT_FOUND == nIndex)
            return MFX_ERR_MEMORY_ALLOC;

        // Decode a frame asynchronously (returns immediately)
        sts = mfxDEC.DecodeFrameAsync(NULL, pmfxSurfaces[nIndex], &pmfxOutSurface, &syncpD);

        // Ignore warnings if output is available,
        // if no output and no action required just repeat the DecodeFrameAsync call
        if (MFX_ERR_NONE < sts && syncpD)
            sts = MFX_ERR_NONE;


        if (MFX_ERR_NONE == sts)
        {
            nIndex2 = GetFreeSurfaceIndex(pmfxSurfaces2, nSurfNumVPPOut); // Find free frame surface
            if (MFX_ERR_NOT_FOUND == nIndex2)
                return MFX_ERR_MEMORY_ALLOC;

            for (;;)
            {
                // Process a frame asynchronously (returns immediately)
                sts = mfxVPP.RunFrameVPPAsync(pmfxOutSurface, pmfxSurfaces2[nIndex2], NULL, &syncpV);

                if (MFX_ERR_NONE < sts && !syncpV) // repeat the call if warning and no output
                {
                    if (MFX_WRN_DEVICE_BUSY == sts)
                        Sleep(1); // wait if device is busy
                }
                else if (MFX_ERR_NONE < sts && syncpV)
                {
                    sts = MFX_ERR_NONE; // ignore warnings if output is available
                    break;
                }
                else
                    break; // not a warning
            }

            // VPP needs more data, let decoder decode another frame as input
            if (MFX_ERR_MORE_DATA == sts)
            {
                continue;
            }
            else if (MFX_ERR_MORE_SURFACE == sts)
            {
                // Not relevant for the illustrated workload! Therefore not handled.
                // Relevant for cases when VPP produces more frames at output than consumes at input. E.g. framerate conversion 30 fps -> 60 fps
                break;
            }
            else
                MSDK_BREAK_ON_ERROR(sts);
        }


        if (MFX_ERR_NONE == sts)
            sts = mfxSession.SyncOperation(syncpV, 60000); // Synchronize. Wait until the decoded and processed frame is ready

        if (MFX_ERR_NONE == sts)
        {
            ++nFrame;
#ifdef ENABLE_OUTPUT
            sts = WriteRawFrame(pmfxSurfaces2[nIndex2], fSink);
            MSDK_BREAK_ON_ERROR(sts);

            printf("Frame number: %d\r", nFrame);
#endif
        }
    }

    // MFX_ERR_MORE_DATA means that the decoder is done with buffered frames; proceed to the VPP buffering loop. Exit in case of other errors
    MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_DATA);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    //
    // Stage 3: Retrieve the buffered VPP frames
    //
    while (MFX_ERR_NONE <= sts)
    {
        nIndex2 = GetFreeSurfaceIndex(pmfxSurfaces2, nSurfNumVPPOut); // Find free frame surface
        if (MFX_ERR_NOT_FOUND == nIndex2)
            return MFX_ERR_MEMORY_ALLOC;

        // Process a frame asynchronously (returns immediately)
        sts = mfxVPP.RunFrameVPPAsync(NULL, pmfxSurfaces2[nIndex2], NULL, &syncpV);
        MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_SURFACE);
        MSDK_BREAK_ON_ERROR(sts);

        sts = mfxSession.SyncOperation(syncpV, 60000); // Synchronize. Wait until frame processing is ready
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

        ++nFrame;
#ifdef ENABLE_OUTPUT
        sts = WriteRawFrame(pmfxSurfaces2[nIndex2], fSink);
        MSDK_BREAK_ON_ERROR(sts);

        printf("Frame number: %d\r", nFrame);
#endif
    }

    // MFX_ERR_MORE_DATA indicates that all buffered frames have been fetched; exit in case of other errors
    MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_DATA);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

#ifdef ENABLE_BENCHMARK
    QueryPerformanceCounter(&tEnd);
    double duration = ((double)tEnd.QuadPart - (double)tStart.QuadPart)  / freq;
    printf("\nExecution time: %3.2fs (%3.2ffps)\n", duration, nFrame/duration);
#endif

    // ===================================================================
    // Clean up resources
    //  - It is recommended to close Media SDK components first, before releasing allocated surfaces, since
    //    some surfaces may still be locked by internal Media SDK resources.

    mfxDEC.Close();
    mfxVPP.Close();
    // mfxSession closed automatically on destruction

    for (int i = 0; i < nSurfNumDecVPP; i++)
        delete pmfxSurfaces[i];
    for (int i = 0; i < nSurfNumVPPOut; i++)
        delete pmfxSurfaces2[i];
    MSDK_SAFE_DELETE_ARRAY(pmfxSurfaces);
    MSDK_SAFE_DELETE_ARRAY(pmfxSurfaces2);
    MSDK_SAFE_DELETE_ARRAY(surfaceBuffers);
    MSDK_SAFE_DELETE_ARRAY(surfaceBuffers2);
    MSDK_SAFE_DELETE_ARRAY(mfxBS.Data);

    fclose(fSource);
    fclose(fSink);

    return 0;
}
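
Similarly, GetFreeSurfaceIndex and ReadBitStreamData are assumed by the examples above but not listed. The following are hypothetical sketches of what such helpers typically look like in these tutorials (the actual common utilities may differ): the first scans the surface pool for an entry the SDK is not holding locked, and the second compacts any unparsed bitstream remainder before appending fresh data from the file.

#include <cstdio>
#include <cstring>
#include "mfxvideo.h"

// Hypothetical helper sketch: return the index of the first surface not locked by the SDK.
int GetFreeSurfaceIndex(mfxFrameSurface1** pSurfacesPool, mfxU16 nPoolSize)
{
    if (pSurfacesPool)
        for (mfxU16 i = 0; i < nPoolSize; i++)
            if (0 == pSurfacesPool[i]->Data.Locked)
                return i;                     // surface is free for reuse
    return MFX_ERR_NOT_FOUND;
}

// Hypothetical helper sketch: refill the bitstream buffer, preserving unparsed data.
mfxStatus ReadBitStreamData(mfxBitstream* pBS, FILE* fSource)
{
    // Move the unconsumed tail of the buffer to the front before appending new data
    memmove(pBS->Data, pBS->Data + pBS->DataOffset, pBS->DataLength);
    pBS->DataOffset = 0;

    mfxU32 nBytesRead = (mfxU32)fread(pBS->Data + pBS->DataLength, 1,
                                      pBS->MaxLength - pBS->DataLength, fSource);
    if (0 == nBytesRead)
        return MFX_ERR_MORE_DATA;             // end of input stream
    pBS->DataLength += nBytesRead;
    return MFX_ERR_NONE;
}
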