static enum AVPixelFormat ADM_LIBVA_getFormat(struct AVCodecContext *avctx,  const enum AVPixelFormat *fmt)
{
    int i;
    ADM_info("[LIBVA]: GetFormat\n");
    AVCodecID id=AV_CODEC_ID_NONE;
    AVPixelFormat c;
    AVPixelFormat outPix;
    for(i=0;fmt[i]!=AV_PIX_FMT_NONE;i++)
    {
        c=fmt[i];
        char name[300]={0};
        av_get_pix_fmt_string(name,sizeof(name),c);
        ADM_info("[LIBVA]: Evaluating PIX_FMT %d,%s\n",c,name);  
        av_get_codec_tag_string(name,sizeof(name),avctx->codec_id);
        ADM_info("\t  Evaluating codec %d,%s\n",avctx->codec_id,name);  
        
        if(c!=AV_PIX_FMT_VAAPI_VLD) continue;
#define FMT_V_CHECK(x,y)      case AV_CODEC_ID_##x:   outPix=AV_PIX_FMT_VAAPI_VLD;id=avctx->codec_id;break;
        
        
        switch(avctx->codec_id)  //AV_CODEC_ID_H265
        {
            FMT_V_CHECK(H264,H264)
            FMT_V_CHECK(H265,H265)
            FMT_V_CHECK(MPEG1VIDEO,MPEG1)
            FMT_V_CHECK(MPEG2VIDEO,MPEG2)
            FMT_V_CHECK(WMV3,WMV3)
            FMT_V_CHECK(VC1,VC1)
            FMT_V_CHECK(VP9,VP9)
            default: 
                ADM_info("No hw support for format %d\n",avctx->codec_id);
                continue;
                break;
        }
        break;
    }
    if(id==AV_CODEC_ID_NONE)
    {
        
        return AV_PIX_FMT_NONE;
    }
    // Finish intialization of LIBVA decoder
    const AVHWAccel *accel=ADM_acceleratedDecoderFF::parseHwAccel(outPix,id,AV_PIX_FMT_VAAPI_VLD);
    if(accel)
    {
        ADM_info("Found matching hw accelerator : %s\n",accel->name);
        ADM_info("Successfully setup hw accel\n");
        return AV_PIX_FMT_VAAPI_VLD;
    }
    return AV_PIX_FMT_NONE;
}
// ---------------------------------------------------------------------------
// Example #2
// ---------------------------------------------------------------------------
/**
 * \brief Allocate the frames (and, if needed, the scaling context) used by
 *        the video decoder.
 *
 * When the decoder's output format differs from mDstPixFmt, an swscale
 * context converting between the two is created and the human-readable
 * format names are cached in mVideoStrFormat / mVideoDstFormat for logging.
 * A temp frame in the destination format is always allocated, plus mFrame
 * if it does not exist yet.
 *
 * \param pix_fmt pixel format produced by the decoder
 * \return true on success, false when any allocation fails
 */
bool Capture::allocateVideoDecoderData(AVPixelFormat pix_fmt)
{
	if (pix_fmt != mDstPixFmt)
	{
		char buf[256];

		// av_get_pix_fmt_string() appends extra details after the format
		// name; keep only the token before the first space.
		mVideoStrFormat = std::string(av_get_pix_fmt_string(buf, 256, pix_fmt));
		const std::size_t srcCut = mVideoStrFormat.find(' ');
		if (srcCut != std::string::npos)
			mVideoStrFormat = mVideoStrFormat.substr(0, srcCut);

		mVideoDstFormat.assign(av_get_pix_fmt_string(buf, 256, mDstPixFmt));
		const std::size_t dstCut = mVideoDstFormat.find(' ');
		if (dstCut != std::string::npos)
			mVideoDstFormat = mVideoDstFormat.substr(0, dstCut);

		sgct::MessageHandler::instance()->print(sgct::MessageHandler::NOTIFY_INFO, "Creating video scaling context (%s->%s)\n", mVideoStrFormat.c_str(), mVideoDstFormat.c_str());

		// Context used to convert decoded frames into the destination format.
		mVideoScaleContext = sws_getContext(mWidth, mHeight, pix_fmt,
			mWidth, mHeight, mDstPixFmt,
			SWS_FAST_BILINEAR, nullptr, nullptr, nullptr);
		if (!mVideoScaleContext)
		{
			sgct::MessageHandler::instance()->print(sgct::MessageHandler::NOTIFY_ERROR, "Could not allocate frame convertion context!\n");
			return false;
		}
	}

	// Temp frame receives the converted (destination-format) picture.
	mTempFrame = av_frame_alloc();
	if (!mTempFrame)
	{
		sgct::MessageHandler::instance()->print(sgct::MessageHandler::NOTIFY_ERROR, "Could not allocate temp frame data!\n");
		return false;
	}

	mTempFrame->width = mWidth;
	mTempFrame->height = mHeight;
	mTempFrame->format = mDstPixFmt;

	// Back the temp frame with an image buffer (1-byte alignment, as before).
	if (av_image_alloc(mTempFrame->data, mTempFrame->linesize, mWidth, mHeight, mDstPixFmt, 1) < 0)
	{
		sgct::MessageHandler::instance()->print(sgct::MessageHandler::NOTIFY_ERROR, "Could not allocate temp frame buffer!\n");
		return false;
	}

	// Allocate the decode target frame only once.
	if (!mFrame)
		mFrame = av_frame_alloc();

	if (!mFrame)
	{
		sgct::MessageHandler::instance()->print(sgct::MessageHandler::NOTIFY_ERROR, "Could not allocate frame data!\n");
		return false;
	}

	return true;
}