Example #1
bool VideoEncoder::AVCodecIsSupported(const QString& codec_name) {
	AVCodec *codec = avcodec_find_encoder_by_name(codec_name.toUtf8().constData());
	if(codec == NULL)
		return false;
	if(!av_codec_is_encoder(codec))
		return false;
	if(codec->type != AVMEDIA_TYPE_VIDEO)
		return false;
	for(unsigned int i = 0; i < SUPPORTED_PIXEL_FORMATS.size(); ++i) {
		if(AVCodecSupportsPixelFormat(codec, SUPPORTED_PIXEL_FORMATS[i].m_format))
			return true;
	}
	return false;
}
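Note: Example #1 depends on a helper, AVCodecSupportsPixelFormat(), that is not part of the snippet. A minimal sketch of such a check, assuming only the public AVCodec::pix_fmts array (terminated by AV_PIX_FMT_NONE, and possibly NULL when the codec does not declare its formats), could look like the following; it is an illustration, not the project's actual implementation.

#include <libavcodec/avcodec.h>
#include <stdbool.h>

/* Hypothetical reconstruction: report whether 'codec' advertises 'fmt' in its
 * pix_fmts list. A NULL list means the codec does not declare its formats;
 * this sketch treats that case as "assume supported". */
static bool codec_supports_pix_fmt(const AVCodec *codec, enum AVPixelFormat fmt)
{
	const enum AVPixelFormat *p;
	if (codec->pix_fmts == NULL)
		return true;
	for (p = codec->pix_fmts; *p != AV_PIX_FMT_NONE; ++p) {
		if (*p == fmt)
			return true;
	}
	return false;
}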
Example #2
static struct codec_ent lookup_default(const char* const req,
	struct codec_ent* tbl, size_t nmemb, bool audio)
{
	struct codec_ent res = {.name = req};
	AVCodec** dst = audio ? &res.storage.audio.codec : &res.storage.video.codec;

	if (req){
/* make sure that if the user supplies a name already in the standard table,
 * we get the same prefix setup function */
		for (int i = 0; i < nmemb; i++)
			if (tbl[i].name != NULL && (strcmp(req, tbl[i].name) == 0 ||
				strcmp(req, tbl[i].shortname) == 0) ){
				memcpy(&res, &tbl[i], sizeof(struct codec_ent));
				*dst = avcodec_find_encoder_by_name(res.name);
			}

/* if the codec specified is unknown (to us), let
 * avcodec try to sort it out; return default setup */
		if (*dst == NULL){
			*dst = avcodec_find_encoder_by_name(req);
		}
	}

/* if the user didn't supply an explicit codec, or one was not found,
 * search the table for reasonable default */
	for (int i = 0; i < nmemb && *dst == NULL; i++)
		if (tbl[i].name != NULL && tbl[i].id == 0){
			memcpy(&res, &tbl[i], sizeof(struct codec_ent));
			*dst = avcodec_find_encoder_by_name(tbl[i].name);
		}
		else{
			memcpy(&res, &tbl[i], sizeof(struct codec_ent));
			*dst = avcodec_find_encoder(tbl[i].id);
		}

	return res;
}
Example #3
bool VideoEncoderFFmpegPrivate::open()
{
    if (codec_name.isEmpty()) {
        // copy ctx from muxer
        AVCodec *codec = avcodec_find_decoder(avctx->codec_id);
        AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false);
        return true;
    }
    AVCodec *codec = avcodec_find_encoder_by_name(codec_name.toUtf8().constData());
    if (!codec) {
        qWarning() << "Can not find encoder for codec " << codec_name;
        return false;
    }
    if (avctx) {
        avcodec_free_context(&avctx);
        avctx = 0;
    }
    avctx = avcodec_alloc_context3(codec);
    qDebug("tbc: %f", av_q2d(avctx->time_base));
    avctx->width = width; // coded_width works, why?
    avctx->height = height;
    avctx->pix_fmt = QTAV_PIX_FMT_C(YUV420P);
    avctx->time_base = av_d2q(1.0/frame_rate, frame_rate*1001.0+2);
    //avctx->max_b_frames = 3;//h264
    qDebug("2 tbc: %f=%d/%d", av_q2d(avctx->time_base), avctx->time_base.num, avctx->time_base.den);
    avctx->bit_rate = bit_rate;
    // Set Option
        AVDictionary *param = 0;
#if 0
        //H.264
        if(avctx->codec_id == QTAV_CODEC_ID(H264)) {
            av_dict_set(&param, "preset", "slow", 0);
            av_dict_set(&param, "tune", "zerolatency", 0);
            //av_dict_set(&param, "profile", "main", 0);
        }
        //H.265
        if(avctx->codec_id == AV_CODEC_ID_H265){
            av_dict_set(&param, "preset", "ultrafast", 0);
            av_dict_set(&param, "tune", "zero-latency", 0);
        }
#endif
    applyOptionsForContext();
    AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false);
    const int buffer_size = qMax<int>(qMax<int>(width*height*6+200, FF_MIN_BUFFER_SIZE), sizeof(AVPicture));//??
    buffer.resize(buffer_size);
    return true;
}
Example #4
void 
audio_filter_process_init(vod_log_t* log)
{
	avcodec_register_all();
	avfilter_register_all();

	buffersrc_filter = avfilter_get_by_name(BUFFERSRC_FILTER_NAME);
	if (buffersrc_filter == NULL)
	{
		vod_log_error(VOD_LOG_WARN, log, 0,
			"audio_filter_process_init: failed to get buffer source filter, audio filtering is disabled");
		return;
	}

	buffersink_filter = avfilter_get_by_name(BUFFERSINK_FILTER_NAME);
	if (buffersink_filter == NULL)
	{
		vod_log_error(VOD_LOG_WARN, log, 0,
			"audio_filter_process_init: failed to get buffer sink filter, audio filtering is disabled");
		return;
	}

	decoder_codec = avcodec_find_decoder(AV_CODEC_ID_AAC);
	if (decoder_codec == NULL)
	{
		vod_log_error(VOD_LOG_WARN, log, 0,
			"audio_filter_process_init: failed to get AAC decoder, audio filtering is disabled");
		return;
	}

	encoder_codec = avcodec_find_encoder_by_name(AAC_ENCODER_NAME);
	if (encoder_codec == NULL)
	{
		vod_log_error(VOD_LOG_WARN, log, 0,
			"audio_filter_process_init: failed to get AAC encoder, audio filtering is disabled. recompile libavcodec with libfdk_aac to enable it");
		return;
	}

	if (!audio_filter_is_format_supported(encoder_codec, ENCODER_INPUT_SAMPLE_FORMAT))
	{
		vod_log_error(VOD_LOG_WARN, log, 0,
			"audio_filter_process_init: encoder does not support the required input format, audio filtering is disabled");
		return;
	}

	initialized = TRUE;
}
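Note: Example #4 (and, similarly, Example #14 below) gates initialization on audio_filter_is_format_supported(), which is not shown. A plausible sketch, assuming the check only needs to scan the encoder's advertised sample_fmts list (terminated by AV_SAMPLE_FMT_NONE), is given below; the helper name and the conservative handling of a NULL list are assumptions.

#include <libavcodec/avcodec.h>
#include <stdbool.h>

/* Hypothetical reconstruction: check whether 'codec' lists 'required' among
 * its supported input sample formats. A NULL sample_fmts list means the codec
 * did not declare them; this sketch conservatively reports "unsupported". */
static bool encoder_supports_sample_fmt(const AVCodec *codec, enum AVSampleFormat required)
{
	const enum AVSampleFormat *p;
	if (codec->sample_fmts == NULL)
		return false;
	for (p = codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; ++p) {
		if (*p == required)
			return true;
	}
	return false;
}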
Example #5
static int af_open(af_instance_t* af){

    af_ac3enc_t *s = calloc(1,sizeof(af_ac3enc_t));
    af->control=control;
    af->uninit=uninit;
    af->play=play;
    af->mul=1;
    af->data=calloc(1,sizeof(af_data_t));
    af->setup=s;

    s->lavc_acodec = avcodec_find_encoder_by_name("ac3");
    if (!s->lavc_acodec) {
        mp_tmsg(MSGT_AFILTER, MSGL_ERR, "Audio LAVC, couldn't find encoder for codec %s.\n", "ac3");
        return AF_ERROR;
    }

    s->lavc_actx = avcodec_alloc_context3(s->lavc_acodec);
    if (!s->lavc_actx) {
        mp_tmsg(MSGT_AFILTER, MSGL_ERR, "Audio LAVC, couldn't allocate context!\n");
        return AF_ERROR;
    }
    const enum AVSampleFormat *fmts = s->lavc_acodec->sample_fmts;
    for (int i = 0; ; i++) {
        if (fmts[i] == AV_SAMPLE_FMT_NONE) {
            mp_msg(MSGT_AFILTER, MSGL_ERR, "Audio LAVC, encoder doesn't "
                   "support expected sample formats!\n");
            return AF_ERROR;
        } else if (fmts[i] == AV_SAMPLE_FMT_S16) {
            s->in_sampleformat = AF_FORMAT_S16_NE;
            s->lavc_actx->sample_fmt = fmts[i];
            break;
        } else if (fmts[i] == AV_SAMPLE_FMT_FLT) {
            s->in_sampleformat = AF_FORMAT_FLOAT_NE;
            s->lavc_actx->sample_fmt = fmts[i];
            break;
        }
    }
    char buf[100];
    mp_msg(MSGT_AFILTER, MSGL_V, "[af_lavcac3enc]: in sample format: %s\n",
           af_fmt2str(s->in_sampleformat, buf, 100));
    s->pending_data_size = AF_NCH * AC3_FRAME_SIZE *
        af_fmt2bits(s->in_sampleformat) / 8;
    s->pending_data = malloc(s->pending_data_size);

    return AF_OK;
}
Example #6
bool FFmpegEncoderSetVideo(struct FFmpegEncoder* encoder, const char* vcodec, unsigned vbr) {
	static const struct {
		enum AVPixelFormat format;
		int priority;
	} priorities[] = {
		{ AV_PIX_FMT_RGB555, 0 },
		{ AV_PIX_FMT_BGR555, 0 },
		{ AV_PIX_FMT_RGB565, 1 },
		{ AV_PIX_FMT_BGR565, 1 },
		{ AV_PIX_FMT_RGB24, 2 },
		{ AV_PIX_FMT_BGR24, 2 },
#ifndef USE_LIBAV
		{ AV_PIX_FMT_BGR0, 3 },
		{ AV_PIX_FMT_RGB0, 3 },
		{ AV_PIX_FMT_0BGR, 3 },
		{ AV_PIX_FMT_0RGB, 3 },
#endif
		{ AV_PIX_FMT_YUV422P, 4 },
		{ AV_PIX_FMT_YUV444P, 5 },
		{ AV_PIX_FMT_YUV420P, 6 }
	};
	AVCodec* codec = avcodec_find_encoder_by_name(vcodec);
	if (!codec) {
		return false;
	}

	size_t i;
	size_t j;
	int priority = INT_MAX;
	encoder->pixFormat = AV_PIX_FMT_NONE;
	for (i = 0; codec->pix_fmts[i] != AV_PIX_FMT_NONE; ++i) {
		for (j = 0; j < sizeof(priorities) / sizeof(*priorities); ++j) {
			if (codec->pix_fmts[i] == priorities[j].format && priority > priorities[j].priority) {
				priority = priorities[j].priority;
				encoder->pixFormat = codec->pix_fmts[i];
			}
		}
	}
	if (encoder->pixFormat == AV_PIX_FMT_NONE) {
		return false;
	}
	encoder->videoCodec = vcodec;
	encoder->videoBitrate = vbr;
	return true;
}
Example #7
static bool ffemu_init_audio(struct ff_audio_info *audio, struct ffemu_params *param)
{
   AVCodec *codec = avcodec_find_encoder_by_name("flac");
   if (!codec)
      return false;

   audio->encoder = codec;

   // FFmpeg just loves to deprecate stuff :)
#ifdef HAVE_FFMPEG_ALLOC_CONTEXT3
   audio->codec = avcodec_alloc_context3(codec);
#else
   audio->codec = avcodec_alloc_context();
   avcodec_get_context_defaults(audio->codec);
#endif

   audio->codec->sample_rate = (int)roundf(param->samplerate);
   audio->codec->time_base = av_d2q(1.0 / param->samplerate, 1000000);
   audio->codec->channels = param->channels;
   audio->codec->sample_fmt = AV_SAMPLE_FMT_S16;

#ifdef HAVE_FFMPEG_AVCODEC_OPEN2
   if (avcodec_open2(audio->codec, codec, NULL) != 0)
#else
   if (avcodec_open(audio->codec, codec) != 0)
#endif
   {
      return false;
   }

   audio->buffer = (int16_t*)av_malloc(
         audio->codec->frame_size *
         audio->codec->channels *
         sizeof(int16_t));

   if (!audio->buffer)
      return false;

   audio->outbuf_size = FF_MIN_BUFFER_SIZE;
   audio->outbuf = (uint8_t*)av_malloc(audio->outbuf_size);
   if (!audio->outbuf)
      return false;

   return true;
}
Example #8
// ---------------------------------------------------------------------------------
static PyObject * Codec_GetID( PyACodecObject* obj, PyObject *args)
{
	AVCodec *p;
	char *sName=NULL;
	int i= 0;

	if (!PyArg_ParseTuple(args, "s", &sName ))
		return NULL;

	p = avcodec_find_encoder_by_name(sName);
	if( !p )
		p = avcodec_find_decoder_by_name(sName);

	if (p)
		i= p->id;

	return PyInt_FromLong( i );
}
Example #9
std::vector<std::string> getPixelFormats( const std::string& videoCodecName )
{
    std::vector<std::string> pixelFormats;

    // all video codec concerned
    if( videoCodecName == "" )
    {
        const AVPixFmtDescriptor* pixFmtDesc = NULL;

#if LIBAVUTIL_VERSION_INT < AV_VERSION_INT( 51, 44, 0 )
        for( int pix_fmt = 0; pix_fmt < PIX_FMT_NB; ++pix_fmt )
        {
            pixFmtDesc = &av_pix_fmt_descriptors[pix_fmt];
#else
        while( ( pixFmtDesc = av_pix_fmt_desc_next( pixFmtDesc ) ) != NULL )
        {
#endif
            if( ! pixFmtDesc->name )
                continue;
            pixelFormats.push_back( std::string( pixFmtDesc->name ) );
        }
    }
    // specific video codec
    else
    {
        const AVCodec* videoCodec = avcodec_find_encoder_by_name( videoCodecName.c_str() );
        if( videoCodec && videoCodec->pix_fmts != NULL )
        {
            size_t pix_fmt = 0;
            while( videoCodec->pix_fmts[pix_fmt] != -1 )
            {
#if LIBAVUTIL_VERSION_INT < AV_VERSION_INT( 51, 44, 0 )
                const AVPixFmtDescriptor* pix_desc = &av_pix_fmt_descriptors[ videoCodec->pix_fmts[pix_fmt] ];
#else
                const AVPixFmtDescriptor* pix_desc = av_pix_fmt_desc_get( videoCodec->pix_fmts[pix_fmt] );
#endif
                ++pix_fmt;
                if( ! pix_desc || ! pix_desc->name )
                    continue;
                pixelFormats.push_back( std::string( pix_desc->name ) );
            }
        }
    }
    return pixelFormats;
}
Example #10
QString MediaSink::codecType(const QString &codec)
{
    AVCodec *avCodec = avcodec_find_encoder_by_name(codec.toStdString().c_str());

    if (!avCodec)
        return QString();

    switch (avCodec->type) {
    case AVMEDIA_TYPE_AUDIO:
        return QString("audio/x-raw");
    case AVMEDIA_TYPE_VIDEO:
        return QString("video/x-raw");
    case AVMEDIA_TYPE_SUBTITLE:
        return QString("text/x-raw");
    default:
        break;
    }

    return QString();
}
Example #11
static int vf_open(vf_instance_t *vf, char *args){
    int p_quality=0;
    float p_fps=0;

    vf->config=config;
    vf->put_image=put_image;
    vf->query_format=query_format;
    vf->priv=malloc(sizeof(struct vf_priv_s));
    memset(vf->priv,0,sizeof(struct vf_priv_s));

    init_avcodec();

    vf->priv->codec = avcodec_find_encoder_by_name("mpeg1video");
    if (!vf->priv->codec) {
	mp_msg(MSGT_MENCODER,MSGL_ERR,MSGTR_MissingLAVCcodec, "mpeg1video");
	return 0;
    }

    vf->priv->context=avcodec_alloc_context3(vf->priv->codec);
    vf->priv->pic = av_frame_alloc();

    // TODO: parse args ->
    if(args) sscanf(args, "%d:%f", &p_quality, &p_fps);

    if(p_quality<32){
	// fixed qscale
	lavc_venc_context.flags = CODEC_FLAG_QSCALE;
	lavc_venc_context.global_quality =
	vf->priv->pic->quality = (int)(FF_QP2LAMBDA * ((p_quality<1) ? 1 : p_quality) + 0.5);
    } else {
	// fixed bitrate (in kbits)
	lavc_venc_context.bit_rate = 1000*p_quality;
    }
    lavc_venc_context.time_base.num = 1000*1001;
    lavc_venc_context.time_base.den = (p_fps<1.0) ? 1000*1001*25 : (p_fps * lavc_venc_context.time_base.num);
    lavc_venc_context.gop_size = 0; // I-only
    lavc_venc_context.pix_fmt= AV_PIX_FMT_YUV420P;

    return 1;
}
Example #12
static void *vaapi_create(obs_data_t *settings, obs_encoder_t *encoder)
{
	struct vaapi_encoder *enc;
	avcodec_register_all();

	enc          = bzalloc(sizeof(*enc));
	enc->encoder = encoder;

	int vaapi_codec = (int)obs_data_get_int(settings, "vaapi_codec");

	if (vaapi_codec == AV_CODEC_ID_H264) {
		enc->vaapi = avcodec_find_encoder_by_name("h264_vaapi");
	}

	enc->first_packet = true;

	blog(LOG_INFO, "---------------------------------");

	if (!enc->vaapi) {
		warn("Couldn't find encoder");
		goto fail;
	}

	enc->context = avcodec_alloc_context3(enc->vaapi);
	if (!enc->context) {
		warn("Failed to create codec context");
		goto fail;
	}

	if (!vaapi_update(enc, settings))
		goto fail;

	return enc;

fail:
	vaapi_destroy(enc);
	return NULL;
}
Example #13
static int ffserver_set_codec(AVCodecContext *ctx, const char *codec_name,
                              FFServerConfig *config)
{
    int ret;
    AVCodec *codec = avcodec_find_encoder_by_name(codec_name);
    if (!codec || codec->type != ctx->codec_type) {
        report_config_error(config->filename, config->line_num, AV_LOG_ERROR,
                            &config->errors,
                            "Invalid codec name: '%s'\n", codec_name);
        return 0;
    }
    if (ctx->codec_id == AV_CODEC_ID_NONE && !ctx->priv_data) {
        if ((ret = avcodec_get_context_defaults3(ctx, codec)) < 0)
            return ret;
        ctx->codec = codec;
    }
    if (ctx->codec_id != codec->id)
        report_config_error(config->filename, config->line_num, AV_LOG_ERROR,
                            &config->errors,
                            "Inconsistent configuration: trying to set '%s' "
                            "codec option, but '%s' codec is used previously\n",
                            codec_name, avcodec_get_name(ctx->codec_id));
    return 0;
}
Example #14
void
audio_encoder_process_init(vod_log_t* log)
{
	#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(58, 18, 100)
		avcodec_register_all();
	#endif

	encoder_codec = avcodec_find_encoder_by_name(AAC_ENCODER_NAME);
	if (encoder_codec == NULL)
	{
		vod_log_error(VOD_LOG_WARN, log, 0,
			"audio_encoder_process_init: failed to get AAC encoder, audio encoding is disabled. recompile libavcodec with libfdk_aac to enable it");
		return;
	}

	if (!audio_encoder_is_format_supported(encoder_codec, AUDIO_ENCODER_INPUT_SAMPLE_FORMAT))
	{
		vod_log_error(VOD_LOG_WARN, log, 0,
			"audio_encoder_process_init: encoder does not support the required input format, audio encoding is disabled");
		return;
	}

	initialized = TRUE;
}
Example #15
// ---------------------------------------------------------------------------------
static PyObject * GetCodecID( PyObject* obj, PyObject *args)
{
	AVCodec *p;
	PyObject* cRes=NULL;
	char *sName=NULL;

	if (!PyArg_ParseTuple(args, "s", &sName ))
		return NULL;

	p = avcodec_find_encoder_by_name(sName);
	if( !p )
		p = avcodec_find_decoder_by_name(sName);

	if (p)
	{
		cRes = Py_BuildValue("i",p->id);
		return cRes;
	}
	else
	{
		PyErr_Format( g_cErr, "%s: no such codec exists", sName );
		return NULL;
	}
}
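Note: Examples #8 and #15 wrap the same lookup pattern for Python bindings: try avcodec_find_encoder_by_name() first, then fall back to avcodec_find_decoder_by_name(). Stripped of the binding code, the pattern reduces to a small helper like the sketch below; the function name is illustrative, and AV_CODEC_ID_NONE is the modern spelling of the old CODEC_ID_NONE used elsewhere on this page.

#include <libavcodec/avcodec.h>

/* Sketch: resolve a codec name to an AVCodecID, preferring an encoder and
 * falling back to a decoder, as Examples #8 and #15 do. */
static enum AVCodecID codec_id_from_name(const char *name)
{
	const AVCodec *c = avcodec_find_encoder_by_name(name);
	if (!c)
		c = avcodec_find_decoder_by_name(name);
	return c ? c->id : AV_CODEC_ID_NONE;
}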
Example #16
File: raw.c Project: AmesianX/pilot
int prepare(VideoOut_sV *video, const char *filename, const char *vcodec, const int width, const int height, const int bitrate,
             const unsigned int numerator, const unsigned int denominator)
{

    /* must be called before using avcodec lib */
    avcodec_init();

    video->frameNr = 0;
    video->errorMessage = NULL;
    video->filename = malloc(strlen(filename)+1);
    strcpy(video->filename, filename);

    /* initialize libavcodec, and register all codecs and formats */
    av_register_all();

    /* allocate the output media context */
    avformat_alloc_output_context2(&video->fc, NULL, NULL, filename);
    if (!video->fc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&video->fc, NULL, "mpeg", filename);
    }
    if (!video->fc) {
        const char *s = "Could allocate the output context, even MPEG is not available.\n";
        fprintf(stderr, s);
        setErrorMessage(video, s);
        return 2;
    }
    video->format = video->fc->oformat;
    printf("Using format %s.\n", video->format->name);

    /* Use the given vcodec if it is not NULL */
    if (vcodec != NULL) {
        AVCodec *codec = avcodec_find_encoder_by_name(vcodec);
        if (codec == NULL) {
            char s[strlen(vcodec)+50];
            sprintf(s, "No codec available for %s.\n", vcodec);
            fprintf(stderr, s);
            setErrorMessage(video, s);
            return 2;
        }
        printf("Found codec: %s\n", codec->long_name);
        video->format->video_codec = codec->id;
    }

    /* add the audio and video streams using the default format codecs
       and initialize the codecs */
    video->streamV = NULL;
    if (video->format->video_codec != CODEC_ID_NONE) {

        video->streamV = av_new_stream(video->fc, 0);
        if (!video->streamV) {
            const char *s = "Could not allocate the video stream.\n";
            fprintf(stderr, s);
            setErrorMessage(video, s);
            return 2;
        }

        AVCodecContext *cc = video->streamV->codec;

        cc->codec_id = video->format->video_codec;
        cc->codec_type = AVMEDIA_TYPE_VIDEO;

        cc->bit_rate = bitrate;

        /* resolution must be a multiple of two */
        cc->width = width;
        cc->height = height;


        /* time base: this is the fundamental unit of time (in seconds) in terms
           of which frame timestamps are represented. for fixed-fps content,
           timebase should be 1/framerate and timestamp increments should be
           identically 1. */
        cc->time_base= (AVRational){numerator, denominator};
        cc->gop_size = 12; /* emit one intra frame every ten frames */


        cc->pix_fmt = PIX_FMT_YUV420P;
        if (cc->codec_id == CODEC_ID_MPEG2VIDEO || cc->codec_id == CODEC_ID_MPEG4) {
            /* just for testing, we also add B frames */
            cc->max_b_frames = 2;
        }
        if (cc->codec_id == CODEC_ID_MPEG1VIDEO){
            /* Needed to avoid using macroblocks in which some coeffs overflow.
               This does not happen with normal video, it just happens here as
               the motion of the chroma plane does not match the luma plane. */
            cc->mb_decision=2;
        }
        // some formats want stream headers to be separate
        if(video->fc->oformat->flags & AVFMT_GLOBALHEADER) {
            cc->flags |= CODEC_FLAG_GLOBAL_HEADER;
        }

        video->rgbConversionContext = sws_getContext(
                    cc->width, cc->height,
                    PIX_FMT_BGRA,
                    cc->width, cc->height,
                    cc->pix_fmt,
                    SWS_BICUBIC, NULL, NULL, NULL);

        // One line size for each plane. RGB consists of one plane only.
        // (YUV420p consists of 3, Y, Cb, and Cr
        video->rgbLinesize[0] = cc->width*4;
        video->rgbLinesize[1] = 0;
        video->rgbLinesize[2] = 0;
        video->rgbLinesize[3] = 0;

        if (video->rgbConversionContext == NULL) {
            char s[200];
            sprintf(s, "Cannot initialize the RGB conversion context. Incorrect size (%dx%d)?\n", cc->width, cc->height);
            fprintf(stderr, s);
            setErrorMessage(video, s);
            return 2;
        }


        printf("Settings: %dx%d, %d bits/s (tolerance: %d), %d fps\n", cc->width, cc->height,
               cc->bit_rate, cc->bit_rate_tolerance, ((float)cc->time_base.den)/cc->time_base.num);
        fflush(stdout);
    } else {
        const char *s = "No codec ID given.\n";
        fprintf(stderr, s);
        setErrorMessage(video, s);
        return 2;
    }

    av_dump_format(video->fc, 0, filename, 1);

    /* now that all the parameters are set, we can open the audio and
       video codecs and allocate the necessary encode buffers */
    if (video->streamV) {
        int ret = open_video(video);
        if (ret != 0) {
            return ret;
        }
    } else {
        const char *s = "Could not open video stream.\n";
        fprintf(stderr, s);
        setErrorMessage(video, s);
        return 2;
    }



    /* open the output file, if needed */
    if (!(video->format->flags & AVFMT_NOFILE)) {
        if (avio_open(&video->fc->pb, filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "could not open %s\n", video->filename);
            char *msg = "Could not open file: ";
            char *msgAll = malloc(sizeof(char) * (strlen(filename) + strlen(msg) + 1));
            strcpy(msgAll, msg);
            strcat(msgAll, filename);
            fprintf(stderr, msgAll);
            setErrorMessage(video, msgAll);
            free(msgAll);
            return 5;
        }
    }

    /* write the stream header, if any */
    avformat_write_header(video->fc, NULL);


    /* alloc image and output buffer */
    video->outbufSizeV = avpicture_get_size(video->streamV->codec->pix_fmt, width, height);
    video->outbufV = av_malloc(video->outbufSizeV);

    video->picture = avcodec_alloc_frame();
    avpicture_alloc((AVPicture*)video->picture, video->streamV->codec->pix_fmt,
                    video->streamV->codec->width, video->streamV->codec->height);
    if (!video->picture) {
        const char *s = "Could not allocate AVPicture.\n";
        fprintf(stderr, s);
        setErrorMessage(video, s);
        return 2;
    }

    return 0;
}
Example #17
bool AVFormatWriter::Init(void)
{
    AVOutputFormat *fmt = av_guess_format(m_container.toLatin1().constData(),
                                          NULL, NULL);
    if (!fmt)
    {
        LOG(VB_RECORD, LOG_ERR, LOC +
            QString("Init(): Unable to guess AVOutputFormat from container %1")
                    .arg(m_container));
        return false;
    }

    m_fmt = *fmt;

    if (m_width && m_height)
    {
        m_avVideoCodec = avcodec_find_encoder_by_name(
            m_videoCodec.toLatin1().constData());
        if (!m_avVideoCodec)
        {
            LOG(VB_RECORD, LOG_ERR, LOC +
                QString("Init(): Unable to find video codec %1").arg(m_videoCodec));
            return false;
        }

        m_fmt.video_codec = m_avVideoCodec->id;
    }
    else
        m_fmt.video_codec = AV_CODEC_ID_NONE;

    m_avAudioCodec = avcodec_find_encoder_by_name(
        m_audioCodec.toLatin1().constData());
    if (!m_avAudioCodec)
    {
        LOG(VB_RECORD, LOG_ERR, LOC +
            QString("Init(): Unable to find audio codec %1").arg(m_audioCodec));
        return false;
    }

    m_fmt.audio_codec = m_avAudioCodec->id;

    m_ctx = avformat_alloc_context();
    if (!m_ctx)
    {
        LOG(VB_RECORD, LOG_ERR,
            LOC + "Init(): Unable to allocate AVFormatContext");
        return false;
    }

    m_ctx->oformat = &m_fmt;

    if (m_container == "mpegts")
        m_ctx->packet_size = 2324;

    snprintf(m_ctx->filename, sizeof(m_ctx->filename), "%s",
             m_filename.toLatin1().constData());

    if (m_fmt.video_codec != AV_CODEC_ID_NONE)
        m_videoStream = AddVideoStream();
    if (m_fmt.audio_codec != AV_CODEC_ID_NONE)
        m_audioStream = AddAudioStream();

    if ((m_videoStream) && (!OpenVideo()))
    {
        LOG(VB_RECORD, LOG_ERR, LOC + "Init(): OpenVideo() failed");
        return false;
    }

    if ((m_audioStream) && (!OpenAudio()))
    {
        LOG(VB_RECORD, LOG_ERR, LOC + "Init(): OpenAudio() failed");
        return false;
    }

    return true;
}
Example #18
static bool ffemu_init_video(struct ff_video_info *video, const struct ffemu_params *param)
{
#ifdef HAVE_X264RGB
   AVCodec *codec = NULL;
   if (g_settings.video.h264_record)
   {
      codec = avcodec_find_encoder_by_name("libx264rgb");
      // Older versions of FFmpeg have RGB encoding in libx264.
      if (!codec)
         codec = avcodec_find_encoder_by_name("libx264");
   }
   else
      codec = avcodec_find_encoder_by_name("ffv1");
#else
   AVCodec *codec = avcodec_find_encoder_by_name("ffv1");
#endif
   if (!codec)
      return false;

   video->encoder = codec;

#if AV_HAVE_BIGENDIAN
   video->fmt = PIX_FMT_RGB555BE;
#else
   video->fmt = PIX_FMT_RGB555LE;
#endif
   video->pix_size = sizeof(uint16_t);
   if (param->rgb32)
   {
      video->fmt = PIX_FMT_RGB32;
      video->pix_size = sizeof(uint32_t);
   }

#ifdef HAVE_X264RGB
   video->pix_fmt = g_settings.video.h264_record ? PIX_FMT_BGR24 : PIX_FMT_RGB32;
#else
   video->pix_fmt = PIX_FMT_RGB32;
#endif

#ifdef HAVE_FFMPEG_ALLOC_CONTEXT3
   video->codec = avcodec_alloc_context3(codec);
#else
   video->codec = avcodec_alloc_context();
   avcodec_get_context_defaults(video->codec);
#endif

   video->codec->width = param->out_width;
   video->codec->height = param->out_height;
   video->codec->time_base = av_d2q(1.0 / param->fps, 1000000); // Arbitrary big number.
   video->codec->sample_aspect_ratio = av_d2q(param->aspect_ratio * param->out_height / param->out_width, 255);
   video->codec->pix_fmt = video->pix_fmt;

#ifdef HAVE_FFMPEG_AVCODEC_OPEN2
   AVDictionary *opts = NULL;
#endif

#ifdef HAVE_X264RGB
   if (g_settings.video.h264_record)
   {
      video->codec->thread_count = 3;
      av_dict_set(&opts, "qp", "0", 0);
   }
   else
      video->codec->thread_count = 2;
#else
   video->codec->thread_count = 2;
#endif

#ifdef HAVE_FFMPEG_AVCODEC_OPEN2
   if (avcodec_open2(video->codec, codec, &opts) != 0)
#else
   if (avcodec_open(video->codec, codec) != 0)
#endif
      return false;

#ifdef HAVE_FFMPEG_AVCODEC_OPEN2
   if (opts)
      av_dict_free(&opts);
#endif

   // Allocate a big buffer :p ffmpeg API doesn't seem to give us some clues how big this buffer should be.
   video->outbuf_size = 1 << 23;
   video->outbuf = (uint8_t*)av_malloc(video->outbuf_size);

   size_t size = avpicture_get_size(video->pix_fmt, param->out_width, param->out_height);
   video->conv_frame_buf = (uint8_t*)av_malloc(size);
   video->conv_frame = avcodec_alloc_frame();
   avpicture_fill((AVPicture*)video->conv_frame, video->conv_frame_buf, video->pix_fmt, param->out_width, param->out_height);

   return true;
}
Example #19
static int encavcodecaInit(hb_work_object_t *w, hb_job_t *job)
{
    AVCodec *codec;
    AVCodecContext *context;
    hb_audio_t *audio = w->audio;

    hb_work_private_t *pv = calloc(1, sizeof(hb_work_private_t));
    w->private_data       = pv;
    pv->job               = job;
    pv->list              = hb_list_init();

    // channel count, layout and matrix encoding
    int matrix_encoding;
    uint64_t channel_layout   = hb_ff_mixdown_xlat(audio->config.out.mixdown,
                                                   &matrix_encoding);
    pv->out_discrete_channels =
        hb_mixdown_get_discrete_channel_count(audio->config.out.mixdown);

    // default settings and options
    AVDictionary *av_opts          = NULL;
    const char *codec_name         = NULL;
    enum AVCodecID codec_id        = AV_CODEC_ID_NONE;
    enum AVSampleFormat sample_fmt = AV_SAMPLE_FMT_FLTP;
    int bits_per_raw_sample        = 0;
    int profile                    = FF_PROFILE_UNKNOWN;

    // override with encoder-specific values
    switch (audio->config.out.codec)
    {
        case HB_ACODEC_AC3:
            codec_id = AV_CODEC_ID_AC3;
            if (matrix_encoding != AV_MATRIX_ENCODING_NONE)
                av_dict_set(&av_opts, "dsur_mode", "on", 0);
            break;

        case HB_ACODEC_FDK_AAC:
        case HB_ACODEC_FDK_HAAC:
            codec_name          = "libfdk_aac";
            sample_fmt          = AV_SAMPLE_FMT_S16;
            bits_per_raw_sample = 16;
            switch (audio->config.out.codec)
            {
                case HB_ACODEC_FDK_HAAC:
                    profile = FF_PROFILE_AAC_HE;
                    break;
                default:
                    profile = FF_PROFILE_AAC_LOW;
                    break;
            }
            // Libav's libfdk-aac wrapper expects back channels for 5.1
            // audio, and will error out unless we translate the layout
            if (channel_layout == AV_CH_LAYOUT_5POINT1)
                channel_layout  = AV_CH_LAYOUT_5POINT1_BACK;
            break;

        case HB_ACODEC_FFAAC:
            codec_name = "aac";
            av_dict_set(&av_opts, "stereo_mode", "ms_off", 0);
            break;

        case HB_ACODEC_FFFLAC:
        case HB_ACODEC_FFFLAC24:
            codec_id = AV_CODEC_ID_FLAC;
            switch (audio->config.out.codec)
            {
                case HB_ACODEC_FFFLAC24:
                    sample_fmt          = AV_SAMPLE_FMT_S32;
                    bits_per_raw_sample = 24;
                    break;
                default:
                    sample_fmt          = AV_SAMPLE_FMT_S16;
                    bits_per_raw_sample = 16;
                    break;
            }
            break;

        default:
            hb_error("encavcodecaInit: unsupported codec (0x%x)",
                     audio->config.out.codec);
            return 1;
    }
    if (codec_name != NULL)
    {
        codec = avcodec_find_encoder_by_name(codec_name);
        if (codec == NULL)
        {
            hb_error("encavcodecaInit: avcodec_find_encoder_by_name(%s) failed",
                     codec_name);
            return 1;
        }
    }
    else
    {
        codec = avcodec_find_encoder(codec_id);
        if (codec == NULL)
        {
            hb_error("encavcodecaInit: avcodec_find_encoder(%d) failed",
                     codec_id);
            return 1;
        }
    }
    // allocate the context and apply the settings
    context                      = avcodec_alloc_context3(codec);
    hb_ff_set_sample_fmt(context, codec, sample_fmt);
    context->bits_per_raw_sample = bits_per_raw_sample;
    context->profile             = profile;
    context->channel_layout      = channel_layout;
    context->channels            = pv->out_discrete_channels;
    context->sample_rate         = audio->config.out.samplerate;

    if (audio->config.out.bitrate > 0)
    {
        context->bit_rate = audio->config.out.bitrate * 1000;
    }
    else if (audio->config.out.quality >= 0)
    {
        context->global_quality = audio->config.out.quality * FF_QP2LAMBDA;
        context->flags |= CODEC_FLAG_QSCALE;
    }

    if (audio->config.out.compression_level >= 0)
    {
        context->compression_level = audio->config.out.compression_level;
    }

    // For some codecs, libav requires the following flag to be set
    // so that it fills extradata with global header information.
    // If this flag is not set, it inserts the data into each
    // packet instead.
    context->flags |= CODEC_FLAG_GLOBAL_HEADER;

    if (hb_avcodec_open(context, codec, &av_opts, 0))
    {
        hb_error("encavcodecaInit: hb_avcodec_open() failed");
        return 1;
    }
    // avcodec_open populates the opts dictionary with the
    // things it didn't recognize.
    AVDictionaryEntry *t = NULL;
    while ((t = av_dict_get(av_opts, "", t, AV_DICT_IGNORE_SUFFIX)))
    {
        hb_log("encavcodecaInit: Unknown avcodec option %s", t->key);
    }
    av_dict_free(&av_opts);

    pv->context           = context;
    audio->config.out.samples_per_frame =
    pv->samples_per_frame = context->frame_size;
    pv->input_samples     = context->frame_size * context->channels;
    pv->input_buf         = malloc(pv->input_samples * sizeof(float));
    pv->max_output_bytes  = (pv->input_samples *
                             av_get_bytes_per_sample(context->sample_fmt));

    // sample_fmt conversion
    if (context->sample_fmt != AV_SAMPLE_FMT_FLT)
    {
        pv->output_buf = malloc(pv->max_output_bytes);
        pv->avresample = avresample_alloc_context();
        if (pv->avresample == NULL)
        {
            hb_error("encavcodecaInit: avresample_alloc_context() failed");
            return 1;
        }
        av_opt_set_int(pv->avresample, "in_sample_fmt",
                       AV_SAMPLE_FMT_FLT, 0);
        av_opt_set_int(pv->avresample, "out_sample_fmt",
                       context->sample_fmt, 0);
        av_opt_set_int(pv->avresample, "in_channel_layout",
                       context->channel_layout, 0);
        av_opt_set_int(pv->avresample, "out_channel_layout",
                       context->channel_layout, 0);
        if (hb_audio_dither_is_supported(audio->config.out.codec))
        {
            // dithering needs the sample rate
            av_opt_set_int(pv->avresample, "in_sample_rate",
                           context->sample_rate, 0);
            av_opt_set_int(pv->avresample, "out_sample_rate",
                           context->sample_rate, 0);
            av_opt_set_int(pv->avresample, "dither_method",
                           audio->config.out.dither_method, 0);
        }
        if (avresample_open(pv->avresample))
        {
            hb_error("encavcodecaInit: avresample_open() failed");
            avresample_free(&pv->avresample);
            return 1;
        }
    }
    else
    {
        pv->avresample = NULL;
        pv->output_buf = pv->input_buf;
    }

    if (context->extradata != NULL)
    {
        memcpy(w->config->extradata.bytes, context->extradata,
               context->extradata_size);
        w->config->extradata.length = context->extradata_size;
    }

    return 0;
}
Example #20
int FFmpegEncoder::open()
{
	LOGI("FFmpegEncoder::open, begin!");
	if (this->opened) {
		LOGW("FFmpegEncoder::open, try to reopen!");
		return -1;
	}

	if (this->videoParam.videoCodecName.empty() && 
		this->audioParam.audioCodecName.empty()) {
		LOGE("FFmpegEncoder::open, no output or codec name");
		return -1;
	}

	// allocate the output media context
	this->outputContext = avformat_alloc_context();
	if (!this->outputContext) {
		LOGE("FFmpegEncoder::open, failed to alloc context!");
		return -1;
	}

	// video related initialization if necessary
	if (this->encodeVideo) {
		// validate the video codec
		if (this->videoParam.videoCodecName.empty()) {
			LOGE("FFmpegEncoder::open, no video codec name!");
			return -1;
		}

		// find the video encoder
		AVCodec *videoCodec = NULL;

		// use the codec name preferentially if it is specified in the input param
		videoCodec = avcodec_find_encoder_by_name(this->videoParam.videoCodecName.c_str());

		if (!videoCodec) {
			LOGE("FFmpegEncoder::open, find no video codec!");
			return -1;
		}

		// add the video stream with stream id 0
		this->videoStream = av_new_stream(this->outputContext, 0);
		if (!this->videoStream)	{
			LOGE("FFmpegEncoder::open, failed to new video stream!");
			return -1;
		}

		// set the parameters for video codec context
		AVCodecContext *videoCodecContext = this->videoStream->codec;
		videoCodecContext->codec_id       = videoCodec->id;
		videoCodecContext->codec_type     = CODEC_TYPE_VIDEO;
		videoCodecContext->bit_rate       = this->videoParam.bitRate;
		videoCodecContext->width          = this->videoParam.width;
		videoCodecContext->height         = this->videoParam.height;
		videoCodecContext->time_base.den  = this->videoParam.frameRate;
		videoCodecContext->time_base.num  = 1;

		// tune for video encoding
		videoCodecContext->gop_size = 24;
		videoCodecContext->qmin = 3;
		videoCodecContext->qmax = 33;
		videoCodecContext->max_qdiff = 4;
		videoCodecContext->qcompress = 0.6f;
	
		videoCodecContext->me_method = ME_FULL;
		videoCodecContext->me_range = 32;
		videoCodecContext->partitions = X264_PART_I4X4 | X264_PART_I8X8 | X264_PART_P8X8 | X264_PART_P4X4 | X264_PART_B8X8;
		videoCodecContext->coder_type = FF_CODER_TYPE_AC;
		videoCodecContext->max_b_frames = 1;

		// set the PixelFormat of the target encoded video
		if (videoCodec->pix_fmts) {
			// try to find the PixelFormat required by the input param,
			// use the default PixelFormat directly if required format not found
			const enum PixelFormat *p= videoCodec->pix_fmts;
			for ( ; *p != PIX_FMT_NONE; p ++) {
				if (*p == this->videoParam.pixelFormat)
					break;
			}
			
			if (*p == PIX_FMT_NONE)
				videoCodecContext->pix_fmt = videoCodec->pix_fmts[0];
			else
				videoCodecContext->pix_fmt = *p;
		}

		// open the video codec
		if (avcodec_open(videoCodecContext, videoCodec) < 0) {
			LOGE("FFmpegEncoder.open, find but failed to open video codec!");
			return -1;
		}

		// allocate the output buffer
		// the maximum possible buffer size could be the raw bmp format with R/G/B/A
		this->videoBufferSize = 4 * this->videoParam.width * this->videoParam.height;
		this->videoBuffer     = (uint8_t*)(av_malloc(this->videoBufferSize));
	}

	// audio related initialization if necessary
	if (this->encodeAudio)
	{
		// validate the audio codec
		if (this->audioParam.audioCodecName.empty())
		{
			LOGE("FFmpegEncoder.open, no outputformat or no audio codec name!");
			return -1;
		}

		// find the audio encoder
		AVCodec *audioCodec = NULL;

		// use the codec name preferentially if it is specified in the input param
		audioCodec = avcodec_find_encoder_by_name(this->audioParam.audioCodecName.c_str());

		if (!audioCodec)
		{
			LOGE("FFmpegEncoder.open, invalid audio codec!");
			return -1;
		}

		// add the audio stream with stream id 1
		this->audioStream = av_new_stream(this->outputContext, 1);
		if (!this->audioStream)
		{
			LOGE("FFmpegEncoder.open, failed to new audio stream!");
			return -1;
		}

		// set the parameters for audio codec context
		AVCodecContext *audioCodecContext = this->audioStream->codec;
		audioCodecContext->codec_id       = audioCodec->id;
		audioCodecContext->codec_type     = CODEC_TYPE_AUDIO;
		audioCodecContext->bit_rate       = this->audioParam.bitRate;
		audioCodecContext->sample_rate    = this->audioParam.sampleRate;
		audioCodecContext->channels       = this->audioParam.channels;

		// open the audio codec
		if (avcodec_open(audioCodecContext, audioCodec) < 0)
		{
			LOGE("FFmpegEncoder.open, failed to open audio codec!");
			return -1;
		}

		// TODO: how to determine the buffer size?
		// allocate the output buffer
		this->audioBufferSize = 4 * MAX_AUDIO_PACKET_SIZE;
		this->audioBuffer     = (uint8_t*)(av_malloc(this->audioBufferSize));
	}

	this->opened = true;
	LOGI("FFmpegEncoder.open, end!");

	return 0;
}
Example #21
int mpae_init_lavc(audio_encoder_t *encoder)
{
	encoder->params.samples_per_frame = encoder->params.sample_rate;
	encoder->params.bitrate = encoder->params.sample_rate * encoder->params.channels * 2 * 8;

	if(!lavc_param_acodec)
	{
		mp_msg(MSGT_MENCODER, MSGL_FATAL, MSGTR_NoLavcAudioCodecName);
		return 0;
	}

	init_avcodec();

	lavc_acodec = avcodec_find_encoder_by_name(lavc_param_acodec);
	if (!lavc_acodec)
	{
		mp_msg(MSGT_MENCODER, MSGL_FATAL, MSGTR_LavcAudioCodecNotFound, lavc_param_acodec);
		return 0;
	}
	if(lavc_param_atag == 0)
	{
		lavc_param_atag = av_codec_get_tag(mp_wav_taglists, lavc_acodec->id);
		if(!lavc_param_atag)
		{
			mp_msg(MSGT_MENCODER, MSGL_FATAL, "Couldn't find wav tag for specified codec, exit\n");
			return 0;
		}
	}

	lavc_actx = avcodec_alloc_context();
	if(lavc_actx == NULL)
	{
		mp_msg(MSGT_MENCODER, MSGL_FATAL, MSGTR_CouldntAllocateLavcContext);
		return 0;
	}

	lavc_actx->codec_type = CODEC_TYPE_AUDIO;
	lavc_actx->codec_id = lavc_acodec->id;
	// put sample parameters
	lavc_actx->channels = encoder->params.channels;
	lavc_actx->sample_rate = encoder->params.sample_rate;
	lavc_actx->time_base.num = 1;
	lavc_actx->time_base.den = encoder->params.sample_rate;
        if(lavc_param_abitrate<1000)
                lavc_actx->bit_rate = encoder->params.bitrate = lavc_param_abitrate * 1000;
        else
                lavc_actx->bit_rate = encoder->params.bitrate = lavc_param_abitrate;
        if(lavc_param_audio_avopt){
            if(parse_avopts(lavc_actx, lavc_param_audio_avopt) < 0){
                mp_msg(MSGT_MENCODER,MSGL_ERR, "Your options /%s/ look like gibberish to me pal\n", lavc_param_audio_avopt);
                return 0;
            }
        }


	/*
	* Special case for adpcm_ima_wav.
	* The bitrate is only dependent on samplerate.
	* We have to know frame_size and block_align in advance,
	* so I just copied the code from libavcodec/adpcm.c
	*
	* However, ms adpcm_ima_wav uses a block_align of 2048,
	* lavc defaults to 1024
	*/
	if(lavc_param_atag == 0x11) {
		int blkalign = 2048;
		int framesize = (blkalign - 4 * lavc_actx->channels) * 8 / (4 * lavc_actx->channels) + 1;
		lavc_actx->bit_rate = lavc_actx->sample_rate*8*blkalign/framesize;
	}
        if((lavc_param_audio_global_header&1)
        /*|| (video_global_header==0 && (oc->oformat->flags & AVFMT_GLOBALHEADER))*/){
                lavc_actx->flags |= CODEC_FLAG_GLOBAL_HEADER;
        }
        if(lavc_param_audio_global_header&2){
                lavc_actx->flags2 |= CODEC_FLAG2_LOCAL_HEADER;
        }

	if(avcodec_open(lavc_actx, lavc_acodec) < 0)
	{
		mp_msg(MSGT_MENCODER, MSGL_FATAL, MSGTR_CouldntOpenCodec, lavc_param_acodec, lavc_param_abitrate);
		return 0;
	}

	if(lavc_param_atag == 0x11) {
		lavc_actx->block_align = 2048;
		lavc_actx->frame_size = (lavc_actx->block_align - 4 * lavc_actx->channels) * 8 / (4 * lavc_actx->channels) + 1;
	}

	encoder->decode_buffer_size = lavc_actx->frame_size * 2 * encoder->params.channels;
	while (encoder->decode_buffer_size < 1024) encoder->decode_buffer_size *= 2;
	encoder->bind = bind_lavc;
	encoder->get_frame_size = get_frame_size;
	encoder->encode = encode_lavc;
	encoder->close = close_lavc;

	return 1;
}
Example #22
bool FFmpegEncoderOpen(struct FFmpegEncoder* encoder, const char* outfile) {
	AVCodec* acodec = avcodec_find_encoder_by_name(encoder->audioCodec);
	AVCodec* vcodec = avcodec_find_encoder_by_name(encoder->videoCodec);
	if ((encoder->audioCodec && !acodec) || !vcodec || !FFmpegEncoderVerifyContainer(encoder)) {
		return false;
	}

	encoder->currentAudioSample = 0;
	encoder->currentAudioFrame = 0;
	encoder->currentVideoFrame = 0;
	encoder->nextAudioPts = 0;

	AVOutputFormat* oformat = av_guess_format(encoder->containerFormat, 0, 0);
#ifndef USE_LIBAV
	avformat_alloc_output_context2(&encoder->context, oformat, 0, outfile);
#else
	encoder->context = avformat_alloc_context();
	strncpy(encoder->context->filename, outfile, sizeof(encoder->context->filename) - 1);
	encoder->context->filename[sizeof(encoder->context->filename) - 1] = '\0';
	encoder->context->oformat = oformat;
#endif

	if (acodec) {
		encoder->audioStream = avformat_new_stream(encoder->context, acodec);
		encoder->audio = encoder->audioStream->codec;
		encoder->audio->bit_rate = encoder->audioBitrate;
		encoder->audio->channels = 2;
		encoder->audio->channel_layout = AV_CH_LAYOUT_STEREO;
		encoder->audio->sample_rate = encoder->sampleRate;
		encoder->audio->sample_fmt = encoder->sampleFormat;
		AVDictionary* opts = 0;
		av_dict_set(&opts, "strict", "-2", 0);
		if (encoder->context->oformat->flags & AVFMT_GLOBALHEADER) {
			encoder->audio->flags |= CODEC_FLAG_GLOBAL_HEADER;
		}
		avcodec_open2(encoder->audio, acodec, &opts);
		av_dict_free(&opts);
#if LIBAVCODEC_VERSION_MAJOR >= 55
		encoder->audioFrame = av_frame_alloc();
#else
		encoder->audioFrame = avcodec_alloc_frame();
#endif
		if (!encoder->audio->frame_size) {
			encoder->audio->frame_size = 1;
		}
		encoder->audioFrame->nb_samples = encoder->audio->frame_size;
		encoder->audioFrame->format = encoder->audio->sample_fmt;
		encoder->audioFrame->pts = 0;
		encoder->resampleContext = avresample_alloc_context();
		av_opt_set_int(encoder->resampleContext, "in_channel_layout", AV_CH_LAYOUT_STEREO, 0);
		av_opt_set_int(encoder->resampleContext, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
		av_opt_set_int(encoder->resampleContext, "in_sample_rate", PREFERRED_SAMPLE_RATE, 0);
		av_opt_set_int(encoder->resampleContext, "out_sample_rate", encoder->sampleRate, 0);
		av_opt_set_int(encoder->resampleContext, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
		av_opt_set_int(encoder->resampleContext, "out_sample_fmt", encoder->sampleFormat, 0);
		avresample_open(encoder->resampleContext);
		encoder->audioBufferSize = (encoder->audioFrame->nb_samples * PREFERRED_SAMPLE_RATE / encoder->sampleRate) * 4;
		encoder->audioBuffer = av_malloc(encoder->audioBufferSize);
		encoder->postaudioBufferSize = av_samples_get_buffer_size(0, encoder->audio->channels, encoder->audio->frame_size, encoder->audio->sample_fmt, 0);
		encoder->postaudioBuffer = av_malloc(encoder->postaudioBufferSize);
		avcodec_fill_audio_frame(encoder->audioFrame, encoder->audio->channels, encoder->audio->sample_fmt, (const uint8_t*) encoder->postaudioBuffer, encoder->postaudioBufferSize, 0);

		if (encoder->audio->codec->id == AV_CODEC_ID_AAC &&
			(!strcasecmp(encoder->containerFormat, "mp4") ||
			!strcasecmp(encoder->containerFormat, "m4v") ||
			!strcasecmp(encoder->containerFormat, "mov"))) {
			// MP4 container doesn't support the raw ADTS AAC format that the encoder spits out
			encoder->absf = av_bitstream_filter_init("aac_adtstoasc");
		}
	}

	encoder->videoStream = avformat_new_stream(encoder->context, vcodec);
	encoder->video = encoder->videoStream->codec;
	encoder->video->bit_rate = encoder->videoBitrate;
	encoder->video->width = encoder->width;
	encoder->video->height = encoder->height;
	encoder->video->time_base = (AVRational) { VIDEO_TOTAL_LENGTH, GBA_ARM7TDMI_FREQUENCY };
	encoder->video->pix_fmt = encoder->pixFormat;
	encoder->video->gop_size = 60;
	encoder->video->max_b_frames = 3;
	if (encoder->context->oformat->flags & AVFMT_GLOBALHEADER) {
		encoder->video->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}
	if (strcmp(vcodec->name, "libx264") == 0) {
		// Try to adaptively figure out when you can use a slower encoder
		if (encoder->width * encoder->height > 1000000) {
			av_opt_set(encoder->video->priv_data, "preset", "superfast", 0);
		} else if (encoder->width * encoder->height > 500000) {
			av_opt_set(encoder->video->priv_data, "preset", "veryfast", 0);
		} else {
			av_opt_set(encoder->video->priv_data, "preset", "faster", 0);
		}
		av_opt_set(encoder->video->priv_data, "tune", "zerolatency", 0);
	}
	avcodec_open2(encoder->video, vcodec, 0);
#if LIBAVCODEC_VERSION_MAJOR >= 55
	encoder->videoFrame = av_frame_alloc();
#else
	encoder->videoFrame = avcodec_alloc_frame();
#endif
	encoder->videoFrame->format = encoder->video->pix_fmt;
	encoder->videoFrame->width = encoder->video->width;
	encoder->videoFrame->height = encoder->video->height;
	encoder->videoFrame->pts = 0;
	encoder->scaleContext = sws_getContext(VIDEO_HORIZONTAL_PIXELS, VIDEO_VERTICAL_PIXELS,
#ifndef USE_LIBAV
		AV_PIX_FMT_0BGR32,
#else
		AV_PIX_FMT_BGR32,
#endif
		encoder->videoFrame->width, encoder->videoFrame->height, encoder->video->pix_fmt,
		SWS_POINT, 0, 0, 0);
	av_image_alloc(encoder->videoFrame->data, encoder->videoFrame->linesize, encoder->video->width, encoder->video->height, encoder->video->pix_fmt, 32);

	avio_open(&encoder->context->pb, outfile, AVIO_FLAG_WRITE);
	avformat_write_header(encoder->context, 0);

	return true;
}
Example #23
int dc_video_encoder_open(VideoOutputFile * p_voutf, VideoData * p_vdata) {

	//AVCodec * p_video_codec;
	//AVStream * p_video_stream;

	p_voutf->i_vbuf_size = 9 * p_vdata->i_width * p_vdata->i_height + 10000;
	p_voutf->p_vbuf = (uint8_t *) av_malloc(p_voutf->i_vbuf_size);

	p_voutf->p_codec = avcodec_find_encoder_by_name("libx264"/*p_vdata->psz_codec*/);
	if (p_voutf->p_codec == NULL) {
		fprintf(stderr, "Output video codec not found\n");
		return -1;
	}

	p_voutf->p_codec_ctx = avcodec_alloc_context3(p_voutf->p_codec);

	//Create new video stream
//	p_video_stream = avformat_new_stream(p_voutf->p_fmt, p_video_codec);
//	if (!p_video_stream) {
//		fprintf(stderr, "Cannot create output video stream\n");
//		return -1;
//	}

	p_voutf->p_codec_ctx->codec_id = p_voutf->p_codec->id;
	p_voutf->p_codec_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
	p_voutf->p_codec_ctx->bit_rate = p_vdata->i_bitrate;
	p_voutf->p_codec_ctx->width = p_vdata->i_width;
	p_voutf->p_codec_ctx->height = p_vdata->i_height;
	p_voutf->p_codec_ctx->time_base = (AVRational) {1 ,
				p_vdata->i_framerate};
	p_voutf->p_codec_ctx->pix_fmt = PIX_FMT_YUV420P;
	p_voutf->p_codec_ctx->gop_size = p_voutf->i_gop_size;//p_vdata->i_framerate;

	av_opt_set(p_voutf->p_codec_ctx->priv_data, "preset", "ultrafast", 0);
	av_opt_set(p_voutf->p_codec_ctx->priv_data, "tune", "zerolatency", 0);

//	if (p_voutf->p_fmt->oformat->flags & AVFMT_GLOBALHEADER)
//		p_voutf->p_codec_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;

//	p_video_stream->codec->codec_id = p_video_codec->id;
//	p_video_stream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
//	p_video_stream->codec->bit_rate = p_voutf->p_vdata->i_bitrate;
//	p_video_stream->codec->width = p_voutf->p_vdata->i_width;
//	p_video_stream->codec->height = p_voutf->p_vdata->i_height;
//	p_video_stream->codec->time_base = (AVRational) {1 ,
//				p_voutf->p_vdata->i_framerate};
//	p_video_stream->codec->pix_fmt = PIX_FMT_YUV420P;
//	p_video_stream->codec->gop_size = p_voutf->p_vdata->i_framerate;
//
//	av_opt_set(p_video_stream->codec->priv_data, "preset", "ultrafast", 0);
//	av_opt_set(p_video_stream->codec->priv_data, "tune", "zerolatency", 0);

	/*
	 p_video_stream->codec->max_b_frames = 0;
	 p_video_stream->codec->thread_count = 1;
	 p_video_stream->codec->delay = 0;
	 p_video_stream->codec->rc_lookahead = 0;
	 */

	/*
	 * p_video_stream->codec->gop_size = p_voutf->i_vfr;
	 * videoStream->codec->gop_size = 1;
	 * p_video_stream->codec->rc_lookahead = 0;
	 * videoStream->time_base = (AVRational) {1 , 1000000};
	 * videoStream->r_frame_rate = (AVRational) {outVideoCtx->video_framerate, 1};
	 * av_opt_set(videoStream->codec->priv_data, "preset", "slow", 0);
	 * videoStream->codec->me_range = 16;
	 * videoStream->codec->max_qdiff = 4;
	 * videoStream->codec->qmin = 10;
	 * videoStream->codec->qmax = 51;
	 * videoStream->codec->qcompress = 0.6;
	 * videoStream->codec->profile = FF_PROFILE_H264_BASELINE;
	 * videoStream->codec->level = 10;
	 *
	 */

//	if (p_voutf->p_fmt->oformat->flags & AVFMT_GLOBALHEADER)
//		p_video_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

	p_voutf->i_vstream_idx = 0;//p_video_stream->index;

	/* open the video codec */
	if (avcodec_open2(p_voutf->p_codec_ctx, p_voutf->p_codec, NULL) < 0) {
		fprintf(stderr, "Cannot open output video codec\n");
		return -1;
	}

//	/* open the video codec */
//	if (avcodec_open2(p_video_stream->codec, p_video_codec, NULL) < 0) {
//		fprintf(stderr, "Cannot open output video codec\n");
//		return -1;
//	}

	return 0;
}
Example #24
static int
bt_play_open(struct voss_backend *pbe, const char *devname, int samplerate,
    int bufsize, int *pchannels, int *pformat)
{
	struct bt_config *cfg = pbe->arg;
	int retval;

	bt_init_cfg(cfg);

	retval = bt_open(pbe, devname, samplerate, bufsize, pchannels, pformat,
	    cfg, SDP_SERVICE_CLASS_AUDIO_SINK, 1);
	if (retval != 0)
		return (retval);

	/* setup codec */
	switch (cfg->codec) {
	case CODEC_SBC:
		cfg->handle.sbc_enc =
		    malloc(sizeof(*cfg->handle.sbc_enc));
		if (cfg->handle.sbc_enc == NULL)
			return (-1);
		memset(cfg->handle.sbc_enc, 0, sizeof(*cfg->handle.sbc_enc));
		break;
#ifdef HAVE_FFMPEG
	case CODEC_AAC:
		av_register_all();

		cfg->handle.av.codec = avcodec_find_encoder_by_name("aac");
		if (cfg->handle.av.codec == NULL) {
			DPRINTF("Codec AAC encoder not found\n");
			goto av_error_0;
		}
		cfg->handle.av.format = avformat_alloc_context();
		if (cfg->handle.av.format == NULL) {
			DPRINTF("Could not allocate format context\n");
			goto av_error_0;
		}
		cfg->handle.av.format->oformat =
		    av_guess_format("latm", NULL, NULL);
		if (cfg->handle.av.format->oformat == NULL) {
			DPRINTF("Could not guess output format\n");
			goto av_error_1;
		}
		cfg->handle.av.stream = avformat_new_stream(
		    cfg->handle.av.format, cfg->handle.av.codec);

		if (cfg->handle.av.stream == NULL) {
			DPRINTF("Could not create new stream\n");
			goto av_error_1;
		}
		cfg->handle.av.context = cfg->handle.av.stream->codec;
		if (cfg->handle.av.context == NULL) {
			DPRINTF("Could not allocate audio context\n");
			goto av_error_1;
		}
		avcodec_get_context_defaults3(cfg->handle.av.context,
		    cfg->handle.av.codec);

		cfg->handle.av.context->bit_rate = 128000;
		cfg->handle.av.context->sample_fmt = AV_SAMPLE_FMT_FLTP;
		cfg->handle.av.context->sample_rate = samplerate;
		switch (*pchannels) {
		case 1:
			cfg->handle.av.context->channel_layout = AV_CH_LAYOUT_MONO;
			cfg->handle.av.context->channels = 1;
			break;
		default:
			cfg->handle.av.context->channel_layout = AV_CH_LAYOUT_STEREO;
			cfg->handle.av.context->channels = 2;
			break;
		}

		cfg->handle.av.context->profile = FF_PROFILE_AAC_LOW;
		if (1) {
			AVDictionary *opts = NULL;

			av_dict_set(&opts, "strict", "-2", 0);
			av_dict_set_int(&opts, "latm", 1, 0);

			if (avcodec_open2(cfg->handle.av.context,
			    cfg->handle.av.codec, &opts) < 0) {
				av_dict_free(&opts);

				DPRINTF("Could not open codec\n");
				goto av_error_1;
			}
			av_dict_free(&opts);
		}
		cfg->handle.av.frame = av_frame_alloc();
		if (cfg->handle.av.frame == NULL) {
			DPRINTF("Could not allocate audio frame\n");
			goto av_error_2;
		}
		cfg->handle.av.frame->nb_samples = cfg->handle.av.context->frame_size;
		cfg->handle.av.frame->format = cfg->handle.av.context->sample_fmt;
		cfg->handle.av.frame->channel_layout = cfg->handle.av.context->channel_layout;
		cfg->rem_in_size = av_samples_get_buffer_size(NULL,
		    cfg->handle.av.context->channels,
		    cfg->handle.av.context->frame_size,
		    cfg->handle.av.context->sample_fmt, 0);

		cfg->rem_in_data = av_malloc(cfg->rem_in_size);
		if (cfg->rem_in_data == NULL) {
			DPRINTF("Could not allocate %u bytes sample buffer\n",
			    (unsigned)cfg->rem_in_size);
			goto av_error_3;
		}
		retval = avcodec_fill_audio_frame(cfg->handle.av.frame,
		    cfg->handle.av.context->channels,
		    cfg->handle.av.context->sample_fmt,
		    cfg->rem_in_data, cfg->rem_in_size, 0);
		if (retval < 0) {
			DPRINTF("Could not setup audio frame\n");
			goto av_error_4;
		}
		break;
av_error_4:
		av_free(cfg->rem_in_data);
av_error_3:
		av_frame_free(&cfg->handle.av.frame);
av_error_2:
		avcodec_close(cfg->handle.av.context);
av_error_1:
		avformat_free_context(cfg->handle.av.format);
		cfg->handle.av.context = NULL;
av_error_0:
		bt_close(pbe);
		return (-1);
#endif
	default:
		bt_close(pbe);
		return (-1);
	}
	return (0);
}
Example #25
bool AVFormatWriter::Init(void)
{
    if (m_videoOutBuf)
        delete [] m_videoOutBuf;

    if (m_width && m_height)
        m_videoOutBuf = new unsigned char[m_width * m_height * 2 + 10];

    AVOutputFormat *fmt = av_guess_format(m_container.toAscii().constData(),
                                          NULL, NULL);
    if (!fmt)
    {
        LOG(VB_RECORD, LOG_ERR, LOC +
            QString("Init(): Unable to guess AVOutputFormat from container %1")
                    .arg(m_container));
        return false;
    }

    m_fmt = *fmt;

    if (m_width && m_height)
    {
        m_avVideoCodec = avcodec_find_encoder_by_name(
            m_videoCodec.toAscii().constData());
        if (!m_avVideoCodec)
        {
            LOG(VB_RECORD, LOG_ERR, LOC +
                QString("Init(): Unable to find video codec %1").arg(m_videoCodec));
            return false;
        }

        m_fmt.video_codec = m_avVideoCodec->id;
    }
    else
        m_fmt.video_codec = CODEC_ID_NONE;

    m_avAudioCodec = avcodec_find_encoder_by_name(
        m_audioCodec.toAscii().constData());
    if (!m_avAudioCodec)
    {
        LOG(VB_RECORD, LOG_ERR, LOC +
            QString("Init(): Unable to find audio codec %1").arg(m_audioCodec));
        return false;
    }

    m_fmt.audio_codec = m_avAudioCodec->id;

    m_ctx = avformat_alloc_context();
    if (!m_ctx)
    {
        LOG(VB_RECORD, LOG_ERR,
            LOC + "Init(): Unable to allocate AVFormatContext");
        return false;
    }

    m_ctx->oformat = &m_fmt;

    if (m_container == "mpegts")
        m_ctx->packet_size = 2324;

    snprintf(m_ctx->filename, sizeof(m_ctx->filename), "%s",
             m_filename.toAscii().constData());

    if (m_fmt.video_codec != CODEC_ID_NONE)
        m_videoStream = AddVideoStream();
    if (m_fmt.audio_codec != CODEC_ID_NONE)
        m_audioStream = AddAudioStream();

    m_pkt = new AVPacket;
    if (!m_pkt)
    {
        LOG(VB_RECORD, LOG_ERR, LOC + "Init(): error allocating AVPacket");
        return false;
    }

    if (av_set_parameters(m_ctx, NULL) < 0)
    {
        LOG(VB_RECORD, LOG_ERR, "Init(): Invalid output format parameters");
        return false;
    }

    if ((m_videoStream) && (!OpenVideo()))
    {
        LOG(VB_RECORD, LOG_ERR, LOC + "Init(): OpenVideo() failed");
        return false;
    }

    if ((m_audioStream) && (!OpenAudio()))
    {
        LOG(VB_RECORD, LOG_ERR, LOC + "Init(): OpenAudio() failed");
        return false;
    }

    return true;
}
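
Init() above uses the legacy av_set_parameters()/CODEC_ID_* API. As a rough sketch of the equivalent bootstrap on newer FFmpeg (the helper name and the trimmed error handling are illustrative, not MythTV's code):

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

/* Sketch only: guess the container, look up the encoder by name, and create
 * the output context plus one stream. */
static AVFormatContext *alloc_output_sketch(const char *filename,
                                            const char *container,
                                            const char *vcodec_name)
{
    AVOutputFormat *fmt = av_guess_format(container, NULL, NULL);
    if (!fmt)
        return NULL;

    AVFormatContext *ctx = NULL;
    if (avformat_alloc_output_context2(&ctx, fmt, NULL, filename) < 0)
        return NULL;

    AVCodec *vcodec = avcodec_find_encoder_by_name(vcodec_name);
    AVStream *vstream = vcodec ? avformat_new_stream(ctx, NULL) : NULL;
    if (!vstream)
    {
        avformat_free_context(ctx);
        return NULL;
    }

    /* The caller would still allocate an AVCodecContext for vcodec, open it,
     * copy its parameters into vstream->codecpar, and only then call
     * avformat_write_header(). */
    return ctx;
}
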
Ejemplo n.º 26
0
bool FFmpegEncoderOpen(struct FFmpegEncoder* encoder, const char* outfile) {
	AVCodec* acodec = avcodec_find_encoder_by_name(encoder->audioCodec);
	AVCodec* vcodec = avcodec_find_encoder_by_name(encoder->videoCodec);
	if ((encoder->audioCodec && !acodec) || !vcodec || !FFmpegEncoderVerifyContainer(encoder)) {
		return false;
	}

	encoder->currentAudioSample = 0;
	encoder->currentAudioFrame = 0;
	encoder->currentVideoFrame = 0;
	encoder->nextAudioPts = 0;

	AVOutputFormat* oformat = av_guess_format(encoder->containerFormat, 0, 0);
#ifndef USE_LIBAV
	avformat_alloc_output_context2(&encoder->context, oformat, 0, outfile);
#else
	encoder->context = avformat_alloc_context();
	strncpy(encoder->context->filename, outfile, sizeof(encoder->context->filename) - 1);
	encoder->context->filename[sizeof(encoder->context->filename) - 1] = '\0';
	encoder->context->oformat = oformat;
#endif

	if (acodec) {
#ifdef FFMPEG_USE_CODECPAR
		encoder->audioStream = avformat_new_stream(encoder->context, NULL);
		encoder->audio = avcodec_alloc_context3(acodec);
#else
		encoder->audioStream = avformat_new_stream(encoder->context, acodec);
		encoder->audio = encoder->audioStream->codec;
#endif
		encoder->audio->bit_rate = encoder->audioBitrate;
		encoder->audio->channels = 2;
		encoder->audio->channel_layout = AV_CH_LAYOUT_STEREO;
		encoder->audio->sample_rate = encoder->sampleRate;
		encoder->audio->sample_fmt = encoder->sampleFormat;
		AVDictionary* opts = 0;
		av_dict_set(&opts, "strict", "-2", 0);
		if (encoder->context->oformat->flags & AVFMT_GLOBALHEADER) {
#ifdef AV_CODEC_FLAG_GLOBAL_HEADER
			encoder->audio->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
#else
			encoder->audio->flags |= CODEC_FLAG_GLOBAL_HEADER;
#endif
		}
		avcodec_open2(encoder->audio, acodec, &opts);
		av_dict_free(&opts);
#if LIBAVCODEC_VERSION_MAJOR >= 55
		encoder->audioFrame = av_frame_alloc();
#else
		encoder->audioFrame = avcodec_alloc_frame();
#endif
		if (!encoder->audio->frame_size) {
			encoder->audio->frame_size = 1;
		}
		encoder->audioFrame->nb_samples = encoder->audio->frame_size;
		encoder->audioFrame->format = encoder->audio->sample_fmt;
		encoder->audioFrame->pts = 0;
#ifdef USE_LIBAVRESAMPLE
		encoder->resampleContext = avresample_alloc_context();
		av_opt_set_int(encoder->resampleContext, "in_channel_layout", AV_CH_LAYOUT_STEREO, 0);
		av_opt_set_int(encoder->resampleContext, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
		av_opt_set_int(encoder->resampleContext, "in_sample_rate", PREFERRED_SAMPLE_RATE, 0);
		av_opt_set_int(encoder->resampleContext, "out_sample_rate", encoder->sampleRate, 0);
		av_opt_set_int(encoder->resampleContext, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
		av_opt_set_int(encoder->resampleContext, "out_sample_fmt", encoder->sampleFormat, 0);
		avresample_open(encoder->resampleContext);
#else
		encoder->resampleContext = swr_alloc_set_opts(NULL, AV_CH_LAYOUT_STEREO, encoder->sampleFormat, encoder->sampleRate,
		                                              AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, PREFERRED_SAMPLE_RATE, 0, NULL);
		swr_init(encoder->resampleContext);
#endif
		encoder->audioBufferSize = (encoder->audioFrame->nb_samples * PREFERRED_SAMPLE_RATE / encoder->sampleRate) * 4;
		encoder->audioBuffer = av_malloc(encoder->audioBufferSize);
		encoder->postaudioBufferSize = av_samples_get_buffer_size(0, encoder->audio->channels, encoder->audio->frame_size, encoder->audio->sample_fmt, 0);
		encoder->postaudioBuffer = av_malloc(encoder->postaudioBufferSize);
		avcodec_fill_audio_frame(encoder->audioFrame, encoder->audio->channels, encoder->audio->sample_fmt, (const uint8_t*) encoder->postaudioBuffer, encoder->postaudioBufferSize, 0);

		if (encoder->audio->codec->id == AV_CODEC_ID_AAC &&
		    (!strcasecmp(encoder->containerFormat, "mp4") ||
		        !strcasecmp(encoder->containerFormat, "m4v") ||
		        !strcasecmp(encoder->containerFormat, "mov"))) {
			// MP4 container doesn't support the raw ADTS AAC format that the encoder spits out
#ifdef FFMPEG_USE_NEW_BSF
			av_bsf_alloc(av_bsf_get_by_name("aac_adtstoasc"), &encoder->absf);
			avcodec_parameters_from_context(encoder->absf->par_in, encoder->audio);
			av_bsf_init(encoder->absf);
#else
			encoder->absf = av_bitstream_filter_init("aac_adtstoasc");
#endif
		}
#ifdef FFMPEG_USE_CODECPAR
		avcodec_parameters_from_context(encoder->audioStream->codecpar, encoder->audio);
#endif
	}

#ifdef FFMPEG_USE_CODECPAR
	encoder->videoStream = avformat_new_stream(encoder->context, NULL);
	encoder->video = avcodec_alloc_context3(vcodec);
#else
	encoder->videoStream = avformat_new_stream(encoder->context, vcodec);
	encoder->video = encoder->videoStream->codec;
#endif
	encoder->video->bit_rate = encoder->videoBitrate;
	encoder->video->width = encoder->width;
	encoder->video->height = encoder->height;
	encoder->video->time_base = (AVRational) { VIDEO_TOTAL_LENGTH, GBA_ARM7TDMI_FREQUENCY };
	encoder->video->pix_fmt = encoder->pixFormat;
	encoder->video->gop_size = 60;
	encoder->video->max_b_frames = 3;
	if (encoder->context->oformat->flags & AVFMT_GLOBALHEADER) {
#ifdef AV_CODEC_FLAG_GLOBAL_HEADER
		encoder->video->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
#else
		encoder->video->flags |= CODEC_FLAG_GLOBAL_HEADER;
#endif
	}

	if (encoder->video->codec->id == AV_CODEC_ID_H264 &&
	    (!strcasecmp(encoder->containerFormat, "mp4") ||
	        !strcasecmp(encoder->containerFormat, "m4v") ||
	        !strcasecmp(encoder->containerFormat, "mov"))) {
		// QuickTime and a few other things require YUV420
		encoder->video->pix_fmt = AV_PIX_FMT_YUV420P;
	}

	if (strcmp(vcodec->name, "libx264") == 0) {
		// Try to adaptively figure out when you can use a slower encoder
		if (encoder->width * encoder->height > 1000000) {
			av_opt_set(encoder->video->priv_data, "preset", "superfast", 0);
		} else if (encoder->width * encoder->height > 500000) {
			av_opt_set(encoder->video->priv_data, "preset", "veryfast", 0);
		} else {
			av_opt_set(encoder->video->priv_data, "preset", "faster", 0);
		}
		if (encoder->videoBitrate == 0) {
			av_opt_set(encoder->video->priv_data, "crf", "0", 0);
			encoder->video->pix_fmt = AV_PIX_FMT_YUV444P;
		}
	}

	avcodec_open2(encoder->video, vcodec, 0);
#if LIBAVCODEC_VERSION_MAJOR >= 55
	encoder->videoFrame = av_frame_alloc();
#else
	encoder->videoFrame = avcodec_alloc_frame();
#endif
	encoder->videoFrame->format = encoder->video->pix_fmt;
	encoder->videoFrame->width = encoder->video->width;
	encoder->videoFrame->height = encoder->video->height;
	encoder->videoFrame->pts = 0;
	_ffmpegSetVideoDimensions(&encoder->d, encoder->iwidth, encoder->iheight);
	av_image_alloc(encoder->videoFrame->data, encoder->videoFrame->linesize, encoder->video->width, encoder->video->height, encoder->video->pix_fmt, 32);
#ifdef FFMPEG_USE_CODECPAR
	avcodec_parameters_from_context(encoder->videoStream->codecpar, encoder->video);
#endif

	if (avio_open(&encoder->context->pb, outfile, AVIO_FLAG_WRITE) < 0) {
		return false;
	}
	return avformat_write_header(encoder->context, 0) >= 0;
}
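
FFmpegEncoderOpen() ignores the return values of the aac_adtstoasc setup calls. A defensive sketch of just that piece, assuming the av_bsf_* API (the FFMPEG_USE_NEW_BSF branch) and an already-opened audio context; the helper name is hypothetical:

#include <libavcodec/avcodec.h>
/* Recent FFmpeg releases also split the av_bsf_* declarations into <libavcodec/bsf.h>. */

/* Sketch only: set up the aac_adtstoasc bitstream filter with error checks so
 * ADTS AAC packets can be muxed into MP4-family containers. */
static AVBSFContext* allocAdtsToAscSketch(const AVCodecContext* audioContext) {
	const AVBitStreamFilter* filter = av_bsf_get_by_name("aac_adtstoasc");
	if (!filter) {
		return NULL;
	}
	AVBSFContext* bsf = NULL;
	if (av_bsf_alloc(filter, &bsf) < 0) {
		return NULL;
	}
	if (avcodec_parameters_from_context(bsf->par_in, audioContext) < 0 || av_bsf_init(bsf) < 0) {
		av_bsf_free(&bsf);
		return NULL;
	}
	/* At write time each encoded packet passes through av_bsf_send_packet()
	 * and av_bsf_receive_packet() before going to the muxer. */
	return bsf;
}
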
Ejemplo n.º 27
0
static bool ffmpeg_init_audio(ffmpeg_t *handle)
{
   struct ff_config_param *params = &handle->config;
   struct ff_audio_info *audio    = &handle->audio;
   struct ff_video_info *video    = &handle->video;
   struct ffemu_params *param     = &handle->params;

   AVCodec *codec = avcodec_find_encoder_by_name(*params->acodec ? params->acodec : "flac");
   if (!codec)
   {
      RARCH_ERR("[FFmpeg]: Cannot find acodec %s.\n", *params->acodec ? params->acodec : "flac");
      return false;
   }

   audio->encoder = codec;

   audio->codec = avcodec_alloc_context3(codec);

   audio->codec->codec_type     = AVMEDIA_TYPE_AUDIO;
   audio->codec->channels       = param->channels;
   audio->codec->channel_layout = param->channels > 1 ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;

   ffmpeg_audio_resolve_format(audio, codec);
   ffmpeg_audio_resolve_sample_rate(handle, codec);

   if (params->sample_rate)
   {
      audio->ratio = (double)params->sample_rate / param->samplerate;
      audio->codec->sample_rate = params->sample_rate;
      audio->codec->time_base = av_d2q(1.0 / params->sample_rate, 1000000);

      rarch_resampler_realloc(&audio->resampler_data,
            &audio->resampler,
            g_settings.audio.resampler,
            audio->ratio);
   }
   else
   {
      audio->codec->sample_fmt = AV_SAMPLE_FMT_S16;
      audio->codec->sample_rate = (int)roundf(param->samplerate);
      audio->codec->time_base = av_d2q(1.0 / param->samplerate, 1000000);
   }

   if (params->audio_qscale)
   {
      audio->codec->flags |= CODEC_FLAG_QSCALE;
      audio->codec->global_quality = params->audio_global_quality;
   }
   else if (params->audio_bit_rate)
      audio->codec->bit_rate = params->audio_bit_rate;

   // Allow experimental codecs.
   audio->codec->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;

   if (handle->muxer.ctx->oformat->flags & AVFMT_GLOBALHEADER)
      audio->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

   if (avcodec_open2(audio->codec, codec, params->audio_opts ? &params->audio_opts : NULL) != 0)
      return false;

   if (!audio->codec->frame_size) // If not set (PCM), just set something.
      audio->codec->frame_size = 1024;

   audio->buffer = (uint8_t*)av_malloc(
         audio->codec->frame_size *
         audio->codec->channels *
         audio->sample_size);

   //RARCH_LOG("[FFmpeg]: Audio frame size: %d.\n", audio->codec->frame_size);

   if (!audio->buffer)
      return false;

   audio->outbuf_size = FF_MIN_BUFFER_SIZE;
   audio->outbuf = (uint8_t*)av_malloc(audio->outbuf_size);
   if (!audio->outbuf)
      return false;

   return true;
}
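
One detail the avcodec_open2() call above relies on is that recognized entries are consumed from the options dictionary, so whatever remains afterwards was not understood by the encoder. A small sketch of that check (the helper name is hypothetical):

#include <stdio.h>
#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>

// Sketch only: open the encoder with an options dictionary and warn about
// any entries the codec did not consume.
static int open_with_opts_sketch(AVCodecContext *ctx, AVCodec *codec,
      AVDictionary **opts)
{
   int ret = avcodec_open2(ctx, codec, opts);
   if (ret < 0)
      return ret;

   AVDictionaryEntry *entry = NULL;
   while (opts && (entry = av_dict_get(*opts, "", entry, AV_DICT_IGNORE_SUFFIX)))
      fprintf(stderr, "[FFmpeg]: Encoder ignored option \"%s\".\n", entry->key);

   return 0;
}
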
Ejemplo n.º 28
0
guacenc_video* guacenc_video_alloc(const char* path, const char* codec_name,
        int width, int height, int bitrate) {

    /* Pull codec based on name */
    AVCodec* codec = avcodec_find_encoder_by_name(codec_name);
    if (codec == NULL) {
        guacenc_log(GUAC_LOG_ERROR, "Failed to locate codec \"%s\".",
                codec_name);
        goto fail_codec;
    }

    /* Retrieve encoding context */
    AVCodecContext* context = avcodec_alloc_context3(codec);
    if (context == NULL) {
        guacenc_log(GUAC_LOG_ERROR, "Failed to allocate context for "
                "codec \"%s\".", codec_name);
        goto fail_context;
    }

    /* Init context with encoding parameters */
    context->bit_rate = bitrate;
    context->width = width;
    context->height = height;
    context->time_base = (AVRational) { 1, GUACENC_VIDEO_FRAMERATE };
    context->gop_size = 10;
    context->max_b_frames = 1;
    context->pix_fmt = AV_PIX_FMT_YUV420P;

    /* Open codec for use */
    if (avcodec_open2(context, codec, NULL) < 0) {
        guacenc_log(GUAC_LOG_ERROR, "Failed to open codec \"%s\".", codec_name);
        goto fail_codec_open;
    }

    /* Allocate corresponding frame */
    AVFrame* frame = av_frame_alloc();
    if (frame == NULL) {
        goto fail_frame;
    }

    /* Copy necessary data for frame from context */
    frame->format = context->pix_fmt;
    frame->width = context->width;
    frame->height = context->height;

    /* Allocate actual backing data for frame */
    if (av_image_alloc(frame->data, frame->linesize, frame->width,
                frame->height, frame->format, 32) < 0) {
        goto fail_frame_data;
    }

    /* Open output file */
    int fd = open(path, O_CREAT | O_EXCL | O_WRONLY, S_IRUSR | S_IWUSR);
    if (fd == -1) {
        guacenc_log(GUAC_LOG_ERROR, "Failed to open output file \"%s\": %s",
                path, strerror(errno));
        goto fail_output_fd;
    }

    /* Create stream for output file */
    FILE* output = fdopen(fd, "wb");
    if (output == NULL) {
        guacenc_log(GUAC_LOG_ERROR, "Failed to allocate stream for output "
                "file \"%s\": %s", path, strerror(errno));
        goto fail_output_file;
    }

    /* Allocate video structure */
    guacenc_video* video = malloc(sizeof(guacenc_video));
    if (video == NULL) {
        goto fail_video;
    }

    /* Init properties of video */
    video->output = output;
    video->context = context;
    video->next_frame = frame;
    video->width = width;
    video->height = height;
    video->bitrate = bitrate;

    /* No frames have been written or prepared yet */
    video->last_timestamp = 0;
    video->next_pts = 0;

    return video;

    /* Free all allocated data in case of failure */
fail_video:
    fclose(output);
    /* fclose() already closed the underlying descriptor, so skip close(fd) */
    goto fail_output_fd;

fail_output_file:
    close(fd);

fail_output_fd:
    av_freep(&frame->data[0]);

fail_frame_data:
    av_frame_free(&frame);

fail_frame:
fail_codec_open:
    avcodec_free_context(&context);

fail_context:
fail_codec:
    return NULL;

}
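
guacenc_video_alloc() writes encoded output straight to a plain FILE* instead of going through a muxer. One plausible shape for the matching encode step with the newer send/receive API (a sketch, not Guacamole's actual write path; it assumes a self-framing bitstream such as raw H.264):

#include <stdio.h>
#include <libavcodec/avcodec.h>

/* Sketch only: feed one frame (or NULL to flush) to the encoder and append
 * every produced packet to the output stream. */
static int guacenc_write_frame_sketch(AVCodecContext* context, AVFrame* frame,
        FILE* output) {

    int ret = avcodec_send_frame(context, frame);
    if (ret < 0)
        return ret;

    AVPacket* packet = av_packet_alloc();
    if (packet == NULL)
        return AVERROR(ENOMEM);

    while ((ret = avcodec_receive_packet(context, packet)) >= 0) {
        fwrite(packet->data, 1, packet->size, output);
        av_packet_unref(packet);
    }

    av_packet_free(&packet);

    /* EAGAIN (more input needed) and EOF (fully flushed) are expected here */
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        return 0;

    return ret;
}
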
Ejemplo n.º 29
0
bool FFmpegEncoderSetAudio(struct FFmpegEncoder* encoder, const char* acodec, unsigned abr) {
	static const struct {
		int format;
		int priority;
	} priorities[] = {
		{ AV_SAMPLE_FMT_S16, 0 },
		{ AV_SAMPLE_FMT_S16P, 1 },
		{ AV_SAMPLE_FMT_S32, 2 },
		{ AV_SAMPLE_FMT_S32P, 2 },
		{ AV_SAMPLE_FMT_FLT, 3 },
		{ AV_SAMPLE_FMT_FLTP, 3 },
		{ AV_SAMPLE_FMT_DBL, 4 },
		{ AV_SAMPLE_FMT_DBLP, 4 }
	};

	if (!acodec) {
		encoder->audioCodec = 0;
		return true;
	}

	AVCodec* codec = avcodec_find_encoder_by_name(acodec);
	if (!codec) {
		return false;
	}

	if (!codec->sample_fmts) {
		return false;
	}
	size_t i;
	size_t j;
	int priority = INT_MAX;
	encoder->sampleFormat = AV_SAMPLE_FMT_NONE;
	for (i = 0; codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; ++i) {
		for (j = 0; j < sizeof(priorities) / sizeof(*priorities); ++j) {
			if (codec->sample_fmts[i] == priorities[j].format && priority > priorities[j].priority) {
				priority = priorities[j].priority;
				encoder->sampleFormat = codec->sample_fmts[i];
			}
		}
	}
	if (encoder->sampleFormat == AV_SAMPLE_FMT_NONE) {
		return false;
	}
	encoder->sampleRate = PREFERRED_SAMPLE_RATE;
	if (codec->supported_samplerates) {
		for (i = 0; codec->supported_samplerates[i]; ++i) {
			if (codec->supported_samplerates[i] < PREFERRED_SAMPLE_RATE) {
				continue;
			}
			if (encoder->sampleRate == PREFERRED_SAMPLE_RATE || encoder->sampleRate > codec->supported_samplerates[i]) {
				encoder->sampleRate = codec->supported_samplerates[i];
			}
		}
	} else if (codec->id == AV_CODEC_ID_AAC) {
		// HACK: AAC doesn't support 32768Hz (it rounds to 32000), but libfaac doesn't tell us that
		encoder->sampleRate = 44100;
	}
	encoder->audioCodec = acodec;
	encoder->audioBitrate = abr;
	return true;
}
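
The sample-format priority scan above generalizes to the codec's other capability lists. A minimal sketch doing the same for pix_fmts (the YUV420P preference and the helper name are assumptions, not taken from this code):

#include <libavcodec/avcodec.h>
#include <libavutil/pixfmt.h>

/* Sketch only: scan the codec's advertised pixel formats, preferring YUV420P
 * when the encoder offers it and otherwise taking its first choice. */
static enum AVPixelFormat pickPixelFormatSketch(const AVCodec* codec) {
	if (!codec->pix_fmts) {
		return AV_PIX_FMT_YUV420P;
	}
	int i;
	for (i = 0; codec->pix_fmts[i] != AV_PIX_FMT_NONE; ++i) {
		if (codec->pix_fmts[i] == AV_PIX_FMT_YUV420P) {
			return AV_PIX_FMT_YUV420P;
		}
	}
	return codec->pix_fmts[0];
}
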
Ejemplo n.º 30
0
static bool ffmpeg_init_video(ffmpeg_t *handle)
{
   struct ff_config_param *params = &handle->config;
   struct ff_video_info *video    = &handle->video;
   struct ffemu_params *param     = &handle->params;

   AVCodec *codec = NULL;

   if (*params->vcodec)
      codec = avcodec_find_encoder_by_name(params->vcodec);
   else
   {
      // By default, lossless video.
      av_dict_set(&params->video_opts, "qp", "0", 0);
      codec = avcodec_find_encoder_by_name("libx264rgb");
   }

   if (!codec)
   {
      RARCH_ERR("[FFmpeg]: Cannot find vcodec %s.\n", *params->vcodec ? params->vcodec : "libx264rgb");
      return false;
   }

   video->encoder = codec;

   // Only fall back to libswscale when the format is something the "in-house" scaler can't handle.
   // libswscale doesn't scale RGB -> RGB correctly (it goes via YUV first), and that's non-trivial to fix
   // upstream as the library is heavily geared towards YUV.
   // If we're dealing with strange formats or YUV, just use libswscale.
   if (params->out_pix_fmt != PIX_FMT_NONE)
   {
      video->pix_fmt = params->out_pix_fmt;
      if (video->pix_fmt != PIX_FMT_BGR24 && video->pix_fmt != PIX_FMT_RGB32)
         video->use_sws = true;

      switch (video->pix_fmt)
      {
         case PIX_FMT_BGR24:
            video->scaler.out_fmt = SCALER_FMT_BGR24;
            break;

         case PIX_FMT_RGB32:
            video->scaler.out_fmt = SCALER_FMT_ARGB8888;
            break;

         default:
            break;
      }
   }
   else // Use BGR24 as default out format.
   {
      video->pix_fmt        = PIX_FMT_BGR24;
      video->scaler.out_fmt = SCALER_FMT_BGR24;
   }

   switch (param->pix_fmt)
   {
      case FFEMU_PIX_RGB565:
         video->scaler.in_fmt = SCALER_FMT_RGB565;
         video->in_pix_fmt    = PIX_FMT_RGB565;
         video->pix_size      = 2;
         break;

      case FFEMU_PIX_BGR24:
         video->scaler.in_fmt = SCALER_FMT_BGR24;
         video->in_pix_fmt    = PIX_FMT_BGR24;
         video->pix_size      = 3;
         break;

      case FFEMU_PIX_ARGB8888:
         video->scaler.in_fmt = SCALER_FMT_ARGB8888;
         video->in_pix_fmt    = PIX_FMT_RGB32;
         video->pix_size      = 4;
         break;

      default:
         return false;
   }

   video->codec = avcodec_alloc_context3(codec);

   // Useful to set scale_factor to 2 for chroma subsampled formats to maintain full chroma resolution.
   // (Or just use 4:4:4 or RGB ...)
   param->out_width  *= params->scale_factor;
   param->out_height *= params->scale_factor;

   video->codec->codec_type          = AVMEDIA_TYPE_VIDEO;
   video->codec->width               = param->out_width;
   video->codec->height              = param->out_height;
   video->codec->time_base           = av_d2q((double)params->frame_drop_ratio / param->fps, 1000000); // Arbitrary big number.
   video->codec->sample_aspect_ratio = av_d2q(param->aspect_ratio * param->out_height / param->out_width, 255);
   video->codec->pix_fmt             = video->pix_fmt;

   video->codec->thread_count = params->threads;

   if (params->video_qscale)
   {
      video->codec->flags |= CODEC_FLAG_QSCALE;
      video->codec->global_quality = params->video_global_quality;
   }
   else if (params->video_bit_rate)
      video->codec->bit_rate = params->video_bit_rate;

   if (handle->muxer.ctx->oformat->flags & AVFMT_GLOBALHEADER)
      video->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

   if (avcodec_open2(video->codec, codec, params->video_opts ? &params->video_opts : NULL) != 0)
      return false;

   // Allocate a big buffer; the ffmpeg API doesn't give us any real clue how big this buffer should be.
   video->outbuf_size = 1 << 23;
   video->outbuf = (uint8_t*)av_malloc(video->outbuf_size);

   video->frame_drop_ratio = params->frame_drop_ratio;

   size_t size = avpicture_get_size(video->pix_fmt, param->out_width, param->out_height);
   video->conv_frame_buf = (uint8_t*)av_malloc(size);
   video->conv_frame = av_frame_alloc();
   avpicture_fill((AVPicture*)video->conv_frame, video->conv_frame_buf, video->pix_fmt,
         param->out_width, param->out_height);

   return true;
}
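
When ffmpeg_init_video() decides the in-house scaler cannot handle the format, conversion falls back to libswscale. A bare-bones sketch of that path (the helper name, formats and the SWS_POINT flag are placeholders, and it uses the newer AV_PIX_FMT_* names):

#include <libswscale/swscale.h>
#include <libavutil/frame.h>

// Sketch only: convert one packed source image into a destination AVFrame
// whose buffers were already allocated (e.g. with av_image_alloc).
static int convert_frame_sketch(const uint8_t *src, int src_linesize,
      int width, int height, AVFrame *dst)
{
   struct SwsContext *sws = sws_getContext(width, height, AV_PIX_FMT_RGB32,
         dst->width, dst->height, (enum AVPixelFormat)dst->format,
         SWS_POINT, NULL, NULL, NULL);
   if (!sws)
      return -1;

   const uint8_t *const src_data[4]   = { src, NULL, NULL, NULL };
   const int            src_stride[4] = { src_linesize, 0, 0, 0 };

   sws_scale(sws, src_data, src_stride, 0, height, dst->data, dst->linesize);
   sws_freeContext(sws);
   return 0;
}
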