Example #1
static int decode_packet(int *got_frame, int cached)
{
    int ret = 0;

    if (pkt.stream_index != audio_stream_idx)
        return 0;

    while (1) {
        /* decode audio frame */
        ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error decoding audio frame\n");
            return ret;
        }

        pkt.data += ret;
        pkt.size -= ret;

        if (*got_frame) {
            printf("audio_frame%s n:%d nb_samples:%d pts:%.3f sec\r",
                   cached ? "(cached)" : "",
                   audio_frame_count++, frame->nb_samples,
                   pkt.pts * av_q2d(audio_stream->time_base));

            if (frame->channel_layout != audio_channel_layout || frame->channels != audio_channels) {
                char frame_channel_layout_name[64] = {0};
                char audio_channel_layout_name[64] = {0};
                av_get_channel_layout_string(frame_channel_layout_name, 64, frame->channels, frame->channel_layout);
                av_get_channel_layout_string(audio_channel_layout_name, 64, audio_channels, audio_channel_layout);
                /* PRIu64 from <inttypes.h> replaces the MSVC-only %I64d */
                printf("frame properties do not match: %"PRIu64"(%s) %d -> %"PRIu64"(%s) %d\n",
                       audio_channel_layout, audio_channel_layout_name, audio_channels,
                       frame->channel_layout, frame_channel_layout_name, frame->channels);
            }
            else {
                // resample
                if (swr_ctx != NULL) {
                    int32_t sampleInCount = frame->nb_samples;
                    int sampleOutCount = (int)av_rescale_rnd(
                        swr_get_delay(swr_ctx, frame->sample_rate) + sampleInCount,
                        frame->sample_rate,
                        frame->sample_rate,
                        AV_ROUND_UP);

                    int sampleCountOutput = swr_convert(swr_ctx,
                        (uint8_t**)(&audio_dst_data), sampleOutCount,
                        (const uint8_t**)(frame->extended_data), sampleInCount);
                    if (sampleCountOutput < 0) {
                        printf("Audio sample format conversion failed, ret %d\n", sampleCountOutput);
                        return -1;
                    }

                    /* assumes 2 channels x 16-bit (2-byte) samples */
                    audio_buffer_size = sampleCountOutput * 2 * 2;
                    int written = audio_fifo.write((char *)audio_dst_data, audio_buffer_size);
                    //printf("swr output: sample:%d, size:%d, written %d\n", sampleCountOutput, audio_buffer_size, written);
                }
                else {
                    /* packed audio lives in data[0]; writing the data pointer array itself would be a bug */
                    int written = audio_fifo.write((char *)frame->data[0], frame->linesize[0]);
                    //printf("passthrough output: size:%d, written %d\n", frame->linesize[0], written);
                }
            }
        }

        /* packet has no more data, decode next packet. */
        if (pkt.size <= 0)
            break;
    }

    return ret;
}
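
A minimal sketch of how a decode_packet() helper in this style is usually driven, modeled on FFmpeg's demuxing examples; fmt_ctx, pkt, and the other globals are assumed to be initialized elsewhere, and the final flush call with cached = 1 drains frames buffered inside the decoder:

int got_frame = 0;

/* hypothetical read loop: demux, decode each packet fully */
while (av_read_frame(fmt_ctx, &pkt) >= 0) {
    AVPacket orig_pkt = pkt;      /* decode_packet() advances pkt.data/pkt.size */
    decode_packet(&got_frame, 0);
    av_free_packet(&orig_pkt);
}

/* flush cached frames with an empty packet */
pkt.data = NULL;
pkt.size = 0;
do {
    decode_packet(&got_frame, 1);
} while (got_frame);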
Example #2
static int flv_write_header(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;
    FLVContext *flv = s->priv_data;
    int i, width, height, samplerate, samplesize, channels, audiocodecid, videocodecid;
    double framerate = 0.0;
    int metadata_size_pos, data_size;

    flv->hasAudio = 0;
    flv->hasVideo = 0;

    for(i=0; i<s->nb_streams; i++){
        AVCodecContext *enc = s->streams[i]->codec;
        if (enc->codec_type == CODEC_TYPE_VIDEO) {
            width = enc->width;
            height = enc->height;
            if (s->streams[i]->r_frame_rate.den && s->streams[i]->r_frame_rate.num) {
                framerate = av_q2d(s->streams[i]->r_frame_rate);
            } else {
                framerate = 1/av_q2d(s->streams[i]->codec->time_base);
            }
            flv->hasVideo=1;

            videocodecid = enc->codec_tag;
            if(videocodecid == 0) {
                av_log(enc, AV_LOG_ERROR, "video codec not compatible with flv\n");
                return -1;
            }
        } else {
            flv->hasAudio=1;
            samplerate = enc->sample_rate;
            channels = enc->channels;

            audiocodecid = enc->codec_tag;
            samplesize = (enc->codec_id == CODEC_ID_PCM_S8) ? 8 : 16;

            if(get_audio_flags(enc)<0)
                return -1;
        }
        av_set_pts_info(s->streams[i], 24, 1, 1000); /* 24 bit pts in ms */
    }
    put_tag(pb,"FLV");
    put_byte(pb,1);
    put_byte(pb,   FLV_HEADER_FLAG_HASAUDIO * flv->hasAudio
                 + FLV_HEADER_FLAG_HASVIDEO * flv->hasVideo);
    put_be32(pb,9);
    put_be32(pb,0);

    for(i=0; i<s->nb_streams; i++){
        if(s->streams[i]->codec->codec_tag == 5){
            put_byte(pb,8); // message type
            put_be24(pb,0); // include flags
            put_be24(pb,0); // time stamp
            put_be32(pb,0); // reserved
            put_be32(pb,11); // size
            flv->reserved=5;
        }
    }

    /* write meta_tag */
    put_byte(pb, 18);         // tag type META
    metadata_size_pos= url_ftell(pb);
    put_be24(pb, 0);          // size of data part (sum of all parts below)
    put_be24(pb, 0);          // time stamp
    put_be32(pb, 0);          // reserved

    /* now data of data_size size */

    /* first event name as a string */
    put_byte(pb, AMF_DATA_TYPE_STRING);
    put_amf_string(pb, "onMetaData"); // 12 bytes

    /* mixed array (hash) with size and string/type/data tuples */
    put_byte(pb, AMF_DATA_TYPE_MIXEDARRAY);
    put_be32(pb, 5*flv->hasVideo + 4*flv->hasAudio + 2); // +2 for duration and file size

    put_amf_string(pb, "duration");
    flv->duration_offset= url_ftell(pb);
    put_amf_double(pb, 0); // delayed write

    if(flv->hasVideo){
        put_amf_string(pb, "width");
        put_amf_double(pb, width);

        put_amf_string(pb, "height");
        put_amf_double(pb, height);

        put_amf_string(pb, "videodatarate");
        put_amf_double(pb, s->bit_rate / 1024.0);

        put_amf_string(pb, "framerate");
        put_amf_double(pb, framerate);

        put_amf_string(pb, "videocodecid");
        put_amf_double(pb, videocodecid);
    }

    if(flv->hasAudio){
        put_amf_string(pb, "audiosamplerate");
        put_amf_double(pb, samplerate);

        put_amf_string(pb, "audiosamplesize");
        put_amf_double(pb, samplesize);

        put_amf_string(pb, "stereo");
        put_amf_bool(pb, (channels == 2));

        put_amf_string(pb, "audiocodecid");
        put_amf_double(pb, audiocodecid);
    }

    put_amf_string(pb, "filesize");
    flv->filesize_offset= url_ftell(pb);
    put_amf_double(pb, 0); // delayed write

    put_amf_string(pb, "");
    put_byte(pb, AMF_END_OF_OBJECT);

    /* write total size of tag: the 10 bytes after metadata_size_pos are the
     * rest of the tag header (3 size + 3 timestamp + 4 reserved), so the data
     * part starts at metadata_size_pos + 10 */
    data_size= url_ftell(pb) - metadata_size_pos - 10;
    url_fseek(pb, metadata_size_pos, SEEK_SET);
    put_be24(pb, data_size);
    /* skip the 7 remaining header bytes plus the data to get back to the end,
     * then write PreviousTagSize = data plus the full 11-byte tag header */
    url_fseek(pb, data_size + 10 - 3, SEEK_CUR);
    put_be32(pb, data_size + 11);

    return 0;
}
Example #3
int ff_rate_control_init(MpegEncContext *s)
{
    RateControlContext *rcc= &s->rc_context;
    int i, res;
    static const char * const const_names[]={
        "PI",
        "E",
        "iTex",
        "pTex",
        "tex",
        "mv",
        "fCode",
        "iCount",
        "mcVar",
        "var",
        "isI",
        "isP",
        "isB",
        "avgQP",
        "qComp",
/*        "lastIQP",
        "lastPQP",
        "lastBQP",
        "nextNonBQP",*/
        "avgIITex",
        "avgPITex",
        "avgPPTex",
        "avgBPTex",
        "avgTex",
        NULL
    };
    static double (* const func1[])(void *, double)={
        (void *)bits2qp,
        (void *)qp2bits,
        NULL
    };
    static const char * const func1_names[]={
        "bits2qp",
        "qp2bits",
        NULL
    };
    emms_c();

    res = av_parse_expr(&rcc->rc_eq_eval, s->avctx->rc_eq ? s->avctx->rc_eq : "tex^qComp", const_names, func1_names, func1, NULL, NULL, 0, s->avctx);
    if (res < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "Error parsing rc_eq \"%s\"\n", s->avctx->rc_eq);
        return res;
    }

    for(i=0; i<5; i++){
        rcc->pred[i].coeff= FF_QP2LAMBDA * 7.0;
        rcc->pred[i].count= 1.0;

        rcc->pred[i].decay= 0.4;
        rcc->i_cplx_sum [i]=
        rcc->p_cplx_sum [i]=
        rcc->mv_bits_sum[i]=
        rcc->qscale_sum [i]=
        rcc->frame_count[i]= 1; // 1 is better because of 1/0 and such
        rcc->last_qscale_for[i]=FF_QP2LAMBDA * 5;
    }
    rcc->buffer_index= s->avctx->rc_initial_buffer_occupancy;

    if(s->flags&CODEC_FLAG_PASS2){
        int i;
        char *p;

        /* find number of pics */
        p= s->avctx->stats_in;
        for(i=-1; p; i++){
            p= strchr(p+1, ';');
        }
        i+= s->max_b_frames;
        if(i<=0 || i>=INT_MAX / sizeof(RateControlEntry))
            return -1;
        rcc->entry = av_mallocz(i*sizeof(RateControlEntry));
        if (!rcc->entry)
            return -1;
        rcc->num_entries= i;

        /* init all to skipped P-frames (with B-frames we might have a not yet encoded frame at the end FIXME) */
        for(i=0; i<rcc->num_entries; i++){
            RateControlEntry *rce= &rcc->entry[i];
            rce->pict_type= rce->new_pict_type=FF_P_TYPE;
            rce->qscale= rce->new_qscale=FF_QP2LAMBDA * 2;
            rce->misc_bits= s->mb_num + 10;
            rce->mb_var_sum= s->mb_num*100;
        }

        /* read stats */
        p= s->avctx->stats_in;
        for(i=0; i<rcc->num_entries - s->max_b_frames; i++){
            RateControlEntry *rce;
            int picture_number;
            int e;
            char *next;

            next= strchr(p, ';');
            if(next){
                (*next)=0; //sscanf is unbelievably slow on looong strings //FIXME copy / do not write
                next++;
            }
            e= sscanf(p, " in:%d ", &picture_number);

            assert(picture_number >= 0);
            assert(picture_number < rcc->num_entries);
            rce= &rcc->entry[picture_number];

            e+=sscanf(p, " in:%*d out:%*d type:%d q:%f itex:%d ptex:%d mv:%d misc:%d fcode:%d bcode:%d mc-var:%d var:%d icount:%d skipcount:%d hbits:%d",
                   &rce->pict_type, &rce->qscale, &rce->i_tex_bits, &rce->p_tex_bits, &rce->mv_bits, &rce->misc_bits,
                   &rce->f_code, &rce->b_code, &rce->mc_mb_var_sum, &rce->mb_var_sum, &rce->i_count, &rce->skip_count, &rce->header_bits);
            if(e!=14){
                av_log(s->avctx, AV_LOG_ERROR, "statistics are damaged at line %d, parser out=%d\n", i, e);
                return -1;
            }

            p= next;
        }

        if(init_pass2(s) < 0) return -1;

        //FIXME maybe move to end
        if((s->flags&CODEC_FLAG_PASS2) && s->avctx->rc_strategy == FF_RC_STRATEGY_XVID) {
#if CONFIG_LIBXVID
            return ff_xvid_rate_control_init(s);
#else
            av_log(s->avctx, AV_LOG_ERROR, "Xvid ratecontrol requires libavcodec compiled with Xvid support.\n");
            return -1;
#endif
        }
    }

    if(!(s->flags&CODEC_FLAG_PASS2)){

        rcc->short_term_qsum=0.001;
        rcc->short_term_qcount=0.001;

        rcc->pass1_rc_eq_output_sum= 0.001;
        rcc->pass1_wanted_bits=0.001;

        if(s->avctx->qblur > 1.0){
            av_log(s->avctx, AV_LOG_ERROR, "qblur too large\n");
            return -1;
        }
        /* init stuff with the user specified complexity */
        if(s->avctx->rc_initial_cplx){
            for(i=0; i<60*30; i++){
                double bits= s->avctx->rc_initial_cplx * (i/10000.0 + 1.0)*s->mb_num;
                RateControlEntry rce;

                if     (i%((s->gop_size+3)/4)==0) rce.pict_type= FF_I_TYPE;
                else if(i%(s->max_b_frames+1))    rce.pict_type= FF_B_TYPE;
                else                              rce.pict_type= FF_P_TYPE;

                rce.new_pict_type= rce.pict_type;
                rce.mc_mb_var_sum= bits*s->mb_num/100000;
                rce.mb_var_sum   = s->mb_num;
                rce.qscale   = FF_QP2LAMBDA * 2;
                rce.f_code   = 2;
                rce.b_code   = 1;
                rce.misc_bits= 1;

                if(s->pict_type== FF_I_TYPE){
                    rce.i_count   = s->mb_num;
                    rce.i_tex_bits= bits;
                    rce.p_tex_bits= 0;
                    rce.mv_bits= 0;
                }else{
                    rce.i_count   = 0; //FIXME we do know this approx
                    rce.i_tex_bits= 0;
                    rce.p_tex_bits= bits*0.9;
                    rce.mv_bits= bits*0.1;
                }
                rcc->i_cplx_sum [rce.pict_type] += rce.i_tex_bits*rce.qscale;
                rcc->p_cplx_sum [rce.pict_type] += rce.p_tex_bits*rce.qscale;
                rcc->mv_bits_sum[rce.pict_type] += rce.mv_bits;
                rcc->frame_count[rce.pict_type] ++;

                get_qscale(s, &rce, rcc->pass1_wanted_bits/rcc->pass1_rc_eq_output_sum, i);
                rcc->pass1_wanted_bits+= s->bit_rate/(1/av_q2d(s->avctx->time_base)); //FIXME misbehaves a little for variable fps
            }
        }

    }

    return 0;
}
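
For reference, a pass-1 stats line that the sscanf() pair above accepts would look like the following (field values are purely illustrative):

in:0 out:0 type:1 q:62.0 itex:40000 ptex:0 mv:0 misc:406 fcode:2 bcode:1 mc-var:39600 var:39600 icount:396 skipcount:0 hbits:120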
Example #4
static ImBuf *ffmpeg_fetchibuf(struct anim *anim, int position,
                               IMB_Timecode_Type tc)
{
    int64_t pts_to_search = 0;
    double frame_rate;
    double pts_time_base;
    long long st_time;
    struct anim_index *tc_index = 0;
    AVStream *v_st;
    int new_frame_index = 0; /* To quiet gcc barking... */
    int old_frame_index = 0; /* To quiet gcc barking... */

    if (anim == NULL) return (0);

    av_log(anim->pFormatCtx, AV_LOG_DEBUG, "FETCH: pos=%d\n", position);

    if (tc != IMB_TC_NONE) {
        tc_index = IMB_anim_open_index(anim, tc);
    }

    v_st = anim->pFormatCtx->streams[anim->videoStream];

    frame_rate = av_q2d(av_get_r_frame_rate_compat(v_st));

    st_time = anim->pFormatCtx->start_time;
    pts_time_base = av_q2d(v_st->time_base);

    if (tc_index) {
        new_frame_index = IMB_indexer_get_frame_index(
                              tc_index, position);
        old_frame_index = IMB_indexer_get_frame_index(
                              tc_index, anim->curposition);
        pts_to_search = IMB_indexer_get_pts(
                            tc_index, new_frame_index);
    }
    else {
        pts_to_search = (long long)
                        floor(((double) position) /
                              pts_time_base / frame_rate + 0.5);

        if (st_time != AV_NOPTS_VALUE) {
            pts_to_search += st_time / pts_time_base / AV_TIME_BASE;
        }
    }

    av_log(anim->pFormatCtx, AV_LOG_DEBUG,
           "FETCH: looking for PTS=%lld "
           "(pts_timebase=%g, frame_rate=%g, st_time=%lld)\n",
           (long long int)pts_to_search, pts_time_base, frame_rate, st_time);

    if (anim->last_frame &&
            anim->last_pts <= pts_to_search && anim->next_pts > pts_to_search)
    {
        av_log(anim->pFormatCtx, AV_LOG_DEBUG,
               "FETCH: frame repeat: last: %lld next: %lld\n",
               (long long int)anim->last_pts,
               (long long int)anim->next_pts);
        IMB_refImBuf(anim->last_frame);
        anim->curposition = position;
        return anim->last_frame;
    }

    if (position > anim->curposition + 1 &&
            anim->preseek &&
            !tc_index &&
            position - (anim->curposition + 1) < anim->preseek)
    {
        av_log(anim->pFormatCtx, AV_LOG_DEBUG,
               "FETCH: within preseek interval (no index)\n");

        ffmpeg_decode_video_frame_scan(anim, pts_to_search);
    }
    else if (tc_index &&
             IMB_indexer_can_scan(tc_index, old_frame_index,
                                  new_frame_index))
    {
        av_log(anim->pFormatCtx, AV_LOG_DEBUG,
               "FETCH: within preseek interval "
               "(index tells us)\n");

        ffmpeg_decode_video_frame_scan(anim, pts_to_search);
    }
    else if (position != anim->curposition + 1) {
        long long pos;
        int ret;

        if (tc_index) {
            unsigned long long dts;

            pos = IMB_indexer_get_seek_pos(
                      tc_index, new_frame_index);
            dts = IMB_indexer_get_seek_pos_dts(
                      tc_index, new_frame_index);

            av_log(anim->pFormatCtx, AV_LOG_DEBUG,
                   "TC INDEX seek pos = %lld\n", pos);
            av_log(anim->pFormatCtx, AV_LOG_DEBUG,
                   "TC INDEX seek dts = %llu\n", dts);

            if (ffmpeg_seek_by_byte(anim->pFormatCtx)) {
                av_log(anim->pFormatCtx, AV_LOG_DEBUG,
                       "... using BYTE pos\n");

                ret = av_seek_frame(anim->pFormatCtx,
                                    -1,
                                    pos, AVSEEK_FLAG_BYTE);
                av_update_cur_dts(anim->pFormatCtx, v_st, dts);
            }
            else {
                av_log(anim->pFormatCtx, AV_LOG_DEBUG,
                       "... using DTS pos\n");
                ret = av_seek_frame(anim->pFormatCtx,
                                    anim->videoStream,
                                    dts, AVSEEK_FLAG_BACKWARD);
            }
        }
        else {
            pos = (long long) (position - anim->preseek) *
                  AV_TIME_BASE / frame_rate;

            av_log(anim->pFormatCtx, AV_LOG_DEBUG,
                   "NO INDEX seek pos = %lld, st_time = %lld\n",
                   pos, (st_time != AV_NOPTS_VALUE) ? st_time : 0);

            if (pos < 0) {
                pos = 0;
            }

            if (st_time != AV_NOPTS_VALUE) {
                pos += st_time;
            }

            av_log(anim->pFormatCtx, AV_LOG_DEBUG,
                   "NO INDEX final seek pos = %lld\n", pos);

            ret = av_seek_frame(anim->pFormatCtx, -1,
                                pos, AVSEEK_FLAG_BACKWARD);
        }

        if (ret < 0) {
            av_log(anim->pFormatCtx, AV_LOG_ERROR,
                   "FETCH: "
                   "error while seeking to DTS = %lld "
                   "(frameno = %d, PTS = %lld): errcode = %d\n",
                   pos, position, (long long int)pts_to_search, ret);
        }

        avcodec_flush_buffers(anim->pCodecCtx);

        anim->next_pts = -1;

        if (anim->next_packet.stream_index == anim->videoStream) {
            av_free_packet(&anim->next_packet);
            anim->next_packet.stream_index = -1;
        }

        /* memset(anim->pFrame, ...) ?? */

        if (ret >= 0) {
            ffmpeg_decode_video_frame_scan(anim, pts_to_search);
        }
    }
    else if (position == 0 && anim->curposition == -1) {
        /* first frame without seeking special case... */
        ffmpeg_decode_video_frame(anim);
    }
    else {
        av_log(anim->pFormatCtx, AV_LOG_DEBUG,
               "FETCH: no seek necessary, just continue...\n");
    }

    IMB_freeImBuf(anim->last_frame);
    anim->last_frame = IMB_allocImBuf(anim->x, anim->y, 32, IB_rect);
    anim->last_frame->rect_colorspace = colormanage_colorspace_get_named(anim->colorspace);

    ffmpeg_postprocess(anim);

    anim->last_pts = anim->next_pts;

    ffmpeg_decode_video_frame(anim);

    anim->curposition = position;

    IMB_refImBuf(anim->last_frame);

    return anim->last_frame;
}
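
To make the no-index arithmetic above concrete (hypothetical stream parameters): with frame_rate = 25 and a 1/90000 time base, pts_time_base = 1/90000, so pts_to_search = floor(position / (1/90000) / 25 + 0.5) = position * 3600 stream ticks; st_time, which is in AV_TIME_BASE (microsecond) units, is rescaled into the same tick unit before being added.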
Example #5
static int init_pass2(MpegEncContext *s)
{
    RateControlContext *rcc = &s->rc_context;
    AVCodecContext *a       = s->avctx;
    int i, toobig;
    double fps             = 1 / av_q2d(s->avctx->time_base);
    double complexity[5]   = { 0 }; // approximate bits at quant=1
    uint64_t const_bits[5] = { 0 }; // quantizer independent bits
    uint64_t all_const_bits;
    uint64_t all_available_bits = (uint64_t)(s->bit_rate *
                                             (double)rcc->num_entries / fps);
    double rate_factor          = 0;
    double step;
    const int filter_size = (int)(a->qblur * 4) | 1;
    double expected_bits;
    double *qscale, *blurred_qscale, qscale_sum;

    /* find complexity & const_bits & decide the pict_types */
    for (i = 0; i < rcc->num_entries; i++) {
        RateControlEntry *rce = &rcc->entry[i];

        rce->new_pict_type                = rce->pict_type;
        rcc->i_cplx_sum[rce->pict_type]  += rce->i_tex_bits * rce->qscale;
        rcc->p_cplx_sum[rce->pict_type]  += rce->p_tex_bits * rce->qscale;
        rcc->mv_bits_sum[rce->pict_type] += rce->mv_bits;
        rcc->frame_count[rce->pict_type]++;

        complexity[rce->new_pict_type] += (rce->i_tex_bits + rce->p_tex_bits) *
                                          (double)rce->qscale;
        const_bits[rce->new_pict_type] += rce->mv_bits + rce->misc_bits;
    }

    all_const_bits = const_bits[AV_PICTURE_TYPE_I] +
                     const_bits[AV_PICTURE_TYPE_P] +
                     const_bits[AV_PICTURE_TYPE_B];

    if (all_available_bits < all_const_bits) {
        av_log(s->avctx, AV_LOG_ERROR, "requested bitrate is too low\n");
        return -1;
    }

    qscale         = av_malloc(sizeof(double) * rcc->num_entries);
    blurred_qscale = av_malloc(sizeof(double) * rcc->num_entries);
    if (!qscale || !blurred_qscale) {
        av_free(qscale);
        av_free(blurred_qscale);
        return -1;
    }
    toobig = 0;

    for (step = 256 * 256; step > 0.0000001; step *= 0.5) {
        expected_bits = 0;
        rate_factor  += step;

        rcc->buffer_index = s->avctx->rc_buffer_size / 2;

        /* find qscale */
        for (i = 0; i < rcc->num_entries; i++) {
            RateControlEntry *rce = &rcc->entry[i];

            qscale[i] = get_qscale(s, &rcc->entry[i], rate_factor, i);
            rcc->last_qscale_for[rce->pict_type] = qscale[i];
        }
        assert(filter_size % 2 == 1);

        /* fixed I/B QP relative to P mode */
        for (i = rcc->num_entries - 1; i >= 0; i--) {
            RateControlEntry *rce = &rcc->entry[i];

            qscale[i] = get_diff_limited_q(s, rce, qscale[i]);
        }

        /* smooth curve */
        for (i = 0; i < rcc->num_entries; i++) {
            RateControlEntry *rce = &rcc->entry[i];
            const int pict_type   = rce->new_pict_type;
            int j;
            double q = 0.0, sum = 0.0;

            for (j = 0; j < filter_size; j++) {
                int index    = i + j - filter_size / 2;
                double d     = index - i;
                double coeff = a->qblur == 0 ? 1.0 : exp(-d * d / (a->qblur * a->qblur));

                if (index < 0 || index >= rcc->num_entries)
                    continue;
                if (pict_type != rcc->entry[index].new_pict_type)
                    continue;
                q   += qscale[index] * coeff;
                sum += coeff;
            }
            blurred_qscale[i] = q / sum;
        }

        /* find expected bits */
        for (i = 0; i < rcc->num_entries; i++) {
            RateControlEntry *rce = &rcc->entry[i];
            double bits;

            rce->new_qscale = modify_qscale(s, rce, blurred_qscale[i], i);

            bits  = qp2bits(rce, rce->new_qscale) + rce->mv_bits + rce->misc_bits;
            bits += 8 * ff_vbv_update(s, bits);

            rce->expected_bits = expected_bits;
            expected_bits     += bits;
        }

        av_dlog(s->avctx,
                "expected_bits: %f all_available_bits: %d rate_factor: %f\n",
                expected_bits, (int)all_available_bits, rate_factor);
        if (expected_bits > all_available_bits) {
            rate_factor -= step;
            ++toobig;
        }
    }
    av_free(qscale);
    av_free(blurred_qscale);

    /* check bitrate calculations and print info */
    qscale_sum = 0.0;
    for (i = 0; i < rcc->num_entries; i++) {
        av_dlog(s->avctx, "[lavc rc] entry[%d].new_qscale = %.3f  qp = %.3f\n",
                i,
                rcc->entry[i].new_qscale,
                rcc->entry[i].new_qscale / FF_QP2LAMBDA);
        qscale_sum += av_clip(rcc->entry[i].new_qscale / FF_QP2LAMBDA,
                              s->avctx->qmin, s->avctx->qmax);
    }
    assert(toobig <= 40);
    av_log(s->avctx, AV_LOG_DEBUG,
           "[lavc rc] requested bitrate: %d bps  expected bitrate: %d bps\n",
           s->bit_rate,
           (int)(expected_bits / ((double)all_available_bits / s->bit_rate)));
    av_log(s->avctx, AV_LOG_DEBUG,
           "[lavc rc] estimated target average qp: %.3f\n",
           (float)qscale_sum / rcc->num_entries);
    if (toobig == 0) {
        av_log(s->avctx, AV_LOG_INFO,
               "[lavc rc] Using all of requested bitrate is not "
               "necessary for this video with these parameters.\n");
    } else if (toobig == 40) {
        av_log(s->avctx, AV_LOG_ERROR,
               "[lavc rc] Error: bitrate too low for this video "
               "with these parameters.\n");
        return -1;
    } else if (fabs(expected_bits / all_available_bits - 1.0) > 0.01) {
        av_log(s->avctx, AV_LOG_ERROR,
               "[lavc rc] Error: 2pass curve failed to converge\n");
        return -1;
    }

    return 0;
}
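The rate_factor loop above is a monotone bisection: each pass tentatively grows the factor by step and backs off when the simulated two-pass size overshoots the bit budget. A stripped-down sketch of the same pattern, where predicted_bits() is a hypothetical stand-in for the qscale/blur/VBV simulation:

double rate_factor = 0.0, step;

for (step = 256 * 256; step > 0.0000001; step *= 0.5) {
    rate_factor += step;                       /* tentatively grow */
    if (predicted_bits(rate_factor) > budget)  /* overshoot: */
        rate_factor -= step;                   /* back off, retry with step/2 */
}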
Example #6
int seg_transcode_main(Segment_U *seg_union)
{

	Output_Context *ptr_output_ctx = seg_union->output_ctx;

    if (!(ptr_output_ctx->fmt->flags & AVFMT_NOFILE)) {		// for mp4 or mpegts this step must be performed
        if (avio_open(&(ptr_output_ctx->ptr_format_ctx->pb), seg_union->ts_name, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", seg_union->ts_name);
            exit(OPEN_MUX_FILE_FAIL);
        }
    }
    // write the stream header, if any
    avformat_write_header(ptr_output_ctx->ptr_format_ctx ,NULL);
    write_m3u8_header(ptr_output_ctx);
    int i ;
    for(i = 0 ; i < seg_union->input_nb ; i ++){

    	/*initialize input file information*/
    	init_input( seg_union->input_ctx ,seg_union->input_file[i]);
    	Input_Context *ptr_input_ctx = seg_union->input_ctx;

		ptr_output_ctx->img_convert_ctx = sws_getContext(
				ptr_input_ctx->video_codec_ctx->width ,ptr_input_ctx->video_codec_ctx->height ,PIX_FMT_YUV420P,
				 ptr_output_ctx->video_stream->codec->width ,ptr_output_ctx->video_stream->codec->height ,PIX_FMT_YUV420P ,
				 SWS_BICUBIC ,NULL ,NULL ,NULL);


		printf("src_width = %d ,src_height = %d \n" ,ptr_input_ctx->video_codec_ctx->width ,ptr_input_ctx->video_codec_ctx->height);
		printf("dts_width = %d ,dts_height = %d \n" ,ptr_output_ctx->video_stream->codec->width ,
				ptr_output_ctx->video_stream->codec->height);

		printf("before av_read_frame ...\n");
		/*************************************************************************************/
		/*decoder loop*/
		//
		//
		//***********************************************************************************/
		while(av_read_frame(ptr_input_ctx->ptr_format_ctx ,&ptr_input_ctx->pkt) >= 0){

			if (ptr_input_ctx->pkt.stream_index == ptr_input_ctx->video_index) {

				//decode video packet
				int got_picture = 0;
				avcodec_decode_video2(ptr_input_ctx->video_codec_ctx,
						ptr_input_ctx->yuv_frame, &got_picture, &ptr_input_ctx->pkt);

				if (got_picture) {
					//encode video
					//an issue with the input stream's timestamps
					ptr_output_ctx->sync_ipts = av_q2d(ptr_input_ctx->ptr_format_ctx->streams[ptr_input_ctx->video_index]->time_base) *
							(ptr_input_ctx->yuv_frame->best_effort_timestamp  )
							- (double)ptr_input_ctx->ptr_format_ctx->start_time / AV_TIME_BASE
							+	ptr_output_ctx->base_ipts;    //current packet time in second

					//printf("ptr_output_ctx->sync_ipts = %f \n" ,ptr_output_ctx->sync_ipts);

					//first swscale
					sws_scale(ptr_output_ctx->img_convert_ctx ,
							(const uint8_t* const*)ptr_input_ctx->yuv_frame->data ,ptr_input_ctx->yuv_frame->linesize ,
							0 ,
							ptr_input_ctx->video_codec_ctx->height ,
							ptr_output_ctx->encoded_yuv_pict->data ,ptr_output_ctx->encoded_yuv_pict->linesize);

					//second swscale
					encode_video_frame(ptr_output_ctx , ptr_output_ctx->encoded_yuv_pict ,ptr_input_ctx );
				}

			} else if (ptr_input_ctx->pkt.stream_index == ptr_input_ctx->audio_index) {
				//printf("audio ...\n");
				//decode audio packet
				uint8_t *tmp_data = ptr_input_ctx->pkt.data;
				int tmp_size = ptr_input_ctx->pkt.size;
				while (ptr_input_ctx->pkt.size > 0) {
					int got_frame = 0;
					int len = avcodec_decode_audio4(ptr_input_ctx->audio_codec_ctx,
							ptr_input_ctx->audio_decode_frame, &got_frame,
							&ptr_input_ctx->pkt);

					if (len < 0) { //decode failed ,skip frame
						fprintf(stderr, "Error while decoding audio frame\n");
						break;
					}

					/* advance before branching: skipping this update when no frame
					 * is produced would loop forever on the same bytes */
					ptr_input_ctx->pkt.size -= len;
					ptr_input_ctx->pkt.data += len;

					if (got_frame) {
						//encode the audio data ,and write the data into the output
//						do_audio_out(ptr_output_ctx ,ptr_input_ctx ,ptr_input_ctx->audio_decode_frame);
					} else { //no frame decoded yet, keep feeding the decoder
						printf("======>avcodec_decode_audio4 ,no data ..\n");
					}

				}

				//renew
				ptr_input_ctx->pkt.size = tmp_size;
				ptr_input_ctx->pkt.data = tmp_data;

			}

			/* taking the address is always non-NULL, so the old check was a no-op */
			av_free_packet(&ptr_input_ctx->pkt);

		}//endwhile

		double file_duration = ptr_input_ctx->ptr_format_ctx->duration / AV_TIME_BASE
					+ (double)( ptr_input_ctx->ptr_format_ctx->duration % AV_TIME_BASE ) / AV_TIME_BASE;


		ptr_output_ctx->base_ipts  += file_duration;  //completed files sum time duration
		printf("end while ......,time_base = %f .............> \n" ,ptr_output_ctx->base_ipts  );
		ptr_output_ctx->audio_resample = 0;
		sws_freeContext(ptr_output_ctx->img_convert_ctx);
		free_input(ptr_input_ctx);
    } //end for

	printf("before flush ,ptr_output_ctx->ptr_format_ctx->nb_streams = %d \n\n" ,ptr_output_ctx->ptr_format_ctx->nb_streams);
	encode_flush(ptr_output_ctx ,ptr_output_ctx->ptr_format_ctx->nb_streams);

	write_m3u8_tailer(ptr_output_ctx);
	printf("before wirite tailer ...\n\n");
	av_write_trailer(ptr_output_ctx->ptr_format_ctx );

	return 0;
}
Example #7
static double get_fps(AVCodecContext *avctx)
{
    return 1.0 / av_q2d(avctx->time_base) / FFMAX(avctx->ticks_per_frame, 1);
}
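
A quick sanity check with hypothetical values: for an MPEG-2-style context with a 1/50 time base and two ticks per frame, the helper returns 25 fps:

AVCodecContext ctx = { 0 };
ctx.time_base       = (AVRational){ 1, 50 };
ctx.ticks_per_frame = 2;
/* get_fps(&ctx) == 1.0 / av_q2d((AVRational){1, 50}) / 2 == 25.0 */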
Example #8
void VideoThread::readFrame()
{
// 	qDebug() << "VideoThread::readFrame(): start of function";
	if(!m_inited)
	{
// 		qDebug() << "VideoThread::readFrame(): not inited";
		//emit frameReady(m_nextDelay);
		return;
	}
	
	
	
	AVPacket pkt1, *packet = &pkt1;
	double pts;
	
// 	qDebug() << "VideoThread::readFrame(): killed: "<<m_killed;

	int frame_finished = 0;
	while(!frame_finished && !m_killed)
	{
		if(av_read_frame(m_av_format_context, packet) >= 0)
		{
			// Is this a packet from the video stream?
			if(packet->stream_index == m_video_stream)
			{
				//global_video_pkt_pts = packet->pts;
				#ifdef USE_DECODE_VID2
				avcodec_decode_video2(m_video_codec_context, m_av_frame, &frame_finished, packet);
				#else
				avcodec_decode_video(m_video_codec_context, m_av_frame, &frame_finished, packet->data, packet->size);
				#endif

				if(packet->dts == AV_NOPTS_VALUE &&
						  m_av_frame->opaque &&
				  *(uint64_t*)m_av_frame->opaque != AV_NOPTS_VALUE)
				{
					pts = *(uint64_t *)m_av_frame->opaque;
				}
				else if(packet->dts != AV_NOPTS_VALUE)
				{
					pts = packet->dts;
				}
				else
				{
					pts = 0;
				}

				pts *= av_q2d(m_timebase);

				// Did we get a video frame?
				if(frame_finished)
				{
					
					// Convert the image from its native format to RGB, then copy the image data to a QImage
					if(m_sws_context == NULL)
					{
						//mutex.lock();
						m_sws_context = sws_getContext(
							m_video_codec_context->width, m_video_codec_context->height,
							m_video_codec_context->pix_fmt,
							m_video_codec_context->width, m_video_codec_context->height,
							//PIX_FMT_RGB32,SWS_BICUBIC,
							//PIX_FMT_RGB565, SWS_FAST_BILINEAR,
							PIX_FMT_RGB32, SWS_FAST_BILINEAR,
							//PIX_FMT_YUV420P, SWS_FAST_BILINEAR,
							NULL, NULL, NULL); //SWS_PRINT_INFO
						//mutex.unlock();
						//printf("decode(): created m_sws_context\n");
					}
					//printf("decode(): got frame\n");

					sws_scale(m_sws_context,
						  m_av_frame->data,
						  m_av_frame->linesize, 0,
						  m_video_codec_context->height,
						  m_av_rgb_frame->data,
						  m_av_rgb_frame->linesize);

					//m_bufferMutex.lock();
// 					qDebug() << "VideoThread: void*:"<<(void*)m_av_rgb_frame->data[0];
					QImage frame = QImage(m_av_rgb_frame->data[0],
								m_video_codec_context->width,
								m_video_codec_context->height,
								//QImage::Format_RGB16);
								QImage::Format_ARGB32);
					//m_bufferMutex.unlock();
					
					av_free_packet(packet);

					// This block from the synchronize_video(VideoState *is, AVFrame *src_frame, double pts) : double
					// function given at: http://dranger.com/ffmpeg/tutorial05.html
					{
						// update the frame pts
						double frame_delay;

						if(pts != 0)
						{
							/* if we have pts, set video clock to it */
							m_video_clock = pts;
						} else {
							/* if we aren't given a pts, set it to the clock */
							pts = m_video_clock;
						}
						/* update the video clock */
						frame_delay = av_q2d(m_timebase);
						/* if we are repeating a frame, adjust clock accordingly */
						frame_delay += m_av_frame->repeat_pict * (frame_delay * 0.5);
						m_video_clock += frame_delay;
						//qDebug() << "Frame Dealy: "<<frame_delay<<", avq2d:"<<av_q2d(m_timebase)<<", repeat:"<<(m_av_frame->repeat_pict * (frame_delay * 0.5))<<", vidclock: "<<m_video_clock; 
					}


					//QFFMpegVideoFrame video_frame;
					//video_frame.frame = m_frame;
					//video_frame.pts = pts;
					//video_frame.previous_pts = m_previous_pts;

					//m_current_frame = video_frame;

					//emit newFrame(video_frame);
					//qDebug() << "emit newImage(), frameSize:"<<frame.size();
					//emit newImage(frame);
					
					
					//calculate the pts delay
					double pts_delay = pts - m_previous_pts;
					if((pts_delay < 0.0) || (pts_delay > 1.0))
						pts_delay = m_previous_pts_delay;
			
					m_previous_pts_delay = pts_delay;
					
					/* update delay to sync to audio */
			// 		double ref_clock = get_audio_clock(is);
			// 		double diff = vp->pts - ref_clock;
			
					
			
					//calculate the global delay
					//int global_delay = (m_current_frame.pts * 1000) - m_run_time.elapsed();
					//m_run_time.elapsed() / 1000; //
					//double curTime = (double)(av_gettime() / 1000000.0);
					double curTime = ((double)m_run_time.elapsed() + m_total_runtime) / 1000.0;
					m_frameTimer += pts_delay;
					if(m_frameTimer > curTime+.5)
						m_frameTimer = curTime;
					if(m_frameTimer < curTime-1.)
						m_frameTimer = curTime;
					double actual_delay = m_frameTimer - curTime;
					//qDebug() << "VideoThread::readFrame(): frame timer: "<<m_frameTimer<<", curTime:"<<curTime<<", \t actual_delay:"<<((int)(actual_delay*1000))<<", pts_delay:"<<((int)(pts_delay*1000))<<", m_run_time:"<<m_run_time.elapsed()<<", m_total_runtime:"<<m_total_runtime;
					if(actual_delay < 0.010)
					{
						// This should just skip this frame
			// 			qDebug() << "Skipping this frame, skips:"<<m_skipFrameCount;
			// 			if(status() == Running)
			// 				m_play_timer = startTimer(0);
			// 			return;
						
						actual_delay = 0.0;
						
						m_skipFrameCount ++;
						
						if(m_skipFrameCount > MAX_SKIPS_TILL_RESET)
						{
							m_skipFrameCount = 0;
							m_frameTimer = curTime - pts_delay;
			// 				qDebug() << "Reset frame timer";
						}
					}
					
					int frameDelay = (int)(actual_delay * 1000 + 0.5);
					
					// The count/accum pair is designed to average out sharp
					// jumps in frameDelay to make the resulting output appear
					// smoother
					m_frameSmoothCount ++;
					m_frameSmoothAccum += frameDelay;
					
					//frameDelay = m_frameSmoothAccum / m_frameSmoothCount;
					
					// Reset the averaging every 15 sec (approx)
					if( m_frameSmoothCount % 100 == 0)
					{
						m_frameSmoothCount = 0;
						m_frameSmoothAccum = 0;
						//qDebug() << "VideoThread::readFrame(): frame smoother reset";
					}
					
					// Arbitrary min/max delays - equals about a max fps of 66fps (15 ms) and a min fps of 9fps (111ms)
					if(frameDelay < 15)
						frameDelay = 15;
					if(frameDelay > 111)
						frameDelay = 111;
					//qDebug() << "VideoThread::readFrame(): frameDelay:"<<frameDelay;
					
// 					if(m_frameConsumed || (!m_frameConsumed && ++m_frameLockCount > 10))
// 					{
// 						m_frameLockCount = 0;
// 						m_frameConsumed = false;

						//m_time = QTime::currentTime(); 
						//QTimer::singleShot(frameDelay, this, SLOT(releaseCurrentFrame()));
						//emit frameReady((int)(pts_delay*1000));
						
						//enqueue(VideoFrame(m_frame,frameDelay));
						
						VideoFrame *frame2 = new VideoFrame(frame.convertToFormat(QImage::Format_ARGB32),pts_delay*1000);
						frame2->setCaptureTime(QTime::currentTime());
						enqueue(frame2);
						
// 						VideoFrame vidframe;
// 						vidframe.isRaw = true;
// 						vidframe.bufferType = VideoFrame::BUFFER_BYTEARRAY;
// 						const uchar *bits = frame.bits();
// 						vidframe.byteArray.append((const char*)bits, frame.byteCount());
// 						vidframe.holdTime = pts_delay*1000;
// 						vidframe.setSize(frame.size());
// 						enqueue(vidframe);
						
// 						VideoFrame vid(pts_delay*1000);
// 						QImage rgb32img = frame.convertToFormat(QImage::Format_ARGB32);
// 						vid.isRaw = true;
// 						vid.bufferType = VideoFrame::BUFFER_BYTEARRAY;
// 						vid.pixelFormat = QVideoFrame::Format_ARGB32;
// 						vid.setSize(m_frame_size);
// 						vid.byteArray.resize(rgb32img.byteCount());
// 						const uchar * bits = rgb32img.bits();
// 						vid.byteArray.append((const char*)bits, rgb32img.byteCount());
// 						enqueue(vid);
						
						//enqueue(VideoFrame(frame,pts_delay*1000));
// 						VideoFrame vid(pts_delay*1000);
// 						vid.isRaw = true;
// 						vid.bufferType = VideoFrame::BUFFER_BYTEARRAY;
// 						vid.pixelFormat = QVideoFrame::Format_YUV420P;
// 						//vid.setPointerData(m_av_rgb_frame->data, m_av_rgb_frame->linesize);
// 						vid.setSize(m_frame_size);
// 						int frame0 = m_frame_size.width() * m_frame_size.height();
// 						int frame1 = m_frame_size.width()/2 * m_frame_size.height()/2;
// 						int frame2 = m_frame_size.width()/2 * m_frame_size.height()/2;
// 						int maxSize = frame0 + frame1 + frame2;
// 						qDebug() << "frame0:"<<frame0<<", frame1:"<<frame1+frame0<<", frame2:"<<frame2+frame1+frame0<<m_frame_size;
// 						vid.byteArray.resize(maxSize);
// 						vid.byteArray.append((const char*)m_av_frame->data[0], frame0);
// 						vid.byteArray.append((const char*)m_av_frame->data[1], frame1);
// 						vid.byteArray.append((const char*)m_av_frame->data[2], frame2);
// 						
// 						enqueue(vid);
						
						
						
						//emit frameReady();
					//}
						
					m_nextDelay = frameDelay; //frameDelay * .5;
					//QTimer::singleShot(0, this, SLOT(updateTimer()));
					//updateTimer();
					
					m_previous_pts = pts;
				}

			}
			else if(packet->stream_index == m_audio_stream)
			{
// 				mutex.lock();
				//decode audio packet, store in queue
				av_free_packet(packet);
// 				mutex.unlock();

			}
			else
			{
// 				mutex.lock();
				av_free_packet(packet);
// 				mutex.unlock();

			}
		}
		else
		{
			//emit reachedEnd();
			//qDebug() << "reachedEnd()";
			restart();
			
		}
	}
	//qDebug() << "VideoThread::readFrame(): end of function";
}
Example #9
/**
 * Initializes the gdi grab device demuxer (public device demuxer API).
 *
 * @param s1 Context from avformat core
 * @return AVERROR_IO error, 0 success
 */
static int
gdigrab_read_header(AVFormatContext *s1)
{
    struct gdigrab *gdigrab = s1->priv_data;

    HWND hwnd;
    HDC source_hdc = NULL;
    HDC dest_hdc   = NULL;
    BITMAPINFO bmi;
    HBITMAP hbmp   = NULL;
    void *buffer   = NULL;

    const char *filename = s1->filename;
    const char *name     = NULL;
    AVStream   *st       = NULL;

    int bpp;
    RECT virtual_rect;
    RECT clip_rect;
    BITMAP bmp;
    int ret;

    if (!strncmp(filename, "title=", 6)) {
        name = filename + 6;
        hwnd = FindWindow(NULL, name);
        if (!hwnd) {
            av_log(s1, AV_LOG_ERROR,
                   "Can't find window '%s', aborting.\n", name);
            ret = AVERROR(EIO);
            goto error;
        }
        if (gdigrab->show_region) {
            av_log(s1, AV_LOG_WARNING,
                    "Can't show region when grabbing a window.\n");
            gdigrab->show_region = 0;
        }
    } else if (!strcmp(filename, "desktop")) {
        hwnd = NULL;
    } else {
        av_log(s1, AV_LOG_ERROR,
               "Please use \"desktop\" or \"title=<windowname>\" to specify your target.\n");
        ret = AVERROR(EIO);
        goto error;
    }

    if (hwnd) {
        GetClientRect(hwnd, &virtual_rect);
    } else {
        virtual_rect.left = GetSystemMetrics(SM_XVIRTUALSCREEN);
        virtual_rect.top = GetSystemMetrics(SM_YVIRTUALSCREEN);
        virtual_rect.right = virtual_rect.left + GetSystemMetrics(SM_CXVIRTUALSCREEN);
        virtual_rect.bottom = virtual_rect.top + GetSystemMetrics(SM_CYVIRTUALSCREEN);
    }

    /* If no width or height set, use full screen/window area */
    if (!gdigrab->width || !gdigrab->height) {
        clip_rect.left = virtual_rect.left;
        clip_rect.top = virtual_rect.top;
        clip_rect.right = virtual_rect.right;
        clip_rect.bottom = virtual_rect.bottom;
    } else {
        clip_rect.left = gdigrab->offset_x;
        clip_rect.top = gdigrab->offset_y;
        clip_rect.right = gdigrab->width + gdigrab->offset_x;
        clip_rect.bottom = gdigrab->height + gdigrab->offset_y;
    }

    if (clip_rect.left < virtual_rect.left ||
            clip_rect.top < virtual_rect.top ||
            clip_rect.right > virtual_rect.right ||
            clip_rect.bottom > virtual_rect.bottom) {
            av_log(s1, AV_LOG_ERROR,
                    "Capture area (%li,%li),(%li,%li) extends outside window area (%li,%li),(%li,%li)",
                    clip_rect.left, clip_rect.top,
                    clip_rect.right, clip_rect.bottom,
                    virtual_rect.left, virtual_rect.top,
                    virtual_rect.right, virtual_rect.bottom);
            ret = AVERROR(EIO);
            goto error;
    }

    /* This will get the device context for the selected window, or if
     * none, the primary screen */
    source_hdc = GetDC(hwnd);
    if (!source_hdc) {
        WIN32_API_ERROR("Couldn't get window device context");
        ret = AVERROR(EIO);
        goto error;
    }
    bpp = GetDeviceCaps(source_hdc, BITSPIXEL);

    if (name) {
        av_log(s1, AV_LOG_INFO,
               "Found window %s, capturing %lix%lix%i at (%li,%li)\n",
               name,
               clip_rect.right - clip_rect.left,
               clip_rect.bottom - clip_rect.top,
               bpp, clip_rect.left, clip_rect.top);
    } else {
        av_log(s1, AV_LOG_INFO,
               "Capturing whole desktop as %lix%lix%i at (%li,%li)\n",
               clip_rect.right - clip_rect.left,
               clip_rect.bottom - clip_rect.top,
               bpp, clip_rect.left, clip_rect.top);
    }

    if (clip_rect.right - clip_rect.left <= 0 ||
            clip_rect.bottom - clip_rect.top <= 0 || bpp%8) {
        av_log(s1, AV_LOG_ERROR, "Invalid properties, aborting\n");
        ret = AVERROR(EIO);
        goto error;
    }

    dest_hdc = CreateCompatibleDC(source_hdc);
    if (!dest_hdc) {
        WIN32_API_ERROR("Screen DC CreateCompatibleDC");
        ret = AVERROR(EIO);
        goto error;
    }

    /* Create a DIB and select it into the dest_hdc */
    bmi.bmiHeader.biSize          = sizeof(BITMAPINFOHEADER);
    bmi.bmiHeader.biWidth         = clip_rect.right - clip_rect.left;
    bmi.bmiHeader.biHeight        = -(clip_rect.bottom - clip_rect.top);
    bmi.bmiHeader.biPlanes        = 1;
    bmi.bmiHeader.biBitCount      = bpp;
    bmi.bmiHeader.biCompression   = BI_RGB;
    bmi.bmiHeader.biSizeImage     = 0;
    bmi.bmiHeader.biXPelsPerMeter = 0;
    bmi.bmiHeader.biYPelsPerMeter = 0;
    bmi.bmiHeader.biClrUsed       = 0;
    bmi.bmiHeader.biClrImportant  = 0;
    hbmp = CreateDIBSection(dest_hdc, &bmi, DIB_RGB_COLORS,
            &buffer, NULL, 0);
    if (!hbmp) {
        WIN32_API_ERROR("Creating DIB Section");
        ret = AVERROR(EIO);
        goto error;
    }

    if (!SelectObject(dest_hdc, hbmp)) {
        WIN32_API_ERROR("SelectObject");
        ret = AVERROR(EIO);
        goto error;
    }

    /* Get info from the bitmap */
    GetObject(hbmp, sizeof(BITMAP), &bmp);

    st = avformat_new_stream(s1, NULL);
    if (!st) {
        ret = AVERROR(ENOMEM);
        goto error;
    }
    avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */

    gdigrab->frame_size  = bmp.bmWidthBytes * bmp.bmHeight * bmp.bmPlanes;
    gdigrab->header_size = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER) +
                           (bpp <= 8 ? (1 << bpp) : 0) * sizeof(RGBQUAD) /* palette size */;
    gdigrab->time_base   = av_inv_q(gdigrab->framerate);
    gdigrab->time_frame  = av_gettime() / av_q2d(gdigrab->time_base);

    gdigrab->hwnd       = hwnd;
    gdigrab->source_hdc = source_hdc;
    gdigrab->dest_hdc   = dest_hdc;
    gdigrab->hbmp       = hbmp;
    gdigrab->bmi        = bmi;
    gdigrab->buffer     = buffer;
    gdigrab->clip_rect  = clip_rect;

    gdigrab->cursor_error_printed = 0;

    if (gdigrab->show_region) {
        if (gdigrab_region_wnd_init(s1, gdigrab)) {
            ret = AVERROR(EIO);
            goto error;
        }
    }

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_BMP;
    st->codec->time_base  = gdigrab->time_base;
    st->codec->bit_rate   = (gdigrab->header_size + gdigrab->frame_size) * 1/av_q2d(gdigrab->time_base) * 8;

    return 0;

error:
    if (source_hdc)
        ReleaseDC(hwnd, source_hdc); /* GetDC() pairs with ReleaseDC(), not DeleteDC() */
    if (dest_hdc)
        DeleteDC(dest_hdc);
    if (hbmp)
        DeleteObject(hbmp);
    return ret;
}
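
For context, a minimal sketch of reaching this demuxer through the public libavdevice API (assumes an FFmpeg build with gdigrab enabled; error checks omitted):

#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>

AVFormatContext *fmt_ctx = NULL;
AVDictionary *opts = NULL;

avdevice_register_all();
AVInputFormat *gdigrab = av_find_input_format("gdigrab");
av_dict_set(&opts, "framerate", "30", 0);
/* the filename is "desktop" or "title=<windowname>", exactly as
 * parsed by gdigrab_read_header() above */
avformat_open_input(&fmt_ctx, "desktop", gdigrab, &opts);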
Example #10
/* put sequence header if needed */
static void mpeg1_encode_sequence_header(MpegEncContext *s)
{
    unsigned int vbv_buffer_size, fps, v;
    int i, constraint_parameter_flag;
    uint64_t time_code;
    float best_aspect_error = 1E10;
    float aspect_ratio      = av_q2d(s->avctx->sample_aspect_ratio);

    if (aspect_ratio == 0.0)
        aspect_ratio = 1.0;             // pixel aspect 1:1 (VGA)

    if (s->current_picture.f.key_frame) {
        AVRational framerate = ff_mpeg12_frame_rate_tab[s->frame_rate_index];

        /* mpeg1 header repeated every gop */
        put_header(s, SEQ_START_CODE);

        put_sbits(&s->pb, 12, s->width  & 0xFFF);
        put_sbits(&s->pb, 12, s->height & 0xFFF);

        for (i = 1; i < 15; i++) {
            float error = aspect_ratio;
            if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || i <= 1)
                error -= 1.0 / ff_mpeg1_aspect[i];
            else
                error -= av_q2d(ff_mpeg2_aspect[i]) * s->height / s->width;

            error = FFABS(error);

            if (error < best_aspect_error) {
                best_aspect_error    = error;
                s->aspect_ratio_info = i;
            }
        }

        put_bits(&s->pb, 4, s->aspect_ratio_info);
        put_bits(&s->pb, 4, s->frame_rate_index);

        if (s->avctx->rc_max_rate) {
            v = (s->avctx->rc_max_rate + 399) / 400;
            if (v > 0x3ffff && s->codec_id == AV_CODEC_ID_MPEG1VIDEO)
                v = 0x3ffff;
        } else {
            v = 0x3FFFF;
        }

        if (s->avctx->rc_buffer_size)
            vbv_buffer_size = s->avctx->rc_buffer_size;
        else
            /* VBV calculation: Scaled so that a VCD has the proper
             * VBV size of 40 kilobytes */
            vbv_buffer_size = ((20 * s->bit_rate) / (1151929 / 2)) * 8 * 1024;
        vbv_buffer_size = (vbv_buffer_size + 16383) / 16384;

        put_sbits(&s->pb, 18, v);
        put_bits(&s->pb, 1, 1);         // marker
        put_sbits(&s->pb, 10, vbv_buffer_size);

        constraint_parameter_flag =
            s->width  <= 768                                    &&
            s->height <= 576                                    &&
            s->mb_width * s->mb_height                 <= 396   &&
            s->mb_width * s->mb_height * framerate.num <= 396 * 25 * framerate.den &&
            framerate.num <= framerate.den * 30                 &&
            s->avctx->me_range                                  &&
            s->avctx->me_range < 128                            &&
            vbv_buffer_size <= 20                               &&
            v <= 1856000 / 400                                  &&
            s->codec_id == AV_CODEC_ID_MPEG1VIDEO;

        put_bits(&s->pb, 1, constraint_parameter_flag);

        ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix);
        ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix);

        if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            put_header(s, EXT_START_CODE);
            put_bits(&s->pb, 4, 1);                 // seq ext

            put_bits(&s->pb, 1, s->avctx->profile == 0); // escx 1 for 4:2:2 profile

            put_bits(&s->pb, 3, s->avctx->profile); // profile
            put_bits(&s->pb, 4, s->avctx->level);   // level

            put_bits(&s->pb, 1, s->progressive_sequence);
            put_bits(&s->pb, 2, s->chroma_format);
            put_bits(&s->pb, 2, s->width  >> 12);
            put_bits(&s->pb, 2, s->height >> 12);
            put_bits(&s->pb, 12, v >> 18);          // bitrate ext
            put_bits(&s->pb, 1, 1);                 // marker
            put_bits(&s->pb, 8, vbv_buffer_size >> 10); // vbv buffer ext
            put_bits(&s->pb, 1, s->low_delay);
            put_bits(&s->pb, 2, s->mpeg2_frame_rate_ext.num-1); // frame_rate_ext_n
            put_bits(&s->pb, 5, s->mpeg2_frame_rate_ext.den-1); // frame_rate_ext_d
        }
Example #11
static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
{
    VideoData *s = s1->priv_data;
    AVStream *st;
    int width, height;
    int video_fd, frame_size;
    int ret, frame_rate, frame_rate_base;
    int desired_palette;
    struct video_tuner tuner;
    struct video_audio audio;
    const char *video_device;
    int j;

    if (ap->width <= 0 || ap->height <= 0 || ap->time_base.den <= 0) {
        av_log(s1, AV_LOG_ERROR, "Bad capture size (%dx%d) or wrong time base (%d)\n",
            ap->width, ap->height, ap->time_base.den);

        return -1;
    }

    width = ap->width;
    height = ap->height;
    frame_rate      = ap->time_base.den;
    frame_rate_base = ap->time_base.num;

    if((unsigned)width > 32767 || (unsigned)height > 32767) {
        av_log(s1, AV_LOG_ERROR, "Capture size is out of range: %dx%d\n",
            width, height);

        return -1;
    }

    st = av_new_stream(s1, 0);
    if (!st)
        return -ENOMEM;
    av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */

    s->width = width;
    s->height = height;
    s->frame_rate      = frame_rate;
    s->frame_rate_base = frame_rate_base;

    video_device = ap->device;
    if (!video_device)
        video_device = "/dev/video";
    video_fd = open(video_device, O_RDWR);
    if (video_fd < 0) {
        perror(video_device);
        goto fail;
    }

    if (ioctl(video_fd,VIDIOCGCAP, &s->video_cap) < 0) {
        perror("VIDIOCGCAP");
        goto fail;
    }

    if (!(s->video_cap.type & VID_TYPE_CAPTURE)) {
        av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not handle capture\n");
        goto fail;
    }

    desired_palette = -1;
    if (st->codec->pix_fmt == PIX_FMT_YUV420P) {
        desired_palette = VIDEO_PALETTE_YUV420P;
    } else if (st->codec->pix_fmt == PIX_FMT_YUV422) {
        desired_palette = VIDEO_PALETTE_YUV422;
    } else if (st->codec->pix_fmt == PIX_FMT_BGR24) {
        desired_palette = VIDEO_PALETTE_RGB24;
    }

    /* set tv standard */
    if (ap->standard && !ioctl(video_fd, VIDIOCGTUNER, &tuner)) {
        if (!strcasecmp(ap->standard, "pal"))
            tuner.mode = VIDEO_MODE_PAL;
        else if (!strcasecmp(ap->standard, "secam"))
            tuner.mode = VIDEO_MODE_SECAM;
        else
            tuner.mode = VIDEO_MODE_NTSC;
        ioctl(video_fd, VIDIOCSTUNER, &tuner);
    }

    /* unmute audio */
    audio.audio = 0;
    ioctl(video_fd, VIDIOCGAUDIO, &audio);
    memcpy(&s->audio_saved, &audio, sizeof(audio));
    audio.flags &= ~VIDEO_AUDIO_MUTE;
    ioctl(video_fd, VIDIOCSAUDIO, &audio);

    ret = ioctl(video_fd,VIDIOCGMBUF,&s->gb_buffers);
    if (ret < 0) {
        /* try to use read based access */
        struct video_window win;
        struct video_picture pict;
        int val;

        win.x = 0;
        win.y = 0;
        win.width = width;
        win.height = height;
        win.chromakey = -1;
        win.flags = 0;

        ioctl(video_fd, VIDIOCSWIN, &win);

        ioctl(video_fd, VIDIOCGPICT, &pict);
#if 0
        printf("v4l: colour=%d hue=%d brightness=%d constrast=%d whiteness=%d\n",
               pict.colour,
               pict.hue,
               pict.brightness,
               pict.contrast,
               pict.whiteness);
#endif
        /* try to choose a suitable video format */
        pict.palette = desired_palette;
        if (desired_palette == -1 || (ret = ioctl(video_fd, VIDIOCSPICT, &pict)) < 0) {
            pict.palette=VIDEO_PALETTE_YUV420P;
            ret = ioctl(video_fd, VIDIOCSPICT, &pict);
            if (ret < 0) {
                pict.palette=VIDEO_PALETTE_YUV422;
                ret = ioctl(video_fd, VIDIOCSPICT, &pict);
                if (ret < 0) {
                    pict.palette=VIDEO_PALETTE_RGB24;
                    ret = ioctl(video_fd, VIDIOCSPICT, &pict);
                    if (ret < 0)
                        goto fail1;
                }
            }
        }

        s->frame_format = pict.palette;

        val = 1;
        ioctl(video_fd, VIDIOCCAPTURE, &val);

        s->time_frame = av_gettime() * s->frame_rate / s->frame_rate_base;
        s->use_mmap = 0;

        /* ATI All In Wonder automatic activation */
        if (!strcmp(s->video_cap.name, "Km")) {
            if (aiw_init(s) < 0)
                goto fail;
            s->aiw_enabled = 1;
            /* force 420P format because conversion from YUV422 to YUV420P
               is done in this driver (ugly) */
            s->frame_format = VIDEO_PALETTE_YUV420P;
        }
    } else {
        s->video_buf = mmap(0,s->gb_buffers.size,PROT_READ|PROT_WRITE,MAP_SHARED,video_fd,0);
        if ((unsigned char*)-1 == s->video_buf) {
            perror("mmap");
            goto fail;
        }
        s->gb_frame = 0;
        s->time_frame = av_gettime() * s->frame_rate / s->frame_rate_base;

        /* start to grab the first frame */
        s->gb_buf.frame = s->gb_frame % s->gb_buffers.frames;
        s->gb_buf.height = height;
        s->gb_buf.width = width;
        s->gb_buf.format = desired_palette;

        if (desired_palette == -1 || (ret = ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf)) < 0) {
            s->gb_buf.format = VIDEO_PALETTE_YUV420P;

            ret = ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
            if (ret < 0 && errno != EAGAIN) {
                /* try YUV422 */
                s->gb_buf.format = VIDEO_PALETTE_YUV422;

                ret = ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
                if (ret < 0 && errno != EAGAIN) {
                    /* try RGB24 */
                    s->gb_buf.format = VIDEO_PALETTE_RGB24;
                    ret = ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
                }
            }
        }
        if (ret < 0) {
            if (errno != EAGAIN) {
            fail1:
                av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not support suitable format\n");
            } else {
                av_log(s1, AV_LOG_ERROR,"Fatal: grab device does not receive any video signal\n");
            }
            goto fail;
        }
        for (j = 1; j < s->gb_buffers.frames; j++) {
          s->gb_buf.frame = j;
          ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
        }
        s->frame_format = s->gb_buf.format;
        s->use_mmap = 1;
    }

    switch(s->frame_format) {
    case VIDEO_PALETTE_YUV420P:
        frame_size = (width * height * 3) / 2;
        st->codec->pix_fmt = PIX_FMT_YUV420P;
        break;
    case VIDEO_PALETTE_YUV422:
        frame_size = width * height * 2;
        st->codec->pix_fmt = PIX_FMT_YUV422;
        break;
    case VIDEO_PALETTE_RGB24:
        frame_size = width * height * 3;
        st->codec->pix_fmt = PIX_FMT_BGR24; /* NOTE: v4l uses BGR24, not RGB24 ! */
        break;
    default:
        goto fail;
    }
    s->fd = video_fd;
    s->frame_size = frame_size;

    st->codec->codec_type = CODEC_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_RAWVIDEO;
    st->codec->width = width;
    st->codec->height = height;
    st->codec->time_base.den      = frame_rate;
    st->codec->time_base.num = frame_rate_base;
    st->codec->bit_rate = frame_size * 1/av_q2d(st->codec->time_base) * 8;

    return 0;
 fail:
    if (video_fd >= 0)
        close(video_fd);
    av_free(st);
    return AVERROR_IO;
}
Example #12
/* pkt = NULL means EOF (needed to flush decoder buffers) */
static int output_packet(InputStream *ist, const AVPacket *pkt)
{
    int ret = 0, i;
    int got_output = 0;

    AVPacket avpkt;
    if (!ist->saw_first_ts) {
        ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
        ist->pts = 0;
        if (pkt != NULL && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
            ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
            ist->pts = ist->dts; //unused but better to set it to a value that's not totally wrong
        }
        ist->saw_first_ts = 1;
    }

    if (ist->next_dts == AV_NOPTS_VALUE)
        ist->next_dts = ist->dts;
    if (ist->next_pts == AV_NOPTS_VALUE)
        ist->next_pts = ist->pts;

    if (pkt == NULL) {
        /* EOF handling */
        av_init_packet(&avpkt);
        avpkt.data = NULL;
        avpkt.size = 0;
        goto handle_eof;
    } else {
        avpkt = *pkt;
    }

    if (pkt->dts != AV_NOPTS_VALUE) {
        ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
        if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
            ist->next_pts = ist->pts = ist->dts;
    }

    // while we have more to decode or while the decoder did output something on EOF
    while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
        int duration;
    handle_eof:

        ist->pts = ist->next_pts;
        ist->dts = ist->next_dts;

        if (avpkt.size && avpkt.size != pkt->size &&
            !(ist->dec->capabilities & CODEC_CAP_SUBFRAMES)) {
            av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
                   "Multiple frames in a packet from stream %d\n", pkt->stream_index);
            ist->showed_multi_packet_warning = 1;
        }

        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ret = decode_audio    (ist, &avpkt, &got_output);
            break;
        case AVMEDIA_TYPE_VIDEO:
            ret = decode_video    (ist, &avpkt, &got_output);
            if (avpkt.duration) {
                duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
            } else if(ist->dec_ctx->time_base.num != 0 && ist->dec_ctx->time_base.den != 0) {
                int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
                duration = ((int64_t)AV_TIME_BASE *
                                ist->dec_ctx->time_base.num * ticks) /
                                ist->dec_ctx->time_base.den;
            } else
                duration = 0;

            if (ist->dts != AV_NOPTS_VALUE && duration) {
                ist->next_dts += duration;
            } else
                ist->next_dts = AV_NOPTS_VALUE;

            if (got_output)
                ist->next_pts += duration; //FIXME the duration is not correct in some cases
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            ret = transcode_subtitles(ist, &avpkt, &got_output);
            break;
        default:
            return -1;
        }

        if (ret < 0)
            return ret;

        avpkt.dts = avpkt.pts = AV_NOPTS_VALUE;

        // touch data and size only if not EOF
        if (pkt) {
            if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
                ret = avpkt.size;
            avpkt.data += ret;
            avpkt.size -= ret;
        }
        if (!got_output) {
            continue;
        }
    }

    /* handle stream copy */
    if (!ist->decoding_needed) {
        ist->dts = ist->next_dts;
        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
                             ist->dec_ctx->sample_rate;
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (ist->framerate.num) {
                // TODO: Remove work-around for c99-to-c89 issue 7
                AVRational time_base_q = AV_TIME_BASE_Q;
                int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
                ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
            } else if (pkt->duration) {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            } else if(ist->dec_ctx->time_base.num != 0) {
                int ticks= ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
                ist->next_dts += ((int64_t)AV_TIME_BASE *
                                  ist->dec_ctx->time_base.num * ticks) /
                                  ist->dec_ctx->time_base.den;
            }
            break;
        }
        ist->pts = ist->dts;
        ist->next_pts = ist->next_dts;
    }
    for (i = 0; pkt && i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || ost->encoding_needed)
            continue;

        do_streamcopy(ist, ost, pkt);
    }

    return 0;
}
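
output_packet() relies on av_rescale_q() to convert packet timestamps from the stream time base into AV_TIME_BASE (microsecond) units. A minimal sketch of just that conversion, assuming an MPEG-TS-style 1/90000 time base:

#include <inttypes.h>
#include <stdio.h>
#include <libavutil/avutil.h>        /* AV_TIME_BASE_Q */
#include <libavutil/mathematics.h>   /* av_rescale_q() */

int main(void)
{
    AVRational stream_tb = { 1, 90000 };
    int64_t dts = 900;
    /* 900 * (1/90000) s = 10 ms = 10000 us */
    int64_t dts_us = av_rescale_q(dts, stream_tb, AV_TIME_BASE_Q);
    printf("dts %"PRId64" -> %"PRId64" us\n", dts, dts_us);
    return 0;
}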
Example #13
0
static void select_frame(AVFilterContext *ctx, AVFrame *frame)
{
    SelectContext *select = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    double res;

    if (isnan(select->var_values[VAR_START_PTS]))
        select->var_values[VAR_START_PTS] = TS2D(frame->pts);
    if (isnan(select->var_values[VAR_START_T]))
        select->var_values[VAR_START_T] = TS2D(frame->pts) * av_q2d(inlink->time_base);

    select->var_values[VAR_N  ] = inlink->frame_count;
    select->var_values[VAR_PTS] = TS2D(frame->pts);
    select->var_values[VAR_T  ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
    select->var_values[VAR_POS] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
    select->var_values[VAR_KEY] = frame->key_frame;

    switch (inlink->type) {
    case AVMEDIA_TYPE_AUDIO:
        select->var_values[VAR_SAMPLES_N] = frame->nb_samples;
        break;

    case AVMEDIA_TYPE_VIDEO:
        select->var_values[VAR_INTERLACE_TYPE] =
            !frame->interlaced_frame ? INTERLACE_TYPE_P :
            frame->top_field_first   ? INTERLACE_TYPE_T : INTERLACE_TYPE_B;
        select->var_values[VAR_PICT_TYPE] = frame->pict_type;
        if (select->do_scene_detect) {
            char buf[32];
            select->var_values[VAR_SCENE] = get_scene_score(ctx, frame);
            // TODO: document metadata
            snprintf(buf, sizeof(buf), "%f", select->var_values[VAR_SCENE]);
            av_dict_set(avpriv_frame_get_metadatap(frame), "lavfi.scene_score", buf, 0);
        }
        break;
    }

    select->select = res = av_expr_eval(select->expr, select->var_values, NULL);
    av_log(inlink->dst, AV_LOG_DEBUG,
           "n:%f pts:%f t:%f key:%d",
           select->var_values[VAR_N],
           select->var_values[VAR_PTS],
           select->var_values[VAR_T],
           frame->key_frame);

    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        av_log(inlink->dst, AV_LOG_DEBUG, " interlace_type:%c pict_type:%c scene:%f",
               (!frame->interlaced_frame) ? 'P' :
               frame->top_field_first     ? 'T' : 'B',
               av_get_picture_type_char(frame->pict_type),
               select->var_values[VAR_SCENE]);
        break;
    case AVMEDIA_TYPE_AUDIO:
        av_log(inlink->dst, AV_LOG_DEBUG, " samples_n:%d consumed_samples_n:%f",
               frame->nb_samples,
               select->var_values[VAR_CONSUMED_SAMPLES_N]);
        break;
    }

    if (res == 0) {
        select->select_out = -1; /* drop */
    } else if (isnan(res) || res < 0) {
        select->select_out = 0; /* first output */
    } else {
        select->select_out = FFMIN(ceilf(res)-1, select->nb_outputs-1); /* other outputs */
    }

    av_log(inlink->dst, AV_LOG_DEBUG, " -> select:%f select_out:%d\n", res, select->select_out);

    if (res) {
        select->var_values[VAR_PREV_SELECTED_N]   = select->var_values[VAR_N];
        select->var_values[VAR_PREV_SELECTED_PTS] = select->var_values[VAR_PTS];
        select->var_values[VAR_PREV_SELECTED_T]   = select->var_values[VAR_T];
        select->var_values[VAR_SELECTED_N] += 1.0;
        if (inlink->type == AVMEDIA_TYPE_AUDIO)
            select->var_values[VAR_CONSUMED_SAMPLES_N] += frame->nb_samples;
    }

    select->var_values[VAR_PREV_PTS] = select->var_values[VAR_PTS];
    select->var_values[VAR_PREV_T]   = select->var_values[VAR_T];
}
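
The end of select_frame() maps the evaluated expression result onto an output pad. A standalone sketch of just that mapping (FFMIN inlined; nb_outputs is an assumed parameter):

#include <math.h>    /* ceilf(), isnan(), NAN */
#include <stdio.h>

#define FFMIN(a, b) ((a) > (b) ? (b) : (a))

static int map_select_out(double res, int nb_outputs)
{
    if (res == 0)
        return -1;                                     /* drop */
    if (isnan(res) || res < 0)
        return 0;                                      /* first output */
    return FFMIN((int)ceilf(res) - 1, nb_outputs - 1); /* other outputs */
}

int main(void)
{
    printf("%d %d %d %d\n",
           map_select_out(0,   3),   /* -1: dropped         */
           map_select_out(NAN, 3),   /*  0: first output    */
           map_select_out(1.0, 3),   /*  0: first output    */
           map_select_out(5.0, 3));  /*  2: clamped to last */
    return 0;
}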
Example #14
0
void AUD_FFMPEGWriter::encode(sample_t* data)
{
	// convert first
	if(m_input_size)
		m_convert(reinterpret_cast<data_t*>(data), reinterpret_cast<data_t*>(data), m_input_size * m_specs.channels);

	AVPacket packet = { 0 };
	av_init_packet(&packet);

#ifdef FFMPEG_HAVE_ENCODE_AUDIO2
	int got_output, ret;
	m_frame->pts = m_frame_pts / av_q2d(m_codecCtx->time_base);
	m_frame_pts++;
#ifdef FFMPEG_HAVE_FRAME_CHANNEL_LAYOUT
	m_frame->channel_layout = m_codecCtx->channel_layout;
#endif

	if(m_deinterleave) {
		for(int channel = 0; channel < m_codecCtx->channels; channel++) {
			for(int i = 0; i < m_frame->nb_samples; i++) {
				memcpy(reinterpret_cast<uint8_t*>(m_deinterleave_buffer.getBuffer()) + (i + channel * m_frame->nb_samples) * m_sample_size,
					   reinterpret_cast<uint8_t*>(data) + (m_codecCtx->channels * i + channel) * m_sample_size, m_sample_size);
			}
		}

		data = m_deinterleave_buffer.getBuffer();
	}

	avcodec_fill_audio_frame(m_frame, m_codecCtx->channels, m_codecCtx->sample_fmt, reinterpret_cast<uint8_t*>(data),
	                         m_frame->nb_samples * av_get_bytes_per_sample(m_codecCtx->sample_fmt) * m_codecCtx->channels, 1);

	ret = avcodec_encode_audio2(m_codecCtx, &packet, m_frame, &got_output);
	if(ret < 0)
		AUD_THROW(AUD_ERROR_FFMPEG, codec_error);

	if(!got_output)
		return;
#else
	sample_t* outbuf = m_output_buffer.getBuffer();

	packet.size = avcodec_encode_audio(m_codecCtx, reinterpret_cast<uint8_t*>(outbuf), m_output_buffer.getSize(), reinterpret_cast<short*>(data));
	if(m_codecCtx->coded_frame && m_codecCtx->coded_frame->pts != AV_NOPTS_VALUE)
		packet.pts = av_rescale_q(m_codecCtx->coded_frame->pts, m_codecCtx->time_base, m_stream->time_base);
	packet.flags |= AV_PKT_FLAG_KEY;
	packet.data = reinterpret_cast<uint8_t*>(outbuf);
#endif

	if(packet.pts != AV_NOPTS_VALUE)
		packet.pts = av_rescale_q(packet.pts, m_codecCtx->time_base, m_stream->time_base);
	if(packet.dts != AV_NOPTS_VALUE)
		packet.dts = av_rescale_q(packet.dts, m_codecCtx->time_base, m_stream->time_base);
	if(packet.duration > 0)
		packet.duration = av_rescale_q(packet.duration, m_codecCtx->time_base, m_stream->time_base);

	packet.stream_index = m_stream->index;

	packet.flags |= AV_PKT_FLAG_KEY;

	if(av_interleaved_write_frame(m_formatCtx, &packet)) {
		av_free_packet(&packet);
		AUD_THROW(AUD_ERROR_FFMPEG, write_error);
	}

	av_free_packet(&packet);
}
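
The pts assignment at the top divides by av_q2d(m_codecCtx->time_base), the usual seconds-to-ticks conversion. A sketch of just that conversion with an assumed 1/48000 audio time base (how m_frame_pts itself advances is left to the class):

#include <stdio.h>
#include <libavutil/rational.h>   /* AVRational, av_q2d() */

int main(void)
{
    AVRational time_base = { 1, 48000 };
    double t = 0.5;                                /* seconds */
    long long pts = (long long)(t / av_q2d(time_base));
    printf("pts = %lld ticks\n", pts);             /* 24000 */
    return 0;
}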
Example #15
0
static int avi_write_header(AVFormatContext *s)
{
    AVIContext *avi = s->priv_data;
    AVIOContext *pb = s->pb;
    int bitrate, n, i, nb_frames, au_byterate, au_ssize, au_scale;
    AVCodecContext *stream, *video_enc;
    int64_t list1, list2, strh, strf;
    AVMetadataTag *t = NULL;

    if (s->nb_streams > AVI_MAX_STREAM_COUNT) {
        av_log(s, AV_LOG_ERROR, "AVI does not support >%d streams\n",
               AVI_MAX_STREAM_COUNT);
        return -1;
    }

    for(n=0;n<s->nb_streams;n++) {
        s->streams[n]->priv_data= av_mallocz(sizeof(AVIStream));
        if(!s->streams[n]->priv_data)
            return AVERROR(ENOMEM);
    }

    /* header list */
    avi->riff_id = 0;
    list1 = avi_start_new_riff(s, pb, "AVI ", "hdrl");

    /* avi header */
    ffio_wfourcc(pb, "avih");
    avio_wl32(pb, 14 * 4);
    bitrate = 0;

    video_enc = NULL;
    for(n=0;n<s->nb_streams;n++) {
        stream = s->streams[n]->codec;
        bitrate += stream->bit_rate;
        if (stream->codec_type == AVMEDIA_TYPE_VIDEO)
            video_enc = stream;
    }

    nb_frames = 0;

    if(video_enc){
        avio_wl32(pb, (uint32_t)(INT64_C(1000000) * video_enc->time_base.num / video_enc->time_base.den));
    } else {
        avio_wl32(pb, 0);
    }
    avio_wl32(pb, bitrate / 8); /* XXX: not quite exact */
    avio_wl32(pb, 0); /* padding */
    if (!pb->seekable)
        avio_wl32(pb, AVIF_TRUSTCKTYPE | AVIF_ISINTERLEAVED); /* flags */
    else
        avio_wl32(pb, AVIF_TRUSTCKTYPE | AVIF_HASINDEX | AVIF_ISINTERLEAVED); /* flags */
    avi->frames_hdr_all = avio_tell(pb); /* remember this offset to fill later */
    avio_wl32(pb, nb_frames); /* nb frames, filled later */
    avio_wl32(pb, 0); /* initial frame */
    avio_wl32(pb, s->nb_streams); /* nb streams */
    avio_wl32(pb, 1024 * 1024); /* suggested buffer size */
    if(video_enc){
        avio_wl32(pb, video_enc->width);
        avio_wl32(pb, video_enc->height);
    } else {
        avio_wl32(pb, 0);
        avio_wl32(pb, 0);
    }
    avio_wl32(pb, 0); /* reserved */
    avio_wl32(pb, 0); /* reserved */
    avio_wl32(pb, 0); /* reserved */
    avio_wl32(pb, 0); /* reserved */

    /* stream list */
    for(i=0;i<n;i++) {
        AVIStream *avist= s->streams[i]->priv_data;
        list2 = ff_start_tag(pb, "LIST");
        ffio_wfourcc(pb, "strl");

        stream = s->streams[i]->codec;

        /* stream generic header */
        strh = ff_start_tag(pb, "strh");
        switch(stream->codec_type) {
        case AVMEDIA_TYPE_SUBTITLE:
            // XSUB subtitles behave like video tracks, other subtitles
            // are not (yet) supported.
            if (stream->codec_id != CODEC_ID_XSUB) {
                av_log(s, AV_LOG_ERROR, "Subtitle streams other than DivX XSUB are not supported by the AVI muxer.\n");
                return AVERROR_PATCHWELCOME;
            }
        case AVMEDIA_TYPE_VIDEO: ffio_wfourcc(pb, "vids"); break;
        case AVMEDIA_TYPE_AUDIO: ffio_wfourcc(pb, "auds"); break;
//      case AVMEDIA_TYPE_TEXT : ffio_wfourcc(pb, "txts"); break;
        case AVMEDIA_TYPE_DATA : ffio_wfourcc(pb, "dats"); break;
        }
        if(stream->codec_type == AVMEDIA_TYPE_VIDEO ||
           stream->codec_id == CODEC_ID_XSUB)
            avio_wl32(pb, stream->codec_tag);
        else
            avio_wl32(pb, 1);
        avio_wl32(pb, 0); /* flags */
        avio_wl16(pb, 0); /* priority */
        avio_wl16(pb, 0); /* language */
        avio_wl32(pb, 0); /* initial frame */

        ff_parse_specific_params(stream, &au_byterate, &au_ssize, &au_scale);

        avio_wl32(pb, au_scale); /* scale */
        avio_wl32(pb, au_byterate); /* rate */
        av_set_pts_info(s->streams[i], 64, au_scale, au_byterate);

        avio_wl32(pb, 0); /* start */
        avist->frames_hdr_strm = avio_tell(pb); /* remember this offset to fill later */
        if (!pb->seekable)
            avio_wl32(pb, AVI_MAX_RIFF_SIZE); /* FIXME: this may be broken, but who cares */
        else
            avio_wl32(pb, 0); /* length, XXX: filled later */

        /* suggested buffer size */ //FIXME set at the end to largest chunk
        if(stream->codec_type == AVMEDIA_TYPE_VIDEO)
            avio_wl32(pb, 1024 * 1024);
        else if(stream->codec_type == AVMEDIA_TYPE_AUDIO)
            avio_wl32(pb, 12 * 1024);
        else
            avio_wl32(pb, 0);
        avio_wl32(pb, -1); /* quality */
        avio_wl32(pb, au_ssize); /* sample size */
        avio_wl32(pb, 0);
        avio_wl16(pb, stream->width);
        avio_wl16(pb, stream->height);
        ff_end_tag(pb, strh);

      if(stream->codec_type != AVMEDIA_TYPE_DATA){
        strf = ff_start_tag(pb, "strf");
        switch(stream->codec_type) {
        case AVMEDIA_TYPE_SUBTITLE:
            // XSUB subtitles behave like video tracks, other subtitles
            // are not (yet) supported.
            if (stream->codec_id != CODEC_ID_XSUB) break;
        case AVMEDIA_TYPE_VIDEO:
            ff_put_bmp_header(pb, stream, ff_codec_bmp_tags, 0);
            break;
        case AVMEDIA_TYPE_AUDIO:
            if (ff_put_wav_header(pb, stream) < 0) {
                return -1;
            }
            break;
        default:
            return -1;
        }
        ff_end_tag(pb, strf);
        if ((t = av_metadata_get(s->streams[i]->metadata, "title", NULL, 0))) {
            avi_write_info_tag(s->pb, "strn", t->value);
            t = NULL;
        }
      }

        if (pb->seekable) {
            unsigned char tag[5];
            int j;

            /* Starting to lay out AVI OpenDML master index.
             * We want to make it JUNK entry for now, since we'd
             * like to get away without making AVI an OpenDML one
             * for compatibility reasons.
             */
            avist->indexes.entry = avist->indexes.ents_allocated = 0;
            avist->indexes.indx_start = ff_start_tag(pb, "JUNK");
            avio_wl16(pb, 4);        /* wLongsPerEntry */
            avio_w8(pb, 0);          /* bIndexSubType (0 == frame index) */
            avio_w8(pb, 0);          /* bIndexType (0 == AVI_INDEX_OF_INDEXES) */
            avio_wl32(pb, 0);        /* nEntriesInUse (will fill out later on) */
            ffio_wfourcc(pb, avi_stream2fourcc(tag, i, stream->codec_type));
                                    /* dwChunkId */
            avio_wl64(pb, 0);        /* dwReserved[3] */
            // avio_wl32(pb, 0);        /* Must be 0.    */
            for (j=0; j < AVI_MASTER_INDEX_SIZE * 2; j++)
                 avio_wl64(pb, 0);
            ff_end_tag(pb, avist->indexes.indx_start);
        }

        if(   stream->codec_type == AVMEDIA_TYPE_VIDEO
           && s->streams[i]->sample_aspect_ratio.num>0
           && s->streams[i]->sample_aspect_ratio.den>0){
            int vprp= ff_start_tag(pb, "vprp");
            AVRational dar = av_mul_q(s->streams[i]->sample_aspect_ratio,
                                      av_create_rational(stream->width, stream->height));
            int num, den;
            av_reduce(&num, &den, dar.num, dar.den, 0xFFFF);

            avio_wl32(pb, 0); //video format  = unknown
            avio_wl32(pb, 0); //video standard= unknown
            avio_wl32(pb, lrintf(1.0/av_q2d(stream->time_base)));
            avio_wl32(pb, stream->width );
            avio_wl32(pb, stream->height);
            avio_wl16(pb, den);
            avio_wl16(pb, num);
            avio_wl32(pb, stream->width );
            avio_wl32(pb, stream->height);
            avio_wl32(pb, 1); //progressive FIXME

            avio_wl32(pb, stream->height);
            avio_wl32(pb, stream->width );
            avio_wl32(pb, stream->height);
            avio_wl32(pb, stream->width );
            avio_wl32(pb, 0);
            avio_wl32(pb, 0);

            avio_wl32(pb, 0);
            avio_wl32(pb, 0);
            ff_end_tag(pb, vprp);
        }

        ff_end_tag(pb, list2);
    }

    if (pb->seekable) {
        /* AVI could become an OpenDML one, if it grows beyond 2Gb range */
        avi->odml_list = ff_start_tag(pb, "JUNK");
        ffio_wfourcc(pb, "odml");
        ffio_wfourcc(pb, "dmlh");
        avio_wl32(pb, 248);
        for (i = 0; i < 248; i+= 4)
             avio_wl32(pb, 0);
        ff_end_tag(pb, avi->odml_list);
    }

    ff_end_tag(pb, list1);

    list2 = ff_start_tag(pb, "LIST");
    ffio_wfourcc(pb, "INFO");
    ff_metadata_conv(&s->metadata, ff_avi_metadata_conv, NULL);
    for (i = 0; *ff_avi_tags[i]; i++) {
        if ((t = av_metadata_get(s->metadata, ff_avi_tags[i], NULL, AV_METADATA_MATCH_CASE)))
            avi_write_info_tag(s->pb, t->key, t->value);
    }
    ff_end_tag(pb, list2);

    /* some padding for easier tag editing */
    list2 = ff_start_tag(pb, "JUNK");
    for (i = 0; i < 1016; i += 4)
        avio_wl32(pb, 0);
    ff_end_tag(pb, list2);

    avi->movi_list = ff_start_tag(pb, "LIST");
    ffio_wfourcc(pb, "movi");

    avio_flush(pb);

    return 0;
}
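
The first dword written after the avih fourcc above is dwMicroSecPerFrame, computed as INT64_C(1000000) * time_base.num / time_base.den. A worked sketch of that computation, assuming an NTSC-style 1001/30000 time base:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int num = 1001, den = 30000;   /* time base = seconds per frame */
    uint32_t us_per_frame = (uint32_t)(INT64_C(1000000) * num / den);
    printf("%u us per frame\n", us_per_frame);   /* 33366 */
    return 0;
}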
Example #16
0
/**
 * Grabs a frame from gdi (public device demuxer API).
 *
 * @param s1 Context from avformat core
 * @param pkt Packet holding the grabbed frame
 * @return frame size in bytes
 */
static int gdigrab_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    struct gdigrab *gdigrab = s1->priv_data;

    HDC        dest_hdc   = gdigrab->dest_hdc;
    HDC        source_hdc = gdigrab->source_hdc;
    RECT       clip_rect  = gdigrab->clip_rect;
    AVRational time_base  = gdigrab->time_base;
    int64_t    time_frame = gdigrab->time_frame;

    BITMAPFILEHEADER bfh;
    int file_size = gdigrab->header_size + gdigrab->frame_size;

    int64_t curtime, delay;

    /* Calculate the time of the next frame */
    time_frame += INT64_C(1000000);

    /* Run Window message processing queue */
    if (gdigrab->show_region)
        gdigrab_region_wnd_update(s1, gdigrab);

    /* wait based on the frame rate */
    for (;;) {
        curtime = av_gettime();
        delay = time_frame * av_q2d(time_base) - curtime;
        if (delay <= 0) {
            if (delay < INT64_C(-1000000) * av_q2d(time_base)) {
                time_frame += INT64_C(1000000);
            }
            break;
        }
        if (s1->flags & AVFMT_FLAG_NONBLOCK) {
            return AVERROR(EAGAIN);
        } else {
            av_usleep(delay);
        }
    }

    if (av_new_packet(pkt, file_size) < 0)
        return AVERROR(ENOMEM);
    pkt->pts = curtime;

    /* Blit screen grab */
    if (!BitBlt(dest_hdc, 0, 0,
                clip_rect.right - clip_rect.left,
                clip_rect.bottom - clip_rect.top,
                source_hdc,
                clip_rect.left, clip_rect.top, SRCCOPY | CAPTUREBLT)) {
        WIN32_API_ERROR("Failed to capture image");
        return AVERROR(EIO);
    }
    if (gdigrab->draw_mouse)
        paint_mouse_pointer(s1, gdigrab);

    /* Copy bits to packet data */

    bfh.bfType = 0x4d42; /* "BM" in little-endian */
    bfh.bfSize = file_size;
    bfh.bfReserved1 = 0;
    bfh.bfReserved2 = 0;
    bfh.bfOffBits = gdigrab->header_size;

    memcpy(pkt->data, &bfh, sizeof(bfh));

    memcpy(pkt->data + sizeof(bfh), &gdigrab->bmi.bmiHeader, sizeof(gdigrab->bmi.bmiHeader));

    if (gdigrab->bmi.bmiHeader.biBitCount <= 8)
        GetDIBColorTable(dest_hdc, 0, 1 << gdigrab->bmi.bmiHeader.biBitCount,
                (RGBQUAD *) (pkt->data + sizeof(bfh) + sizeof(gdigrab->bmi.bmiHeader)));

    memcpy(pkt->data + gdigrab->header_size, gdigrab->buffer, gdigrab->frame_size);

    gdigrab->time_frame = time_frame;

    return gdigrab->header_size + gdigrab->frame_size;
}
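
The wait loop above paces grabbing: time_frame advances by 1000000 per frame, and av_q2d(time_base) (the frame period in seconds) scales it into microseconds. A standalone sketch with an assumed 30 fps period:

#include <stdint.h>
#include <stdio.h>
#include <libavutil/rational.h>   /* AVRational, av_q2d() */

int main(void)
{
    AVRational time_base = { 1, 30 };      /* seconds per frame */
    int64_t time_frame = 0;
    for (int n = 0; n < 3; n++) {
        time_frame += 1000000;
        printf("frame %d due at %.0f us\n",
               n, time_frame * av_q2d(time_base)); /* 33333, 66667, 100000 */
    }
    return 0;
}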
Example #17
0
static ImBuf *avi_fetchibuf(struct anim *anim, int position)
{
	ImBuf *ibuf = NULL;
	int *tmp;
	int y;
	
	if (anim == NULL) return (NULL);

#if defined(_WIN32) && !defined(FREE_WINDOWS)
	if (anim->avistreams) {
		LPBITMAPINFOHEADER lpbi;

		if (anim->pgf) {
			lpbi = AVIStreamGetFrame(anim->pgf, position + AVIStreamStart(anim->pavi[anim->firstvideo]));
			if (lpbi) {
				ibuf = IMB_ibImageFromMemory((unsigned char *) lpbi, 100, IB_rect, "<avi_fetchibuf>");
//Oh brother...
			}
		}
	}
	else {
#else
	if (1) {
#endif
		ibuf = IMB_allocImBuf(anim->x, anim->y, 24, IB_rect);

		tmp = AVI_read_frame(anim->avi, AVI_FORMAT_RGB32, position,
		                     AVI_get_stream(anim->avi, AVIST_VIDEO, 0));
		
		if (tmp == NULL) {
			printf("Error reading frame from AVI\n");
			IMB_freeImBuf(ibuf);
			return NULL;
		}

		for (y = 0; y < anim->y; y++) {
			memcpy(&(ibuf->rect)[((anim->y - y) - 1) * anim->x],  &tmp[y * anim->x],
			       anim->x * 4);
		}
		
		MEM_freeN(tmp);
	}
	
	ibuf->profile = IB_PROFILE_SRGB;
	
	return ibuf;
}

#ifdef WITH_FFMPEG

extern void do_init_ffmpeg(void);

static int startffmpeg(struct anim *anim)
{
	int i, videoStream;

	AVCodec *pCodec;
	AVFormatContext *pFormatCtx = NULL;
	AVCodecContext *pCodecCtx;
	int frs_num;
	double frs_den;
	int streamcount;

#ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
	/* The following for color space determination */
	int srcRange, dstRange, brightness, contrast, saturation;
	int *table;
	const int *inv_table;
#endif

	if (anim == 0) return(-1);

	streamcount = anim->streamindex;

	do_init_ffmpeg();

	if (avformat_open_input(&pFormatCtx, anim->name, NULL, NULL) != 0) {
		return -1;
	}

	if (av_find_stream_info(pFormatCtx) < 0) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	av_dump_format(pFormatCtx, 0, anim->name, 0);


	/* Find the video stream */
	videoStream = -1;

	for (i = 0; i < pFormatCtx->nb_streams; i++)
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			if (streamcount > 0) {
				streamcount--;
				continue;
			}
			videoStream = i;
			break;
		}

	if (videoStream == -1) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	pCodecCtx = pFormatCtx->streams[videoStream]->codec;

	/* Find the decoder for the video stream */
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	pCodecCtx->workaround_bugs = 1;

	if (avcodec_open(pCodecCtx, pCodec) < 0) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	anim->duration = ceil(pFormatCtx->duration *
	                      av_q2d(pFormatCtx->streams[videoStream]->r_frame_rate) /
	                      AV_TIME_BASE);

	frs_num = pFormatCtx->streams[videoStream]->r_frame_rate.num;
	frs_den = pFormatCtx->streams[videoStream]->r_frame_rate.den;

	frs_den *= AV_TIME_BASE;

	while (frs_num % 10 == 0 && frs_den >= 2.0 && frs_num > 10) {
		frs_num /= 10;
		frs_den /= 10;
	}

	anim->frs_sec = frs_num;
	anim->frs_sec_base = frs_den;

	anim->params = 0;

	anim->x = pCodecCtx->width;
	anim->y = pCodecCtx->height;
	anim->interlacing = 0;
	anim->orientation = 0;
	anim->framesize = anim->x * anim->y * 4;

	anim->curposition = -1;
	anim->last_frame = 0;
	anim->last_pts = -1;
	anim->next_pts = -1;
	anim->next_packet.stream_index = -1;

	anim->pFormatCtx = pFormatCtx;
	anim->pCodecCtx = pCodecCtx;
	anim->pCodec = pCodec;
	anim->videoStream = videoStream;

	anim->pFrame = avcodec_alloc_frame();
	anim->pFrameComplete = FALSE;
	anim->pFrameDeinterlaced = avcodec_alloc_frame();
	anim->pFrameRGB = avcodec_alloc_frame();

	if (avpicture_get_size(PIX_FMT_RGBA, anim->x, anim->y) !=
	    anim->x * anim->y * 4)
	{
		fprintf(stderr,
		        "ffmpeg has changed alloc scheme ... ARGHHH!\n");
		avcodec_close(anim->pCodecCtx);
		av_close_input_file(anim->pFormatCtx);
		av_free(anim->pFrameRGB);
		av_free(anim->pFrameDeinterlaced);
		av_free(anim->pFrame);
		anim->pCodecCtx = NULL;
		return -1;
	}

	if (anim->ib_flags & IB_animdeinterlace) {
		avpicture_fill((AVPicture *) anim->pFrameDeinterlaced,
		               MEM_callocN(avpicture_get_size(
		                               anim->pCodecCtx->pix_fmt,
		                               anim->x, anim->y),
		                           "ffmpeg deinterlace"),
		               anim->pCodecCtx->pix_fmt, anim->x, anim->y);
	}

	if (pCodecCtx->has_b_frames) {
		anim->preseek = 25; /* FIXME: detect gopsize ... */
	}
	else {
		anim->preseek = 0;
	}
	
	anim->img_convert_ctx = sws_getContext(
	        anim->pCodecCtx->width,
	        anim->pCodecCtx->height,
	        anim->pCodecCtx->pix_fmt,
	        anim->pCodecCtx->width,
	        anim->pCodecCtx->height,
	        PIX_FMT_RGBA,
	        SWS_FAST_BILINEAR | SWS_PRINT_INFO,
	        NULL, NULL, NULL);
		
	if (!anim->img_convert_ctx) {
		fprintf(stderr,
		        "Can't transform color space??? Bailing out...\n");
		avcodec_close(anim->pCodecCtx);
		av_close_input_file(anim->pFormatCtx);
		av_free(anim->pFrameRGB);
		av_free(anim->pFrameDeinterlaced);
		av_free(anim->pFrame);
		anim->pCodecCtx = NULL;
		return -1;
	}

#ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
	/* Try to detect if input has 0-255 YCbCR range (JFIF Jpeg MotionJpeg) */
	if (!sws_getColorspaceDetails(anim->img_convert_ctx, (int **)&inv_table, &srcRange,
	                              &table, &dstRange, &brightness, &contrast, &saturation))
	{
		srcRange = srcRange || anim->pCodecCtx->color_range == AVCOL_RANGE_JPEG;
		inv_table = sws_getCoefficients(anim->pCodecCtx->colorspace);

		if (sws_setColorspaceDetails(anim->img_convert_ctx, (int *)inv_table, srcRange,
		                             table, dstRange, brightness, contrast, saturation))
		{
			printf("Warning: Could not set libswscale colorspace details.\n");
		}
	}
	else {
		printf("Warning: Could not set libswscale colorspace details.\n");
	}
#endif
		
	return (0);
}
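
startffmpeg() estimates the clip length from the container duration, which is expressed in AV_TIME_BASE (microsecond) units. A standalone sketch of that estimate with assumed numbers (10 s at 24 fps):

#include <math.h>      /* ceil() */
#include <stdint.h>
#include <stdio.h>

#define AV_TIME_BASE 1000000

int main(void)
{
    int64_t duration_us = 10000000;   /* container duration: 10 s */
    double fps = 24.0;
    int frames = (int)ceil(duration_us * fps / AV_TIME_BASE);
    printf("%d frames\n", frames);    /* 240 */
    return 0;
}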
Example #18
0
static int avi_write_header(AVFormatContext *s)
{
    AVIContext *avi = s->priv_data;
    AVIOContext *pb = s->pb;
    int bitrate, n, i, nb_frames, au_byterate, au_ssize, au_scale;
    AVCodecContext *video_enc;
    AVStream *video_st = NULL;
    int64_t list1, list2, strh, strf;
    AVDictionaryEntry *t = NULL;
    int padding;

    if (s->nb_streams > AVI_MAX_STREAM_COUNT) {
        av_log(s, AV_LOG_ERROR, "AVI does not support >%d streams\n",
               AVI_MAX_STREAM_COUNT);
        return AVERROR(EINVAL);
    }

    for (n = 0; n < s->nb_streams; n++) {
        s->streams[n]->priv_data = av_mallocz(sizeof(AVIStream));
        if (!s->streams[n]->priv_data)
            return AVERROR(ENOMEM);
    }

    /* header list */
    avi->riff_id = 0;
    list1 = avi_start_new_riff(s, pb, "AVI ", "hdrl");

    /* avi header */
    ffio_wfourcc(pb, "avih");
    avio_wl32(pb, 14 * 4);
    bitrate = 0;

    video_enc = NULL;
    for (n = 0; n < s->nb_streams; n++) {
        AVCodecContext *codec = s->streams[n]->codec;
        bitrate += codec->bit_rate;
        if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_enc = codec;
            video_st = s->streams[n];
        }
    }

    nb_frames = 0;

    // TODO: should be avg_frame_rate
    if (video_st)
        avio_wl32(pb, (uint32_t) (INT64_C(1000000) * video_st->time_base.num /
                                  video_st->time_base.den));
    else
        avio_wl32(pb, 0);
    avio_wl32(pb, bitrate / 8); /* XXX: not quite exact */
    avio_wl32(pb, 0); /* padding */
    if (!pb->seekable)
        avio_wl32(pb, AVIF_TRUSTCKTYPE | AVIF_ISINTERLEAVED);  /* flags */
    else
        avio_wl32(pb, AVIF_TRUSTCKTYPE | AVIF_HASINDEX | AVIF_ISINTERLEAVED);  /* flags */
    avi->frames_hdr_all = avio_tell(pb); /* remember this offset to fill later */
    avio_wl32(pb, nb_frames); /* nb frames, filled later */
    avio_wl32(pb, 0); /* initial frame */
    avio_wl32(pb, s->nb_streams); /* nb streams */
    avio_wl32(pb, 1024 * 1024); /* suggested buffer size */
    if (video_enc) {
        avio_wl32(pb, video_enc->width);
        avio_wl32(pb, video_enc->height);
    } else {
        avio_wl32(pb, 0);
        avio_wl32(pb, 0);
    }
    avio_wl32(pb, 0); /* reserved */
    avio_wl32(pb, 0); /* reserved */
    avio_wl32(pb, 0); /* reserved */
    avio_wl32(pb, 0); /* reserved */

    /* stream list */
    for (i = 0; i < n; i++) {
        AVStream *st = s->streams[i];
        AVCodecContext *enc = st->codec;
        AVIStream *avist = st->priv_data;
        list2 = ff_start_tag(pb, "LIST");
        ffio_wfourcc(pb, "strl");

        /* stream generic header */
        strh = ff_start_tag(pb, "strh");
        switch (enc->codec_type) {
        case AVMEDIA_TYPE_SUBTITLE:
            // XSUB subtitles behave like video tracks, other subtitles
            // are not (yet) supported.
            if (enc->codec_id != AV_CODEC_ID_XSUB) {
                av_log(s, AV_LOG_ERROR,
                       "Subtitle streams other than DivX XSUB are not supported by the AVI muxer.\n");
                return AVERROR_PATCHWELCOME;
            }
        case AVMEDIA_TYPE_VIDEO:
            ffio_wfourcc(pb, "vids");
            break;
        case AVMEDIA_TYPE_AUDIO:
            ffio_wfourcc(pb, "auds");
            break;
//      case AVMEDIA_TYPE_TEXT:
//          ffio_wfourcc(pb, "txts");
//          break;
        case AVMEDIA_TYPE_DATA:
            ffio_wfourcc(pb, "dats");
            break;
        }
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO ||
            enc->codec_id == AV_CODEC_ID_XSUB)
            avio_wl32(pb, enc->codec_tag);
        else
            avio_wl32(pb, 1);
        avio_wl32(pb, 0); /* flags */
        avio_wl16(pb, 0); /* priority */
        avio_wl16(pb, 0); /* language */
        avio_wl32(pb, 0); /* initial frame */

        ff_parse_specific_params(st, &au_byterate, &au_ssize, &au_scale);

        if (   enc->codec_type == AVMEDIA_TYPE_VIDEO
            && enc->codec_id != AV_CODEC_ID_XSUB
            && au_byterate > 1000LL*au_scale) {
            au_byterate = 600;
            au_scale    = 1;
        }
        avpriv_set_pts_info(st, 64, au_scale, au_byterate);
        if (enc->codec_id == AV_CODEC_ID_XSUB)
            au_scale = au_byterate = 0;

        avio_wl32(pb, au_scale); /* scale */
        avio_wl32(pb, au_byterate); /* rate */

        avio_wl32(pb, 0); /* start */
        /* remember this offset to fill later */
        avist->frames_hdr_strm = avio_tell(pb);
        if (!pb->seekable)
            /* FIXME: this may be broken, but who cares */
            avio_wl32(pb, AVI_MAX_RIFF_SIZE);
        else
            avio_wl32(pb, 0);  /* length, XXX: filled later */

        /* suggested buffer size, is set to largest chunk size in avi_write_trailer */
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO)
            avio_wl32(pb, 1024 * 1024);
        else if (enc->codec_type == AVMEDIA_TYPE_AUDIO)
            avio_wl32(pb, 12 * 1024);
        else
            avio_wl32(pb, 0);
        avio_wl32(pb, -1); /* quality */
        avio_wl32(pb, au_ssize); /* sample size */
        avio_wl32(pb, 0);
        avio_wl16(pb, enc->width);
        avio_wl16(pb, enc->height);
        ff_end_tag(pb, strh);

        if (enc->codec_type != AVMEDIA_TYPE_DATA) {
            int ret;
            enum AVPixelFormat pix_fmt;

            strf = ff_start_tag(pb, "strf");
            switch (enc->codec_type) {
            case AVMEDIA_TYPE_SUBTITLE:
                /* XSUB subtitles behave like video tracks, other subtitles
                 * are not (yet) supported. */
                if (enc->codec_id != AV_CODEC_ID_XSUB)
                    break;
            case AVMEDIA_TYPE_VIDEO:
                /* WMP expects RGB 5:5:5 rawvideo in avi to have bpp set to 16. */
                if (  !enc->codec_tag
                    && enc->codec_id == AV_CODEC_ID_RAWVIDEO
                    && enc->pix_fmt == AV_PIX_FMT_RGB555LE
                    && enc->bits_per_coded_sample == 15)
                    enc->bits_per_coded_sample = 16;
                ff_put_bmp_header(pb, enc, ff_codec_bmp_tags, 0, 0);
                pix_fmt = avpriv_find_pix_fmt(avpriv_pix_fmt_bps_avi,
                                              enc->bits_per_coded_sample);
                if (   !enc->codec_tag
                    && enc->codec_id == AV_CODEC_ID_RAWVIDEO
                    && enc->pix_fmt != pix_fmt
                    && enc->pix_fmt != AV_PIX_FMT_NONE)
                    av_log(s, AV_LOG_ERROR, "%s rawvideo cannot be written to avi, output file will be unreadable\n",
                          av_get_pix_fmt_name(enc->pix_fmt));
                break;
            case AVMEDIA_TYPE_AUDIO:
                if ((ret = ff_put_wav_header(pb, enc, 0)) < 0)
                    return ret;
                break;
            default:
                av_log(s, AV_LOG_ERROR,
                    "Invalid or not supported codec type '%s' found in the input\n",
                    (char *)av_x_if_null(av_get_media_type_string(enc->codec_type), "?"));
                return AVERROR(EINVAL);
            }
            ff_end_tag(pb, strf);
            if ((t = av_dict_get(st->metadata, "title", NULL, 0))) {
                ff_riff_write_info_tag(s->pb, "strn", t->value);
                t = NULL;
            }
            if (enc->codec_id == AV_CODEC_ID_XSUB
            && (t = av_dict_get(s->streams[i]->metadata, "language", NULL, 0))) {
                const char* langstr = av_convert_lang_to(t->value, AV_LANG_ISO639_1);
                t = NULL;
                if (langstr) {
                    char* str = av_asprintf("Subtitle - %s-xx;02", langstr);
                    ff_riff_write_info_tag(s->pb, "strn", str);
                    av_free(str);
                }
            }
        }

        if (pb->seekable) {
            unsigned char tag[5];
            int j;

            /* Starting to lay out AVI OpenDML master index.
             * We want to make it JUNK entry for now, since we'd
             * like to get away without making AVI an OpenDML one
             * for compatibility reasons. */
            avist->indexes.entry      = avist->indexes.ents_allocated = 0;
            avist->indexes.indx_start = ff_start_tag(pb, "JUNK");
            avio_wl16(pb, 4);   /* wLongsPerEntry */
            avio_w8(pb, 0);     /* bIndexSubType (0 == frame index) */
            avio_w8(pb, 0);     /* bIndexType (0 == AVI_INDEX_OF_INDEXES) */
            avio_wl32(pb, 0);   /* nEntriesInUse (will fill out later on) */
            ffio_wfourcc(pb, avi_stream2fourcc(tag, i, enc->codec_type));
                                /* dwChunkId */
            avio_wl64(pb, 0);   /* dwReserved[3] */
            // avio_wl32(pb, 0);   /* Must be 0.    */
            for (j = 0; j < AVI_MASTER_INDEX_SIZE * 2; j++)
                avio_wl64(pb, 0);
            ff_end_tag(pb, avist->indexes.indx_start);
        }

        if (enc->codec_type == AVMEDIA_TYPE_VIDEO   &&
            st->sample_aspect_ratio.num > 0 &&
            st->sample_aspect_ratio.den > 0) {
            int vprp       = ff_start_tag(pb, "vprp");
            AVRational dar = av_mul_q(st->sample_aspect_ratio,
                                      (AVRational) { enc->width,
                                                     enc->height });
            int num, den;
            av_reduce(&num, &den, dar.num, dar.den, 0xFFFF);

            avio_wl32(pb, 0); // video format   = unknown
            avio_wl32(pb, 0); // video standard = unknown
            // TODO: should be avg_frame_rate
            avio_wl32(pb, lrintf(1.0 / av_q2d(st->time_base)));
            avio_wl32(pb, enc->width);
            avio_wl32(pb, enc->height);
            avio_wl16(pb, den);
            avio_wl16(pb, num);
            avio_wl32(pb, enc->width);
            avio_wl32(pb, enc->height);
            avio_wl32(pb, 1); // progressive FIXME

            avio_wl32(pb, enc->height);
            avio_wl32(pb, enc->width);
            avio_wl32(pb, enc->height);
            avio_wl32(pb, enc->width);
            avio_wl32(pb, 0);
            avio_wl32(pb, 0);

            avio_wl32(pb, 0);
            avio_wl32(pb, 0);
            ff_end_tag(pb, vprp);
        }

        ff_end_tag(pb, list2);
    }

    if (pb->seekable) {
        /* AVI could become an OpenDML one, if it grows beyond 2Gb range */
        avi->odml_list = ff_start_tag(pb, "JUNK");
        ffio_wfourcc(pb, "odml");
        ffio_wfourcc(pb, "dmlh");
        avio_wl32(pb, 248);
        for (i = 0; i < 248; i += 4)
            avio_wl32(pb, 0);
        ff_end_tag(pb, avi->odml_list);
    }

    ff_end_tag(pb, list1);

    ff_riff_write_info(s);


    padding = s->metadata_header_padding;
    if (padding < 0)
        padding = 1016;

    /* some padding for easier tag editing */
    if (padding) {
        list2 = ff_start_tag(pb, "JUNK");
        for (i = padding; i > 0; i -= 4)
            avio_wl32(pb, 0);
        ff_end_tag(pb, list2);
    }

    avi->movi_list = ff_start_tag(pb, "LIST");
    ffio_wfourcc(pb, "movi");

    avio_flush(pb);

    return 0;
}
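
The vprp chunk above stores the display aspect ratio as a 16-bit num:den pair, computed from the sample aspect ratio and the frame size. A minimal sketch, assuming square pixels at 1920x1080:

#include <stdio.h>
#include <libavutil/rational.h>   /* AVRational, av_mul_q(), av_reduce() */

int main(void)
{
    AVRational sar = { 1, 1 };                                /* square pixels */
    AVRational dar = av_mul_q(sar, (AVRational){ 1920, 1080 });
    int num, den;
    av_reduce(&num, &den, dar.num, dar.den, 0xFFFF);          /* fit 16 bits */
    printf("DAR %d:%d\n", num, den);                          /* 16:9 */
    return 0;
}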
Example #19
0
static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
{
    int (*filter_frame)(AVFilterLink *, AVFrame *);
    AVFilterContext *dstctx = link->dst;
    AVFilterPad *dst = link->dstpad;
    AVFrame *out;
    int ret;
    AVFilterCommand *cmd= link->dst->command_queue;
    int64_t pts;

    if (link->closed) {
        av_frame_free(&frame);
        return AVERROR_EOF;
    }

    if (!(filter_frame = dst->filter_frame))
        filter_frame = default_filter_frame;

    /* copy the frame if needed */
    if (dst->needs_writable && !av_frame_is_writable(frame)) {
        av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");

        /* Maybe use ff_copy_buffer_ref instead? */
        switch (link->type) {
        case AVMEDIA_TYPE_VIDEO:
            out = ff_get_video_buffer(link, link->w, link->h);
            break;
        case AVMEDIA_TYPE_AUDIO:
            out = ff_get_audio_buffer(link, frame->nb_samples);
            break;
        default:
            return AVERROR(EINVAL);
        }
        if (!out) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, frame);

        switch (link->type) {
        case AVMEDIA_TYPE_VIDEO:
            av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize,
                          frame->format, frame->width, frame->height);
            break;
        case AVMEDIA_TYPE_AUDIO:
            av_samples_copy(out->extended_data, frame->extended_data,
                            0, 0, frame->nb_samples,
                            av_get_channel_layout_nb_channels(frame->channel_layout),
                            frame->format);
            break;
        default:
            return AVERROR(EINVAL);
        }

        av_frame_free(&frame);
    } else
        out = frame;

    while(cmd && cmd->time <= out->pts * av_q2d(link->time_base)) {
        av_log(link->dst, AV_LOG_DEBUG,
               "Processing command time:%f command:%s arg:%s\n",
               cmd->time, cmd->command, cmd->arg);
        avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
        ff_command_queue_pop(link->dst);
        cmd= link->dst->command_queue;
    }

    pts = out->pts;
    if (dstctx->enable_str) {
        int64_t pos = av_frame_get_pkt_pos(out);
        dstctx->var_values[VAR_N] = link->frame_count;
        dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
        dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;

        dstctx->is_disabled = fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) < 0.5;
        if (dstctx->is_disabled &&
                (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
            filter_frame = default_filter_frame;
    }
    ret = filter_frame(link, out);
    link->frame_count++;
    link->frame_requested = 0;
    ff_update_link_current_pts(link, pts);
    return ret;
}
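
The enable-expression check near the end treats the evaluated value as a boolean: anything with magnitude below 0.5 disables the filter for the current frame. A standalone sketch of just that test:

#include <math.h>    /* fabs() */
#include <stdio.h>

static int is_disabled(double expr_result)
{
    return fabs(expr_result) < 0.5;   /* same threshold as above */
}

int main(void)
{
    printf("%d %d %d\n",
           is_disabled(0.0),    /* 1: disabled */
           is_disabled(1.0),    /* 0: enabled  */
           is_disabled(-0.4));  /* 1: disabled */
    return 0;
}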
Example #20
0
int audio_decode_frame(VideoState* is, uint8_t* audio_buf, int buf_size,
		double* pts_ptr) {
	int len1, data_size = 0;
	AVPacket *pkt = &is->audio_pkt;
	double pts;
	int n;

	for (;;) {
		// A packet might have more than one frame, so you may have to call it several times to get all the data out of the packet.
		while (is->audio_pkt_size > 0) {
			int got_frame = 0;
			len1 = avcodec_decode_audio4(is->audio_ctx, &is->audio_frame,
					&got_frame, pkt);
			printf("avcodec_decode_audio4:%d\n", len1);
			fflush(stdout);
			if (len1 < 0) {
				is->audio_pkt_size = 0;
				break;
			}
			is->audio_pkt_data += len1;
			is->audio_pkt_size -= len1;
			data_size = 0;
			if (got_frame) {
				int convert_len = swr_convert(is->au_convert_ctx, &audio_buf,
						MAX_AUDIO_FRAME_SIZE,
						(const uint8_t**) is->audio_frame.data,
						is->audio_frame.nb_samples);
				data_size = convert_len * is->audio_frame.channels
						* av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);

//				data_size = av_samples_get_buffer_size(NULL, frame.channels,
//						frame.nb_samples, AV_SAMPLE_FMT_S16, 1);
				printf("data_size:%d\n", data_size);
				assert(data_size <= buf_size);
				// We use swr_convert so we don't need to memcpy.
//				memcpy(audio_buf, frame.data[0], data_size);
			}
			if (data_size <= 0) {
				continue;
			}

			pts = is->audio_clock;
			*pts_ptr = pts;
			n = av_get_bytes_per_sample(AV_SAMPLE_FMT_S16)
					* is->audio_ctx->channels;
			is->audio_clock += (double) data_size
					/ (double) (n * is->audio_ctx->sample_rate);
			printf("avcodec_decode_audio4 is->audio_clock=%lf\n",
					(double) data_size
							/ (double) (n * is->audio_ctx->sample_rate));
			return data_size;
		}
		if (pkt->data)
			av_free_packet(pkt);
		if (is->quit) {
			return -1;
		}

		if (packet_queue_get(&is->audioq, pkt, 1, 0) < 0) {
			return -1;
		}
		if (pkt->data == flush_pkt.data) {
			avcodec_flush_buffers(is->audio_ctx);
			continue;
		}
		is->audio_pkt_data = pkt->data;
		is->audio_pkt_size = pkt->size;

		if (pkt->pts != AV_NOPTS_VALUE) {
			is->audio_clock = av_q2d(is->audio_st->time_base) * pkt->pts;
			printf("audio packet_queue_get time_base=%lf audio_clock=%lf\n",
					av_q2d(is->audio_st->time_base), is->audio_clock);
		}
	}
}
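
The clock update in audio_decode_frame() divides the decoded byte count by the bytes-per-second of the output format. A worked sketch with assumed parameters (stereo S16 at 44100 Hz):

#include <stdio.h>

int main(void)
{
    int data_size = 4096;                      /* bytes just decoded */
    int n = 2 /* bytes per S16 sample */ * 2;  /* channels */
    int sample_rate = 44100;
    double step = (double)data_size / (n * sample_rate);
    printf("clock advances %.5f s\n", step);   /* 0.02322 */
    return 0;
}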
Example #21
0
static int startffmpeg(struct anim *anim)
{
    int i, videoStream;

    AVCodec *pCodec;
    AVFormatContext *pFormatCtx = NULL;
    AVCodecContext *pCodecCtx;
    AVRational frame_rate;
    int frs_num;
    double frs_den;
    int streamcount;

#ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
    /* The following for color space determination */
    int srcRange, dstRange, brightness, contrast, saturation;
    int *table;
    const int *inv_table;
#endif

    if (anim == NULL) return(-1);

    streamcount = anim->streamindex;

    if (avformat_open_input(&pFormatCtx, anim->name, NULL, NULL) != 0) {
        return -1;
    }

    if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    av_dump_format(pFormatCtx, 0, anim->name, 0);


    /* Find the video stream */
    videoStream = -1;

    for (i = 0; i < pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            if (streamcount > 0) {
                streamcount--;
                continue;
            }
            videoStream = i;
            break;
        }

    if (videoStream == -1) {
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    pCodecCtx = pFormatCtx->streams[videoStream]->codec;

    /* Find the decoder for the video stream */
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL) {
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    pCodecCtx->workaround_bugs = 1;

    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    frame_rate = av_get_r_frame_rate_compat(pFormatCtx->streams[videoStream]);
    if (pFormatCtx->streams[videoStream]->nb_frames != 0) {
        anim->duration = pFormatCtx->streams[videoStream]->nb_frames;
    }
    else {
        anim->duration = ceil(pFormatCtx->duration *
                              av_q2d(frame_rate) /
                              AV_TIME_BASE);
    }

    frs_num = frame_rate.num;
    frs_den = frame_rate.den;

    frs_den *= AV_TIME_BASE;

    while (frs_num % 10 == 0 && frs_den >= 2.0 && frs_num > 10) {
        frs_num /= 10;
        frs_den /= 10;
    }

    anim->frs_sec = frs_num;
    anim->frs_sec_base = frs_den;

    anim->params = 0;

    anim->x = pCodecCtx->width;
    anim->y = av_get_cropped_height_from_codec(pCodecCtx);

    anim->pFormatCtx = pFormatCtx;
    anim->pCodecCtx = pCodecCtx;
    anim->pCodec = pCodec;
    anim->videoStream = videoStream;

    anim->interlacing = 0;
    anim->orientation = 0;
    anim->framesize = anim->x * anim->y * 4;

    anim->curposition = -1;
    anim->last_frame = 0;
    anim->last_pts = -1;
    anim->next_pts = -1;
    anim->next_packet.stream_index = -1;

    anim->pFrame = av_frame_alloc();
    anim->pFrameComplete = false;
    anim->pFrameDeinterlaced = av_frame_alloc();
    anim->pFrameRGB = av_frame_alloc();

    if (need_aligned_ffmpeg_buffer(anim)) {
        anim->pFrameRGB->format = AV_PIX_FMT_RGBA;
        anim->pFrameRGB->width  = anim->x;
        anim->pFrameRGB->height = anim->y;

        if (av_frame_get_buffer(anim->pFrameRGB, 32) < 0) {
            fprintf(stderr, "Could not allocate frame data.\n");
            avcodec_close(anim->pCodecCtx);
            avformat_close_input(&anim->pFormatCtx);
            av_frame_free(&anim->pFrameRGB);
            av_frame_free(&anim->pFrameDeinterlaced);
            av_frame_free(&anim->pFrame);
            anim->pCodecCtx = NULL;
            return -1;
        }
    }

    if (avpicture_get_size(AV_PIX_FMT_RGBA, anim->x, anim->y) !=
            anim->x * anim->y * 4)
    {
        fprintf(stderr,
                "ffmpeg has changed alloc scheme ... ARGHHH!\n");
        avcodec_close(anim->pCodecCtx);
        avformat_close_input(&anim->pFormatCtx);
        av_frame_free(&anim->pFrameRGB);
        av_frame_free(&anim->pFrameDeinterlaced);
        av_frame_free(&anim->pFrame);
        anim->pCodecCtx = NULL;
        return -1;
    }

    if (anim->ib_flags & IB_animdeinterlace) {
        avpicture_fill((AVPicture *) anim->pFrameDeinterlaced,
                       MEM_callocN(avpicture_get_size(
                                       anim->pCodecCtx->pix_fmt,
                                       anim->pCodecCtx->width,
                                       anim->pCodecCtx->height),
                                   "ffmpeg deinterlace"),
                       anim->pCodecCtx->pix_fmt,
                       anim->pCodecCtx->width,
                       anim->pCodecCtx->height);
    }

    if (pCodecCtx->has_b_frames) {
        anim->preseek = 25; /* FIXME: detect gopsize ... */
    }
    else {
        anim->preseek = 0;
    }

    anim->img_convert_ctx = sws_getContext(
                                anim->x,
                                anim->y,
                                anim->pCodecCtx->pix_fmt,
                                anim->x,
                                anim->y,
                                AV_PIX_FMT_RGBA,
                                SWS_FAST_BILINEAR | SWS_PRINT_INFO | SWS_FULL_CHR_H_INT,
                                NULL, NULL, NULL);

    if (!anim->img_convert_ctx) {
        fprintf(stderr,
                "Can't transform color space??? Bailing out...\n");
        avcodec_close(anim->pCodecCtx);
        avformat_close_input(&anim->pFormatCtx);
        av_frame_free(&anim->pFrameRGB);
        av_frame_free(&anim->pFrameDeinterlaced);
        av_frame_free(&anim->pFrame);
        anim->pCodecCtx = NULL;
        return -1;
    }

#ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
    /* Try to detect if input has 0-255 YCbCR range (JFIF Jpeg MotionJpeg) */
    if (!sws_getColorspaceDetails(anim->img_convert_ctx, (int **)&inv_table, &srcRange,
                                  &table, &dstRange, &brightness, &contrast, &saturation))
    {
        srcRange = srcRange || anim->pCodecCtx->color_range == AVCOL_RANGE_JPEG;
        inv_table = sws_getCoefficients(anim->pCodecCtx->colorspace);

        if (sws_setColorspaceDetails(anim->img_convert_ctx, (int *)inv_table, srcRange,
                                     table, dstRange, brightness, contrast, saturation))
        {
            fprintf(stderr, "Warning: Could not set libswscale colorspace details.\n");
        }
    }
    else {
        fprintf(stderr, "Warning: Could not set libswscale colorspace details.\n");
    }
#endif

    return (0);
}
Example #22
0
static int config_input(AVFilterLink *link)
{
    AVFilterContext *ctx = link->dst;
    CropContext *crop = ctx->priv;
    const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[link->format];
    int ret;
    const char *expr;
    double res;

    crop->var_values[VAR_IN_W]  = crop->var_values[VAR_IW] = ctx->inputs[0]->w;
    crop->var_values[VAR_IN_H]  = crop->var_values[VAR_IH] = ctx->inputs[0]->h;
    crop->var_values[VAR_A]     = (float) link->w / link->h;
    crop->var_values[VAR_SAR]   = link->sample_aspect_ratio.num ? av_q2d(link->sample_aspect_ratio) : 1;
    crop->var_values[VAR_DAR]   = crop->var_values[VAR_A] * crop->var_values[VAR_SAR];
    crop->var_values[VAR_HSUB]  = 1<<pix_desc->log2_chroma_w;
    crop->var_values[VAR_VSUB]  = 1<<pix_desc->log2_chroma_h;
    crop->var_values[VAR_X]     = NAN;
    crop->var_values[VAR_Y]     = NAN;
    crop->var_values[VAR_OUT_W] = crop->var_values[VAR_OW] = NAN;
    crop->var_values[VAR_OUT_H] = crop->var_values[VAR_OH] = NAN;
    crop->var_values[VAR_N]     = 0;
    crop->var_values[VAR_T]     = NAN;
    crop->var_values[VAR_POS]   = NAN;

    av_image_fill_max_pixsteps(crop->max_step, NULL, pix_desc);
    crop->hsub = av_pix_fmt_descriptors[link->format].log2_chroma_w;
    crop->vsub = av_pix_fmt_descriptors[link->format].log2_chroma_h;

    if ((ret = av_expr_parse_and_eval(&res, (expr = crop->ow_expr),
                                      var_names, crop->var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) goto fail_expr;
    crop->var_values[VAR_OUT_W] = crop->var_values[VAR_OW] = res;
    if ((ret = av_expr_parse_and_eval(&res, (expr = crop->oh_expr),
                                      var_names, crop->var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) goto fail_expr;
    crop->var_values[VAR_OUT_H] = crop->var_values[VAR_OH] = res;
    /* evaluate again ow as it may depend on oh */
    if ((ret = av_expr_parse_and_eval(&res, (expr = crop->ow_expr),
                                      var_names, crop->var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) goto fail_expr;
    crop->var_values[VAR_OUT_W] = crop->var_values[VAR_OW] = res;
    if (normalize_double(&crop->w, crop->var_values[VAR_OUT_W]) < 0 ||
        normalize_double(&crop->h, crop->var_values[VAR_OUT_H]) < 0) {
        av_log(ctx, AV_LOG_ERROR,
               "Too big value or invalid expression for out_w/ow or out_h/oh. "
               "Maybe the expression for out_w:'%s' or for out_h:'%s' is self-referencing.\n",
               crop->ow_expr, crop->oh_expr);
        return AVERROR(EINVAL);
    }
    crop->w &= ~((1 << crop->hsub) - 1);
    crop->h &= ~((1 << crop->vsub) - 1);

    if ((ret = av_expr_parse(&crop->x_pexpr, crop->x_expr, var_names,
                             NULL, NULL, NULL, NULL, 0, ctx)) < 0 ||
        (ret = av_expr_parse(&crop->y_pexpr, crop->y_expr, var_names,
                             NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        return AVERROR(EINVAL);

    if (crop->keep_aspect) {
        AVRational dar = av_mul_q(link->sample_aspect_ratio,
                                  (AVRational){ link->w, link->h });
        av_reduce(&crop->out_sar.num, &crop->out_sar.den,
                  dar.num * crop->h, dar.den * crop->w, INT_MAX);
    } else
        crop->out_sar = link->sample_aspect_ratio;

    av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d sar:%d/%d -> w:%d h:%d sar:%d/%d\n",
           link->w, link->h, link->sample_aspect_ratio.num, link->sample_aspect_ratio.den,
           crop->w, crop->h, crop->out_sar.num, crop->out_sar.den);

    if (crop->w <= 0 || crop->h <= 0 ||
        crop->w > link->w || crop->h > link->h) {
        av_log(ctx, AV_LOG_ERROR,
               "Invalid too big or non positive size for width '%d' or height '%d'\n",
               crop->w, crop->h);
        return AVERROR(EINVAL);
    }

    /* set default, required in the case the first computed value for x/y is NAN */
    crop->x = (link->w - crop->w) / 2;
    crop->y = (link->h - crop->h) / 2;
    crop->x &= ~((1 << crop->hsub) - 1);
    crop->y &= ~((1 << crop->vsub) - 1);
    return 0;

fail_expr:
    av_log(NULL, AV_LOG_ERROR, "Error when evaluating the expression '%s'\n", expr);
    return ret;
}
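
config_input() rounds the crop size down to the chroma grid with a bit mask. A standalone sketch of that masking, assuming 4:2:0 (hsub = vsub = 1) and odd input sizes:

#include <stdio.h>

int main(void)
{
    int hsub = 1, vsub = 1;          /* log2 of chroma subsampling */
    int w = 101, h = 57;             /* hypothetical crop request */
    w &= ~((1 << hsub) - 1);         /* 101 -> 100 */
    h &= ~((1 << vsub) - 1);         /* 57  -> 56  */
    printf("w=%d h=%d\n", w, h);
    return 0;
}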
Example #23
0
static int flv_write_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    FLVContext *flv = s->priv_data;
    AVCodecContext *audio_enc = NULL, *video_enc = NULL, *data_enc = NULL;
    int i, metadata_count = 0;
    double framerate = 0.0;
    int64_t metadata_size_pos, data_size, metadata_count_pos;
    AVDictionaryEntry *tag = NULL;

    for (i = 0; i < s->nb_streams; i++) {
        AVCodecContext *enc = s->streams[i]->codec;
        FLVStreamContext *sc;
        switch (enc->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            if (s->streams[i]->avg_frame_rate.den &&
                s->streams[i]->avg_frame_rate.num) {
                framerate = av_q2d(s->streams[i]->avg_frame_rate);
            } else {
                framerate = 1 / av_q2d(s->streams[i]->codec->time_base);
            }
            video_enc = enc;
            if (enc->codec_tag == 0) {
                av_log(s, AV_LOG_ERROR, "Video codec '%s' for stream %d is not compatible with FLV\n",
                       avcodec_get_name(enc->codec_id), i);
                return AVERROR(EINVAL);
            }
            break;
        case AVMEDIA_TYPE_AUDIO:
            audio_enc = enc;
            if (get_audio_flags(s, enc) < 0)
                return AVERROR_INVALIDDATA;
            break;
        case AVMEDIA_TYPE_DATA:
            if (enc->codec_id != AV_CODEC_ID_TEXT) {
                av_log(s, AV_LOG_ERROR, "Data codec '%s' for stream %d is not compatible with FLV\n",
                       avcodec_get_name(enc->codec_id), i);
                return AVERROR_INVALIDDATA;
            }
            data_enc = enc;
            break;
        default:
            av_log(s, AV_LOG_ERROR, "Codec type '%s' for stream %d is not compatible with FLV\n",
                   av_get_media_type_string(enc->codec_type), i);
            return AVERROR(EINVAL);
        }
        avpriv_set_pts_info(s->streams[i], 32, 1, 1000); /* 32 bit pts in ms */

        sc = av_mallocz(sizeof(FLVStreamContext));
        if (!sc)
            return AVERROR(ENOMEM);
        s->streams[i]->priv_data = sc;
        sc->last_ts = -1;
    }

    flv->delay = AV_NOPTS_VALUE;

    avio_write(pb, "FLV", 3);
    avio_w8(pb, 1);
    avio_w8(pb, FLV_HEADER_FLAG_HASAUDIO * !!audio_enc +
                FLV_HEADER_FLAG_HASVIDEO * !!video_enc);
    avio_wb32(pb, 9);
    avio_wb32(pb, 0);

    for (i = 0; i < s->nb_streams; i++)
        if (s->streams[i]->codec->codec_tag == 5) {
            avio_w8(pb, 8);     // message type
            avio_wb24(pb, 0);   // include flags
            avio_wb24(pb, 0);   // time stamp
            avio_wb32(pb, 0);   // reserved
            avio_wb32(pb, 11);  // size
            flv->reserved = 5;
        }

    /* write meta_tag */
    avio_w8(pb, 18);            // tag type META
    metadata_size_pos = avio_tell(pb);
    avio_wb24(pb, 0);           // size of data part (sum of all parts below)
    avio_wb24(pb, 0);           // timestamp
    avio_wb32(pb, 0);           // reserved

    /* now data of data_size size */

    /* first event name as a string */
    avio_w8(pb, AMF_DATA_TYPE_STRING);
    put_amf_string(pb, "onMetaData"); // 12 bytes

    /* mixed array (hash) with size and string/type/data tuples */
    avio_w8(pb, AMF_DATA_TYPE_MIXEDARRAY);
    metadata_count_pos = avio_tell(pb);
    metadata_count = 5 * !!video_enc +
                     5 * !!audio_enc +
                     1 * !!data_enc  +
                     2; // +2 for duration and file size

    avio_wb32(pb, metadata_count);

    put_amf_string(pb, "duration");
    flv->duration_offset= avio_tell(pb);

    // fill in the guessed duration, it'll be corrected later if incorrect
    put_amf_double(pb, s->duration / AV_TIME_BASE);

    if (video_enc) {
        put_amf_string(pb, "width");
        put_amf_double(pb, video_enc->width);

        put_amf_string(pb, "height");
        put_amf_double(pb, video_enc->height);

        put_amf_string(pb, "videodatarate");
        put_amf_double(pb, video_enc->bit_rate / 1024.0);

        put_amf_string(pb, "framerate");
        put_amf_double(pb, framerate);

        put_amf_string(pb, "videocodecid");
        put_amf_double(pb, video_enc->codec_tag);
    }

    if (audio_enc) {
        put_amf_string(pb, "audiodatarate");
        put_amf_double(pb, audio_enc->bit_rate / 1024.0);

        put_amf_string(pb, "audiosamplerate");
        put_amf_double(pb, audio_enc->sample_rate);

        put_amf_string(pb, "audiosamplesize");
        put_amf_double(pb, audio_enc->codec_id == AV_CODEC_ID_PCM_U8 ? 8 : 16);

        put_amf_string(pb, "stereo");
        put_amf_bool(pb, audio_enc->channels == 2);

        put_amf_string(pb, "audiocodecid");
        put_amf_double(pb, audio_enc->codec_tag);
    }

    if (data_enc) {
        put_amf_string(pb, "datastream");
        put_amf_double(pb, 0.0);
    }

    while ((tag = av_dict_get(s->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
        if(   !strcmp(tag->key, "width")
            ||!strcmp(tag->key, "height")
            ||!strcmp(tag->key, "videodatarate")
            ||!strcmp(tag->key, "framerate")
            ||!strcmp(tag->key, "videocodecid")
            ||!strcmp(tag->key, "audiodatarate")
            ||!strcmp(tag->key, "audiosamplerate")
            ||!strcmp(tag->key, "audiosamplesize")
            ||!strcmp(tag->key, "stereo")
            ||!strcmp(tag->key, "audiocodecid")
            ||!strcmp(tag->key, "duration")
            ||!strcmp(tag->key, "onMetaData")
        ){
            av_log(s, AV_LOG_DEBUG, "Ignoring metadata for %s\n", tag->key);
            continue;
        }
        put_amf_string(pb, tag->key);
        avio_w8(pb, AMF_DATA_TYPE_STRING);
        put_amf_string(pb, tag->value);
        metadata_count++;
    }

    put_amf_string(pb, "filesize");
    flv->filesize_offset = avio_tell(pb);
    put_amf_double(pb, 0); // delayed write

    put_amf_string(pb, "");
    avio_w8(pb, AMF_END_OF_OBJECT);

    /* write total size of tag */
    data_size = avio_tell(pb) - metadata_size_pos - 10;

    avio_seek(pb, metadata_count_pos, SEEK_SET);
    avio_wb32(pb, metadata_count);

    avio_seek(pb, metadata_size_pos, SEEK_SET);
    avio_wb24(pb, data_size);
    avio_skip(pb, data_size + 10 - 3);
    avio_wb32(pb, data_size + 11);

    for (i = 0; i < s->nb_streams; i++) {
        AVCodecContext *enc = s->streams[i]->codec;
        if (enc->codec_id == AV_CODEC_ID_AAC || enc->codec_id == AV_CODEC_ID_H264 || enc->codec_id == AV_CODEC_ID_MPEG4) {
            int64_t pos;
            avio_w8(pb, enc->codec_type == AVMEDIA_TYPE_VIDEO ?
                    FLV_TAG_TYPE_VIDEO : FLV_TAG_TYPE_AUDIO);
            avio_wb24(pb, 0); // size patched later
            avio_wb24(pb, 0); // ts
            avio_w8(pb, 0);   // ts ext
            avio_wb24(pb, 0); // streamid
            pos = avio_tell(pb);
            if (enc->codec_id == AV_CODEC_ID_AAC) {
                avio_w8(pb, get_audio_flags(s, enc));
                avio_w8(pb, 0); // AAC sequence header
                avio_write(pb, enc->extradata, enc->extradata_size);
            } else {
                avio_w8(pb, enc->codec_tag | FLV_FRAME_KEY); // flags
                avio_w8(pb, 0); // AVC sequence header
                avio_wb24(pb, 0); // composition time
                ff_isom_write_avcc(pb, enc->extradata, enc->extradata_size);
            }
            data_size = avio_tell(pb) - pos;
            avio_seek(pb, -data_size - 10, SEEK_CUR);
            avio_wb24(pb, data_size);
            avio_skip(pb, data_size + 10 - 3);
            avio_wb32(pb, data_size + 11); // previous tag size
        }
    }

    return 0;
}
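The put_amf_* helpers this muxer calls encode AMF0 values; in flvenc.c they are thin wrappers, roughly as sketched below (reproduced from memory, so treat as an approximation):

static void put_amf_string(AVIOContext *pb, const char *str)
{
    size_t len = strlen(str);
    avio_wb16(pb, len);                /* 16-bit big-endian length prefix */
    avio_write(pb, str, len);          /* AMF0 strings carry no NUL terminator */
}

static void put_amf_double(AVIOContext *pb, double d)
{
    avio_w8(pb, AMF_DATA_TYPE_NUMBER); /* AMF0 number marker */
    avio_wb64(pb, av_double2int(d));   /* 8-byte big-endian IEEE-754 double */
}

Because every AMF0 number occupies exactly 9 bytes, the "delayed write" trick works: the duration_offset and filesize_offset positions recorded above can be overwritten in the trailer with put_amf_double() without shifting any of the bytes that follow.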
Example #24
int audio_decode_frame(VideoState *is, double *pts_ptr) {
  int len1, data_size = 0, n;
  AVPacket *pkt = &is->audio_pkt;
  double pts;

  for(;;) {
    while(is->audio_pkt_size > 0) {
      int got_frame = 0;
      len1 = avcodec_decode_audio4(is->audio_st->codec, &is->audio_frame, &got_frame, pkt);
      if(len1 < 0) {
        /* if error, skip frame */
        is->audio_pkt_size = 0;
        break;
      }
      if (got_frame) {
        if (is->audio_frame.format != AV_SAMPLE_FMT_S16) {
          data_size = decode_frame_from_packet(is, is->audio_frame);
        } else {
          data_size = av_samples_get_buffer_size(NULL,
                                                 is->audio_st->codec->channels,
                                                 is->audio_frame.nb_samples,
                                                 is->audio_st->codec->sample_fmt,
                                                 1);
          memcpy(is->audio_buf, is->audio_frame.data[0], data_size);
        }
      }
      is->audio_pkt_data += len1;
      is->audio_pkt_size -= len1;
      if(data_size <= 0) {
        /* No data yet, get more frames */
        continue;
      }
      pts = is->audio_clock;
      *pts_ptr = pts;
      n = 2 * is->audio_st->codec->channels;
      is->audio_clock += (double)data_size /
                         (double)(n * is->audio_st->codec->sample_rate);

      /* We have data, return it and come back for more later */
      return data_size;
    }
    if(pkt->data)
      av_free_packet(pkt);

    if(is->quit) {
      return -1;
    }
    /* next packet */
    if(packet_queue_get(&is->audioq, pkt, 1) < 0) {
      return -1;
    }
    is->audio_pkt_data = pkt->data;
    is->audio_pkt_size = pkt->size;
    /* if we have a pts, update the audio clock with it */
    if(pkt->pts != AV_NOPTS_VALUE) {
      is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
    }
  }
}
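The clock update in the inner loop converts bytes to seconds: with signed 16-bit samples the byte rate is n * sample_rate = 2 * channels * sample_rate. For 44.1 kHz stereo, a 1152-sample MP3 frame gives data_size = 1152 * 4 = 4608 bytes and advances audio_clock by 4608 / 176400 ≈ 26.1 ms, exactly one frame's duration.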
Example #25
// decode one audio packet and return its uncompressed size
static int audio_decode_frame(struct GroovePlaylist *playlist, struct GrooveFile *file) {
    struct GroovePlaylistPrivate *p = (struct GroovePlaylistPrivate *) playlist;
    struct GrooveFilePrivate *f = (struct GrooveFilePrivate *) file;

    AVPacket *pkt = &f->audio_pkt;
    AVCodecContext *dec = f->audio_st->codec;

    AVPacket *pkt_temp = &p->audio_pkt_temp;
    *pkt_temp = *pkt;

    // update the audio clock with the pts if we can
    if (pkt->pts != AV_NOPTS_VALUE)
        f->audio_clock = av_q2d(f->audio_st->time_base) * pkt->pts;

    int max_data_size = 0;
    int len1, got_frame;
    int new_packet = 1;
    AVFrame *in_frame = p->in_frame;

    // NOTE: the audio packet can contain several frames
    while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
        new_packet = 0;

        len1 = avcodec_decode_audio4(dec, in_frame, &got_frame, pkt_temp);
        if (len1 < 0) {
            // if error, we skip the frame
            pkt_temp->size = 0;
            return -1;
        }

        pkt_temp->data += len1;
        pkt_temp->size -= len1;

        if (!got_frame) {
            // stop sending empty packets if the decoder is finished
            if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
                return 0;
            continue;
        }

        // push the audio data from decoded frame into the filtergraph
        int err = av_buffersrc_write_frame(p->abuffer_ctx, in_frame);
        if (err < 0) {
            av_strerror(err, p->strbuf, sizeof(p->strbuf));
            av_log(NULL, AV_LOG_ERROR, "error writing frame to buffersrc: %s\n",
                    p->strbuf);
            return -1;
        }

        // for each data format in the sink map, pull filtered audio from its
        // buffersink, turn it into a GrooveBuffer and then increment the ref
        // count for each sink in that stack.
        struct SinkMap *map_item = p->sink_map;
        double clock_adjustment = 0;
        while (map_item) {
            struct GrooveSink *example_sink = map_item->stack_head->sink;
            int data_size = 0;
            for (;;) {
                AVFrame *oframe = av_frame_alloc();
                int err = example_sink->buffer_sample_count == 0 ?
                    av_buffersink_get_frame(map_item->abuffersink_ctx, oframe) :
                    av_buffersink_get_samples(map_item->abuffersink_ctx, oframe, example_sink->buffer_sample_count);
                if (err == AVERROR_EOF || err == AVERROR(EAGAIN)) {
                    av_frame_free(&oframe);
                    break;
                }
                if (err < 0) {
                    av_frame_free(&oframe);
                    av_log(NULL, AV_LOG_ERROR, "error reading buffer from buffersink\n");
                    return -1;
                }
                struct GrooveBuffer *buffer = frame_to_groove_buffer(playlist, example_sink, oframe);
                if (!buffer) {
                    av_frame_free(&oframe);
                    return -1;
                }
                data_size += buffer->size;
                struct SinkStack *stack_item = map_item->stack_head;
                // we hold this reference to avoid cleanups until at least this loop
                // is done and we call unref after it.
                groove_buffer_ref(buffer);
                while (stack_item) {
                    struct GrooveSink *sink = stack_item->sink;
                    struct GrooveSinkPrivate *s = (struct GrooveSinkPrivate *) sink;
                    // as soon as we call groove_queue_put, this buffer could be unref'd.
                    // so we ref before putting it in the queue, and unref if it failed.
                    groove_buffer_ref(buffer);
                    if (groove_queue_put(s->audioq, buffer) < 0) {
                        av_log(NULL, AV_LOG_ERROR, "unable to put buffer in queue\n");
                        groove_buffer_unref(buffer);
                    }
                    stack_item = stack_item->next;
                }
                groove_buffer_unref(buffer);
            }
            if (data_size > max_data_size) {
                max_data_size = data_size;
                clock_adjustment = data_size / (double)example_sink->bytes_per_sec;
            }
            map_item = map_item->next;
        }

        // if no pts, then estimate it
        if (pkt->pts == AV_NOPTS_VALUE)
            f->audio_clock += clock_adjustment;
        return max_data_size;
    }
    return max_data_size;
}
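The abuffer/abuffersink pair this loop feeds is configured elsewhere in libgroove; a minimal sketch of such a graph, with illustrative parameters (not libgroove's actual setup) and error checks omitted:

AVFilterGraph *graph = avfilter_graph_alloc();
AVFilterContext *src_ctx, *sink_ctx;
char args[256];

/* describe the frames the source will receive (assumed stream parameters) */
snprintf(args, sizeof(args),
         "sample_rate=44100:sample_fmt=s16:channel_layout=stereo:time_base=1/44100");
avfilter_graph_create_filter(&src_ctx, avfilter_get_by_name("abuffer"),
                             "src", args, NULL, graph);
avfilter_graph_create_filter(&sink_ctx, avfilter_get_by_name("abuffersink"),
                             "sink", NULL, NULL, graph);
avfilter_link(src_ctx, 0, sink_ctx, 0);
avfilter_graph_config(graph, NULL);
/* decode side: av_buffersrc_write_frame(src_ctx, in_frame);
 * consume side: av_buffersink_get_frame(sink_ctx, oframe); */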
Example #26
size_t FFmpegDecoderAudio::decodeFrame(void * const buffer, const size_t size)
{
    for (;;)
    {
        // Decode current packet

        while (m_bytes_remaining > 0)
        {
            int data_size = size;

            const int bytes_decoded = decode_audio(m_context, reinterpret_cast<int16_t*>(buffer), &data_size, m_packet_data, m_bytes_remaining, m_swr_context, m_out_sample_rate, m_out_nb_channels, m_out_sample_format);

            if (bytes_decoded < 0)
            {
                // if error, skip frame
                m_bytes_remaining = 0;
                break;
            }

            m_bytes_remaining -= bytes_decoded;
            m_packet_data += bytes_decoded;

            // If we have some data, return it and come back for more later.
            if (data_size > 0)
                return data_size;
        }

        // Get next packet

        if (m_packet.valid())
            m_packet.clear();

        if (m_exit)
            return 0;

        bool is_empty = true;
        m_packet = m_packets.tryPop(is_empty);

        if (is_empty)
            return 0;

        if (m_packet.type == FFmpegPacket::PACKET_DATA)
        {
            if (m_packet.packet.pts != int64_t(AV_NOPTS_VALUE))
            {
                const double pts = av_q2d(m_stream->time_base) * m_packet.packet.pts;
                m_clocks.audioSetBufferEndPts(pts);
            }

            m_bytes_remaining = m_packet.packet.size;
            m_packet_data = m_packet.packet.data;
        }
        else if (m_packet.type == FFmpegPacket::PACKET_END_OF_STREAM)
        {
            m_end_of_stream = true;
        }
        else if (m_packet.type == FFmpegPacket::PACKET_FLUSH)
        {
            avcodec_flush_buffers(m_context);
        }

        // just output silence when we reached the end of stream
        if (m_end_of_stream)
        {
            memset(buffer, 0, size);
            return size;
        }
    }
}
Example #27
static double modify_qscale(MpegEncContext *s, RateControlEntry *rce, double q, int frame_num){
    RateControlContext *rcc= &s->rc_context;
    int qmin, qmax;
    const int pict_type= rce->new_pict_type;
    const double buffer_size= s->avctx->rc_buffer_size;
    const double fps= 1/av_q2d(s->avctx->time_base);
    const double min_rate= s->avctx->rc_min_rate / fps;
    const double max_rate= s->avctx->rc_max_rate / fps;

    get_qminmax(&qmin, &qmax, s, pict_type);

    /* modulation */
    if(s->avctx->rc_qmod_freq && frame_num%s->avctx->rc_qmod_freq==0 && pict_type==FF_P_TYPE)
        q*= s->avctx->rc_qmod_amp;

//printf("q:%f\n", q);
    /* buffer overflow/underflow protection */
    if(buffer_size){
        double expected_size= rcc->buffer_index;
        double q_limit;

        if(min_rate){
            double d= 2*(buffer_size - expected_size)/buffer_size;
            if(d>1.0) d=1.0;
            else if(d<0.0001) d=0.0001;
            q*= pow(d, 1.0/s->avctx->rc_buffer_aggressivity);

            q_limit= bits2qp(rce, FFMAX((min_rate - buffer_size + rcc->buffer_index) * s->avctx->rc_min_vbv_overflow_use, 1));
            if(q > q_limit){
                if(s->avctx->debug&FF_DEBUG_RC){
                    av_log(s->avctx, AV_LOG_DEBUG, "limiting QP %f -> %f\n", q, q_limit);
                }
                q= q_limit;
            }
        }

        if(max_rate){
            double d= 2*expected_size/buffer_size;
            if(d>1.0) d=1.0;
            else if(d<0.0001) d=0.0001;
            q/= pow(d, 1.0/s->avctx->rc_buffer_aggressivity);

            q_limit= bits2qp(rce, FFMAX(rcc->buffer_index * s->avctx->rc_max_available_vbv_use, 1));
            if(q < q_limit){
                if(s->avctx->debug&FF_DEBUG_RC){
                    av_log(s->avctx, AV_LOG_DEBUG, "limiting QP %f -> %f\n", q, q_limit);
                }
                q= q_limit;
            }
        }
    }
//printf("q:%f max:%f min:%f size:%f index:%d bits:%f agr:%f\n", q,max_rate, min_rate, buffer_size, rcc->buffer_index, bits, s->avctx->rc_buffer_aggressivity);
    if(s->avctx->rc_qsquish==0.0 || qmin==qmax){
        if     (q<qmin) q=qmin;
        else if(q>qmax) q=qmax;
    }else{
        double min2= log(qmin);
        double max2= log(qmax);

        q= log(q);
        q= (q - min2)/(max2-min2) - 0.5;
        q*= -4.0;
        q= 1.0/(1.0 + exp(q));
        q= q*(max2-min2) + min2;

        q= exp(q);
    }

    return q;
}
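The else branch of the final clamp is the rc_qsquish path: rather than hard-clipping, it maps q through a logistic curve in the log domain. With m = log(qmin) and M = log(qmax) it computes t = (log(q) - m) / (M - m) - 0.5 and then q' = exp(m + (M - m) / (1 + exp(-4t))), so values near the middle of the allowed range pass through nearly unchanged while out-of-range values are compressed smoothly toward qmin/qmax instead of being cut off.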
Example #28
static int index_rebuild_ffmpeg(FFmpegIndexBuilderContext *context,
                                short *stop, short *do_update, float *progress)
{
	AVFrame *in_frame = 0;
	AVPacket next_packet;
	uint64_t stream_size;

	memset(&next_packet, 0, sizeof(AVPacket));

	in_frame = av_frame_alloc();

	stream_size = avio_size(context->iFormatCtx->pb);

	context->frame_rate = av_q2d(av_get_r_frame_rate_compat(context->iFormatCtx, context->iStream));
	context->pts_time_base = av_q2d(context->iStream->time_base);

	while (av_read_frame(context->iFormatCtx, &next_packet) >= 0) {
		int frame_finished = 0;
		float next_progress =  (float)((int)floor(((double) next_packet.pos) * 100 /
		                                          ((double) stream_size) + 0.5)) / 100;

		if (*progress != next_progress) {
			*progress = next_progress;
			*do_update = true;
		}

		if (*stop) {
			av_free_packet(&next_packet);
			break;
		}

		if (next_packet.stream_index == context->videoStream) {
			if (next_packet.flags & AV_PKT_FLAG_KEY) {
				context->last_seek_pos = context->seek_pos;
				context->last_seek_pos_dts = context->seek_pos_dts;
				context->seek_pos = next_packet.pos;
				context->seek_pos_dts = next_packet.dts;
				context->seek_pos_pts = next_packet.pts;
			}

			avcodec_decode_video2(
			        context->iCodecCtx, in_frame, &frame_finished,
			        &next_packet);
		}

		if (frame_finished) {
			index_rebuild_ffmpeg_proc_decoded_frame(
				context, &next_packet, in_frame);
		}
		av_free_packet(&next_packet);
	}

	/* process pictures still stuck in decoder engine after EOF
	 * according to ffmpeg docs using 0-size packets.
	 *
	 * At least, if we haven't already stopped... */

	/* this creates the 0-size packet and prevents a memory leak. */
	av_free_packet(&next_packet);

	if (!*stop) {
		int frame_finished;

		do {
			frame_finished = 0;

			avcodec_decode_video2(
				context->iCodecCtx, in_frame, &frame_finished,
				&next_packet);

			if (frame_finished) {
				index_rebuild_ffmpeg_proc_decoded_frame(
					context, &next_packet, in_frame);
			}
		} while (frame_finished);
	}

	av_frame_free(&in_frame);

	return 1;
}
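The post-loop drain relies on the old decode API's flush convention: decoders with CODEC_CAP_DELAY hold frames back and release them only when fed packets with data == NULL and size == 0. After av_free_packet(), next_packet is exactly such an empty packet, so the do/while keeps calling avcodec_decode_video2() until frame_finished comes back 0 and the decoder is drained.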
Example #29
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
{
    float q;
    int qmin, qmax;
    float br_compensation;
    double diff;
    double short_term_q;
    double fps;
    int picture_number= s->picture_number;
    int64_t wanted_bits;
    RateControlContext *rcc= &s->rc_context;
    AVCodecContext *a= s->avctx;
    RateControlEntry local_rce, *rce;
    double bits;
    double rate_factor;
    int var;
    const int pict_type= s->pict_type;
    Picture * const pic= &s->current_picture;
    emms_c();

#if CONFIG_LIBXVID
    if((s->flags&CODEC_FLAG_PASS2) && s->avctx->rc_strategy == FF_RC_STRATEGY_XVID)
        return ff_xvid_rate_estimate_qscale(s, dry_run);
#endif

    get_qminmax(&qmin, &qmax, s, pict_type);

    fps= 1/av_q2d(s->avctx->time_base);
//printf("input_pic_num:%d pic_num:%d frame_rate:%d\n", s->input_picture_number, s->picture_number, s->frame_rate);
        /* update predictors */
    if(picture_number>2 && !dry_run){
        const int last_var= s->last_pict_type == FF_I_TYPE ? rcc->last_mb_var_sum : rcc->last_mc_mb_var_sum;
        update_predictor(&rcc->pred[s->last_pict_type], rcc->last_qscale, sqrt(last_var), s->frame_bits);
    }

    if(s->flags&CODEC_FLAG_PASS2){
        assert(picture_number>=0);
        assert(picture_number<rcc->num_entries);
        rce= &rcc->entry[picture_number];
        wanted_bits= rce->expected_bits;
    }else{
        Picture *dts_pic;
        rce= &local_rce;

        //FIXME add a dts field to AVFrame and ensure it's set and use it here instead of reordering
        //but the reordering is simpler for now until h.264 b-pyramid must be handled
        if(s->pict_type == FF_B_TYPE || s->low_delay)
            dts_pic= s->current_picture_ptr;
        else
            dts_pic= s->last_picture_ptr;

//if(dts_pic)
//            av_log(NULL, AV_LOG_ERROR, "%Ld %Ld %Ld %d\n", s->current_picture_ptr->pts, s->user_specified_pts, dts_pic->pts, picture_number);

        if(!dts_pic || dts_pic->pts == AV_NOPTS_VALUE)
            wanted_bits= (uint64_t)(s->bit_rate*(double)picture_number/fps);
        else
            wanted_bits= (uint64_t)(s->bit_rate*(double)dts_pic->pts/fps);
    }

    diff= s->total_bits - wanted_bits;
    br_compensation= (a->bit_rate_tolerance - diff)/a->bit_rate_tolerance;
    if(br_compensation<=0.0) br_compensation=0.001;

    var= pict_type == FF_I_TYPE ? pic->mb_var_sum : pic->mc_mb_var_sum;

    short_term_q = 0; /* avoid warning */
    if(s->flags&CODEC_FLAG_PASS2){
        if(pict_type!=FF_I_TYPE)
            assert(pict_type == rce->new_pict_type);

        q= rce->new_qscale / br_compensation;
//printf("%f %f %f last:%d var:%d type:%d//\n", q, rce->new_qscale, br_compensation, s->frame_bits, var, pict_type);
    }else{
        rce->pict_type=
        rce->new_pict_type= pict_type;
        rce->mc_mb_var_sum= pic->mc_mb_var_sum;
        rce->mb_var_sum   = pic->   mb_var_sum;
        rce->qscale   = FF_QP2LAMBDA * 2;
        rce->f_code   = s->f_code;
        rce->b_code   = s->b_code;
        rce->misc_bits= 1;

        bits= predict_size(&rcc->pred[pict_type], rce->qscale, sqrt(var));
        if(pict_type== FF_I_TYPE){
            rce->i_count   = s->mb_num;
            rce->i_tex_bits= bits;
            rce->p_tex_bits= 0;
            rce->mv_bits= 0;
        }else{
            rce->i_count   = 0; //FIXME we do know this approx
            rce->i_tex_bits= 0;
            rce->p_tex_bits= bits*0.9;

            rce->mv_bits= bits*0.1;
        }
        rcc->i_cplx_sum [pict_type] += rce->i_tex_bits*rce->qscale;
        rcc->p_cplx_sum [pict_type] += rce->p_tex_bits*rce->qscale;
        rcc->mv_bits_sum[pict_type] += rce->mv_bits;
        rcc->frame_count[pict_type] ++;

        bits= rce->i_tex_bits + rce->p_tex_bits;
        rate_factor= rcc->pass1_wanted_bits/rcc->pass1_rc_eq_output_sum * br_compensation;

        q= get_qscale(s, rce, rate_factor, picture_number);
        if (q < 0)
            return -1;

        assert(q>0.0);
//printf("%f ", q);
        q= get_diff_limited_q(s, rce, q);
//printf("%f ", q);
        assert(q>0.0);

        if(pict_type==FF_P_TYPE || s->intra_only){ //FIXME type dependent blur like in 2-pass
            rcc->short_term_qsum*=a->qblur;
            rcc->short_term_qcount*=a->qblur;

            rcc->short_term_qsum+= q;
            rcc->short_term_qcount++;
//printf("%f ", q);
            q= short_term_q= rcc->short_term_qsum/rcc->short_term_qcount;
//printf("%f ", q);
        }
        assert(q>0.0);

        q= modify_qscale(s, rce, q, picture_number);

        rcc->pass1_wanted_bits+= s->bit_rate/fps;

        assert(q>0.0);
    }

    if(s->avctx->debug&FF_DEBUG_RC){
        av_log(s->avctx, AV_LOG_DEBUG, "%c qp:%d<%2.1f<%d %d want:%d total:%d comp:%f st_q:%2.2f size:%d var:%d/%d br:%d fps:%d\n",
        av_get_pict_type_char(pict_type), qmin, q, qmax, picture_number, (int)wanted_bits/1000, (int)s->total_bits/1000,
        br_compensation, short_term_q, s->frame_bits, pic->mb_var_sum, pic->mc_mb_var_sum, s->bit_rate/1000, (int)fps
        );
    }

    if     (q<qmin) q=qmin;
    else if(q>qmax) q=qmax;

    if(s->adaptive_quant)
        adaptive_quantization(s, q);
    else
        q= (int)(q + 0.5);

    if(!dry_run){
        rcc->last_qscale= q;
        rcc->last_mc_mb_var_sum= pic->mc_mb_var_sum;
        rcc->last_mb_var_sum= pic->mb_var_sum;
    }
#if 0
{
    static int mvsum=0, texsum=0;
    mvsum += s->mv_bits;
    texsum += s->i_tex_bits + s->p_tex_bits;
    printf("%d %d//\n\n", mvsum, texsum);
}
#endif
    return q;
}
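In the one-pass branch the bit target is simply elapsed time times the requested rate: wanted_bits = bit_rate * pts / fps (falling back to picture_number / fps when no usable dts picture exists). At 1 Mb/s and 25 fps, frame 100 therefore targets 1e6 * 100 / 25 = 4e6 bits; br_compensation then raises or lowers q according to how far total_bits has drifted from that target, clamped at 0.001 so the correction stays finite and positive.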
Example #30
int audio_decode_frame(VideoState *is, double *pts_ptr) {

  int len1, data_size = 0, n;
  AVPacket *pkt = &is->audio_pkt;
  double pts;

  for(;;) {
    while(is->audio_pkt_size > 0) {
      int got_frame = 0;
      len1 = avcodec_decode_audio4(is->audio_st->codec, &is->audio_frame, &got_frame, pkt);
      if(len1 < 0) {
        /* if error, skip frame */
        is->audio_pkt_size = 0;
        break;
      }
      if (got_frame) {
        /* (the tutorial's original direct S16 memcpy path was here; it is
         * replaced by the swresample conversion below) */
        data_size = av_samples_get_buffer_size(NULL,
                                               is->audio_st->codec->channels,
                                               is->audio_frame.nb_samples,
                                               AV_SAMPLE_FMT_S16,
                                               0);
        if (!swr_ctx) {
          swr_ctx = swr_alloc();
          if (!swr_ctx) {
            printf("Could not allocate swr context\n");
            exit(1);
          }
          /* same layout and rate on both sides: only the sample format changes */
          av_opt_set_int(swr_ctx, "in_channel_layout", is->audio_st->codec->channel_layout, 0);
          av_opt_set_int(swr_ctx, "in_sample_rate", is->audio_st->codec->sample_rate, 0);
          av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", is->audio_st->codec->sample_fmt, 0);
          av_opt_set_int(swr_ctx, "out_channel_layout", is->audio_st->codec->channel_layout, 0);
          av_opt_set_int(swr_ctx, "out_sample_rate", is->audio_st->codec->sample_rate, 0);
          av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
          if (swr_init(swr_ctx) < 0) {
            printf("Failed to initialize the resampling context\n");
            exit(1);
          }
          /* NOTE: sized for the first decoded frame only; a later, larger
           * frame would overflow this buffer */
          decode_buffer = (uint8_t*) malloc(data_size);
        }
        int ret = swr_convert(swr_ctx, &decode_buffer, is->audio_frame.nb_samples,
                              (const uint8_t **) &is->audio_frame.data[0],
                              is->audio_frame.nb_samples);
        if (ret < 0) {
          printf("Error while converting\n");
          break;
        }
        memcpy(is->audio_buf, decode_buffer, data_size);
      }
      is->audio_pkt_data += len1;
      is->audio_pkt_size -= len1;
      if(data_size <= 0) {
        /* No data yet, get more frames */
        continue;
      }
      pts = is->audio_clock;
      *pts_ptr = pts;
      n = 2 * is->audio_st->codec->channels;
      is->audio_clock += (double)data_size /
                         (double)(n * is->audio_st->codec->sample_rate);

      /* We have data, return it and come back for more later */
      return data_size;
    }
    if(pkt->data)
      av_free_packet(pkt);

    if(is->quit) {
      return -1;
    }
    /* next packet */
    if(packet_queue_get(&is->audioq, pkt, 1) < 0) {
      return -1;
    }
    is->audio_pkt_data = pkt->data;
    is->audio_pkt_size = pkt->size;
    /* if we have a pts, update the audio clock with it */
    if(pkt->pts != AV_NOPTS_VALUE) {
      is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
    }
  }
}
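One caveat in this variant: decode_buffer is sized from the first frame, and swr_convert() is allowed to emit nb_samples output samples, which only works because input and output sample rates are identical here. If the rates differed, the output capacity would need a proper upper bound, along these lines (hypothetical fix, not part of the original tutorial; in_rate/out_rate stand for the configured converter rates):

int max_out_samples = (int)av_rescale_rnd(
        swr_get_delay(swr_ctx, in_rate) + is->audio_frame.nb_samples,
        out_rate, in_rate, AV_ROUND_UP);
/* size decode_buffer for max_out_samples and pass that count to swr_convert() */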