Example #1
/**
 * \brief initialize mjpeg encoder
 *
 * This routine sets up the parameters and initializes the mjpeg encoder.
 * It performs all the initialization the lower level routines need.
 * The formats accepted by this encoder are YUV422P and YUV420.
 *
 * \param w width in pixels of the image to encode, must be a multiple of 16
 * \param h height in pixels of the image to encode, must be a multiple of 8
 * \param y_rsize size of each row of the Y plane
 * \param u_rsize size of each row of the U plane
 * \param v_rsize size of each row of the V plane
 * \param cu "cheap upsample". Set to 0 for YUV422 format, 1 for YUV420
 *           format. When set to 1, the encoder assumes that there is only
 *           half the number of rows of chroma information, and that every
 *           chroma row is duplicated.
 * \param q quality parameter for the mjpeg encoder, between 1 and 20,
 *          where 1 is the best and 20 the worst quality
 * \param b monochrome flag. When set to 1, the mjpeg output is monochrome.
 *          In that case, the colour information is omitted and the colour
 *          planes are not touched at all.
 *
 * \returns an appropriately set up jpeg_enc_t structure
 *
 * The actual plane buffer addresses are passed by jpeg_enc_frame().
 *
 * The encoder doesn't know anything about interlacing; for interlaced
 * content, pass half the height and double the rowstride. Which field gets
 * encoded is decided by the buffers passed to jpeg_enc_frame()
 */
static jpeg_enc_t *jpeg_enc_init(int w, int h, int y_rsize,
			  int u_rsize, int v_rsize,
		int cu, int q, int b) {
	jpeg_enc_t *j;
	int i = 0;
	VERBOSE("JPEG encoder init: %dx%d %d %d %d cu=%d q=%d bw=%d\n",
			w, h, y_rsize, u_rsize, v_rsize, cu, q, b);

	j = av_mallocz(sizeof(jpeg_enc_t));
	if (j == NULL) return NULL;

	j->s = av_mallocz(sizeof(MpegEncContext));
	if (j->s == NULL) {
		av_free(j);
		return NULL;
	}

	/* info on how to access the pixels */
	j->y_rs = y_rsize;
	j->u_rs = u_rsize;
	j->v_rs = v_rsize;

	j->s->width = w;		// image width and height
	j->s->height = h;
	j->s->qscale = q;		// Encoding quality

	j->s->out_format = FMT_MJPEG;
	j->s->intra_only = 1;		// Generate only intra pictures for jpeg
	j->s->encoding = 1;		// Set mode to encode
	j->s->pict_type = AV_PICTURE_TYPE_I;
	j->s->y_dc_scale = 8;
	j->s->c_dc_scale = 8;

	/*
	 * This sets up the MCU (Minimum Coded Unit) sampling factors, i.e.
	 * the number of appearances of each component, for the SOF0 header
	 * in the generated MJPEG. The values are not used for anything else.
	 * The current setup is simply YUV422, with two horizontal Y components
	 * for every UV component.
	 */
	//FIXME j->s->mjpeg_write_tables = 1;	// setup to write tables
	j->s->mjpeg_vsample[0] = 1;	// 1 appearance of Y vertically
	j->s->mjpeg_vsample[1] = 1;	// 1 appearance of U vertically
	j->s->mjpeg_vsample[2] = 1;	// 1 appearance of V vertically
	j->s->mjpeg_hsample[0] = 2;	// 2 appearances of Y horizontally
	j->s->mjpeg_hsample[1] = 1;	// 1 appearance of U horizontally
	j->s->mjpeg_hsample[2] = 1;	// 1 appearance of V horizontally

	j->cheap_upsample = cu;
	j->bw = b;

	init_avcodec();

	// Build mjpeg huffman code tables, setting up j->s->mjpeg_ctx
	if (ff_mjpeg_encode_init(j->s) < 0) {
		av_free(j->s);
		av_free(j);
		return NULL;
	}

	/* alloc bogus avctx to keep MPV_common_init from segfaulting */
	j->s->avctx = avcodec_alloc_context();
	if (j->s->avctx == NULL) {
		av_free(j->s);
		av_free(j);
		return NULL;
	}

	// Set the minimum set of default values that are needed
	// Indicate that we should generate normal MJPEG
	j->s->avctx->codec_id = AV_CODEC_ID_MJPEG;
	// Which DCT method to use. AUTO will select the fastest one
	j->s->avctx->dct_algo = FF_DCT_AUTO;
	j->s->intra_quant_bias= 1<<(QUANT_BIAS_SHIFT-1); //(a + x/2)/x
	// indicate we 'decode' to jpeg 4:2:2
	j->s->avctx->pix_fmt = PIX_FMT_YUVJ422P;

	j->s->avctx->thread_count = 1;

	/* make MPV_common_init allocate important buffers, like s->block
	 * Also initializes dsputil */
	if (ff_MPV_common_init(j->s) < 0) {
		av_free(j->s->avctx);
		av_free(j->s);
		av_free(j);
		return NULL;
	}

	/* correct the value of s->mb_height; MPV_common_init
	 * put other values there */
	j->s->mb_height = j->s->height/8;
	j->s->mb_intra = 1;

	// Init q matrix
	j->s->intra_matrix[0] = ff_mpeg1_default_intra_matrix[0];
	for (i = 1; i < 64; i++)
		j->s->intra_matrix[i] = av_clip_uint8(
			(ff_mpeg1_default_intra_matrix[i]*j->s->qscale) >> 3);

	// precompute matrix
	convert_matrix(j->s, j->s->q_intra_matrix, j->s->q_intra_matrix16,
			j->s->intra_matrix, j->s->intra_quant_bias, 8, 8);

	/* Pick up the optimal get_pixels() routine, the selection
	 * of which was done in MPV_common_init() */
	get_pixels = j->s->dsp.get_pixels;

	return j;
}
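
A hedged usage sketch of the lifecycle this routine sets up. jpeg_enc_frame() is only referenced above, not shown, so its parameter list here is an assumption based on the comment; all buffer names are illustrative.

/* Hypothetical caller (the jpeg_enc_frame() signature is assumed, not
 * taken from the real source): encode one 640x480 YUV422P frame. */
static int encode_one_frame_sketch(unsigned char *y, unsigned char *u,
                                   unsigned char *v, int y_stride,
                                   int c_stride, unsigned char *out)
{
	int bytes;
	jpeg_enc_t *enc = jpeg_enc_init(640, 480, y_stride, c_stride, c_stride,
	                                0 /* YUV422 */, 2 /* quality */,
	                                0 /* colour */);
	if (enc == NULL)
		return -1;
	bytes = jpeg_enc_frame(enc, y, u, v, out); /* assumed signature */
	/* for interlaced input: init with h/2 and doubled rowstrides, then
	 * pass the start address of the field to encode */
	jpeg_enc_uninit(enc);
	return bytes;
}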
Example #2
static ResampleContext *resample_init(ResampleContext *c, int out_rate, int in_rate, int filter_size, int phase_shift, int linear,
                                    double cutoff0, enum AVSampleFormat format, enum SwrFilterType filter_type, double kaiser_beta,
                                    double precision, int cheby, int exact_rational)
{
    double cutoff = cutoff0? cutoff0 : 0.97;
    double factor= FFMIN(out_rate * cutoff / in_rate, 1.0);
    int phase_count= 1<<phase_shift;
    int phase_count_compensation = phase_count;

    if (exact_rational) {
        int phase_count_exact, phase_count_exact_den;

        av_reduce(&phase_count_exact, &phase_count_exact_den, out_rate, in_rate, INT_MAX);
        if (phase_count_exact <= phase_count) {
            phase_count_compensation = phase_count_exact * (phase_count / phase_count_exact);
            phase_count = phase_count_exact;
        }
    }

    if (!c || c->phase_count != phase_count || c->linear!=linear || c->factor != factor
           || c->filter_length != FFMAX((int)ceil(filter_size/factor), 1) || c->format != format
           || c->filter_type != filter_type || c->kaiser_beta != kaiser_beta) {
        c = av_mallocz(sizeof(*c));
        if (!c)
            return NULL;

        c->format= format;

        c->felem_size= av_get_bytes_per_sample(c->format);

        switch(c->format){
        case AV_SAMPLE_FMT_S16P:
            c->filter_shift = 15;
            break;
        case AV_SAMPLE_FMT_S32P:
            c->filter_shift = 30;
            break;
        case AV_SAMPLE_FMT_FLTP:
        case AV_SAMPLE_FMT_DBLP:
            c->filter_shift = 0;
            break;
        default:
            av_log(NULL, AV_LOG_ERROR, "Unsupported sample format\n");
            av_assert0(0);
        }

        if (filter_size/factor > INT32_MAX/256) {
            av_log(NULL, AV_LOG_ERROR, "Filter length too large\n");
            goto error;
        }

        c->phase_count   = phase_count;
        c->linear        = linear;
        c->factor        = factor;
        c->filter_length = FFMAX((int)ceil(filter_size/factor), 1);
        c->filter_alloc  = FFALIGN(c->filter_length, 8);
        c->filter_bank   = av_calloc(c->filter_alloc, (phase_count+1)*c->felem_size);
        c->filter_type   = filter_type;
        c->kaiser_beta   = kaiser_beta;
        c->phase_count_compensation = phase_count_compensation;
        if (!c->filter_bank)
            goto error;
        if (build_filter(c, (void*)c->filter_bank, factor, c->filter_length, c->filter_alloc, phase_count, 1<<c->filter_shift, filter_type, kaiser_beta))
            goto error;
        memcpy(c->filter_bank + (c->filter_alloc*phase_count+1)*c->felem_size, c->filter_bank, (c->filter_alloc-1)*c->felem_size);
        memcpy(c->filter_bank + (c->filter_alloc*phase_count  )*c->felem_size, c->filter_bank + (c->filter_alloc - 1)*c->felem_size, c->felem_size);
    }

    c->compensation_distance= 0;
    if(!av_reduce(&c->src_incr, &c->dst_incr, out_rate, in_rate * (int64_t)phase_count, INT32_MAX/2))
        goto error;
    while (c->dst_incr < (1<<20) && c->src_incr < (1<<20)) {
        c->dst_incr *= 2;
        c->src_incr *= 2;
    }
    c->ideal_dst_incr = c->dst_incr;
    c->dst_incr_div   = c->dst_incr / c->src_incr;
    c->dst_incr_mod   = c->dst_incr % c->src_incr;

    c->index= -phase_count*((c->filter_length-1)/2);
    c->frac= 0;

    swri_resample_dsp_init(c);

    return c;
error:
    av_freep(&c->filter_bank);
    av_free(c);
    return NULL;
}
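
The fixed-point increment setup at the end is easier to follow with concrete numbers. A hand-worked example (illustrative, not output from a running build):

/* Resampling 44100 Hz -> 48000 Hz with phase_count = 1024:
 *
 *   av_reduce(&src_incr, &dst_incr, 48000, 44100 * 1024, INT32_MAX/2)
 *     -> src_incr = 5, dst_incr = 4704      (both divided by gcd 9600)
 *   eight doublings bring dst_incr past 1<<20:
 *     -> src_incr = 1280, dst_incr = 1204224
 *   dst_incr_div = 1204224 / 1280 = 940
 *   dst_incr_mod = 1204224 % 1280 = 1024
 *
 * Each output sample therefore advances the input position by
 * 940 + 1024/1280 = 940.8 units of 1/1024 input sample, i.e.
 * 940.8 / 1024 = 0.91875 input samples = 44100 / 48000 exactly. */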
Example #3
/**
 * This is the real routine that does the uninit of the ZRMJPEG filter
 *
 * \param j pointer to jpeg_enc structure
 */
static void jpeg_enc_uninit(jpeg_enc_t *j) {
	ff_mjpeg_encode_close(j->s);
	av_free(j->s);
	av_free(j);
}
Example #4
/**
 * builds a polyphase filterbank.
 * @param factor resampling factor
 * @param scale wanted sum of coefficients for each filter
 * @param filter_type  filter type
 * @param kaiser_beta  kaiser window beta
 * @return 0 on success, negative on error
 */
static int build_filter(ResampleContext *c, void *filter, double factor, int tap_count, int alloc, int phase_count, int scale,
                        int filter_type, double kaiser_beta){
    int ph, i;
    int ph_nb = phase_count % 2 ? phase_count : phase_count / 2 + 1;
    double x, y, w, t, s;
    double *tab = av_malloc_array(tap_count+1,  sizeof(*tab));
    double *sin_lut = av_malloc_array(ph_nb, sizeof(*sin_lut));
    const int center= (tap_count-1)/2;
    int ret = AVERROR(ENOMEM);

    if (!tab || !sin_lut)
        goto fail;

    /* if upsampling, only need to interpolate, no filter */
    if (factor > 1.0)
        factor = 1.0;

    if (factor == 1.0) {
        for (ph = 0; ph < ph_nb; ph++)
            sin_lut[ph] = sin(M_PI * ph / phase_count);
    }
    for(ph = 0; ph < ph_nb; ph++) {
        double norm = 0;
        s = sin_lut[ph];
        for(i=0;i<=tap_count;i++) {
            x = M_PI * ((double)(i - center) - (double)ph / phase_count) * factor;
            if (x == 0) y = 1.0;
            else if (factor == 1.0)
                y = s / x;
            else
                y = sin(x) / x;
            switch(filter_type){
            case SWR_FILTER_TYPE_CUBIC:{
                const float d= -0.5; //first order derivative = -0.5
                x = fabs(((double)(i - center) - (double)ph / phase_count) * factor);
                if(x<1.0) y= 1 - 3*x*x + 2*x*x*x + d*(            -x*x + x*x*x);
                else      y=                       d*(-4 + 8*x - 5*x*x + x*x*x);
                break;}
            case SWR_FILTER_TYPE_BLACKMAN_NUTTALL:
                w = 2.0*x / (factor*tap_count);
                t = -cos(w);
                y *= 0.3635819 - 0.4891775 * t + 0.1365995 * (2*t*t-1) - 0.0106411 * (4*t*t*t - 3*t);
                break;
            case SWR_FILTER_TYPE_KAISER:
                w = 2.0*x / (factor*tap_count*M_PI);
                y *= bessel(kaiser_beta*sqrt(FFMAX(1-w*w, 0)));
                break;
            default:
                av_assert0(0);
            }

            tab[i] = y;
            s = -s;
            if (i < tap_count)
                norm += y;
        }

        /* normalize so that a uniform color remains the same */
        switch(c->format){
        case AV_SAMPLE_FMT_S16P:
            for(i=0;i<tap_count;i++)
                ((int16_t*)filter)[ph * alloc + i] = av_clip_int16(lrintf(tab[i] * scale / norm));
            if (phase_count % 2) break;
            if (tap_count % 2 == 0 || tap_count == 1) {
                for (i = 0; i < tap_count; i++)
                    ((int16_t*)filter)[(phase_count-ph) * alloc + tap_count-1-i] = ((int16_t*)filter)[ph * alloc + i];
            }
            else {
                for (i = 1; i <= tap_count; i++)
                    ((int16_t*)filter)[(phase_count-ph) * alloc + tap_count-i] =
                        av_clip_int16(lrintf(tab[i] * scale / (norm - tab[0] + tab[tap_count])));
            }
            break;
        case AV_SAMPLE_FMT_S32P:
            for(i=0;i<tap_count;i++)
                ((int32_t*)filter)[ph * alloc + i] = av_clipl_int32(llrint(tab[i] * scale / norm));
            if (phase_count % 2) break;
            if (tap_count % 2 == 0 || tap_count == 1) {
                for (i = 0; i < tap_count; i++)
                    ((int32_t*)filter)[(phase_count-ph) * alloc + tap_count-1-i] = ((int32_t*)filter)[ph * alloc + i];
            }
            else {
                for (i = 1; i <= tap_count; i++)
                    ((int32_t*)filter)[(phase_count-ph) * alloc + tap_count-i] =
                        av_clipl_int32(llrint(tab[i] * scale / (norm - tab[0] + tab[tap_count])));
            }
            break;
        case AV_SAMPLE_FMT_FLTP:
            for(i=0;i<tap_count;i++)
                ((float*)filter)[ph * alloc + i] = tab[i] * scale / norm;
            if (phase_count % 2) break;
            if (tap_count % 2 == 0 || tap_count == 1) {
                for (i = 0; i < tap_count; i++)
                    ((float*)filter)[(phase_count-ph) * alloc + tap_count-1-i] = ((float*)filter)[ph * alloc + i];
            }
            else {
                for (i = 1; i <= tap_count; i++)
                    ((float*)filter)[(phase_count-ph) * alloc + tap_count-i] = tab[i] * scale / (norm - tab[0] + tab[tap_count]);
            }
            break;
        case AV_SAMPLE_FMT_DBLP:
            for(i=0;i<tap_count;i++)
                ((double*)filter)[ph * alloc + i] = tab[i] * scale / norm;
            if (phase_count % 2) break;
            if (tap_count % 2 == 0 || tap_count == 1) {
                for (i = 0; i < tap_count; i++)
                    ((double*)filter)[(phase_count-ph) * alloc + tap_count-1-i] = ((double*)filter)[ph * alloc + i];
            }
            else {
                for (i = 1; i <= tap_count; i++)
                    ((double*)filter)[(phase_count-ph) * alloc + tap_count-i] = tab[i] * scale / (norm - tab[0] + tab[tap_count]);
            }
            break;
        }
    }
#if 0
    {
#define LEN 1024
        int j,k;
        double sine[LEN + tap_count];
        double filtered[LEN];
        double maxff=-2, minff=2, maxsf=-2, minsf=2;
        for(i=0; i<LEN; i++){
            double ss=0, sf=0, ff=0;
            for(j=0; j<LEN+tap_count; j++)
                sine[j]= cos(i*j*M_PI/LEN);
            for(j=0; j<LEN; j++){
                double sum=0;
                ph=0;
                for(k=0; k<tap_count; k++)
                    sum += filter[ph * tap_count + k] * sine[k+j];
                filtered[j]= sum / (1<<FILTER_SHIFT);
                ss+= sine[j + center] * sine[j + center];
                ff+= filtered[j] * filtered[j];
                sf+= sine[j + center] * filtered[j];
            }
            ss= sqrt(2*ss/LEN);
            ff= sqrt(2*ff/LEN);
            sf= 2*sf/LEN;
            maxff= FFMAX(maxff, ff);
            minff= FFMIN(minff, ff);
            maxsf= FFMAX(maxsf, sf);
            minsf= FFMIN(minsf, sf);
            if(i%11==0){
                av_log(NULL, AV_LOG_ERROR, "i:%4d ss:%f ff:%13.6e-%13.6e sf:%13.6e-%13.6e\n", i, ss, maxff, minff, maxsf, minsf);
                minff=minsf= 2;
                maxff=maxsf= -2;
            }
        }
    }
#endif

    ret = 0;
fail:
    av_free(tab);
    av_free(sin_lut);
    return ret;
}
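
For orientation, a minimal self-contained sketch of what a single phase of such a bank holds: sinc taps shifted by the phase's fractional delay, shaped by a window, and normalized so DC passes through unchanged. A Hann window stands in for the Kaiser/Blackman-Nuttall options above to keep the sketch short; this is illustrative, not the production filter.

#include <math.h>

/* Fill `taps` for phase `ph` of `phase_count`; assumes tap_count odd, > 1. */
static void sketch_one_phase(double *taps, int tap_count,
                             int ph, int phase_count, double factor)
{
    const int center = (tap_count - 1) / 2;
    double norm = 0.0;
    int i;

    for (i = 0; i < tap_count; i++) {
        double x = M_PI * ((i - center) - (double)ph / phase_count) * factor;
        double y = (x == 0.0) ? 1.0 : sin(x) / x;                    /* sinc */
        y *= 0.5 + 0.5 * cos(M_PI * (double)(i - center) / center);  /* Hann window */
        taps[i] = y;
        norm += y;
    }
    /* same idea as the normalization above: a constant input must come
     * out unchanged, so each phase's taps are scaled to sum to 1 */
    for (i = 0; i < tap_count; i++)
        taps[i] /= norm;
}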
Example #5
static int film_read_header(AVFormatContext *s,
                            AVFormatParameters *ap)
{
    FilmDemuxContext *film = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;
    unsigned char scratch[256];
    int i;
    unsigned int data_offset;
    unsigned int audio_frame_counter;

    film->sample_table = NULL;
    film->stereo_buffer = NULL;
    film->stereo_buffer_size = 0;

    /* load the main FILM header */
    if (avio_read(pb, scratch, 16) != 16)
        return AVERROR(EIO);
    data_offset = AV_RB32(&scratch[4]);
    film->version = AV_RB32(&scratch[8]);

    /* load the FDSC chunk */
    if (film->version == 0) {
        /* special case for Lemmings .film files; 20-byte header */
        if (avio_read(pb, scratch, 20) != 20)
            return AVERROR(EIO);
        /* make some assumptions about the audio parameters */
        film->audio_type = CODEC_ID_PCM_S8;
        film->audio_samplerate = 22050;
        film->audio_channels = 1;
        film->audio_bits = 8;
    } else {
        /* normal Saturn .cpk files; 32-byte header */
        if (avio_read(pb, scratch, 32) != 32)
            return AVERROR(EIO);
        film->audio_samplerate = AV_RB16(&scratch[24]);
        film->audio_channels = scratch[21];
        film->audio_bits = scratch[22];
        if (film->audio_bits == 8)
            film->audio_type = CODEC_ID_PCM_S8;
        else if (film->audio_bits == 16)
            film->audio_type = CODEC_ID_PCM_S16BE;
        else
            film->audio_type = CODEC_ID_NONE;
    }

    if (AV_RB32(&scratch[0]) != FDSC_TAG)
        return AVERROR_INVALIDDATA;

    if (AV_RB32(&scratch[8]) == CVID_TAG) {
        film->video_type = CODEC_ID_CINEPAK;
    } else
        film->video_type = CODEC_ID_NONE;

    /* initialize the decoder streams */
    if (film->video_type) {
        st = av_new_stream(s, 0);
        if (!st)
            return AVERROR(ENOMEM);
        film->video_stream_index = st->index;
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id = film->video_type;
        st->codec->codec_tag = 0;  /* no fourcc */
        st->codec->width = AV_RB32(&scratch[16]);
        st->codec->height = AV_RB32(&scratch[12]);
    }

    if (film->audio_type) {
        st = av_new_stream(s, 0);
        if (!st)
            return AVERROR(ENOMEM);
        film->audio_stream_index = st->index;
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = film->audio_type;
        st->codec->codec_tag = 1;
        st->codec->channels = film->audio_channels;
        st->codec->bits_per_coded_sample = film->audio_bits;
        st->codec->sample_rate = film->audio_samplerate;
        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
            st->codec->bits_per_coded_sample;
        st->codec->block_align = st->codec->channels *
            st->codec->bits_per_coded_sample / 8;
    }

    /* load the sample table */
    if (avio_read(pb, scratch, 16) != 16)
        return AVERROR(EIO);
    if (AV_RB32(&scratch[0]) != STAB_TAG)
        return AVERROR_INVALIDDATA;
    film->base_clock = AV_RB32(&scratch[8]);
    film->sample_count = AV_RB32(&scratch[12]);
    if(film->sample_count >= UINT_MAX / sizeof(film_sample))
        return -1;
    film->sample_table = av_malloc(film->sample_count * sizeof(film_sample));
    if (!film->sample_table)
        return AVERROR(ENOMEM);

    for(i=0; i<s->nb_streams; i++)
        av_set_pts_info(s->streams[i], 33, 1, film->base_clock);

    audio_frame_counter = 0;
    for (i = 0; i < film->sample_count; i++) {
        /* load the next sample record and transfer it to an internal struct */
        if (avio_read(pb, scratch, 16) != 16) {
            av_free(film->sample_table);
            return AVERROR(EIO);
        }
        film->sample_table[i].sample_offset =
            data_offset + AV_RB32(&scratch[0]);
        film->sample_table[i].sample_size = AV_RB32(&scratch[4]);
        if (AV_RB32(&scratch[8]) == 0xFFFFFFFF) {
            film->sample_table[i].stream = film->audio_stream_index;
            film->sample_table[i].pts = audio_frame_counter;
            film->sample_table[i].pts *= film->base_clock;
            film->sample_table[i].pts /= film->audio_samplerate;

            audio_frame_counter += (film->sample_table[i].sample_size /
                (film->audio_channels * film->audio_bits / 8));
        } else {
            film->sample_table[i].stream = film->video_stream_index;
            film->sample_table[i].pts = AV_RB32(&scratch[8]) & 0x7FFFFFFF;
            film->sample_table[i].keyframe = (scratch[8] & 0x80) ? 0 : 1;
        }
    }

    film->current_sample = 0;

    return 0;
}
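
For reference, the byte offsets the reads above imply (all multi-byte fields are big-endian); a hand-rolled equivalent of the AV_RB32 macro follows to make that explicit:

/* main 16-byte header:  [4..7] data_offset, [8..11] version
 * FDSC chunk:           [0..3] 'FDSC' tag, [8..11] video fourcc
 *                       ('cvid' = Cinepak), [12..15] height,
 *                       [16..19] width, [21] audio channels,
 *                       [22] audio bits, [24..25] sample rate
 * STAB chunk:           [8..11] base clock, [12..15] sample count,
 *                       then 16 bytes per sample record              */
static unsigned int rb32(const unsigned char *p)
{
    return ((unsigned int)p[0] << 24) | ((unsigned int)p[1] << 16) |
           ((unsigned int)p[2] <<  8) |  (unsigned int)p[3];
}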
Example #6
static void tm2_free_codes(TM2Codes *code)
{
    av_free(code->recode);
    if(code->vlc.table)
        free_vlc(&code->vlc);
}
Example #7
int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
    const uint64_t fuzz_tag = FUZZ_TAG;
    FuzzDataBuffer buffer;
    const uint8_t *last = data;
    const uint8_t *end = data + size;
    uint32_t it = 0;
    int (*decode_handler)(AVCodecContext *avctx, AVFrame *picture,
                          int *got_picture_ptr,
                          const AVPacket *avpkt) = NULL;

    if (!c) {
#ifdef FFMPEG_DECODER
#define DECODER_SYMBOL0(CODEC) ff_##CODEC##_decoder
#define DECODER_SYMBOL(CODEC) DECODER_SYMBOL0(CODEC)
        extern AVCodec DECODER_SYMBOL(FFMPEG_DECODER);
        avcodec_register(&DECODER_SYMBOL(FFMPEG_DECODER));

        c = &DECODER_SYMBOL(FFMPEG_DECODER);
#else
        avcodec_register_all();
        c = AVCodecInitialize(FFMPEG_CODEC);  // Done once.
#endif
        av_log_set_level(AV_LOG_PANIC);
    }

    // Unsupported
    if (c->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)
        return 0;

    switch (c->type) {
    case AVMEDIA_TYPE_AUDIO   : decode_handler = avcodec_decode_audio4; break;
    case AVMEDIA_TYPE_VIDEO   : decode_handler = avcodec_decode_video2; break;
    case AVMEDIA_TYPE_SUBTITLE: decode_handler = subtitle_handler     ; break;
    }

    AVCodecContext* ctx = avcodec_alloc_context3(NULL);
    if (!ctx)
        error("Failed memory allocation");

    ctx->max_pixels = 4096 * 4096; //To reduce false positive OOM and hangs

    if (size > 1024) {
        GetByteContext gbc;
        bytestream2_init(&gbc, data + size - 1024, 1024);
        ctx->width                              = bytestream2_get_le32(&gbc);
        ctx->height                             = bytestream2_get_le32(&gbc);
        ctx->bit_rate                           = bytestream2_get_le64(&gbc);
        ctx->bits_per_coded_sample              = bytestream2_get_le32(&gbc);
        if (av_image_check_size(ctx->width, ctx->height, 0, ctx))
            ctx->width = ctx->height = 0;
        size -= 1024;
    }

    int res = avcodec_open2(ctx, c, NULL);
    if (res < 0) {
        avcodec_free_context(&ctx); // also frees the context's internals
        return 0; // Failure of avcodec_open2() does not imply that an issue was found
    }

    FDBCreate(&buffer);
    int got_frame;
    AVFrame *frame = av_frame_alloc();
    if (!frame)
        error("Failed memory allocation");

    // Read very simple container
    AVPacket avpkt;
    while (data < end && it < maxiteration) {
        // Search for the TAG
        while (data + sizeof(fuzz_tag) < end) {
            if (data[0] == (fuzz_tag & 0xFF) && AV_RN64(data) == fuzz_tag)
                break;
            data++;
        }
        if (data + sizeof(fuzz_tag) > end)
            data = end;

        FDBPrepare(&buffer, &avpkt, last, data - last);
        data += sizeof(fuzz_tag);
        last = data;

        // Iterate through all data
        while (avpkt.size > 0 && it++ < maxiteration) {
            av_frame_unref(frame);
            int ret = decode_handler(ctx, frame, &got_frame, &avpkt);

            if (it > 20)
                ctx->error_concealment = 0;

            if (ret <= 0 || ret > avpkt.size)
               break;
            if (ctx->codec_type != AVMEDIA_TYPE_AUDIO)
                ret = avpkt.size;
            avpkt.data += ret;
            avpkt.size -= ret;
        }
    }

    av_init_packet(&avpkt);
    avpkt.data = NULL;
    avpkt.size = 0;

    do {
        got_frame = 0;
        decode_handler(ctx, frame, &got_frame, &avpkt);
    } while (got_frame == 1 && it++ < maxiteration);

    av_frame_free(&frame);
    avcodec_free_context(&ctx);
    av_freep(&ctx);
    FDBDesroy(&buffer);
    return 0;
}
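
The "very simple container" the loop above parses is worth spelling out. A sketch of the expected input framing (FUZZ_TAG's value and the FDB* helpers live elsewhere in the harness and are assumed here):

/* Fuzzer input layout, as consumed by the tag-search loop above:
 *
 *   [packet 0 bytes] FUZZ_TAG [packet 1 bytes] FUZZ_TAG ... [last packet]
 *
 * plus, for inputs over 1024 bytes, a trailing 1024-byte block whose
 * leading little-endian fields seed width/height/bit_rate/
 * bits_per_coded_sample before being stripped from the stream. */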
Example #8
static int vfw_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    struct vfw_ctx *ctx = s->priv_data;
    AVCodecContext *codec;
    AVStream *st;
    int devnum;
    int bisize;
    BITMAPINFO *bi;
    CAPTUREPARMS cparms;
    DWORD biCompression;
    WORD biBitCount;
    int ret;
    int width, height;
    AVRational fps = { 25, 1 }; /* fallback frame rate if none is supplied */

    if (!strcmp(s->filename, "list")) {
        for (devnum = 0; devnum <= 9; devnum++) {
            char driver_name[256];
            char driver_ver[256];
            ret = capGetDriverDescription(devnum,
                                          driver_name, sizeof(driver_name),
                                          driver_ver, sizeof(driver_ver));
            if (ret) {
                av_log(s, AV_LOG_INFO, "Driver %d\n", devnum);
                av_log(s, AV_LOG_INFO, " %s\n", driver_name);
                av_log(s, AV_LOG_INFO, " %s\n", driver_ver);
            }
        }
        return AVERROR(EIO);
    }

#if FF_API_FORMAT_PARAMETERS
    if (ap->time_base.num)
        fps = (AVRational){ap->time_base.den, ap->time_base.num};
#endif

    ctx->hwnd = capCreateCaptureWindow(NULL, 0, 0, 0, 0, 0, HWND_MESSAGE, 0);
    if(!ctx->hwnd) {
        av_log(s, AV_LOG_ERROR, "Could not create capture window.\n");
        return AVERROR(EIO);
    }

    /* If atoi fails, devnum==0 and the default device is used */
    devnum = atoi(s->filename);

    ret = SendMessage(ctx->hwnd, WM_CAP_DRIVER_CONNECT, devnum, 0);
    if(!ret) {
        av_log(s, AV_LOG_ERROR, "Could not connect to device.\n");
        DestroyWindow(ctx->hwnd);
        return AVERROR(ENODEV);
    }

    SendMessage(ctx->hwnd, WM_CAP_SET_OVERLAY, 0, 0);
    SendMessage(ctx->hwnd, WM_CAP_SET_PREVIEW, 0, 0);

    ret = SendMessage(ctx->hwnd, WM_CAP_SET_CALLBACK_VIDEOSTREAM, 0,
                      (LPARAM) videostream_cb);
    if(!ret) {
        av_log(s, AV_LOG_ERROR, "Could not set video stream callback.\n");
        goto fail_io;
    }

    SetWindowLongPtr(ctx->hwnd, GWLP_USERDATA, (LONG_PTR) s);

    st = av_new_stream(s, 0);
    if(!st) {
        vfw_read_close(s);
        return AVERROR(ENOMEM);
    }

    /* Set video format */
    bisize = SendMessage(ctx->hwnd, WM_CAP_GET_VIDEOFORMAT, 0, 0);
    if(!bisize)
        goto fail_io;
    bi = av_malloc(bisize);
    if(!bi) {
        vfw_read_close(s);
        return AVERROR(ENOMEM);
    }
    ret = SendMessage(ctx->hwnd, WM_CAP_GET_VIDEOFORMAT, bisize, (LPARAM) bi);
    if(!ret)
        goto fail_bi;

    dump_bih(s, &bi->bmiHeader);


    if (ctx->video_size) {
        ret = av_parse_video_size(&bi->bmiHeader.biWidth, &bi->bmiHeader.biHeight, ctx->video_size);
        if (ret < 0) {
            av_log(s, AV_LOG_ERROR, "Couldn't parse video size.\n");
            goto fail_bi;
        }
    }
#if FF_API_FORMAT_PARAMETERS
    if (ap->width > 0)
        bi->bmiHeader.biWidth = ap->width;
    if (ap->height > 0)
        bi->bmiHeader.biHeight = ap->height;
#endif

    if (0) {
        /* For testing yet unsupported compressions
         * Copy these values from user-supplied verbose information */
        bi->bmiHeader.biWidth       = 320;
        bi->bmiHeader.biHeight      = 240;
        bi->bmiHeader.biPlanes      = 1;
        bi->bmiHeader.biBitCount    = 12;
        bi->bmiHeader.biCompression = MKTAG('I','4','2','0');
        bi->bmiHeader.biSizeImage   = 115200;
        dump_bih(s, &bi->bmiHeader);
    }

    ret = SendMessage(ctx->hwnd, WM_CAP_SET_VIDEOFORMAT, bisize, (LPARAM) bi);
    if(!ret) {
        av_log(s, AV_LOG_ERROR, "Could not set Video Format.\n");
        goto fail_bi;
    }

    biCompression = bi->bmiHeader.biCompression;
    biBitCount    = bi->bmiHeader.biBitCount;
    /* bi is freed here, so copy out the dimensions first;
     * they are still needed for the codec context below */
    width  = bi->bmiHeader.biWidth;
    height = bi->bmiHeader.biHeight;

    av_free(bi);

    /* Set sequence setup */
    ret = SendMessage(ctx->hwnd, WM_CAP_GET_SEQUENCE_SETUP, sizeof(cparms),
                      (LPARAM) &cparms);
    if(!ret)
        goto fail_io;

    dump_captureparms(s, &cparms);

    cparms.fYield = 1; // Spawn a background thread
    cparms.dwRequestMicroSecPerFrame =
                               (fps.den*1000000) / fps.num;
    cparms.fAbortLeftMouse = 0;
    cparms.fAbortRightMouse = 0;
    cparms.fCaptureAudio = 0;
    cparms.vKeyAbort = 0;

    ret = SendMessage(ctx->hwnd, WM_CAP_SET_SEQUENCE_SETUP, sizeof(cparms),
                      (LPARAM) &cparms);
    if(!ret)
        goto fail_io;

    codec = st->codec;
    codec->time_base = (AVRational){fps.den, fps.num};
    codec->codec_type = AVMEDIA_TYPE_VIDEO;
    codec->width  = width;
    codec->height = height;
    codec->pix_fmt = vfw_pixfmt(biCompression, biBitCount);
    if(codec->pix_fmt == PIX_FMT_NONE) {
        codec->codec_id = vfw_codecid(biCompression);
        if(codec->codec_id == CODEC_ID_NONE) {
            av_log(s, AV_LOG_ERROR, "Unknown compression type. "
                             "Please report verbose (-v 9) debug information.\n");
            vfw_read_close(s);
            return AVERROR_PATCHWELCOME;
        }
        codec->bits_per_coded_sample = biBitCount;
    } else {
        codec->codec_id = CODEC_ID_RAWVIDEO;
        if(biCompression == BI_RGB) {
            codec->bits_per_coded_sample = biBitCount;
            codec->extradata = av_malloc(9 + FF_INPUT_BUFFER_PADDING_SIZE);
            if (codec->extradata) {
                codec->extradata_size = 9;
                memcpy(codec->extradata, "BottomUp", 9);
            }
        }
    }

    av_set_pts_info(st, 32, 1, 1000);

    ctx->mutex = CreateMutex(NULL, 0, NULL);
    if(!ctx->mutex) {
        av_log(s, AV_LOG_ERROR, "Could not create Mutex.\n" );
        goto fail_io;
    }
    ctx->event = CreateEvent(NULL, 1, 0, NULL);
    if(!ctx->event) {
        av_log(s, AV_LOG_ERROR, "Could not create Event.\n" );
        goto fail_io;
    }

    ret = SendMessage(ctx->hwnd, WM_CAP_SEQUENCE_NOFILE, 0, 0);
    if(!ret) {
        av_log(s, AV_LOG_ERROR, "Could not start capture sequence.\n" );
        goto fail_io;
    }

    return 0;

fail_bi:
    av_free(bi);

fail_io:
    vfw_read_close(s);
    return AVERROR(EIO);
}
Example #9
static void FFMpegPlayerAndroid_play(JNIEnv *env, jobject obj) {
	AVPacket				packet;
	int						result = -1;
	int 					samples_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
	int16_t*				samples = NULL;   /* only allocated when audio is initialized */
	AVFrame*				pFrameRGB = NULL; /* only allocated when video is initialized */
	
	// Allocate an AVFrame structure
	if(ffmpeg_video.initzialized) {
		pFrameRGB = FFMpegPlayerAndroid_createFrame(env);
		if (pFrameRGB == NULL) {
			jniThrowException(env,
							  "java/io/IOException",
							  "Couldn't crate frame buffer");
			return;
		}
	}
	
	if(ffmpeg_audio.initzialized) {
		samples = (int16_t *) av_malloc(samples_size);
		if(samples == NULL) {
			jniThrowException(env,
							  "java/io/IOException",
							  "Couldn't allocate audio buffer");
			return;
		}
		if(AudioDriver_register() != ANDROID_AUDIOTRACK_RESULT_SUCCESS) {
			jniThrowException(env,
							  "java/io/IOException",
							  "Couldn't register audio track");
			return;
		}
	}
	
	status = STATE_PLAYING;
	while (status != STATE_STOPING) {

		if(status == STATE_PAUSE) {
			usleep(50);
			continue;
		}

		if((result = av_read_frame(ffmpeg_fields.pFormatCtx, &packet)) < 0) {
			status = STATE_STOPING;
			continue;
		}

		// Is this a packet from the video stream?
		if (packet.stream_index == ffmpeg_video.stream &&
				ffmpeg_video.initzialized) {
			if(FFMpegPlayerAndroid_processVideo(env, obj, &packet, pFrameRGB) < 0) {
				__android_log_print(ANDROID_LOG_ERROR, TAG, "Frame wasn't finished by video decoder");
			}
		} else if (packet.stream_index == ffmpeg_audio.stream &&
				ffmpeg_audio.initzialized && ffmpeg_audio.decode) {
			if(FFMpegPlayerAndroid_processAudio(env, &packet, samples, samples_size) < 0 ) {
				return; // exception occurred so return to java
			}
		}

		// Free the packet that was allocated by av_read_frame
		av_free_packet(&packet);
	}
	
	if(ffmpeg_video.initzialized) {
		if(VideoDriver_unregister() != ANDROID_SURFACE_RESULT_SUCCESS) {
			jniThrowException(env,
							  "java/io/IOException",
							  "Couldn't unregister vide surface");
		}
	}
	if(ffmpeg_audio.initzialized) {
		if(AudioDriver_unregister() != ANDROID_AUDIOTRACK_RESULT_SUCCESS) {
			jniThrowException(env,
							  "java/io/IOException",
							  "Couldn't unregister audio track");
		}
	}
	
	av_free( samples );
	
	// Free the RGB image
	av_free(pFrameRGB);

	status = STATE_STOPED;
	__android_log_print(ANDROID_LOG_INFO, TAG, "end of playing");
}
Example #10
static int tm2_build_huff_table(TM2Context *ctx, TM2Codes *code)
{
    TM2Huff huff;
    int res = 0;

    huff.val_bits = get_bits(&ctx->gb, 5);
    huff.max_bits = get_bits(&ctx->gb, 5);
    huff.min_bits = get_bits(&ctx->gb, 5);
    huff.nodes = get_bits_long(&ctx->gb, 17);
    huff.num = 0;

    /* check for correct codes parameters */
    if((huff.val_bits < 1) || (huff.val_bits > 32) ||
       (huff.max_bits < 0) || (huff.max_bits > 32)) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect tree parameters - literal length: %i, max code length: %i\n",
               huff.val_bits, huff.max_bits);
        return -1;
    }
    if((huff.nodes < 0) || (huff.nodes > 0x10000)) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of Huffman tree nodes: %i\n", huff.nodes);
        return -1;
    }
    /* one-node tree */
    if(huff.max_bits == 0)
        huff.max_bits = 1;

    /* allocate space for codes - a full binary tree with `nodes` total
     * nodes has exactly (nodes + 1) / 2 leaves */
    huff.max_num = (huff.nodes + 1) >> 1;
    huff.nums = av_mallocz(huff.max_num * sizeof(int));
    huff.bits = av_mallocz(huff.max_num * sizeof(uint32_t));
    huff.lens = av_mallocz(huff.max_num * sizeof(int));
    if(!huff.nums || !huff.bits || !huff.lens) {
        av_free(huff.nums);
        av_free(huff.bits);
        av_free(huff.lens);
        return -1;
    }

    if(tm2_read_tree(ctx, 0, 0, &huff) == -1)
        res = -1;

    if(huff.num != huff.max_num) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Got less codes than expected: %i of %i\n",
               huff.num, huff.max_num);
        res = -1;
    }

    /* convert codes to vlc_table */
    if(res != -1) {
        int i;

        res = init_vlc(&code->vlc, huff.max_bits, huff.max_num,
                    huff.lens, sizeof(int), sizeof(int),
                    huff.bits, sizeof(uint32_t), sizeof(uint32_t), 0);
        if(res < 0) {
            av_log(ctx->avctx, AV_LOG_ERROR, "Cannot build VLC table\n");
            res = -1;
        } else
            res = 0;
        if(res != -1) {
            code->bits = huff.max_bits;
            code->length = huff.max_num;
            code->recode = av_malloc(code->length * sizeof(int));
            if(!code->recode) {
                free_vlc(&code->vlc);
                res = -1;
            } else {
                for(i = 0; i < code->length; i++)
                    code->recode[i] = huff.nums[i];
            }
        }
    }
    /* free allocated memory */
    av_free(huff.nums);
    av_free(huff.bits);
    av_free(huff.lens);

    return res;
}
Example #11
static int open_slave(AVFormatContext *avf, char *slave, TeeSlave *tee_slave)
{
    int i, ret;
    AVDictionary *options = NULL;
    AVDictionaryEntry *entry;
    char *filename;
    char *format = NULL, *select = NULL, *on_fail = NULL;
    AVFormatContext *avf2 = NULL;
    AVStream *st, *st2;
    int stream_count;
    int fullret;
    char *subselect = NULL, *next_subselect = NULL, *first_subselect = NULL, *tmp_select = NULL;

    if ((ret = parse_slave_options(avf, slave, &options, &filename)) < 0)
        return ret;

#define STEAL_OPTION(option, field) do {                                \
        if ((entry = av_dict_get(options, option, NULL, 0))) {          \
            field = entry->value;                                       \
            entry->value = NULL; /* prevent it from being freed */      \
            av_dict_set(&options, option, NULL, 0);                     \
        }                                                               \
    } while (0)

    STEAL_OPTION("f", format);
    STEAL_OPTION("select", select);
    STEAL_OPTION("onfail", on_fail);

    ret = parse_slave_failure_policy_option(on_fail, tee_slave);
    if (ret < 0) {
        av_log(avf, AV_LOG_ERROR,
               "Invalid onfail option value, valid options are 'abort' and 'ignore'\n");
        goto end;
    }

    ret = avformat_alloc_output_context2(&avf2, NULL, format, filename);
    if (ret < 0)
        goto end;
    tee_slave->avf = avf2;
    av_dict_copy(&avf2->metadata, avf->metadata, 0);
    avf2->opaque   = avf->opaque;
    avf2->io_open  = avf->io_open;
    avf2->io_close = avf->io_close;

    tee_slave->stream_map = av_calloc(avf->nb_streams, sizeof(*tee_slave->stream_map));
    if (!tee_slave->stream_map) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    stream_count = 0;
    for (i = 0; i < avf->nb_streams; i++) {
        st = avf->streams[i];
        if (select) {
            tmp_select = av_strdup(select);  // av_strtok is destructive so we regenerate it in each loop
            if (!tmp_select) {
                ret = AVERROR(ENOMEM);
                goto end;
            }
            fullret = 0;
            first_subselect = tmp_select;
            next_subselect = NULL;
            while (subselect = av_strtok(first_subselect, slave_select_sep, &next_subselect)) {
                first_subselect = NULL;

                ret = avformat_match_stream_specifier(avf, avf->streams[i], subselect);
                if (ret < 0) {
                    av_log(avf, AV_LOG_ERROR,
                           "Invalid stream specifier '%s' for output '%s'\n",
                           subselect, slave);
                    goto end;
                }
                if (ret != 0) {
                    fullret = 1; // match
                    break;
                }
            }
            av_freep(&tmp_select);

            if (fullret == 0) { /* no match */
                tee_slave->stream_map[i] = -1;
                continue;
            }
        }
        tee_slave->stream_map[i] = stream_count++;

        if (!(st2 = avformat_new_stream(avf2, NULL))) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
        st2->id = st->id;
        st2->r_frame_rate        = st->r_frame_rate;
        st2->time_base           = st->time_base;
        st2->start_time          = st->start_time;
        st2->duration            = st->duration;
        st2->nb_frames           = st->nb_frames;
        st2->disposition         = st->disposition;
        st2->sample_aspect_ratio = st->sample_aspect_ratio;
        st2->avg_frame_rate      = st->avg_frame_rate;
        av_dict_copy(&st2->metadata, st->metadata, 0);
        if ((ret = avcodec_parameters_copy(st2->codecpar, st->codecpar)) < 0)
            goto end;
    }

    if (!(avf2->oformat->flags & AVFMT_NOFILE)) {
        if ((ret = avf2->io_open(avf2, &avf2->pb, filename, AVIO_FLAG_WRITE, NULL)) < 0) {
            av_log(avf, AV_LOG_ERROR, "Slave '%s': error opening: %s\n",
                   slave, av_err2str(ret));
            goto end;
        }
    }

    if ((ret = avformat_write_header(avf2, &options)) < 0) {
        av_log(avf, AV_LOG_ERROR, "Slave '%s': error writing header: %s\n",
               slave, av_err2str(ret));
        goto end;
    }
    tee_slave->header_written = 1;

    tee_slave->bsfs = av_calloc(avf2->nb_streams, sizeof(*tee_slave->bsfs));
    if (!tee_slave->bsfs) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    entry = NULL;
    while (entry = av_dict_get(options, "bsfs", NULL, AV_DICT_IGNORE_SUFFIX)) {
        const char *spec = entry->key + strlen("bsfs");
        if (*spec) {
            if (strspn(spec, slave_bsfs_spec_sep) != 1) {
                av_log(avf, AV_LOG_ERROR,
                       "Specifier separator in '%s' is '%c', but only characters '%s' "
                       "are allowed\n", entry->key, *spec, slave_bsfs_spec_sep);
                ret = AVERROR(EINVAL);
                goto end;
            }
            spec++; /* consume separator */
        }

        for (i = 0; i < avf2->nb_streams; i++) {
            ret = avformat_match_stream_specifier(avf2, avf2->streams[i], spec);
            if (ret < 0) {
                av_log(avf, AV_LOG_ERROR,
                       "Invalid stream specifier '%s' in bsfs option '%s' for slave "
                       "output '%s'\n", spec, entry->key, filename);
                goto end;
            }

            if (ret > 0) {
                av_log(avf, AV_LOG_DEBUG, "spec:%s bsfs:%s matches stream %d of slave "
                       "output '%s'\n", spec, entry->value, i, filename);
                if (tee_slave->bsfs[i]) {
                    av_log(avf, AV_LOG_WARNING,
                           "Duplicate bsfs specification associated to stream %d of slave "
                           "output '%s', filters will be ignored\n", i, filename);
                    continue;
                }
                ret = parse_bsfs(avf, entry->value, &tee_slave->bsfs[i]);
                if (ret < 0) {
                    av_log(avf, AV_LOG_ERROR,
                           "Error parsing bitstream filter sequence '%s' associated to "
                           "stream %d of slave output '%s'\n", entry->value, i, filename);
                    goto end;
                }
            }
        }

        av_dict_set(&options, entry->key, NULL, 0);
    }

    if (options) {
        entry = NULL;
        while ((entry = av_dict_get(options, "", entry, AV_DICT_IGNORE_SUFFIX)))
            av_log(avf2, AV_LOG_ERROR, "Unknown option '%s'\n", entry->key);
        ret = AVERROR_OPTION_NOT_FOUND;
        goto end;
    }

end:
    av_free(format);
    av_free(select);
    av_free(on_fail);
    av_dict_free(&options);
    av_freep(&tmp_select);
    return ret;
}
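
A slave specification exercising the options stolen above might look as follows (tee-muxer bracketed key=value syntax; the URL and bitstream filter are illustrative):

/*   "[f=mpegts:onfail=ignore:select=\'v\':bsfs/v=h264_mp4toannexb]udp://10.0.0.1:1234"
 *
 * "f" forces the output format, "onfail=ignore" keeps the tee alive if
 * this slave fails, "select" maps only matching streams into it, and a
 * "bsfs/<spec>" entry attaches bitstream filters to the streams that
 * match <spec> (hence the strspn() check on the separator above). */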
Example #12
void
ff_rdt_parse_close(RDTDemuxContext *s)
{
    av_free(s);
}
Example #13
int h264_decoder_init(H264Decoder * decoder, H264StreamInfo * info)
{
    assert(decoder != NULL);
    assert(info != NULL);
    AVCodec *codec = NULL;
    codec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!codec) 
    {
        goto last_err; 
    }
    decoder->codec_context = avcodec_alloc_context3(codec);
    if(!decoder->codec_context)
    {
        goto last_err;
    }
    if (codec->capabilities&CODEC_CAP_TRUNCATED)
        decoder->codec_context->flags |= CODEC_FLAG_TRUNCATED;

    if (avcodec_open2(decoder->codec_context, codec, NULL) < 0) 
    {
        goto codec_open_err;
    }
    decoder->yuvframe = av_frame_alloc();
    if(!decoder->yuvframe)
    {
        goto do_err;
    }
    decoder->rgbframe = av_frame_alloc();
    if(!decoder->rgbframe)
    {
        goto do_err;
    }

    decoder->rgbframe->format = info->pix_fmt;

    return H264_OK;
do_err:
    if (decoder->yuvframe)
    {
        av_frame_free(&decoder->yuvframe);
    }
    if (decoder->rgbframe)
    {
        av_frame_free(&decoder->rgbframe);
    }

    if (decoder->codec_context)
    {
        avcodec_close(decoder->codec_context);
    }
codec_open_err:
    
    if (decoder->codec_context)
    {
        av_free(decoder->codec_context);
    }
last_err:
    decoder->yuvframe = NULL;
    decoder->rgbframe = NULL;
    decoder->sws_context = NULL;
    decoder->codec_context = NULL;
    
    return H264_ERROR;
}
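
A hedged usage sketch. H264Decoder and H264StreamInfo are this project's own structs, so only the fields visible above (codec_context, yuvframe, rgbframe, sws_context, pix_fmt) are assumed:

static int decoder_init_example(void)
{
    H264Decoder dec = { 0 };
    H264StreamInfo info = { 0 };

    info.pix_fmt = AV_PIX_FMT_RGB24; /* target format for rgbframe */

    if (h264_decoder_init(&dec, &info) != H264_OK)
        return -1; /* the error ladder above already NULLed every pointer */

    /* ... feed encoded NAL units, then tear down with the matching
     * uninit routine (not shown in this excerpt) ... */
    return 0;
}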
Example #14
static void end_ffmpeg_impl(int is_autosplit)
{
	unsigned int i;
	
	PRINT("Closing ffmpeg...\n");

#if 0
	if (audio_stream) { /* SEE UPPER */
		write_audio_frames();
	}
#endif

#ifdef WITH_AUDASPACE
	if (is_autosplit == false) {
		if (audio_mixdown_device) {
			AUD_closeReadDevice(audio_mixdown_device);
			audio_mixdown_device = 0;
		}
	}
#endif

	if (video_stream && video_stream->codec) {
		PRINT("Flushing delayed frames...\n");
		flush_ffmpeg();
	}
	
	if (outfile) {
		av_write_trailer(outfile);
	}
	
	/* Close the video codec */

	if (video_stream && video_stream->codec) {
		avcodec_close(video_stream->codec);
		PRINT("zero video stream %p\n", video_stream);
		video_stream = 0;
	}

	
	/* Close the output file */
	if (outfile) {
		for (i = 0; i < outfile->nb_streams; i++) {
			if (outfile->streams[i]) {
				av_freep(&outfile->streams[i]);
			}
		}
	}
	/* free the temp buffer */
	if (current_frame) {
		delete_picture(current_frame);
		current_frame = 0;
	}
	if (outfile && outfile->oformat) {
		if (!(outfile->oformat->flags & AVFMT_NOFILE)) {
			avio_close(outfile->pb);
		}
	}
	if (outfile) {
		av_free(outfile);
		outfile = 0;
	}
	if (audio_input_buffer) {
		av_free(audio_input_buffer);
		audio_input_buffer = 0;
	}
#ifndef FFMPEG_HAVE_ENCODE_AUDIO2
	if (audio_output_buffer) {
		av_free(audio_output_buffer);
		audio_output_buffer = 0;
	}
#endif

	if (audio_deinterleave_buffer) {
		av_free(audio_deinterleave_buffer);
		audio_deinterleave_buffer = 0;
	}

	if (img_convert_ctx) {
		sws_freeContext(img_convert_ctx);
		img_convert_ctx = 0;
	}
}
Example #15
static int film_read_packet(AVFormatContext *s,
                            AVPacket *pkt)
{
    FilmDemuxContext *film = s->priv_data;
    AVIOContext *pb = s->pb;
    film_sample *sample;
    int ret = 0;
    int i;
    int left, right;

    if (film->current_sample >= film->sample_count)
        return AVERROR(EIO);

    sample = &film->sample_table[film->current_sample];

    /* position the stream (will probably be there anyway) */
    avio_seek(pb, sample->sample_offset, SEEK_SET);

    /* do a special song and dance when loading FILM Cinepak chunks */
    if ((sample->stream == film->video_stream_index) &&
        (film->video_type == CODEC_ID_CINEPAK)) {
        pkt->pos= url_ftell(pb);
        if (av_new_packet(pkt, sample->sample_size))
            return AVERROR(ENOMEM);
        avio_read(pb, pkt->data, sample->sample_size);
    } else if ((sample->stream == film->audio_stream_index) &&
        (film->audio_channels == 2)) {
        /* stereo PCM needs to be interleaved */

        if (av_new_packet(pkt, sample->sample_size))
            return AVERROR(ENOMEM);

        /* make sure the interleave buffer is large enough */
        if (sample->sample_size > film->stereo_buffer_size) {
            av_free(film->stereo_buffer);
            film->stereo_buffer_size = sample->sample_size;
            film->stereo_buffer = av_malloc(film->stereo_buffer_size);
            if (!film->stereo_buffer) {
                film->stereo_buffer_size = 0;
                av_free_packet(pkt);
                return AVERROR(ENOMEM);
            }
        }

        pkt->pos= url_ftell(pb);
        ret = avio_read(pb, film->stereo_buffer, sample->sample_size);
        if (ret != sample->sample_size)
            ret = AVERROR(EIO);

        left = 0;
        right = sample->sample_size / 2;
        for (i = 0; i < sample->sample_size; ) {
            if (film->audio_bits == 8) {
                pkt->data[i++] = film->stereo_buffer[left++];
                pkt->data[i++] = film->stereo_buffer[right++];
            } else {
                pkt->data[i++] = film->stereo_buffer[left++];
                pkt->data[i++] = film->stereo_buffer[left++];
                pkt->data[i++] = film->stereo_buffer[right++];
                pkt->data[i++] = film->stereo_buffer[right++];
            }
        }
    } else {
        ret= av_get_packet(pb, pkt, sample->sample_size);
        if (ret != sample->sample_size)
            ret = AVERROR(EIO);
    }

    pkt->stream_index = sample->stream;
    pkt->pts = sample->pts;

    film->current_sample++;

    return ret;
}
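
The interleave loop above is compact; a hand-worked example of what it does to a toy 8-byte stereo sample:

/* FILM stores each audio chunk as all left-channel data followed by
 * all right-channel data; the demuxer re-pairs them:
 *
 *   stereo_buffer (8-bit): L0 L1 L2 L3 | R0 R1 R2 R3
 *   pkt->data            : L0 R0 L1 R1 L2 R2 L3 R3
 *
 * With 16-bit samples each value spans two bytes, so two bytes are
 * copied from each half per iteration instead of one. */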
Example #16
int thumbnail_extract_video_frame(void *handle, int64_t time, int flag)
{
    int frameFinished = 0;  
    int tryNum = 0;
    int i = 0;
    int64_t ret;

    struct video_frame *frame = (struct video_frame *)handle;
    struct stream *stream = &frame->stream;
    AVFormatContext *pFormatCtx = stream->pFormatCtx;
    AVPacket        packet;
    AVCodecContext *pCodecCtx = pFormatCtx->streams[stream->videoStream]->codec;

    if (time >= 0) {
        if (av_seek_frame(pFormatCtx, stream->videoStream, time, AVSEEK_FLAG_BACKWARD) < 0) {
            log_error("[thumbnail_extract_video_frame]av_seek_frame failed!");
        }
        find_best_keyframe(pFormatCtx, stream->videoStream, 0, &frame->thumbNailTime, &frame->thumbNailOffset,&frame->maxframesize);
        log_debug("[thumbnail_extract_video_frame:%d]time=%lld time=%lld  offset=%lld!ret=%d\n", 
                __LINE__,time, frame->thumbNailTime, frame->thumbNailOffset, ret);
    }

    if (frame->thumbNailTime>=0 && frame->thumbNailTime != AV_NOPTS_VALUE) {
        log_debug("seek to thumbnail frame by timestamp(%lld)!curoffset=%llx\n", frame->thumbNailTime, avio_tell(pFormatCtx->pb));
        if (av_seek_frame(pFormatCtx, stream->videoStream, frame->thumbNailTime, AVSEEK_FLAG_BACKWARD) < 0) {
            avio_seek(pFormatCtx->pb, frame->thumbNailOffset, 0);
            log_error("[thumbnail_extract_video_frame]av_seek_frame failed!seek to thumbNailOffset %x\n",frame->thumbNailOffset);
        }
        log_debug("after seek by time, offset=%llx!\n", avio_tell(pFormatCtx->pb));
    } else {
        log_debug("seek to thumbnail frame by offset(%lld)!\n", frame->thumbNailOffset);
        ret = avio_seek(pFormatCtx->pb, frame->thumbNailOffset, SEEK_SET);
        log_debug("after seek by offset, offset=%llx!ret=%llx\n", avio_tell(pFormatCtx->pb), ret);
    }

    avcodec_flush_buffers(stream->pCodecCtx);
    i = 0;
    while (av_read_next_video_frame(pFormatCtx, &packet, stream->videoStream) >= 0) {
        AVFrame *pFrame = NULL;
        log_debug("[%s] av_read_frame frame size=%d,pts=%lld\n", __FUNCTION__, packet.size, packet.pts);
        i++;
        if (packet.size < MAX(frame->maxframesize / 10, packet.size) && i < READ_FRAME_MIN) {
            /* skip small packets, they may decode to a black frame */
            av_free_packet(&packet);
            continue;
        }
        avcodec_decode_video2(stream->pCodecCtx, stream->pFrameYUV, &frameFinished, &packet);
        pFrame = stream->pFrameYUV;
        log_debug("[%s]decode video frame, finish=%d key=%d offset=%llx type=%d codec_id=%x,quality=%d tryNum=%d\n", __FUNCTION__,
                  frameFinished, pFrame->key_frame, avio_tell(pFormatCtx->pb), pFrame->pict_type, pCodecCtx->codec_id, pFrame->quality, tryNum);
        /* prefer decoded keyframes; if no I frame shows up for too long,
         * fall back to normal frames */
        if (frameFinished &&
            ((pFrame->key_frame && pFrame->pict_type == AV_PICTURE_TYPE_I) ||
             (pFrame->key_frame && pFrame->pict_type == AV_PICTURE_TYPE_SI) ||
             (pFrame->key_frame && pFrame->pict_type == AV_PICTURE_TYPE_BI) ||
             (tryNum > 4 && pFrame->key_frame) ||
             (tryNum > 5 && pFrame->pict_type == AV_PICTURE_TYPE_I) ||
             (tryNum > 6 && pFrame->pict_type == AV_PICTURE_TYPE_SI) ||
             (tryNum > 7 && pFrame->pict_type == AV_PICTURE_TYPE_BI) ||
             (tryNum > (TRY_DECODE_MAX-1) && i > READ_FRAME_MAX && pFrame->pict_type == AV_PICTURE_TYPE_P) ||
             (tryNum > (TRY_DECODE_MAX-1) && i > READ_FRAME_MAX && pFrame->pict_type == AV_PICTURE_TYPE_B) ||
             (tryNum > (TRY_DECODE_MAX-1) && i > READ_FRAME_MAX && pFrame->pict_type == AV_PICTURE_TYPE_S))) {
            struct SwsContext *img_convert_ctx;

            log_debug("[%s]pCodecCtx->codec_id=%x tryNum=%d\n", __FUNCTION__, pCodecCtx->codec_id, tryNum);

            img_convert_ctx = sws_getContext(stream->pCodecCtx->width, stream->pCodecCtx->height,
                                             stream->pCodecCtx->pix_fmt,
                                             frame->width, frame->height, DEST_FMT, SWS_BICUBIC,
                                             NULL, NULL, NULL);
            if (img_convert_ctx == NULL) {
                log_print("cannot initialize the conversion context!\n");
                av_free_packet(&packet);
                break;
            }

            sws_scale(img_convert_ctx, stream->pFrameYUV->data, stream->pFrameYUV->linesize, 0,
                      frame->height, stream->pFrameRGB->data, stream->pFrameRGB->linesize);
            sws_freeContext(img_convert_ctx); /* was leaked before */
            av_free_packet(&packet);
            goto ret;
        }
        av_free_packet(&packet);
        if (tryNum++ > TRY_DECODE_MAX && i > READ_FRAME_MAX)
            break;
        if (tryNum % 10 == 0)
            amthreadpool_thread_usleep(100);
    }

    if (frame->data) {
        free(frame->data);
    }
    av_free(stream->pFrameRGB);
    av_free(stream->pFrameYUV);
    avcodec_close(stream->pCodecCtx);
    av_close_input_file(stream->pFormatCtx);
    memset(&frame->stream, 0, sizeof(struct stream));
    return -1;

ret:
    return 0;
}
Example #17
//-----------------------------------------------------------
//--- Custom "av_dump_format" which will return the video FPS
//--- av_dump_formatFPS(...)
//-----------------------------------------------------------
void av_dump_formatFPS(AVFormatContext *ic, int index, const char *url, int is_output, const int* frameRate)
{
    int i;
    uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
    if (ic->nb_streams && !printed)
        return;

    av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
           is_output ? "Output" : "Input",
           index,
           is_output ? ic->oformat->name : ic->iformat->name,
           is_output ? "to" : "from", url);
    dump_metadata(NULL, ic->metadata, "  ");

    if (!is_output) {
        av_log(NULL, AV_LOG_INFO, "  Duration: ");
        if (ic->duration != AV_NOPTS_VALUE) {
            int hours, mins, secs, us;
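            /* +5000 us is half of 10^4, so the hundredths-of-a-second
             * field printed below is rounded, not truncated */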
            int64_t duration = ic->duration + 5000;
            secs  = duration / AV_TIME_BASE;
            us    = duration % AV_TIME_BASE;
            mins  = secs / 60;
            secs %= 60;
            hours = mins / 60;
            mins %= 60;
            av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
                   (100 * us) / AV_TIME_BASE);
        } else {
            av_log(NULL, AV_LOG_INFO, "N/A");
        }
        if (ic->start_time != AV_NOPTS_VALUE) {
            int secs, us;
            av_log(NULL, AV_LOG_INFO, ", start: ");
            secs = ic->start_time / AV_TIME_BASE;
            us   = abs(ic->start_time % AV_TIME_BASE);
            av_log(NULL, AV_LOG_INFO, "%d.%06d",
                   secs, (int) av_rescale(us, 1000000, AV_TIME_BASE));
        }
        av_log(NULL, AV_LOG_INFO, ", bitrate: ");
        if (ic->bit_rate)
            av_log(NULL, AV_LOG_INFO, "%d kb/s", ic->bit_rate / 1000);
        else
            av_log(NULL, AV_LOG_INFO, "N/A");
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    for (i = 0; i < ic->nb_chapters; i++) {
        AVChapter *ch = ic->chapters[i];
        av_log(NULL, AV_LOG_INFO, "    Chapter #%d.%d: ", index, i);
        av_log(NULL, AV_LOG_INFO,
               "start %f, ", ch->start * av_q2d(ch->time_base));
        av_log(NULL, AV_LOG_INFO,
               "end %f\n", ch->end * av_q2d(ch->time_base));

        dump_metadata(NULL, ch->metadata, "    ");
    }

    if (ic->nb_programs) {
        int j, k, total = 0;
        for (j = 0; j < ic->nb_programs; j++) {
            AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
                                                  "name", NULL, 0);
            av_log(NULL, AV_LOG_INFO, "  Program %d %s\n", ic->programs[j]->id,
                   name ? name->value : "");
            dump_metadata(NULL, ic->programs[j]->metadata, "    ");
            for (k = 0; k < ic->programs[j]->nb_stream_indexes; k++) {
                dump_stream_format(ic, ic->programs[j]->stream_index[k],
                                   index, is_output);
                printed[ic->programs[j]->stream_index[k]] = 1;
            }
            total += ic->programs[j]->nb_stream_indexes;
        }
        if (total < ic->nb_streams)
            av_log(NULL, AV_LOG_INFO, "  No Program\n");
    }

    for (i = 0; i < ic->nb_streams; i++)
        if (!printed[i])
            dump_stream_format(ic, i, index, is_output);

    av_free(printed);
}
Example #18
int thumbnail_decoder_open(void *handle, const char* filename)
{
    int i;
    int video_index = -1;
    struct video_frame *frame = (struct video_frame *)handle;
    struct stream *stream = &frame->stream;

    log_debug("thumbnail open file:%s\n", filename);
    update_loglevel_setting();
    if (av_open_input_file(&stream->pFormatCtx, filename, NULL, 0, NULL) != 0) {
        log_print("Coundn't open file %s !\n", filename);
        goto err;
    }

    //thumbnail extract frame, so need fast_switch
    stream->pFormatCtx->pb->local_playback = 1;

    if (av_find_stream_info(stream->pFormatCtx) < 0) {
        log_print("Coundn't find stream information !\n");
        goto err1;
    }

    //dump_format(stream->pFormatCtx, 0, filename, 0);
    for (i = 0; i < stream->pFormatCtx->nb_streams; i++) {
        if (stream->pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {
            video_index = i;
            break;
        }
    }

    if (video_index == -1) {
        log_print("Didn't find a video stream!\n");
        goto err1;
    }

    find_thumbnail_frame(stream->pFormatCtx, video_index, &frame->thumbNailTime, &frame->thumbNailOffset,&frame->maxframesize);

    stream->videoStream = video_index;
    stream->pCodecCtx = stream->pFormatCtx->streams[video_index]->codec;
    if (stream->pCodecCtx == NULL) {
        log_print("pCodecCtx is NULL !\n");
    }

    frame->width = stream->pCodecCtx->width;
    frame->height = stream->pCodecCtx->height;

    stream->pCodec = avcodec_find_decoder(stream->pCodecCtx->codec_id);
    if (stream->pCodec == NULL) {
        log_print("Didn't find codec!\n");
        goto err1;
    }

    if (avcodec_open(stream->pCodecCtx, stream->pCodec) < 0) {
        log_print("Couldn't open codec!\n");
        goto err1;
    }

    frame->duration = stream->pFormatCtx->duration;

    stream->pFrameYUV = avcodec_alloc_frame();
    if (stream->pFrameYUV == NULL) {
        log_print("alloc YUV frame failed!\n");
        goto err2;
    }

    stream->pFrameRGB = avcodec_alloc_frame();
    if (stream->pFrameRGB == NULL) {
        log_print("alloc RGB frame failed!\n");
        goto err3;
    }

    frame->DataSize = avpicture_get_size(DEST_FMT, frame->width, frame->height);
    frame->data = (char *)malloc(frame->DataSize);
    if (frame->data == NULL) {
        log_print("alloc buffer failed!\n");
        goto err4;
    }

    avpicture_fill((AVPicture *)stream->pFrameRGB, frame->data, DEST_FMT, frame->width, frame->height);

    return 0;

err4:
    av_free(stream->pFrameRGB);
err3:
    av_free(stream->pFrameYUV);
err2:
    avcodec_close(stream->pCodecCtx);
err1:
    av_close_input_file(stream->pFormatCtx);
err:
    memset(&frame->stream, 0, sizeof(struct stream));

    return -1;
}
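
For symmetry with the error unwinding above, a teardown routine would release the same resources in reverse order. This is a minimal sketch under the assumption that struct video_frame and struct stream are laid out exactly as used above; the real project may name and shape this differently:

int thumbnail_decoder_close(void *handle)
{
    struct video_frame *frame = (struct video_frame *)handle;
    struct stream *stream = &frame->stream;

    free(frame->data);                    /* RGB destination buffer */
    av_free(stream->pFrameRGB);
    av_free(stream->pFrameYUV);
    avcodec_close(stream->pCodecCtx);     /* codec before the demuxer */
    av_close_input_file(stream->pFormatCtx);
    memset(&frame->stream, 0, sizeof(struct stream));

    return 0;
}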
Example #19
0
/* Generate a digest reply, according to RFC 2617. */
static char *make_digest_auth(HTTPAuthState *state, const char *username,
                              const char *password, const char *uri,
                              const char *method)
{
    DigestParams *digest = &state->digest_params;
    int len;
    uint32_t cnonce_buf[2];
    char cnonce[17];
    char nc[9];
    int i;
    char A1hash[33], A2hash[33], response[33];
    struct AVMD5 *md5ctx;
    uint8_t hash[16];
    char *authstr;

    digest->nc++;
    snprintf(nc, sizeof(nc), "%08x", digest->nc);

    /* Generate a client nonce. */
    for (i = 0; i < 2; i++)
        cnonce_buf[i] = av_get_random_seed();
    ff_data_to_hex(cnonce, (const uint8_t*) cnonce_buf, sizeof(cnonce_buf), 1);
    cnonce[2*sizeof(cnonce_buf)] = 0;

    md5ctx = av_malloc(av_md5_size);
    if (!md5ctx)
        return NULL;

    av_md5_init(md5ctx);
    update_md5_strings(md5ctx, username, ":", state->realm, ":", password, NULL);
    av_md5_final(md5ctx, hash);
    ff_data_to_hex(A1hash, hash, 16, 1);
    A1hash[32] = 0;

    if (!strcmp(digest->algorithm, "") || !strcmp(digest->algorithm, "MD5")) {
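        /* MD5 (or unspecified): A1hash already holds the correct digest. */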
    } else if (!strcmp(digest->algorithm, "MD5-sess")) {
        av_md5_init(md5ctx);
        update_md5_strings(md5ctx, A1hash, ":", digest->nonce, ":", cnonce, NULL);
        av_md5_final(md5ctx, hash);
        ff_data_to_hex(A1hash, hash, 16, 1);
        A1hash[32] = 0;
    } else {
        /* Unsupported algorithm */
        av_free(md5ctx);
        return NULL;
    }

    av_md5_init(md5ctx);
    update_md5_strings(md5ctx, method, ":", uri, NULL);
    av_md5_final(md5ctx, hash);
    ff_data_to_hex(A2hash, hash, 16, 1);
    A2hash[32] = 0;

    av_md5_init(md5ctx);
    update_md5_strings(md5ctx, A1hash, ":", digest->nonce, NULL);
    if (!strcmp(digest->qop, "auth") || !strcmp(digest->qop, "auth-int")) {
        update_md5_strings(md5ctx, ":", nc, ":", cnonce, ":", digest->qop, NULL);
    }
    update_md5_strings(md5ctx, ":", A2hash, NULL);
    av_md5_final(md5ctx, hash);
    ff_data_to_hex(response, hash, 16, 1);
    response[32] = 0;

    av_free(md5ctx);

    if (!strcmp(digest->qop, "") || !strcmp(digest->qop, "auth")) {
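        /* No qop, or qop=auth: the response computed above is used as is. */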
    } else if (!strcmp(digest->qop, "auth-int")) {
        /* qop=auth-int not supported */
        return NULL;
    } else {
        /* Unsupported qop value. */
        return NULL;
    }

    len = strlen(username) + strlen(state->realm) + strlen(digest->nonce) +
              strlen(uri) + strlen(response) + strlen(digest->algorithm) +
              strlen(digest->opaque) + strlen(digest->qop) + strlen(cnonce) +
              strlen(nc) + 150;

    authstr = av_malloc(len);
    if (!authstr)
        return NULL;
    snprintf(authstr, len, "Authorization: Digest ");

    /* TODO: Escape the quoted strings properly. */
    av_strlcatf(authstr, len, "username=\"%s\"",   username);
    av_strlcatf(authstr, len, ",realm=\"%s\"",     state->realm);
    av_strlcatf(authstr, len, ",nonce=\"%s\"",     digest->nonce);
    av_strlcatf(authstr, len, ",uri=\"%s\"",       uri);
    av_strlcatf(authstr, len, ",response=\"%s\"",  response);
    if (digest->algorithm[0])
        av_strlcatf(authstr, len, ",algorithm=%s",  digest->algorithm);
    if (digest->opaque[0])
        av_strlcatf(authstr, len, ",opaque=\"%s\"", digest->opaque);
    if (digest->qop[0]) {
        av_strlcatf(authstr, len, ",qop=\"%s\"",    digest->qop);
        av_strlcatf(authstr, len, ",cnonce=\"%s\"", cnonce);
        av_strlcatf(authstr, len, ",nc=%s",         nc);
    }

    av_strlcatf(authstr, len, "\r\n");

    return authstr;
}
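
A hypothetical call site for the routine above, only to illustrate the contract: state must already carry the realm/nonce/qop fields parsed from the server's WWW-Authenticate challenge, and the caller owns (and must av_free) the returned header line:

static void example_digest_usage(HTTPAuthState *state)
{
    char *auth = make_digest_auth(state, "user", "secret",
                                  "/index.html", "GET");
    if (!auth)
        return; /* unsupported algorithm/qop, or out of memory */
    /* ...append auth to the outgoing request headers... */
    av_free(auth);
}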
Example #20
0
static int sap_write_header(AVFormatContext *s)
{
    struct SAPState *sap = s->priv_data;
    char host[1024], path[1024], url[1024], announce_addr[50] = "";
    char *option_list;
    int port = 9875, base_port = 5004, i, pos = 0, same_port = 0, ttl = 255;
    AVFormatContext **contexts = NULL;
    int ret = 0;
    struct sockaddr_storage localaddr;
    socklen_t addrlen = sizeof(localaddr);
    int udp_fd;
    AVDictionaryEntry* title = av_dict_get(s->metadata, "title", NULL, 0);

    if (!ff_network_init())
        return AVERROR(EIO);

    /* extract hostname and port */
    av_url_split(NULL, 0, NULL, 0, host, sizeof(host), &base_port,
                 path, sizeof(path), s->filename);
    if (base_port < 0)
        base_port = 5004;

    /* search for options */
    option_list = strrchr(path, '?');
    if (option_list) {
        char buf[50];
        if (av_find_info_tag(buf, sizeof(buf), "announce_port", option_list)) {
            port = strtol(buf, NULL, 10);
        }
        if (av_find_info_tag(buf, sizeof(buf), "same_port", option_list)) {
            same_port = strtol(buf, NULL, 10);
        }
        if (av_find_info_tag(buf, sizeof(buf), "ttl", option_list)) {
            ttl = strtol(buf, NULL, 10);
        }
        if (av_find_info_tag(buf, sizeof(buf), "announce_addr", option_list)) {
            av_strlcpy(announce_addr, buf, sizeof(announce_addr));
        }
    }

    if (!announce_addr[0]) {
        struct addrinfo hints = { 0 }, *ai = NULL;
        hints.ai_family = AF_UNSPEC;
        if (getaddrinfo(host, NULL, &hints, &ai)) {
            av_log(s, AV_LOG_ERROR, "Unable to resolve %s\n", host);
            ret = AVERROR(EIO);
            goto fail;
        }
        if (ai->ai_family == AF_INET) {
            /* Also known as sap.mcast.net */
            av_strlcpy(announce_addr, "224.2.127.254", sizeof(announce_addr));
#if HAVE_STRUCT_SOCKADDR_IN6
        } else if (ai->ai_family == AF_INET6) {
            /* With IPv6, you can use the same destination in many different
             * multicast subnets, to choose how far you want it routed.
             * This one is intended to be routed globally. */
            av_strlcpy(announce_addr, "ff0e::2:7ffe", sizeof(announce_addr));
#endif
        } else {
            freeaddrinfo(ai);
            av_log(s, AV_LOG_ERROR, "Host %s resolved to unsupported "
                                    "address family\n", host);
            ret = AVERROR(EIO);
            goto fail;
        }
        freeaddrinfo(ai);
    }

    contexts = av_mallocz(sizeof(AVFormatContext*) * s->nb_streams);
    if (!contexts) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    s->start_time_realtime = av_gettime();
    for (i = 0; i < s->nb_streams; i++) {
        URLContext *fd;

        ff_url_join(url, sizeof(url), "rtp", NULL, host, base_port,
                    "?ttl=%d", ttl);
        if (!same_port)
            base_port += 2;
        ret = ffurl_open(&fd, url, AVIO_FLAG_WRITE, &s->interrupt_callback, NULL);
        if (ret) {
            ret = AVERROR(EIO);
            goto fail;
        }
        ret = ff_rtp_chain_mux_open(&contexts[i], s, s->streams[i], fd, 0, i);
        if (ret < 0)
            goto fail;
        s->streams[i]->priv_data = contexts[i];
        av_strlcpy(contexts[i]->filename, url, sizeof(contexts[i]->filename));
    }

    if (s->nb_streams > 0 && title) {
        av_dict_set(&contexts[0]->metadata, "title", title->value, 0);
    }

    ff_url_join(url, sizeof(url), "udp", NULL, announce_addr, port,
                "?ttl=%d&connect=1", ttl);
    ret = ffurl_open(&sap->ann_fd, url, AVIO_FLAG_WRITE,
                     &s->interrupt_callback, NULL);
    if (ret) {
        ret = AVERROR(EIO);
        goto fail;
    }

    udp_fd = ffurl_get_file_handle(sap->ann_fd);
    if (getsockname(udp_fd, (struct sockaddr*) &localaddr, &addrlen)) {
        ret = AVERROR(EIO);
        goto fail;
    }
    if (localaddr.ss_family != AF_INET
#if HAVE_STRUCT_SOCKADDR_IN6
        && localaddr.ss_family != AF_INET6
#endif
        ) {
        av_log(s, AV_LOG_ERROR, "Unsupported protocol family\n");
        ret = AVERROR(EIO);
        goto fail;
    }
    sap->ann_size = 8192;
    sap->ann = av_mallocz(sap->ann_size);
    if (!sap->ann) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    sap->ann[pos] = (1 << 5);
#if HAVE_STRUCT_SOCKADDR_IN6
    if (localaddr.ss_family == AF_INET6)
        sap->ann[pos] |= 0x10;
#endif
    pos++;
    sap->ann[pos++] = 0; /* Authentication length */
    AV_WB16(&sap->ann[pos], av_get_random_seed());
    pos += 2;
    if (localaddr.ss_family == AF_INET) {
        memcpy(&sap->ann[pos], &((struct sockaddr_in*)&localaddr)->sin_addr,
               sizeof(struct in_addr));
        pos += sizeof(struct in_addr);
#if HAVE_STRUCT_SOCKADDR_IN6
    } else {
        memcpy(&sap->ann[pos], &((struct sockaddr_in6*)&localaddr)->sin6_addr,
               sizeof(struct in6_addr));
        pos += sizeof(struct in6_addr);
#endif
    }

    av_strlcpy(&sap->ann[pos], "application/sdp", sap->ann_size - pos);
    pos += strlen(&sap->ann[pos]) + 1;

    if (av_sdp_create(contexts, s->nb_streams, &sap->ann[pos],
                      sap->ann_size - pos)) {
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }
    av_freep(&contexts);
    av_log(s, AV_LOG_VERBOSE, "SDP:\n%s\n", &sap->ann[pos]);
    pos += strlen(&sap->ann[pos]);
    sap->ann_size = pos;

    if (sap->ann_size > sap->ann_fd->max_packet_size) {
        av_log(s, AV_LOG_ERROR, "Announcement too large to send in one "
                                "packet\n");
        ret = AVERROR(EIO);
        goto fail;
    }

    return 0;

fail:
    av_free(contexts);
    sap_write_close(s);
    return ret;
}
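
For reference, the announcement buffer assembled above follows the SAP header layout of RFC 2974; the offsets below assume an IPv4 origin address:

/*
 *   byte 0       flags: V=1 (1 << 5), plus 0x10 when the origin is IPv6
 *   byte 1       authentication data length (0 here)
 *   bytes 2-3    16-bit message identifier hash (random)
 *   bytes 4-7    origin address (4 bytes IPv4; 16 bytes when IPv6)
 *   then         "application/sdp" payload type, NUL-terminated
 *   then         the SDP payload itself
 */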
Example #21
0
static void FDBDestroy(FuzzDataBuffer *FDB) { av_free(FDB->data_); }
Example #22
0
/**
 * Cycle through the pins of the given device_filter (of type devtype),
 * retrieve the first matching output pin and return a pointer to it
 * in *ppin.
 * If ppin is NULL, cycle through all pins, listing audio/video capabilities.
 */
static int
dshow_cycle_pins(AVFormatContext *avctx, enum dshowDeviceType devtype,
                 enum dshowSourceFilterType sourcetype, IBaseFilter *device_filter, IPin **ppin)
{
    struct dshow_ctx *ctx = avctx->priv_data;
    IEnumPins *pins = 0;
    IPin *device_pin = NULL;
    IPin *pin;
    int r;

    const GUID *mediatype[2] = { &MEDIATYPE_Video, &MEDIATYPE_Audio };
    const char *devtypename = (devtype == VideoDevice) ? "video" : "audio only";
    const char *sourcetypename = (sourcetype == VideoSourceDevice) ? "video" : "audio";

    int set_format = (devtype == VideoDevice && (ctx->framerate ||
                                                (ctx->requested_width && ctx->requested_height) ||
                                                 ctx->pixel_format != AV_PIX_FMT_NONE ||
                                                 ctx->video_codec_id != AV_CODEC_ID_RAWVIDEO))
                  || (devtype == AudioDevice && (ctx->channels || ctx->sample_rate));
    int format_set = 0;
    int should_show_properties = (devtype == VideoDevice) ? ctx->show_video_device_dialog : ctx->show_audio_device_dialog;

    if (should_show_properties)
        dshow_show_filter_properties(device_filter, avctx);

    r = IBaseFilter_EnumPins(device_filter, &pins);
    if (r != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Could not enumerate pins.\n");
        return AVERROR(EIO);
    }

    if (!ppin) {
        av_log(avctx, AV_LOG_INFO, "DirectShow %s device options (from %s devices)\n",
               devtypename, sourcetypename);
    }

    while (!device_pin && IEnumPins_Next(pins, 1, &pin, NULL) == S_OK) {
        IKsPropertySet *p = NULL;
        IEnumMediaTypes *types = NULL;
        PIN_INFO info = {0};
        AM_MEDIA_TYPE *type;
        GUID category;
        DWORD r2;
        char *name_buf = NULL;
        wchar_t *pin_id = NULL;
        char *pin_buf = NULL;
        char *desired_pin_name = devtype == VideoDevice ? ctx->video_pin_name : ctx->audio_pin_name;

        IPin_QueryPinInfo(pin, &info);
        IBaseFilter_Release(info.pFilter);

        if (info.dir != PINDIR_OUTPUT)
            goto next;
        if (IPin_QueryInterface(pin, &IID_IKsPropertySet, (void **) &p) != S_OK)
            goto next;
        if (IKsPropertySet_Get(p, &AMPROPSETID_Pin, AMPROPERTY_PIN_CATEGORY,
                               NULL, 0, &category, sizeof(GUID), &r2) != S_OK)
            goto next;
        if (!IsEqualGUID(&category, &PIN_CATEGORY_CAPTURE))
            goto next;
        name_buf = dup_wchar_to_utf8(info.achName);

        r = IPin_QueryId(pin, &pin_id);
        if (r != S_OK) {
            av_log(avctx, AV_LOG_ERROR, "Could not query pin id\n");
            /* release what this iteration and the enumeration still hold */
            av_free(name_buf);
            IPin_Release(pin);
            IEnumPins_Release(pins);
            return AVERROR(EIO);
        }
        pin_buf = dup_wchar_to_utf8(pin_id);

        if (!ppin) {
            av_log(avctx, AV_LOG_INFO, " Pin \"%s\" (alternative pin name \"%s\")\n", name_buf, pin_buf);
            dshow_cycle_formats(avctx, devtype, pin, NULL);
            goto next;
        }

        if (desired_pin_name) {
            if(strcmp(name_buf, desired_pin_name) && strcmp(pin_buf, desired_pin_name)) {
                av_log(avctx, AV_LOG_DEBUG, "skipping pin \"%s\" (\"%s\") != requested \"%s\"\n",
                    name_buf, pin_buf, desired_pin_name);
                goto next;
            }
        }

        if (set_format) {
            dshow_cycle_formats(avctx, devtype, pin, &format_set);
            if (!format_set) {
                goto next;
            }
        }
        if (devtype == AudioDevice && ctx->audio_buffer_size) {
            if (dshow_set_audio_buffer_size(avctx, pin) < 0) {
                av_log(avctx, AV_LOG_ERROR, "unable to set audio buffer size %d to pin, using pin anyway...", ctx->audio_buffer_size);
            }
        }

        if (IPin_EnumMediaTypes(pin, &types) != S_OK)
            goto next;

        IEnumMediaTypes_Reset(types);
        /* in case format_set was not called, just verify the majortype */
        while (!device_pin && IEnumMediaTypes_Next(types, 1, &type, NULL) == S_OK) {
            if (IsEqualGUID(&type->majortype, mediatype[devtype])) {
                device_pin = pin;
                av_log(avctx, AV_LOG_DEBUG, "Selecting pin %s on %s\n", name_buf, devtypename);
                goto next;
            }
            CoTaskMemFree(type);
        }

next:
        if (types)
            IEnumMediaTypes_Release(types);
        if (p)
            IKsPropertySet_Release(p);
        if (device_pin != pin)
            IPin_Release(pin);
        av_free(name_buf);
        av_free(pin_buf);
        if (pin_id)
            CoTaskMemFree(pin_id);
    }

    IEnumPins_Release(pins);

    if (ppin) {
        if (set_format && !format_set) {
            av_log(avctx, AV_LOG_ERROR, "Could not set %s options\n", devtypename);
            return AVERROR(EIO);
        }
        if (!device_pin) {
            av_log(avctx, AV_LOG_ERROR,
                "Could not find output pin from %s capture device.\n", devtypename);
            return AVERROR(EIO);
        }
        *ppin = device_pin;
    }

    return 0;
}
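
A sketch of how the helper above is typically driven, assuming the surrounding device-open code has already created device_filter; pass NULL to only list pins, or a result pointer to actually select one:

static int example_select_video_pin(AVFormatContext *avctx,
                                    IBaseFilter *device_filter)
{
    IPin *pin = NULL;

    /* Listing mode: logs every capture pin and its formats. */
    dshow_cycle_pins(avctx, VideoDevice, VideoSourceDevice,
                     device_filter, NULL);

    /* Selection mode: on success, *pin holds a referenced output pin. */
    return dshow_cycle_pins(avctx, VideoDevice, VideoSourceDevice,
                            device_filter, &pin);
}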
Example #23
0
static int
artwork_rescale(AVFormatContext *src_ctx, int s, int out_w, int out_h, int format, struct evbuffer *evbuf)
{
  uint8_t *buf;
  uint8_t *outbuf;

  AVCodecContext *src;

  AVFormatContext *dst_ctx;
  AVCodecContext *dst;
  AVOutputFormat *dst_fmt;
  AVStream *dst_st;

  AVCodec *img_decoder;
  AVCodec *img_encoder;

  int64_t pix_fmt_mask;
  const enum PixelFormat *pix_fmts;

  AVFrame *i_frame;
  AVFrame *o_frame;

  struct SwsContext *swsctx;

  AVPacket pkt;
  int have_frame;

  int outbuf_len;

  int ret;

  src = src_ctx->streams[s]->codec;

  img_decoder = avcodec_find_decoder(src->codec_id);
  if (!img_decoder)
    {
      DPRINTF(E_LOG, L_ART, "No suitable decoder found for artwork %s\n", src_ctx->filename);

      return -1;
    }

  ret = avcodec_open(src, img_decoder);
  if (ret < 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not open codec for decoding: %s\n", strerror(AVUNERROR(ret)));

      return -1;
    }

  /* Set up output */
#if LIBAVFORMAT_VERSION_MAJOR >= 53 || (LIBAVFORMAT_VERSION_MAJOR == 52 && LIBAVFORMAT_VERSION_MINOR >= 45)
  /* FFmpeg 0.6 */
  dst_fmt = av_guess_format("image2", NULL, NULL);
#else
  dst_fmt = guess_format("image2", NULL, NULL);
#endif
  if (!dst_fmt)
    {
      DPRINTF(E_LOG, L_ART, "ffmpeg image2 muxer not available\n");

      ret = -1;
      goto out_close_src;
    }

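  /* Note: this writes into the shared, statically allocated "image2" muxer
   * description; it is not thread safe and affects later users of that
   * muxer in the same process. */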
  dst_fmt->video_codec = CODEC_ID_NONE;

  /* Try to keep same codec if possible */
  if ((src->codec_id == CODEC_ID_PNG) && (format & ART_CAN_PNG))
    dst_fmt->video_codec = CODEC_ID_PNG;
  else if ((src->codec_id == CODEC_ID_MJPEG) && (format & ART_CAN_JPEG))
    dst_fmt->video_codec = CODEC_ID_MJPEG;

  /* If not possible, select new codec */
  if (dst_fmt->video_codec == CODEC_ID_NONE)
    {
      if (format & ART_CAN_PNG)
	dst_fmt->video_codec = CODEC_ID_PNG;
      else if (format & ART_CAN_JPEG)
	dst_fmt->video_codec = CODEC_ID_MJPEG;
    }

  img_encoder = avcodec_find_encoder(dst_fmt->video_codec);
  if (!img_encoder)
    {
      DPRINTF(E_LOG, L_ART, "No suitable encoder found for codec ID %d\n", dst_fmt->video_codec);

      ret = -1;
      goto out_close_src;
    }

  dst_ctx = avformat_alloc_context();
  if (!dst_ctx)
    {
      DPRINTF(E_LOG, L_ART, "Out of memory for format context\n");

      ret = -1;
      goto out_close_src;
    }

  dst_ctx->oformat = dst_fmt;

#if LIBAVFORMAT_VERSION_MAJOR >= 53
  dst_fmt->flags &= ~AVFMT_NOFILE;
#else
  ret = snprintf(dst_ctx->filename, sizeof(dst_ctx->filename), "evbuffer:%p", evbuf);
  if ((ret < 0) || (ret >= sizeof(dst_ctx->filename)))
    {
      DPRINTF(E_LOG, L_ART, "Output artwork URL too long\n");

      ret = -1;
      goto out_free_dst_ctx;
    }
#endif

  dst_st = av_new_stream(dst_ctx, 0);
  if (!dst_st)
    {
      DPRINTF(E_LOG, L_ART, "Out of memory for new output stream\n");

      ret = -1;
      goto out_free_dst_ctx;
    }

  dst = dst_st->codec;

#if LIBAVCODEC_VERSION_MAJOR >= 53 || (LIBAVCODEC_VERSION_MAJOR == 52 && LIBAVCODEC_VERSION_MINOR >= 64)
  avcodec_get_context_defaults2(dst, AVMEDIA_TYPE_VIDEO);
#else
  avcodec_get_context_defaults2(dst, CODEC_TYPE_VIDEO);
#endif

  if (dst_fmt->flags & AVFMT_GLOBALHEADER)
    dst->flags |= CODEC_FLAG_GLOBAL_HEADER;

  dst->codec_id = dst_fmt->video_codec;
#if LIBAVCODEC_VERSION_MAJOR >= 53 || (LIBAVCODEC_VERSION_MAJOR == 52 && LIBAVCODEC_VERSION_MINOR >= 64)
  dst->codec_type = AVMEDIA_TYPE_VIDEO;
#else
  dst->codec_type = CODEC_TYPE_VIDEO;
#endif

  pix_fmt_mask = 0;
  pix_fmts = img_encoder->pix_fmts;
  while (pix_fmts && (*pix_fmts != -1))
    {
      pix_fmt_mask |= (1 << *pix_fmts);
      pix_fmts++;
    }

  dst->pix_fmt = avcodec_find_best_pix_fmt(pix_fmt_mask, src->pix_fmt, 1, NULL);

  if (dst->pix_fmt < 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not determine best pixel format\n");

      ret = -1;
      goto out_free_dst;
    }

  DPRINTF(E_DBG, L_ART, "Selected pixel format: %d\n", dst->pix_fmt);

  dst->time_base.num = 1;
  dst->time_base.den = 25;

  dst->width = out_w;
  dst->height = out_h;

#if LIBAVFORMAT_VERSION_MAJOR <= 52 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR <= 1)
  ret = av_set_parameters(dst_ctx, NULL);
  if (ret < 0)
    {
      DPRINTF(E_LOG, L_ART, "Invalid parameters for artwork output: %s\n", strerror(AVUNERROR(ret)));

      ret = -1;
      goto out_free_dst;
    }
#endif

  /* Open encoder */
  ret = avcodec_open(dst, img_encoder);
  if (ret < 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not open codec for encoding: %s\n", strerror(AVUNERROR(ret)));

      ret = -1;
      goto out_free_dst;
    }

  i_frame = avcodec_alloc_frame();
  o_frame = avcodec_alloc_frame();

  if (!i_frame || !o_frame)
    {
      DPRINTF(E_LOG, L_ART, "Could not allocate input/output frame\n");

      ret = -1;
      goto out_free_frames;
    }

  ret = avpicture_get_size(dst->pix_fmt, src->width, src->height);

  DPRINTF(E_DBG, L_ART, "Artwork buffer size: %d\n", ret);

  buf = (uint8_t *)av_malloc(ret);
  if (!buf)
    {
      DPRINTF(E_LOG, L_ART, "Out of memory for artwork buffer\n");

      ret = -1;
      goto out_free_frames;
    }

  avpicture_fill((AVPicture *)o_frame, buf, dst->pix_fmt, src->width, src->height);

  swsctx = sws_getContext(src->width, src->height, src->pix_fmt,
			  dst->width, dst->height, dst->pix_fmt,
			  SWS_BICUBIC, NULL, NULL, NULL);
  if (!swsctx)
    {
      DPRINTF(E_LOG, L_ART, "Could not get SWS context\n");

      ret = -1;
      goto out_free_buf;
    }

  /* Get frame */
  have_frame = 0;
  av_init_packet(&pkt);
  pkt.data = NULL; /* keep av_free_packet() safe even if no packet is read */
  pkt.size = 0;
  while (av_read_frame(src_ctx, &pkt) == 0)
    {
      if (pkt.stream_index != s)
	{
	  av_free_packet(&pkt);
	  continue;
	}

#if LIBAVCODEC_VERSION_MAJOR >= 53 || (LIBAVCODEC_VERSION_MAJOR == 52 && LIBAVCODEC_VERSION_MINOR >= 32)
      /* FFmpeg 0.6 */
      avcodec_decode_video2(src, i_frame, &have_frame, &pkt);
#else
      avcodec_decode_video(src, i_frame, &have_frame, pkt.data, pkt.size);
#endif

      break;
    }

  if (!have_frame)
    {
      DPRINTF(E_LOG, L_ART, "Could not decode artwork\n");

      av_free_packet(&pkt);
      sws_freeContext(swsctx);

      ret = -1;
      goto out_free_buf;
    }

  /* Scale */
#if LIBSWSCALE_VERSION_MAJOR >= 1 || (LIBSWSCALE_VERSION_MAJOR == 0 && LIBSWSCALE_VERSION_MINOR >= 9)
  /* FFmpeg 0.6, libav 0.6+ */
  sws_scale(swsctx, (const uint8_t * const *)i_frame->data, i_frame->linesize, 0, src->height, o_frame->data, o_frame->linesize);
#else
  sws_scale(swsctx, i_frame->data, i_frame->linesize, 0, src->height, o_frame->data, o_frame->linesize);
#endif

  sws_freeContext(swsctx);
  av_free_packet(&pkt);

  /* Open output file */
#if LIBAVFORMAT_VERSION_MAJOR >= 53
  dst_ctx->pb = avio_evbuffer_open(evbuf);
#else
  ret = url_fopen(&dst_ctx->pb, dst_ctx->filename, URL_WRONLY);
#endif
  if (ret < 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not open artwork destination buffer\n");

      ret = -1;
      goto out_free_buf;
    }

  /* Encode frame */
  outbuf_len = dst->width * dst->height * 3;
  if (outbuf_len < FF_MIN_BUFFER_SIZE)
    outbuf_len = FF_MIN_BUFFER_SIZE;

  outbuf = (uint8_t *)av_malloc(outbuf_len);
  if (!outbuf)
    {
      DPRINTF(E_LOG, L_ART, "Out of memory for encoded artwork buffer\n");

#if LIBAVFORMAT_VERSION_MAJOR >= 53
      avio_evbuffer_close(dst_ctx->pb);
#else
      url_fclose(dst_ctx->pb);
#endif

      ret = -1;
      goto out_free_buf;
    }

  ret = avcodec_encode_video(dst, outbuf, outbuf_len, o_frame);
  if (ret <= 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not encode artwork\n");

      ret = -1;
      goto out_fclose_dst;
    }

  av_init_packet(&pkt);
  pkt.stream_index = 0;
  pkt.data = outbuf;
  pkt.size = ret;

#if LIBAVFORMAT_VERSION_MAJOR > 53 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR >= 3)
  ret = avformat_write_header(dst_ctx, NULL);
#else
  ret = av_write_header(dst_ctx);
#endif
  if (ret != 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not write artwork header: %s\n", strerror(AVUNERROR(ret)));

      ret = -1;
      goto out_fclose_dst;
    }

  ret = av_interleaved_write_frame(dst_ctx, &pkt);

  if (ret != 0)
    {
      DPRINTF(E_LOG, L_ART, "Error writing artwork\n");

      ret = -1;
      goto out_fclose_dst;
    }

  ret = av_write_trailer(dst_ctx);
  if (ret != 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not write artwork trailer: %s\n", strerror(AVUNERROR(ret)));

      ret = -1;
      goto out_fclose_dst;
    }

  switch (dst_fmt->video_codec)
    {
      case CODEC_ID_PNG:
	ret = ART_FMT_PNG;
	break;

      case CODEC_ID_MJPEG:
	ret = ART_FMT_JPEG;
	break;

      default:
	DPRINTF(E_LOG, L_ART, "Unhandled rescale output format\n");
	ret = -1;
	break;
    }

 out_fclose_dst:
#if LIBAVFORMAT_VERSION_MAJOR >= 53
  avio_evbuffer_close(dst_ctx->pb);
#else
  url_fclose(dst_ctx->pb);
#endif
  av_free(outbuf);

 out_free_buf:
  av_free(buf);

 out_free_frames:
  if (i_frame)
    av_free(i_frame);
  if (o_frame)
    av_free(o_frame);
  avcodec_close(dst);

 out_free_dst:
  av_free(dst_st);
  av_free(dst);

 out_free_dst_ctx:
  av_free(dst_ctx);

 out_close_src:
  avcodec_close(src);

  return ret;
}
Example #24
0
/* Called from the main */
int main(int argc, char **argv)
{
	AVFormatContext *pFormatCtx = NULL;
	int err;
	int i;
	int videoStream, audioStream;
	AVCodecContext *pCodecCtx;
	AVCodec         *pCodec;
	AVFrame         *pFrame; 

	AVPacket        packet;
	int             frameFinished;
	float           aspect_ratio;

	AVCodecContext  *aCodecCtx;
	AVCodec         *aCodec;

	SDL_Overlay     *bmp;
	SDL_Surface     *screen;
	SDL_Rect        rect;
	SDL_Event       event;
	SDL_AudioSpec   wanted_spec, spec;

	//ffplay_info("Start.\n");
	
	if(argc < 2) {
		printf("Please provide a movie file\n");
		return -1;
	}

	// Register all formats and codecs
	av_register_all();

	if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
		fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
		exit(1);
	}

	pFormatCtx = avformat_alloc_context();
	
	// Open video file
	err = avformat_open_input(&pFormatCtx, argv[1],NULL,NULL);
	if(err<0)
	{
		printf("error ret =  %d\n",err);
		return -1;
	}

	// Retrieve stream information
	err = avformat_find_stream_info(pFormatCtx, NULL);
	if(err<0)
	{
		printf("error ret =  %d\n",err);
		return -1;
	}

	// Dump information about file onto standard error
	av_dump_format(pFormatCtx, 0, argv[1], 0);

	// Find the first video and audio streams
	videoStream = -1;
	audioStream = -1;
	for(i=0; i<pFormatCtx->nb_streams; i++) {
		if(AVMEDIA_TYPE_VIDEO==pFormatCtx->streams[i]->codec->codec_type &&
			videoStream < 0) {
			videoStream=i;
		}
		if(AVMEDIA_TYPE_AUDIO==pFormatCtx->streams[i]->codec->codec_type &&
			audioStream < 0) {
			audioStream=i;
		}
	}

	if(videoStream==-1)
	{
		return -1; // Didn't find a video stream
	}

	if(audioStream==-1)
	{
		return -1; // Didn't find an audio stream
	}


	aCodecCtx=pFormatCtx->streams[audioStream]->codec;

	// Set audio settings from codec info
	wanted_spec.freq = aCodecCtx->sample_rate;
	wanted_spec.format = AUDIO_S16SYS;
	wanted_spec.channels = aCodecCtx->channels;
	wanted_spec.silence = 0;
	wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
	wanted_spec.callback = audio_callback;
	wanted_spec.userdata = aCodecCtx;

	if(SDL_OpenAudio(&wanted_spec, &spec) < 0) {
		fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
		return -1;
	}

	aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
	if(!aCodec) {
		fprintf(stderr, "Unsupported codec!\n");
		return -1;
	}

	avcodec_open2(aCodecCtx, aCodec,NULL);

	// audio_st = pFormatCtx->streams[index]
	packet_queue_init(&audioq);
	SDL_PauseAudio(0);


	// Get a pointer to the codec context for the video stream
	pCodecCtx=pFormatCtx->streams[videoStream]->codec;

	// Find the decoder for the video stream
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL) {
		fprintf(stderr, "Unsupported codec!\n");
		return -1; // Codec not found
	}
	// Open codec
	if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)
		return -1; // Could not open codec

	// Allocate video frame
	pFrame=avcodec_alloc_frame();


	// Make a screen to put our video
#ifndef __DARWIN__
	screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
#else
	screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0);
#endif
	if(!screen) {
		fprintf(stderr, "SDL: could not set video mode - exiting\n");
		exit(1);
	}

	// Allocate a place to put our YUV image on that screen
	bmp = SDL_CreateYUVOverlay(pCodecCtx->width,
			 pCodecCtx->height,
			 SDL_YV12_OVERLAY,
			 screen);

	// Read frames and save first five frames to disk
	i=0;
	while(av_read_frame(pFormatCtx, &packet)>=0)
	{
		// Is this a packet from the video stream?
		if(packet.stream_index==videoStream) 
		{
			// Decode video frame
			avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, 
									&packet);

			// Did we get a video frame?
			if(frameFinished) 
			{
				SDL_LockYUVOverlay(bmp);

				AVPicture pict;
				pict.data[0] = bmp->pixels[0];
				pict.data[1] = bmp->pixels[2];
				pict.data[2] = bmp->pixels[1];

				pict.linesize[0] = bmp->pitches[0];
				pict.linesize[1] = bmp->pitches[2];
				pict.linesize[2] = bmp->pitches[1];

				// Convert the image into the YUV420P format that SDL's
				// YV12 overlay expects (libswscale/swscale.h must be
				// included at the top of the file). The scaler context is
				// created once and reused; allocating one per frame leaks.
				static struct SwsContext *img_convert_ctx = NULL;
				if (!img_convert_ctx)
					img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
													 pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
													 PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
				sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize,
				 0, pCodecCtx->height, pict.data, pict.linesize);

				SDL_UnlockYUVOverlay(bmp);

				rect.x = 0;
				rect.y = 0;
				rect.w = pCodecCtx->width;
				rect.h = pCodecCtx->height;
				SDL_DisplayYUVOverlay(bmp, &rect);
			}
			// Free the packet whether or not a complete frame came out;
			// freeing it only on success would leak partially decoded packets.
			av_free_packet(&packet);
		}
		else if(packet.stream_index==audioStream) 
		{
			packet_queue_put(&audioq, &packet);
		} 
		else 
		{
			av_free_packet(&packet);
		}

		SDL_PollEvent(&event);
		switch(event.type) 
		{
			case SDL_QUIT:
				quit = 1;
				SDL_Quit();
				exit(0);
				break;
			default:
				break;
		}		
	}

	// Free the YUV frame
	av_free(pFrame);

	// Close the codec
	avcodec_close(pCodecCtx);

	// Close the video file
	av_close_input_file(pFormatCtx);
	//ffplay_info("end.\n");

	return 0;
}
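
The queue plumbing referenced above (packet_queue_init, packet_queue_put, audio_callback, audioq, quit) is assumed to exist elsewhere in the program; a minimal sketch of the queue shape those helpers need:

typedef struct PacketQueue {
	AVPacketList *first_pkt, *last_pkt;   /* singly linked list of packets */
	int nb_packets;
	int size;                             /* total payload bytes queued */
	SDL_mutex *mutex;                     /* protects the list ... */
	SDL_cond  *cond;                      /* ... and wakes the audio callback */
} PacketQueue;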
Example #25
0
static int parse_playlist(HLSContext *c, const char *url,
                          struct playlist *pls, AVIOContext *in)
{
    int ret = 0, is_segment = 0, is_variant = 0, bandwidth = 0;
    int64_t duration = 0;
    enum KeyType key_type = KEY_NONE;
    uint8_t iv[16] = "";
    int has_iv = 0;
    char key[MAX_URL_SIZE] = "";
    char line[MAX_URL_SIZE];
    const char *ptr;
    int close_in = 0;
    uint8_t *new_url = NULL;

    if (!in) {
        AVDictionary *opts = NULL;
        close_in = 1;
        /* Some HLS servers don't like being sent the range header */
        av_dict_set(&opts, "seekable", "0", 0);

        // broker prior HTTP options that should be consistent across requests
        av_dict_set(&opts, "user-agent", c->user_agent, 0);
        av_dict_set(&opts, "cookies", c->cookies, 0);
        av_dict_set(&opts, "headers", c->headers, 0);

        ret = avio_open2(&in, url, AVIO_FLAG_READ,
                         c->interrupt_callback, &opts);
        av_dict_free(&opts);
        if (ret < 0)
            return ret;
    }

    if (av_opt_get(in, "location", AV_OPT_SEARCH_CHILDREN, &new_url) >= 0)
        url = new_url;

    read_chomp_line(in, line, sizeof(line));
    if (strcmp(line, "#EXTM3U")) {
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }

    if (pls) {
        free_segment_list(pls);
        pls->finished = 0;
    }
    while (!url_feof(in)) {
        read_chomp_line(in, line, sizeof(line));
        if (av_strstart(line, "#EXT-X-STREAM-INF:", &ptr)) {
            struct variant_info info = {{0}};
            is_variant = 1;
            ff_parse_key_value(ptr, (ff_parse_key_val_cb) handle_variant_args,
                               &info);
            bandwidth = atoi(info.bandwidth);
        } else if (av_strstart(line, "#EXT-X-KEY:", &ptr)) {
            struct key_info info = {{0}};
            ff_parse_key_value(ptr, (ff_parse_key_val_cb) handle_key_args,
                               &info);
            key_type = KEY_NONE;
            has_iv = 0;
            if (!strcmp(info.method, "AES-128"))
                key_type = KEY_AES_128;
            if (!strncmp(info.iv, "0x", 2) || !strncmp(info.iv, "0X", 2)) {
                ff_hex_to_data(iv, info.iv + 2);
                has_iv = 1;
            }
            av_strlcpy(key, info.uri, sizeof(key));
        } else if (av_strstart(line, "#EXT-X-TARGETDURATION:", &ptr)) {
            if (!pls) {
                if (!new_variant(c, 0, url, NULL)) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
                pls = c->playlists[c->n_playlists - 1];
            }
            pls->target_duration = atoi(ptr) * AV_TIME_BASE;
        } else if (av_strstart(line, "#EXT-X-MEDIA-SEQUENCE:", &ptr)) {
            if (!pls) {
                if (!new_variant(c, 0, url, NULL)) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
                pls = c->playlists[c->n_playlists - 1];
            }
            pls->start_seq_no = atoi(ptr);
        } else if (av_strstart(line, "#EXT-X-ENDLIST", &ptr)) {
            if (pls)
                pls->finished = 1;
        } else if (av_strstart(line, "#EXTINF:", &ptr)) {
            is_segment = 1;
            duration   = atof(ptr) * AV_TIME_BASE;
        } else if (av_strstart(line, "#", NULL)) {
            continue;
        } else if (line[0]) {
            if (is_variant) {
                if (!new_variant(c, bandwidth, line, url)) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
                is_variant = 0;
                bandwidth  = 0;
            }
            if (is_segment) {
                struct segment *seg;
                if (!pls) {
                    if (!new_variant(c, 0, url, NULL)) {
                        ret = AVERROR(ENOMEM);
                        goto fail;
                    }
                    pls = c->playlists[c->n_playlists - 1];
                }
                seg = av_malloc(sizeof(struct segment));
                if (!seg) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
                seg->duration = duration;
                seg->key_type = key_type;
                if (has_iv) {
                    memcpy(seg->iv, iv, sizeof(iv));
                } else {
                    int seq = pls->start_seq_no + pls->n_segments;
                    memset(seg->iv, 0, sizeof(seg->iv));
                    AV_WB32(seg->iv + 12, seq);
                }
                ff_make_absolute_url(seg->key, sizeof(seg->key), url, key);
                ff_make_absolute_url(seg->url, sizeof(seg->url), url, line);
                dynarray_add(&pls->segments, &pls->n_segments, seg);
                is_segment = 0;
            }
        }
    }
    if (pls)
        pls->last_load_time = av_gettime();

fail:
    av_free(new_url);
    if (close_in)
        avio_close(in);
    return ret;
}
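
For orientation, a minimal media playlist exercising the tags handled by the parser above (#EXT-X-STREAM-INF would appear in a master playlist instead); the URIs and IV are placeholders:

#EXTM3U
#EXT-X-TARGETDURATION:10
#EXT-X-MEDIA-SEQUENCE:0
#EXT-X-KEY:METHOD=AES-128,URI="key.bin",IV=0x00000000000000000000000000000001
#EXTINF:9.8,
segment0.ts
#EXTINF:10.0,
segment1.ts
#EXT-X-ENDLIST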
Example #26
0
static void qdm2_extradata_free(PayloadContext *qdm)
{
    av_free(qdm);
}
Example #27
0
void lavc_conv_uninit(struct lavc_conv *priv)
{
    avcodec_close(priv->avctx);
    av_free(priv->avctx);
    talloc_free(priv);
}
Example #28
0
void av_audio_convert_free(AVAudioConvert *ctx)
{
    av_free(ctx);
}
Example #29
0
static int
dshow_read_close(AVFormatContext *s)
{
    struct dshow_ctx *ctx = s->priv_data;
    AVPacketList *pktl;

    if (ctx->control) {
        IMediaControl_Stop(ctx->control);
        IMediaControl_Release(ctx->control);
    }

    if (ctx->media_event)
        IMediaEvent_Release(ctx->media_event);

    if (ctx->graph) {
        IEnumFilters *fenum;
        int r;
        r = IGraphBuilder_EnumFilters(ctx->graph, &fenum);
        if (r == S_OK) {
            IBaseFilter *f;
            IEnumFilters_Reset(fenum);
            while (IEnumFilters_Next(fenum, 1, &f, NULL) == S_OK) {
                if (IGraphBuilder_RemoveFilter(ctx->graph, f) == S_OK)
                    IEnumFilters_Reset(fenum); /* When a filter is removed,
                                                * the list must be reset. */
                IBaseFilter_Release(f);
            }
            IEnumFilters_Release(fenum);
        }
        IGraphBuilder_Release(ctx->graph);
    }

    if (ctx->capture_pin[VideoDevice])
        libAVPin_Release(ctx->capture_pin[VideoDevice]);
    if (ctx->capture_pin[AudioDevice])
        libAVPin_Release(ctx->capture_pin[AudioDevice]);
    if (ctx->capture_filter[VideoDevice])
        libAVFilter_Release(ctx->capture_filter[VideoDevice]);
    if (ctx->capture_filter[AudioDevice])
        libAVFilter_Release(ctx->capture_filter[AudioDevice]);

    if (ctx->device_pin[VideoDevice])
        IPin_Release(ctx->device_pin[VideoDevice]);
    if (ctx->device_pin[AudioDevice])
        IPin_Release(ctx->device_pin[AudioDevice]);
    if (ctx->device_filter[VideoDevice])
        IBaseFilter_Release(ctx->device_filter[VideoDevice]);
    if (ctx->device_filter[AudioDevice])
        IBaseFilter_Release(ctx->device_filter[AudioDevice]);

    if (ctx->device_name[0])
        av_freep(&ctx->device_name[0]);
    if (ctx->device_name[1])
        av_freep(&ctx->device_name[1]);

    if(ctx->mutex)
        CloseHandle(ctx->mutex);
    if(ctx->event[0])
        CloseHandle(ctx->event[0]);
    if(ctx->event[1])
        CloseHandle(ctx->event[1]);

    pktl = ctx->pktl;
    while (pktl) {
        AVPacketList *next = pktl->next;
        av_free_packet(&pktl->pkt);
        av_free(pktl);
        pktl = next;
    }

    CoUninitialize();

    return 0;
}
Example #30
-2
//----------------------------------------------------------------
// main_utf8
// 
int main_utf8(int argc, char **argv)
{
	const char *input = NULL;
	const char *output_prefix = "";
	double target_segment_duration = 0.0;
	char *segment_duration_check = NULL;
	const char *playlist_filename = NULL;
	const char *http_prefix = "";
	long max_tsfiles = 0;
	char *max_tsfiles_check = NULL;
	double prev_segment_time = 0.0;
	double segment_duration = 0.0;
	unsigned int output_index = 1;
	const AVClass *fc = avformat_get_class();
	AVDictionary *format_opts = NULL;
	AVOutputFormat *ofmt = NULL;
	AVFormatContext *ic = NULL;
	AVFormatContext *oc = NULL;
	AVStream *video_st = NULL;
	AVStream *audio_st = NULL;
	AVCodec *codec = NULL;
	char *output_filename = NULL; 	
	int if_save_keyframe = 0;			//add by wanggm
	char *keyframeinfo_filename = NULL;	//add by wanggm
	json_object *obj = NULL;			//add by wanggm
	json_object *info_arr_obj = NULL;	//add by wanggm

	int if_monitor_related_process = 0;	//add by wanggm
	pid_t relatedProcessPid = 1; 		//add by wanggm
	char *pid_filename = NULL;
	int video_index = -1;
	int audio_index = -1;
	int kill_file = 0;
	int decode_done = 0;
	int ret = 0;
	int i = 0;
	TSMStreamLace * streamLace = NULL;
	TSMPlaylist * playlist = NULL;
	const double segment_duration_error_tolerance = 0.05;
	double extra_duration_needed = 0;
	int strict_segment_duration = 0;

	av_log_set_level(AV_LOG_INFO);


	for (i = 1; i < argc; i++)
	{
		if (strcmp(argv[i], "-i") == 0)
		{
			if ((argc - i) <= 1)
				usage(argv, "could not parse -i parameter");
			i++;
			input = argv[i];
		}
		else if (strcmp(argv[i], "-o") == 0)
		{
			if ((argc - i) <= 1)
				usage(argv, "could not parse -o parameter");
			i++;
			output_prefix = argv[i];
		}
		else if (strcmp(argv[i], "-d") == 0)
		{
			if ((argc - i) <= 1)
				usage(argv, "could not parse -d parameter");
			i++;

			target_segment_duration = strtod(argv[i], &segment_duration_check);
			if (segment_duration_check == argv[i]
					|| target_segment_duration == HUGE_VAL
					|| target_segment_duration == -HUGE_VAL) {
				usage3(argv, "invalid segment duration: ", argv[i]);
			}
		}
		else if (strcmp(argv[i], "-x") == 0)
		{
			if ((argc - i) <= 1)
				usage(argv, "could not parse -x parameter");
			i++;
			playlist_filename = argv[i];
		}
		else if (strcmp(argv[i], "-p") == 0)
		{
			if ((argc - i) <= 1)
				usage(argv, "could not parse -p parameter");
			i++;
			http_prefix = argv[i];
		}
		else if (strcmp(argv[i], "-w") == 0)
		{
			if ((argc - i) <= 1)
				usage(argv, "could not parse -w parameter");
			i++;

			max_tsfiles = strtol(argv[i], &max_tsfiles_check, 10);
			if (max_tsfiles_check == argv[i] || max_tsfiles < 0
					|| max_tsfiles >= INT_MAX)
			{
				usage3(argv, "invalid live stream max window size: ", argv[i]);
			}
		}
		else if (strcmp(argv[i], "-P") == 0)
		{
			if ((argc - i) <= 1)
				usage(argv, "could not parse -P parameter");
			i++;
			pid_filename = argv[i];
		}
		else if (strcmp(argv[i], "--watch-for-kill-file") == 0)
		{
			// end program when it finds a file with name 'kill':
			kill_file = 1;
		}
		else if (strcmp(argv[i], "--strict-segment-duration") == 0)
		{
			// force segment creation on non-keyframe boundaries:
			strict_segment_duration = 1;
		}
		else if (strcmp(argv[i], "--avformat-option") == 0)
		{
			const AVOption *of;
			const char *opt;
			const char *arg;
			if ((argc - i) <= 1)
				usage(argv, "could not parse --avformat-option parameter");
			i++;
			opt = argv[i];
			if ((argc - i) <= 1)
				usage(argv, "could not parse --avformat-option parameter");
			i++;
			arg = argv[i];

			if ((of = av_opt_find(&fc, opt, NULL, 0,
					AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)))
				av_dict_set(&format_opts, opt, arg,
						(of->type == AV_OPT_TYPE_FLAGS) ? AV_DICT_APPEND : 0);
			else
				usage3(argv, "unknown --avformat-option parameter: ", opt);
		}
		else if (strcmp(argv[i], "--loglevel") == 0)
		{
			const char *arg;
			if ((argc - i) <= 1)
				usage(argv, "could not parse --loglevel parameter");
			i++;
			arg = argv[i];

			if (loglevel(arg))
				usage3(argv, "unknown --loglevel parameter: ", arg);
		}
		else if (strcmp(argv[i], "-k") == 0)
		{ //add by wanggm for save key frame information into json file.
			if ((argc - i) <= 1)
				usage(argv, "could not parse -k parameter");
			i++;
			if_save_keyframe = atoi(argv[i]);
		}
		else if( strcmp(argv[i], "-s") == 0)
		{//add by wanggm for set the start index of ts file.
			if ((argc - i) <= 1)
				usage(argv, "could not parse -s parameter");
			i++;
			
			char *output_index_check = NULL;
			// parse into a signed temporary: output_index is unsigned,
			// so a "< 0" range check on it could never fire
			long start_index = strtol(argv[i], &output_index_check, 10);
			if (output_index_check == argv[i] || start_index < 0
					|| start_index >= INT_MAX)
			{
				usage3(argv, "invalid start index of ts file: ", argv[i]);
			}
			output_index = (unsigned int) start_index;
			
		}
		else if( strcmp(argv[i], "-m") == 0)
		{ // add by wanggm for exit by monitor the process of which pid is given. 
			if ((argc - i) <= 1)
				usage(argv, "could not parse -m parmeter");
			i++;

			if_monitor_related_process = 1;
			unsigned int tmpPid= atoi(argv[i]);
			if( tmpPid > 0)
			{  
				relatedProcessPid = (pid_t) tmpPid;
				fprintf(stdout, "%s I will exit when the process PID= %d exit.\n", getSystemTime(timeChar), relatedProcessPid);
			}
		}
	}

	
	if (!input)
	{
		usage(argv, "-i input file parameter must be specified");
	}

	if (!playlist_filename)
	{
		usage(argv, "-x m3u8 playlist file parameter must be specified");
	}

	if (target_segment_duration == 0.0)
	{
		usage(argv, "-d segment duration parameter must be specified");
	}

	if( output_index <= 0 )
	{
		output_index = 1;	
	}

	if( 1 == if_monitor_related_process)
	{
		pthread_t id;
		// pass the pid through the thread argument; monitor_process must
		// cast it back with (pid_t)(long)
		pthread_create(&id, NULL, (void *(*)(void *)) monitor_process,
				(void *)(long) relatedProcessPid);
	}


	// Create PID file
	if (pid_filename)
	{
		FILE* pid_file = fopen_utf8(pid_filename, "wb");
		if (pid_file)
		{
			fprintf(pid_file, "%d", getpid());
			fclose(pid_file);
		}
	}


	av_register_all();
	avformat_network_init();

	if (!strcmp(input, "-"))
	{
		input = "pipe:";
	}

	output_filename = (char*) malloc(
			sizeof(char) * (strlen(output_prefix) + 15));
	//add by wanggm
	if(  if_save_keyframe == 1)
	{ 
		keyframeinfo_filename = (char*) malloc(
			sizeof(char)* (strlen(output_prefix) + 15));
	}
	if (!output_filename || (1 == if_save_keyframe && !keyframeinfo_filename))
	{
		fprintf(stderr, "%s Could not allocate space for output filenames\n", getSystemTime( timeChar));
		goto error;
	}

	playlist = createPlaylist(max_tsfiles, target_segment_duration,
			http_prefix);
	if (!playlist)
	{
		fprintf(stderr,
				"%s Could not allocate space for m3u8 playlist structure\n", getSystemTime( timeChar));
		goto error;
	}

	ret = avformat_open_input(&ic, input, NULL, (format_opts) ? &format_opts : NULL);

	if (ret != 0)
	{
		fprintf(stderr,
				"%sCould not open input file, make sure it is an mpegts or mp4 file: %d\n", getSystemTime(timeChar), ret);
		goto error;
	}
	av_dict_free(&format_opts);

	if (avformat_find_stream_info(ic, NULL) < 0)
	{
		fprintf(stderr, "%s Could not read stream information\n", getSystemTime( timeChar));
		goto error;
	}

#if LIBAVFORMAT_VERSION_MAJOR > 52 || (LIBAVFORMAT_VERSION_MAJOR == 52 && \
                                       LIBAVFORMAT_VERSION_MINOR >= 45)
	ofmt = av_guess_format("mpegts", NULL, NULL);
#else
	ofmt = guess_format("mpegts", NULL, NULL);
#endif

	if (!ofmt)
	{
		fprintf(stderr, "%s Could not find MPEG-TS muxer\n", getSystemTime( timeChar));
		goto error;
	}

	oc = avformat_alloc_context();
	if (!oc)
	{
		fprintf(stderr, "%s Could not allocated output context\n", getSystemTime( timeChar));
		goto error;
	}
	oc->oformat = ofmt;

	video_index = -1;
	audio_index = -1;

	for (i = 0; i < ic->nb_streams && (video_index < 0 || audio_index < 0); i++)
	{
		switch (ic->streams[i]->codec->codec_type)
		{
		case AVMEDIA_TYPE_VIDEO:
			video_index = i;
			ic->streams[i]->discard = AVDISCARD_NONE;
			video_st = add_output_stream(oc, ic->streams[i]);
			break;
		case AVMEDIA_TYPE_AUDIO:
			audio_index = i;
			ic->streams[i]->discard = AVDISCARD_NONE;
			audio_st = add_output_stream(oc, ic->streams[i]);
			break;
		default:
			ic->streams[i]->discard = AVDISCARD_ALL;
			break;
		}
	}

	av_dump_format(oc, 0, output_prefix, 1);

	if (video_index >= 0)
	{
		codec = avcodec_find_decoder(video_st->codec->codec_id);
		if (!codec)
		{
			fprintf(stderr,
					"%s Could not find video decoder, key frames will not be honored\n", getSystemTime( timeChar));
		}

		if (avcodec_open2(video_st->codec, codec, NULL) < 0)
		{
			fprintf(stderr,
					"%s Could not open video decoder, key frames will not be honored\n", getSystemTime( timeChar));
		}
	}

	snprintf(output_filename, strlen(output_prefix) + 15, "%s-%u.ts",
			output_prefix, output_index);

	if( 1 == if_save_keyframe)
	{ 
		snprintf(keyframeinfo_filename, strlen(output_prefix) + 15,
			"%s-%u.idx", output_prefix, output_index);
		obj = json_object_new_object();
		info_arr_obj = create_json_header(obj);
	}

	if (avio_open(&oc->pb, output_filename, AVIO_FLAG_WRITE) < 0)
	{
		fprintf(stderr, "%s Could not open '%s'\n", getSystemTime( timeChar),output_filename);
		goto error;
	}

	if (avformat_write_header(oc, NULL))
	{
		fprintf(stderr, "%s Could not write mpegts header to first output file\n", getSystemTime( timeChar));
		goto error;
	}

	prev_segment_time = (double) (ic->start_time) / (double) (AV_TIME_BASE);

	streamLace = createStreamLace(ic->nb_streams);

	// add by houmr
	int continue_error_cnt = 0;
	//int pushcnt = 0;
	//int popcnt = 0;
	int tscnt = 0;
	int audiopktcnt = 0;
	int videopktcnt = 0;
	int kfcnt = 0;
	int errpktcnt = 0;
	/////////////////////////
	do
	{
		double segment_time = 0.0;
		AVPacket packet;
		double packetStartTime = 0.0;
		double packetDuration = 0.0;

		if (!decode_done)
		{
			//fprintf(stdout, "%s av_read_frame() begin.\n", getSystemTime( timeChar));
			decode_done = av_read_frame(ic, &packet);
			//fprintf(stdout, "%s av_read_frame() end. packet.size=%d stream_index=%d duration=%d\n", getSystemTime( timeChar), packet.size, packet.stream_index, packet.duration);
			//fprintf(stdout, "%s decode_done=%d\n", getSystemTime( timeChar),decode_done);
			if (!decode_done)
			{
				if (packet.stream_index != video_index
						&& packet.stream_index != audio_index)
				{
					if( ++errpktcnt >= 10)
					{
						decode_done = 1;	
					}
					fprintf(stderr, "%s packet is not video or audio, packet.stream_index=%d\n", getSystemTime( timeChar), packet.stream_index);
					av_free_packet(&packet);
					continue;
				}
				errpktcnt = 0;

				/*printf("orgin : index - %d\t pts = %s\t duration=%d\n", packet.stream_index,
				 av_ts2str(packet.pts), packet.duration);*/
				// add by houmr
				/*if (adjust_pts(&packet, video_index, audio_index) < 0)
				{
					av_free_packet(&packet);
					continue;
				}
				*/
				/////////////////////////////////////
				double timeStamp =
						(double) (packet.pts)
								* (double) (ic->streams[packet.stream_index]->time_base.num)
								/ (double) (ic->streams[packet.stream_index]->time_base.den);

				if (av_dup_packet(&packet) < 0)
				{
					fprintf(stderr, "%s Could not duplicate packet\n" ,getSystemTime( timeChar));
					av_free_packet(&packet);
					break;
				}
				
				/*
				for(int i = 0; i < streamLace->numStreams; ++i)
				{ 
						fprintf(stdout, "streamLace[%d].size=%d\t", i, streamLace->streams[i]->size);
				}
				fprintf(stdout, "\n");
				*/
				insertPacket(streamLace, &packet, timeStamp);
			}
		}

		if (countPackets(streamLace) < 50 && !decode_done)
		{
			/* allow the queue to fill up so that the packets can be sorted properly */
			continue;
		}

		if (!removePacket(streamLace, &packet))
		{
			fprintf(stdout, "%s get packet failed!!\n", getSystemTime( timeChar));
			if (decode_done)
			{
				/* the queue is empty, we are done */
				break;
			}

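			/* Unreachable in a consistent state: the 50-packet guard above
			 * refills the queue whenever input remains, so abort in debug
			 * builds if we ever get here. */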
			assert(decode_done);
			continue;
		}

		//fprintf(stdout, "%s get 1 packet success. packet info: pts=%ld, dts=%ld\n", getSystemTime( timeChar), packet.pts, packet.dts);
		packetStartTime = (double) (packet.pts)
				* (double) (ic->streams[packet.stream_index]->time_base.num)
				/ (double) (ic->streams[packet.stream_index]->time_base.den);

		packetDuration = (double) (packet.duration)
				* (double) (ic->streams[packet.stream_index]->time_base.num)
				/ (double) (ic->streams[packet.stream_index]->time_base.den);

#if !defined(NDEBUG) && (defined(DEBUG) || defined(_DEBUG))
		if (av_log_get_level() >= AV_LOG_VERBOSE)
		fprintf(stderr,
				"%s stream %i, packet [%f, %f)\n",
				getSystemTime( timeChar),
				packet.stream_index,
				packetStartTime,
				packetStartTime + packetDuration);
#endif

		segment_duration = packetStartTime + packetDuration - prev_segment_time;

		// NOTE: segments are supposed to start on a keyframe.
		// If the keyframe interval and segment duration do not match
		// forcing the segment creation for "better seeking behavior"
		// will result in decoding artifacts after seeking or stream switching.
		if (packet.stream_index == video_index
				&& (packet.flags & AV_PKT_FLAG_KEY || strict_segment_duration))
		{
			//This is a video packet and (a key frame, or strict durations are forced)
			segment_time = packetStartTime;
		}
		else if (video_index < 0)
		{
			//This input doesn't contain a video stream
			segment_time = packetStartTime;
		}
		else
		{
			//This is an audio packet, or a video packet that is not a key frame
			segment_time = prev_segment_time;
		}

		//fprintf(stdout, "%s extra_duration_needed=%f\n", getSystemTime( timeChar), extra_duration_needed);
		if (segment_time - prev_segment_time + segment_duration_error_tolerance
				> target_segment_duration + extra_duration_needed)
		{
			fprintf(stdout, "%s segment_time=%lf prev_segment_time=%lf  > target_segment_duration=%lf  extra_duration_needed=%lf\n", getSystemTime( timeChar), segment_time, prev_segment_time,  target_segment_duration, extra_duration_needed);
			fprintf(stdout, "%s File %s contains %d PES packet, of which %d are audio packet, %d are video packet within %d key frame.\n", getSystemTime( timeChar), output_filename, tscnt, audiopktcnt, videopktcnt, kfcnt);
			fflush(stdout);
			/*
			for(int i = 0; i < streamLace->numStreams; ++i)
			{
					fprintf(stdout, "%s streamLace[%d].size=%d\t", getSystemTime( timeChar), i, streamLace->streams[i]->size);
			}
			*/
			tscnt = audiopktcnt = videopktcnt = kfcnt = 0;
			avio_flush(oc->pb);
			avio_close(oc->pb);

			// Keep track of accumulated rounding error to account for it in later chunks.
		/*
			double segment_duration = segment_time - prev_segment_time;
			int rounded_segment_duration = (int) (segment_duration + 0.5);
			extra_duration_needed += (double) rounded_segment_duration
					- segment_duration;
		*/
			double seg_dur = segment_time - prev_segment_time;
			int rounded_segment_duration = (int) (seg_dur + 0.5);
			extra_duration_needed += (target_segment_duration - seg_dur - segment_duration_error_tolerance);
			//fprintf(stdout, "%s ________extra_duration_needed = %lf\n", getSystemTime( timeChar), extra_duration_needed);
			

			updatePlaylist(playlist, playlist_filename, output_filename,
					output_index, rounded_segment_duration);

			
			snprintf(output_filename, strlen(output_prefix) + 15, "%s-%u.ts",
					output_prefix, ++output_index);
			//add by wanggm
			//Save the all the keyframe information into json file
			if( 1 == if_save_keyframe && NULL != obj)
			{ 
				save_json_to_file(keyframeinfo_filename, obj);
				obj = info_arr_obj = NULL;

				snprintf(keyframeinfo_filename, strlen(output_prefix) + 15,
					"%s-%u.idx", output_prefix, output_index);
			}


			if (avio_open(&oc->pb, output_filename, AVIO_FLAG_WRITE) < 0)
			{
				fprintf(stderr, "%s Could not open '%s'\n", getSystemTime( timeChar), output_filename);
				break;
			}

			// close when we find the 'kill' file
			if (kill_file)
			{
				FILE* fp = fopen("kill", "rb");
				if (fp)
				{
					fprintf(stderr, "%s user abort: found kill file\n", getSystemTime( timeChar));
					fclose(fp);
					remove("kill");
					decode_done = 1;
					removeAllPackets(streamLace);
				}
			}
			prev_segment_time = segment_time;
		}

		//add by wanggm.
		++tscnt;
		if( video_index == packet.stream_index)
		{
			++videopktcnt;	
			if(packet.flags & AV_PKT_FLAG_KEY)
			{ 
				++kfcnt;	
				if( 1 == if_save_keyframe)
				{
					//If it is key frame, it's information should be saved.
					//fprintf(stdout, "%s packet is keyframe, packet.pts=%ld\n", getSystemTime( timeChar), packet.pts);
					snprintf(keyframeinfo_filename, strlen(output_prefix) + 15,
					"%s-%u.idx", output_prefix, output_index);
					if (NULL == obj && NULL == info_arr_obj)
					{ 
						obj = json_object_new_object();
						info_arr_obj = create_json_header(obj);
					}
					avio_flush(oc->pb);		//flush the previous data into ts file.
					int64_t offset = avio_tell(oc->pb);	//Get the offset of this key frame in the file.
					save_keyframe_info(info_arr_obj, offset, packet.pts);
					//fprintf(stdout, "%s Keyframe.pos=%ld \tkeyframe.pts=%ld\n", getSystemTime( timeChar), offset, (long)packet.pts);
				}
			}
		}else if( audio_index == packet.stream_index)
		{
			++audiopktcnt;	
		}
		//fprintf(stdout, "%s packet is not keyframe.\n", getSystemTime( timeChar));

		ret = av_interleaved_write_frame(oc, &packet);

		if (ret < 0)
		{
			fprintf(stderr, "%s Warning: Could not write frame of stream\n", getSystemTime( timeChar));
			// add by houmr
			continue_error_cnt++;
			if (continue_error_cnt > 10)
			{
				av_free_packet(&packet);
				break;
			}
		}
		else if (ret > 0)
		{
			fprintf(stderr, "%s End of stream requested\n", getSystemTime( timeChar));
			av_free_packet(&packet);
			break;
		}
		else
		{
			// add by houmr error
			continue_error_cnt = 0;
			////////////////////////
		}
		av_free_packet(&packet);
	} while (!decode_done || countPackets(streamLace) > 0);

	av_write_trailer(oc);

	if (video_index >= 0)
	{
		avcodec_close(video_st->codec);
	}

	for (i = 0; i < oc->nb_streams; i++)
	{
		av_freep(&oc->streams[i]->codec);
		av_freep(&oc->streams[i]);
	}

	avio_close(oc->pb);
	av_free(oc);

	updatePlaylist(playlist, playlist_filename, output_filename, output_index,
			segment_duration);
	closePlaylist(playlist);
	releasePlaylist(&playlist);

	//add by wanggm
	if( 1 == if_save_keyframe && obj != NULL)
	{ 
		save_json_to_file(keyframeinfo_filename, obj);
	}

	if (pid_filename)
	{
		remove(pid_filename);
	}
	
	fflush(stdout);
	fflush(stderr);

	return 0;

error:
	if (pid_filename)
	{
		remove(pid_filename);
	}

	return 1;

}
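
An illustrative invocation, with the option letters taken from the argument parser above; the binary name and all paths are placeholders:

./segmenter -i input.ts -d 10 -o stream -x stream.m3u8 \
            -p http://example.com/hls/ -w 6 -s 1 -k 1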