Example #1
int main(int argc, char** argv) {
    if (argc != 2) {
        fprintf(stderr, "usage: %s input_file > output_file\n", argv[0]);
        exit(1);
    }

    const int out_channels = 2, out_samples = 512, sample_rate = 44100;

    const int max_buffer_size =
        av_samples_get_buffer_size(
            NULL, out_channels, out_samples, AV_SAMPLE_FMT_FLT, 1);

    // register supported formats and codecs
    av_register_all();

    // allocate empty format context
    // provides methods for reading input packets
    AVFormatContext* fmt_ctx = avformat_alloc_context();
    assert(fmt_ctx);

    // determine input file type and initialize format context
    if (avformat_open_input(&fmt_ctx, argv[1], NULL, NULL) != 0) {
        fprintf(stderr, "error: avformat_open_input()\n");
        exit(1);
    }

    // determine supported codecs for input file streams and add
    // them to format context
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        fprintf(stderr, "error: avformat_find_stream_info()\n");
        exit(1);
    }

#if 0
    av_dump_format(fmt_ctx, 0, argv[1], false);
#endif

    // find audio stream in format context
    size_t stream = 0;
    for (; stream < fmt_ctx->nb_streams; stream++) {
        if (fmt_ctx->streams[stream]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            break;
        }
    }
    if (stream == fmt_ctx->nb_streams) {
        fprintf(stderr, "error: no audio stream found\n");
        exit(1);
    }

    // get codec context for audio stream
    // provides methods for decoding input packets received from format context
    AVCodecContext* codec_ctx = fmt_ctx->streams[stream]->codec;
    assert(codec_ctx);

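    // some decoders leave channel_layout unset; default to stereo so the
    // resampler below gets a valid input layout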
    if (codec_ctx->channel_layout == 0) {
        codec_ctx->channel_layout = AV_CH_FRONT_LEFT | AV_CH_FRONT_RIGHT;
    }

    // find decoder for audio stream
    AVCodec* codec = avcodec_find_decoder(codec_ctx->codec_id);
    if (!codec) {
        fprintf(stderr, "error: avcodec_find_decoder()\n");
        exit(1);
    }

    // initialize codec context with decoder we've found
    if (avcodec_open2(codec_ctx, codec, NULL) < 0) {
        fprintf(stderr, "error: avcodec_open2()\n");
        exit(1);
    }

    // initialize converter from input audio stream to output stream
    // provides methods for converting decoded packets to output stream
    SwrContext* swr_ctx =
        swr_alloc_set_opts(NULL,
                           AV_CH_FRONT_LEFT | AV_CH_FRONT_RIGHT, // output
                           AV_SAMPLE_FMT_FLT,                    // output
                           sample_rate,                          // output
                           codec_ctx->channel_layout,  // input
                           codec_ctx->sample_fmt,      // input
                           codec_ctx->sample_rate,     // input
                           0,
                           NULL);
    if (!swr_ctx) {
        fprintf(stderr, "error: swr_alloc_set_opts()\n");
        exit(1);
    }
    if (swr_init(swr_ctx) < 0) {
        fprintf(stderr, "error: swr_init()\n");
        exit(1);
    }

    // create empty packet for input stream
    AVPacket packet;
    av_init_packet(&packet);
    packet.data = NULL;
    packet.size = 0;

    // allocate empty frame for decoding
    AVFrame* frame = av_frame_alloc();
    assert(frame);

    // allocate buffer for output stream
    uint8_t* buffer = (uint8_t*)av_malloc(max_buffer_size);
    assert(buffer);

    // read packet from input audio file
    while (av_read_frame(fmt_ctx, &packet) >= 0) {
        // skip non-audio packets (freeing them to avoid a leak)
        if (packet.stream_index != (int)stream) {
            av_free_packet(&packet);
            continue;
        }

        // decode packet to frame
        int got_frame = 0;
        if (avcodec_decode_audio4(codec_ctx, frame, &got_frame, &packet) < 0) {
            fprintf(stderr, "error: avcodec_decode_audio4()\n");
            exit(1);
        }

        if (!got_frame) {
            continue;
        }

        // convert input frame to output buffer
        int got_samples = swr_convert(
            swr_ctx,
            &buffer, out_samples,
            (const uint8_t **)frame->data, frame->nb_samples);

        if (got_samples < 0) {
            fprintf(stderr, "error: swr_convert()\n");
            exit(1);
        }

        while (got_samples > 0) {
            int buffer_size =
                av_samples_get_buffer_size(
                    NULL, out_channels, got_samples, AV_SAMPLE_FMT_FLT, 1);

            assert(buffer_size <= max_buffer_size);

            // write output buffer to stdout
            if (write(STDOUT_FILENO, buffer, buffer_size) != buffer_size) {
                fprintf(stderr, "error: write(stdout)\n");
                exit(1);
            }

            // process samples buffered inside swr context
            got_samples = swr_convert(swr_ctx, &buffer, out_samples, NULL, 0);
            if (got_samples < 0) {
                fprintf(stderr, "error: swr_convert()\n");
                exit(1);
            }
        }

        // free packet buffers filled by av_read_frame()
        av_free_packet(&packet);
    }

    av_free(buffer);
    av_frame_free(&frame);

    swr_free(&swr_ctx);

    avcodec_close(codec_ctx);
    avformat_close_input(&fmt_ctx);

    return 0;
}
Example #2
status_t
AVCodecDecoder::_NegotiateAudioOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::_NegotiateAudioOutputFormat()\n");

	media_multi_audio_format outputAudioFormat;
	outputAudioFormat = media_raw_audio_format::wildcard;
	outputAudioFormat.byte_order = B_MEDIA_HOST_ENDIAN;
	outputAudioFormat.frame_rate
		= fInputFormat.u.encoded_audio.output.frame_rate;
	outputAudioFormat.channel_count
		= fInputFormat.u.encoded_audio.output.channel_count;
	outputAudioFormat.format = fInputFormat.u.encoded_audio.output.format;
	outputAudioFormat.buffer_size
		= inOutFormat->u.raw_audio.buffer_size;
	// Check that format is not still a wild card!
	if (outputAudioFormat.format == 0) {
		TRACE("  format still a wild-card, assuming B_AUDIO_SHORT.\n");
		outputAudioFormat.format = media_raw_audio_format::B_AUDIO_SHORT;
	}
	size_t sampleSize = outputAudioFormat.format
		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
	// Check that channel count is not still a wild card!
	if (outputAudioFormat.channel_count == 0) {
		TRACE("  channel_count still a wild-card, assuming stereo.\n");
		outputAudioFormat.channel_count = 2;
	}

	if (outputAudioFormat.buffer_size == 0) {
		outputAudioFormat.buffer_size = 512
			* sampleSize * outputAudioFormat.channel_count;
	}
	inOutFormat->type = B_MEDIA_RAW_AUDIO;
	inOutFormat->u.raw_audio = outputAudioFormat;

	fContext->bit_rate = (int)fInputFormat.u.encoded_audio.bit_rate;
	fContext->frame_size = (int)fInputFormat.u.encoded_audio.frame_size;
	fContext->sample_rate
		= (int)fInputFormat.u.encoded_audio.output.frame_rate;
	fContext->channels = outputAudioFormat.channel_count;
	fContext->block_align = fBlockAlign;
	fContext->extradata = (uint8_t*)fExtraData;
	fContext->extradata_size = fExtraDataSize;

	// TODO: This probably needs to go away, there is some misconception
	// about extra data / info buffer and meta data. See
	// Reader::GetStreamInfo(). The AVFormatReader puts extradata and
	// extradata_size into media_format::MetaData(), but used to ignore
	// the infoBuffer passed to GetStreamInfo(). I think this may be why
	// the code below was added.
	if (fInputFormat.MetaDataSize() > 0) {
		fContext->extradata = (uint8_t*)fInputFormat.MetaData();
		fContext->extradata_size = fInputFormat.MetaDataSize();
	}

	TRACE("  bit_rate %d, sample_rate %d, channels %d, block_align %d, "
		"extradata_size %d\n", fContext->bit_rate, fContext->sample_rate,
		fContext->channels, fContext->block_align, fContext->extradata_size);

	// close any previous instance
	if (fCodecInitDone) {
		fCodecInitDone = false;
		avcodec_close(fContext);
	}

	// open new
	int result = avcodec_open2(fContext, fCodec, NULL);
	fCodecInitDone = (result >= 0);

	fStartTime = 0;
	fOutputFrameSize = sampleSize * outputAudioFormat.channel_count;
	fOutputFrameCount = outputAudioFormat.buffer_size / fOutputFrameSize;
	fOutputFrameRate = outputAudioFormat.frame_rate;

	TRACE("  bit_rate = %d, sample_rate = %d, channels = %d, init = %d, "
		"output frame size: %d, count: %ld, rate: %.2f\n",
		fContext->bit_rate, fContext->sample_rate, fContext->channels,
		result, fOutputFrameSize, fOutputFrameCount, fOutputFrameRate);

	fChunkBuffer = NULL;
	fChunkBufferOffset = 0;
	fChunkBufferSize = 0;
	fAudioDecodeError = false;
	fOutputBufferOffset = 0;
	fOutputBufferSize = 0;

	av_init_packet(&fTempPacket);

	inOutFormat->require_flags = 0;
	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

	if (!fCodecInitDone) {
		TRACE("avcodec_open() failed!\n");
		return B_ERROR;
	}

	return B_OK;
}
Example #3
HRESULT decklink_input_callback::VideoInputFrameArrived(
    IDeckLinkVideoInputFrame *videoFrame, IDeckLinkAudioInputPacket *audioFrame)
{
    void *frameBytes;
    void *audioFrameBytes;
    BMDTimeValue frameTime;
    BMDTimeValue frameDuration;
    int64_t wallclock = 0;

    ctx->frameCount++;
    if (ctx->audio_pts_source == PTS_SRC_WALLCLOCK || ctx->video_pts_source == PTS_SRC_WALLCLOCK)
        wallclock = av_gettime_relative();

    // Handle Video Frame
    if (videoFrame) {
        AVPacket pkt;
        av_init_packet(&pkt);
        if (ctx->frameCount % 25 == 0) {
            unsigned long long qsize = avpacket_queue_size(&ctx->queue);
            av_log(avctx, AV_LOG_DEBUG,
                    "Frame received (#%lu) - Valid (%liB) - QSize %fMB\n",
                    ctx->frameCount,
                    videoFrame->GetRowBytes() * videoFrame->GetHeight(),
                    (double)qsize / 1024 / 1024);
        }

        videoFrame->GetBytes(&frameBytes);
        videoFrame->GetStreamTime(&frameTime, &frameDuration,
                                  ctx->video_st->time_base.den);

        if (videoFrame->GetFlags() & bmdFrameHasNoInputSource) {
            if (videoFrame->GetPixelFormat() == bmdFormat8BitYUV) {
                // no input signal: fill the frame with 8-bit YUV color bars
                // (each 32-bit word packs two horizontal pixels)
                unsigned bars[8] = {
                    0xEA80EA80, 0xD292D210, 0xA910A9A5, 0x90229035,
                    0x6ADD6ACA, 0x51EF515A, 0x286D28EF, 0x10801080 };
                int width  = videoFrame->GetWidth();
                int height = videoFrame->GetHeight();
                unsigned *p = (unsigned *)frameBytes;

                for (int y = 0; y < height; y++) {
                    for (int x = 0; x < width; x += 2)
                        *p++ = bars[(x * 8) / width];
                }
            }

            if (!no_video) {
                av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - No input signal detected "
                        "- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
            }
            no_video = 1;
        } else {
            if (no_video) {
                av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - Input returned "
                        "- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
            }
            no_video = 0;
        }

        pkt.pts = get_pkt_pts(videoFrame, audioFrame, wallclock, ctx->video_pts_source, ctx->video_st->time_base, &initial_video_pts);
        pkt.dts = pkt.pts;

        pkt.duration = frameDuration;
        // TODO: make sure this still applies
        pkt.flags       |= AV_PKT_FLAG_KEY;
        pkt.stream_index = ctx->video_st->index;
        pkt.data         = (uint8_t *)frameBytes;
        pkt.size         = videoFrame->GetRowBytes() *
                           videoFrame->GetHeight();
        //fprintf(stderr,"Video Frame size %d ts %d\n", pkt.size, pkt.pts);

#if CONFIG_LIBZVBI
        if (!no_video && ctx->teletext_lines && videoFrame->GetPixelFormat() == bmdFormat8BitYUV && videoFrame->GetWidth() == 720) {
            IDeckLinkVideoFrameAncillary *vanc;
            AVPacket txt_pkt;
            uint8_t txt_buf0[1611]; // max 35 * 46 bytes decoded teletext lines + 1 byte data_identifier
            uint8_t *txt_buf = txt_buf0;

            if (videoFrame->GetAncillaryData(&vanc) == S_OK) {
                int i;
                int64_t line_mask = 1;
                txt_buf[0] = 0x10;    // data_identifier - EBU_data
                txt_buf++;
                for (i = 6; i < 336; i++, line_mask <<= 1) {
                    uint8_t *buf;
                    if ((ctx->teletext_lines & line_mask) && vanc->GetBufferForVerticalBlankingLine(i, (void**)&buf) == S_OK) {
                        if (teletext_data_unit_from_vbi_data(i, buf, txt_buf) >= 0)
                            txt_buf += 46;
                    }
                    if (i == 22)
                        i = 317;
                }
                vanc->Release();
                if (txt_buf - txt_buf0 > 1) {
                    int stuffing_units = (4 - ((45 + txt_buf - txt_buf0) / 46) % 4) % 4;
                    while (stuffing_units--) {
                        memset(txt_buf, 0xff, 46);
                        txt_buf[1] = 0x2c; // data_unit_length
                        txt_buf += 46;
                    }
                    av_init_packet(&txt_pkt);
                    txt_pkt.pts = pkt.pts;
                    txt_pkt.dts = pkt.dts;
                    txt_pkt.stream_index = ctx->teletext_st->index;
                    txt_pkt.data = txt_buf0;
                    txt_pkt.size = txt_buf - txt_buf0;
                    if (avpacket_queue_put(&ctx->queue, &txt_pkt) < 0) {
                        ++ctx->dropped;
                    }
                }
            }
        }
#endif

        if (avpacket_queue_put(&ctx->queue, &pkt) < 0) {
            ++ctx->dropped;
        }
    }

    // Handle Audio Frame
    if (audioFrame) {
        AVPacket pkt;
        BMDTimeValue audio_pts;
        av_init_packet(&pkt);

        //hack among hacks
        pkt.size = audioFrame->GetSampleFrameCount() * ctx->audio_st->codecpar->channels * (16 / 8);
        audioFrame->GetBytes(&audioFrameBytes);
        audioFrame->GetPacketTime(&audio_pts, ctx->audio_st->time_base.den);
        pkt.pts = get_pkt_pts(videoFrame, audioFrame, wallclock, ctx->audio_pts_source, ctx->audio_st->time_base, &initial_audio_pts);
        pkt.dts = pkt.pts;

        //fprintf(stderr,"Audio Frame size %d ts %d\n", pkt.size, pkt.pts);
        pkt.flags       |= AV_PKT_FLAG_KEY;
        pkt.stream_index = ctx->audio_st->index;
        pkt.data         = (uint8_t *)audioFrameBytes;

        if (avpacket_queue_put(&ctx->queue, &pkt) < 0) {
            ++ctx->dropped;
        }
    }

    return S_OK;
}
Example #4
static int decode_audio(sh_audio_t *sh_audio,unsigned char *buf,int minlen,int maxlen)
{
    unsigned char *start=NULL;
    int y,len=-1, got_frame;
    AVFrame *frame = avcodec_alloc_frame();

    if (!frame)
        return AVERROR(ENOMEM);

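    // keep decoding packets until at least minlen bytes of samples are produced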
    while(len<minlen) {
        AVPacket pkt;
        int len2=maxlen;
        double pts;
        int x=ds_get_packet_pts(sh_audio->ds,&start, &pts);
        if(x<=0) {
            start = NULL;
            x = 0;
            ds_parse(sh_audio->ds, &start, &x, MP_NOPTS_VALUE, 0);
            if (x <= 0)
                break; // error
        } else {
            int in_size = x;
            int consumed = ds_parse(sh_audio->ds, &start, &x, pts, 0);
            sh_audio->ds->buffer_pos -= in_size - consumed;
        }

        av_init_packet(&pkt);
        pkt.data = start;
        pkt.size = x;
        if (pts != MP_NOPTS_VALUE) {
            sh_audio->pts = pts;
            sh_audio->pts_bytes = 0;
        }
        y=avcodec_decode_audio4(sh_audio->context, frame, &got_frame, &pkt);
//printf("return:%d samples_out:%d bitstream_in:%d sample_sum:%d\n", y, len2, x, len); fflush(stdout);
        // LATM may need many packets to find mux info
        if (y == AVERROR(EAGAIN))
            continue;
        if(y<0) {
            mp_msg(MSGT_DECAUDIO,MSGL_V,"lavc_audio: error\n");
            break;
        }
        if(!sh_audio->parser && y<x)
            sh_audio->ds->buffer_pos+=y-x;  // put back data (HACK!)
        if (!got_frame)
            continue;
        len2 = copy_samples(sh_audio->context, frame, buf, maxlen);
        if (len2 < 0) {
            av_free(frame);
            return len2;
        }
        if(len2>0) {
            if (((AVCodecContext *)sh_audio->context)->channels >= 5) {
                int samplesize = av_get_bytes_per_sample(((AVCodecContext *)
                                 sh_audio->context)->sample_fmt);
                reorder_channel_nch(buf, AF_CHANNEL_LAYOUT_LAVC_DEFAULT,
                                    AF_CHANNEL_LAYOUT_MPLAYER_DEFAULT,
                                    ((AVCodecContext *)sh_audio->context)->channels,
                                    len2 / samplesize, samplesize);
            }
            //len=len2;break;
            if(len<0) len=len2;
            else len+=len2;
            buf+=len2;
            maxlen -= len2;
            sh_audio->pts_bytes += len2;
        }
        mp_dbg(MSGT_DECAUDIO,MSGL_DBG2,"Decoded %d -> %d  \n",y,len2);

        if (setup_format(sh_audio, sh_audio->context))
            break;
    }

    av_free(frame);
    return len;
}
Example #5
double VideoStream::ActuallyEncodeFrame( const uint8_t *buffer, int buffer_size, bool add_timestamp, unsigned int timestamp )
{
#ifdef HAVE_LIBSWSCALE
	static struct SwsContext *img_convert_ctx = 0;
#endif // HAVE_LIBSWSCALE

	AVCodecContext *c = ost->codec;

	if ( c->pix_fmt != pf )
	{
		memcpy( tmp_opicture->data[0], buffer, buffer_size );
#ifdef HAVE_LIBSWSCALE
		if ( !img_convert_ctx )
		{
			img_convert_ctx = sws_getCachedContext( NULL, c->width, c->height, pf, c->width, c->height, c->pix_fmt, SWS_BICUBIC, NULL, NULL, NULL );
			if ( !img_convert_ctx )
				Panic( "Unable to initialise image scaling context" );
		}
		sws_scale( img_convert_ctx, tmp_opicture->data, tmp_opicture->linesize, 0, c->height, opicture->data, opicture->linesize );
#else // HAVE_LIBSWSCALE
		Fatal( "swscale is required for MPEG mode" );
#endif // HAVE_LIBSWSCALE
	}
	else
	{
		memcpy( opicture->data[0], buffer, buffer_size );
	}
	AVFrame *opicture_ptr = opicture;
	
	AVPacket *pkt = packet_buffers[packet_index];
	av_init_packet( pkt );
    int got_packet = 0;
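	// raw-picture formats take the picture struct itself as the packet payload
	// instead of an encoded bitstream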
	if ( of->flags & AVFMT_RAWPICTURE )
	{
#if LIBAVCODEC_VERSION_CHECK(52, 30, 2, 30, 2)
		pkt->flags |= AV_PKT_FLAG_KEY;
#else
		pkt->flags |= PKT_FLAG_KEY;
#endif
		pkt->stream_index = ost->index;
		pkt->data = (uint8_t *)opicture_ptr;
		pkt->size = sizeof (AVPicture);
        got_packet = 1;
	}
	else
	{
		opicture_ptr->pts = c->frame_number;
		opicture_ptr->quality = c->global_quality;

#if LIBAVFORMAT_VERSION_CHECK(54, 1, 0, 2, 100)
		int ret = avcodec_encode_video2( c, pkt, opicture_ptr, &got_packet );
		if ( ret != 0 )
		{
			Fatal( "avcodec_encode_video2 failed with errorcode %d \"%s\"", ret, av_err2str( ret ) );
		}
#else
		int out_size = avcodec_encode_video( c, video_outbuf, video_outbuf_size, opicture_ptr );
		got_packet = out_size > 0 ? 1 : 0;
		pkt->data = got_packet ? video_outbuf : NULL;
		pkt->size = got_packet ? out_size : 0;
#endif
		if ( got_packet )
		{
			if ( c->coded_frame->key_frame )
			{
#if LIBAVCODEC_VERSION_CHECK(52, 30, 2, 30, 2)
				pkt->flags |= AV_PKT_FLAG_KEY;
#else
				pkt->flags |= PKT_FLAG_KEY;
#endif
			}

			if ( pkt->pts != (int64_t)AV_NOPTS_VALUE )
			{
				pkt->pts = av_rescale_q( pkt->pts, c->time_base, ost->time_base );
			}
			if ( pkt->dts != (int64_t)AV_NOPTS_VALUE )
			{
				pkt->dts = av_rescale_q( pkt->dts, c->time_base, ost->time_base );
			}
			pkt->duration = av_rescale_q( pkt->duration, c->time_base, ost->time_base );
			pkt->stream_index = ost->index;
		}
	}
    
	return ( opicture_ptr->pts);
}
Example #6
/*
 * Audio decoding.
 */
static void audio_decode_example(const char *outfilename, const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c= NULL;
    int len;
    FILE *f, *outfile;
    uint8_t inbuf[AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
    AVPacket avpkt;
    AVFrame *decoded_frame = NULL;

    av_init_packet(&avpkt);

    printf("Audio decoding\n");

    /* find the mpeg audio decoder */
    codec = avcodec_find_decoder(CODEC_ID_MP2);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "rb");
    if (!f) {
        fprintf(stderr, "could not open %s\n", filename);
        exit(1);
    }
    outfile = fopen(outfilename, "wb");
    if (!outfile) {
        av_free(c);
        exit(1);
    }

    /* decode until eof */
    avpkt.data = inbuf;
    avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);

    while (avpkt.size > 0) {
        int got_frame = 0;

        if (!decoded_frame) {
            if (!(decoded_frame = avcodec_alloc_frame())) {
                fprintf(stderr, "out of memory\n");
                exit(1);
            }
        } else
            avcodec_get_frame_defaults(decoded_frame);

        len = avcodec_decode_audio4(c, decoded_frame, &got_frame, &avpkt);
        if (len < 0) {
            fprintf(stderr, "Error while decoding\n");
            exit(1);
        }
        if (got_frame) {
            /* if a frame has been decoded, output it */
            int data_size = av_samples_get_buffer_size(NULL, c->channels,
                                                       decoded_frame->nb_samples,
                                                       c->sample_fmt, 1);
            fwrite(decoded_frame->data[0], 1, data_size, outfile);
        }
        avpkt.size -= len;
        avpkt.data += len;
        if (avpkt.size < AUDIO_REFILL_THRESH) {
            /* Refill the input buffer, to avoid trying to decode
             * incomplete frames. Instead of this, one could also use
             * a parser, or use a proper container format through
             * libavformat. */
            memmove(inbuf, avpkt.data, avpkt.size);
            avpkt.data = inbuf;
            len = fread(avpkt.data + avpkt.size, 1,
                        AUDIO_INBUF_SIZE - avpkt.size, f);
            if (len > 0)
                avpkt.size += len;
        }
    }

    fclose(outfile);
    fclose(f);

    avcodec_close(c);
    av_free(c);
    av_free(decoded_frame);
}
Example #7
File: sd_lavc.c Project: 0x0all/mpv
static void decode(struct sd *sd, struct demux_packet *packet)
{
    struct MPOpts *opts = sd->opts;
    struct sd_lavc_priv *priv = sd->priv;
    AVCodecContext *ctx = priv->avctx;
    double pts = packet->pts;
    double duration = packet->duration;
    AVSubtitle sub;
    AVPacket pkt;

    // libavformat sets duration==0, even if the duration is unknown.
    // Assume there are no bitmap subs that actually use duration==0 for
    // hidden subtitle events.
    if (duration == 0)
        duration = -1;

    if (pts == MP_NOPTS_VALUE)
        MP_WARN(sd, "Subtitle with unknown start time.\n");

    av_init_packet(&pkt);
    pkt.data = packet->buffer;
    pkt.size = packet->len;
    int got_sub;
    int res = avcodec_decode_subtitle2(ctx, &sub, &got_sub, &pkt);
    if (res < 0 || !got_sub)
        return;

    if (pts != MP_NOPTS_VALUE) {
        if (sub.end_display_time > sub.start_display_time)
            duration = (sub.end_display_time - sub.start_display_time) / 1000.0;
        pts += sub.start_display_time / 1000.0;
    }
    double endpts = MP_NOPTS_VALUE;
    if (pts != MP_NOPTS_VALUE && duration >= 0)
        endpts = pts + duration;

    // set end time of previous sub
    if (priv->subs[0].endpts == MP_NOPTS_VALUE || priv->subs[0].endpts > pts)
        priv->subs[0].endpts = pts;

    alloc_sub(priv);
    struct sub *current = &priv->subs[0];

    current->valid = true;
    current->pts = pts;
    current->endpts = endpts;
    current->avsub = sub;

    MP_TARRAY_GROW(priv, current->inbitmaps, sub.num_rects);
    MP_TARRAY_GROW(priv, current->imgs, sub.num_rects);

    for (int i = 0; i < sub.num_rects; i++) {
        struct AVSubtitleRect *r = sub.rects[i];
        struct sub_bitmap *b = &current->inbitmaps[current->count];
        struct osd_bmp_indexed *img = &current->imgs[current->count];
        if (r->type != SUBTITLE_BITMAP) {
            MP_ERR(sd, "unsupported subtitle type from libavcodec\n");
            continue;
        }
        if (!(r->flags & AV_SUBTITLE_FLAG_FORCED) && opts->forced_subs_only)
            continue;
        if (r->w <= 0 || r->h <= 0)
            continue;
        img->bitmap = r->pict.data[0];
        assert(r->nb_colors > 0);
        assert(r->nb_colors * 4 <= sizeof(img->palette));
        memcpy(img->palette, r->pict.data[1], r->nb_colors * 4);
        b->bitmap = img;
        b->stride = r->pict.linesize[0];
        b->w = r->w;
        b->h = r->h;
        b->x = r->x;
        b->y = r->y;
        current->count++;
    }
}
Example #8
int main(int argc, char *argv[]) {

	SDL_Event event;
	VideoState *is;
	int i;
	puts("start");
	global_mutex_lock = SDL_CreateMutex();

	is = av_mallocz(sizeof(VideoState));
	if (argc < 2){
		fprintf(stderr, "Usage: test <file>\n");
		exit(1);
	}
		
	av_register_all();		// Register all formats and codecs
	puts("avregister");
	//if (av_register_protocol(&e2URLProtocol) < 0){
	//	printf("Error - URL protocol \n");
	//	exit(-1)
	//}
	if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)){
		fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
		exit(1);
	}
	for(i=0; i<MAX_CHANNELS;i++){
		// Make a screen to put our video
		#ifndef __DARWIN__
		screen[i] = SDL_SetVideoMode(640, 480, 0, 0);
		#else
		screen[i] = SDL_SetVideoMode(640, 480, 24, 0);
		#endif
		if (!screen[i]){
			fprintf(stderr, "SDL: could not set video mode - exiting\n");
			exit(1);
		}
	}
	for(i=0; i<MAX_CHANNELS;i++){
		global_video_state[i] = av_mallocz(sizeof(VideoState));
		global_video_state[i]->videoIndex = i;
		puts("screen created");
		printf("i is: %d\n",i);
		av_strlcpy(global_video_state[i]->filename, argv[i+1], sizeof(global_video_state[i]->filename));
		puts("avstrlcpy");	
		global_video_state[i]->pictq_mutex = SDL_CreateMutex();
		global_video_state[i]->pictq_cond = SDL_CreateCond();
		schedule_refresh(global_video_state[i], 40);
		global_video_state[i]->av_sync_type = DEFAULT_AV_SYNC_TYPE;
		global_video_state[i]->parse_tid = SDL_CreateThread(decode_thread, global_video_state[i]);
		puts("main var created");
		if (!global_video_state[i]->parse_tid) {
			av_free(global_video_state[i]);
			return -1;
		}
	}
	av_init_packet(&f_pkt);
	puts("av_init_packet");
	f_pkt.data = (unsigned char*)"FLUSH";

	for (;;) {
		double inc , pos;
		SDL_WaitEvent(&event);
		switch (event.type) {
			case SDL_KEYDOWN:
				switch (event.key.keysym.sym) {
					case SDLK_LEFT:
						inc = -10.0;
						goto do_seek;
					case SDLK_RIGHT:
						inc = 10.0;
						goto do_seek;
					case SDLK_UP:
						inc = 60.0;
						goto do_seek;
					case SDLK_DOWN:
						inc = -60.0;
						goto do_seek;
					do_seek:
						SDL_LockMutex(global_mutex_lock);
						if (global_video_state[global_videoIndex]){
							pos = get_master_clock(global_video_state[global_videoIndex]);
							pos += inc;
							stream_seek(global_video_state[global_videoIndex],(int64_t)(pos *AV_TIME_BASE),inc);
						}
						SDL_UnlockMutex(global_mutex_lock);
						break;
					case SDLK_b:
						global_video_state[global_videoIndex]->color_req = 'b';
						break;
					case SDLK_r:
						global_video_state[global_videoIndex]->color_req = 'r';
						break;
					case SDLK_g:
						global_video_state[global_videoIndex]->color_req = 'g';
						break;
					case SDLK_w:
						global_video_state[global_videoIndex]->color_req = 'w';
						break;
					case SDLK_n:
						global_video_state[global_videoIndex]->color_req = 'n';
						break;
					case SDLK_1:
						change_channel(1);
						break;
					case SDLK_2:
						change_channel(2);
						break;
					case SDLK_3:
						change_channel(3);
						break;
					case SDLK_4:
						change_vidchannel(1);
						break;
					case SDLK_5:
						change_vidchannel(2);
						break;
					case SDLK_6:
						change_vidchannel(3);
						break;
					case SDLK_7:
						change_audchannel(1);
						break;
					case SDLK_8:
						change_audchannel(2);
						break;
					case SDLK_9:
						change_audchannel(3);
						break;
				default:
					break;
				}
				break;
			case FF_QUIT_EVENT:
			case SDL_QUIT:
				for(i=0; i<MAX_CHANNELS; i++){
					global_video_state[i]->quit = 1;
					SDL_CondSignal(global_video_state[i]->audioq.cond);
					SDL_CondSignal(global_video_state[i]->videoq.cond);
				}
				SDL_Quit();
				exit(0);
				break;
			case FF_ALLOC_EVENT:
				alloc_picture(event.user.data1);
				break;
			case FF_REFRESH_EVENT:
				video_refresh_timer(event.user.data1);
				break;
			default:
				break;
		}
	}
	return 0;

}
Example #9
int RemoteCameraRtsp::Capture( Image &image )
{
	AVPacket packet;
	uint8_t* directbuffer;
	int frameComplete = false;
	
	/* Request a writeable buffer of the target image */
	directbuffer = image.WriteBuffer(width, height, colours, subpixelorder);
	if(directbuffer == NULL) {
		Error("Failed requesting writeable buffer for the captured image.");
		return (-1);
	}
	
    while ( true )
    {
        buffer.clear();
        if ( !rtspThread->isRunning() )
            return (-1);

        if ( rtspThread->getFrame( buffer ) )
        {
            Debug( 3, "Read frame %d bytes", buffer.size() );
            Debug( 4, "Address %p", buffer.head() );
            Hexdump( 4, buffer.head(), 16 );

            if ( !buffer.size() )
                return( -1 );

            if(mCodecContext->codec_id == AV_CODEC_ID_H264)
            {
                // SPS and PPS frames should be saved and appended to IDR frames
                int nalType = (buffer.head()[3] & 0x1f);
                
                // SPS
                if(nalType == 7)
                {
                    lastSps = buffer;
                    continue;
                }
                // PPS
                else if(nalType == 8)
                {
                    lastPps = buffer;
                    continue;
                }
                // IDR
                else if(nalType == 5)
                {
                    buffer += lastSps;
                    buffer += lastPps;
                }
            }

            av_init_packet( &packet );
            
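            // feed the assembled buffer to the decoder until it yields a complete frame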
            while ( !frameComplete && buffer.size() > 0 )
            {
                packet.data = buffer.head();
                packet.size = buffer.size();
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(52, 25, 0)
                int len = avcodec_decode_video2( mCodecContext, mRawFrame, &frameComplete, &packet );
#else
                int len = avcodec_decode_video( mCodecContext, mRawFrame, &frameComplete, packet.data, packet.size );
#endif
                if ( len < 0 )
                {
                    Error( "Error while decoding frame %d", frameCount );
                    Hexdump( Logger::ERROR, buffer.head(), buffer.size()>256?256:buffer.size() );
                    buffer.clear();
                    continue;
                }
                Debug( 2, "Frame: %d - %d/%d", frameCount, len, buffer.size() );
                //if ( buffer.size() < 400 )
                //    Hexdump( 0, buffer.head(), buffer.size() );

                buffer -= len;
            }

            if ( frameComplete ) {
                Debug( 3, "Got frame %d", frameCount );

                avpicture_fill( (AVPicture *)mFrame, directbuffer, imagePixFormat, width, height );

#if HAVE_LIBSWSCALE
                if ( mConvertContext == NULL ) {
                    if ( config.cpu_extensions && sseversion >= 20 ) {
                        mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC | SWS_CPU_CAPS_SSE2, NULL, NULL, NULL );
                    } else {
                        mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC, NULL, NULL, NULL );
                    }
                    if ( mConvertContext == NULL )
                        Fatal( "Unable to create conversion context" );
                }

                if ( sws_scale( mConvertContext, mRawFrame->data, mRawFrame->linesize, 0, mCodecContext->height, mFrame->data, mFrame->linesize ) < 0 )
                    Fatal( "Unable to convert raw format %u to target format %u at frame %d", mCodecContext->pix_fmt, imagePixFormat, frameCount );
#else // HAVE_LIBSWSCALE
                Fatal( "You must compile ffmpeg with the --enable-swscale option to use RTSP cameras" );
#endif // HAVE_LIBSWSCALE

                frameCount++;
            } /* frame complete */

            av_free_packet( &packet );
        } /* getFrame() */

        if ( frameComplete )
            return (0);
    }
    return (0);
}
Example #10
  int ExternalOutput::writeVideoData(char* buf, int len){
    if (in!=NULL){
      rtpheader *head = (rtpheader*) buf;
      if (head->payloadtype == RED_90000_PT) {
        int totalLength = 12;

        if (head->extension) {
          totalLength += ntohs(head->extensionlength)*4 + 4; // RTP Extension header
        }
        int rtpHeaderLength = totalLength;
        redheader *redhead = (redheader*) (buf + totalLength);

        //redhead->payloadtype = remoteSdp_.inOutPTMap[redhead->payloadtype];
        if (redhead->payloadtype == VP8_90000_PT) {
          while (redhead->follow) {
            totalLength += redhead->getLength() + 4; // RED header
            redhead = (redheader*) (buf + totalLength);
          }
          // Parse RED packet to VP8 packet.
          // Copy RTP header
          memcpy(deliverMediaBuffer_, buf, rtpHeaderLength);
          // Copy payload data
          memcpy(deliverMediaBuffer_ + totalLength, buf + totalLength + 1, len - totalLength - 1);
          // Copy payload type
          rtpheader *mediahead = (rtpheader*) deliverMediaBuffer_;
          mediahead->payloadtype = redhead->payloadtype;
          buf = reinterpret_cast<char*>(deliverMediaBuffer_);
          len = len - 1 - totalLength + rtpHeaderLength;
        }
      }
      int estimatedFps=0;
      int ret = in->unpackageVideo(reinterpret_cast<unsigned char*>(buf), len,
          unpackagedBufferpart_, &gotUnpackagedFrame_, &estimatedFps);

      if (ret < 0)
        return 0;
      
      if (videoCodec_ == NULL) {
        if ((estimatedFps!=0)&&((estimatedFps < prevEstimatedFps_*(1-0.2))||(estimatedFps > prevEstimatedFps_*(1+0.2)))){
          prevEstimatedFps_ = estimatedFps;
        }
        if (warmupfpsCount_++ == 20){
          if (prevEstimatedFps_==0){
            warmupfpsCount_ = 0;
            return 0;
          }
          if (!this->initContext()){
            ELOG_ERROR("Context cannot be initialized properly, closing...");
            return -1;
          }
        }
        return 0;
      }

      unpackagedSize_ += ret;
      unpackagedBufferpart_ += ret;
      if (unpackagedSize_ > UNPACKAGE_BUFFER_SIZE){
        ELOG_ERROR("Unpackaged size bigget than buffer %d", unpackagedSize_);
      }
      if (gotUnpackagedFrame_ && videoCodec_!=NULL) {
        timeval time;
        gettimeofday(&time, NULL);
        unsigned long long millis = (time.tv_sec * 1000) + (time.tv_usec / 1000);
        if (initTime_ == 0) {
          initTime_ = millis;
        }
        if (millis < initTime_)
        {
          ELOG_WARN("initTime is smaller than currentTime, possible problems when recording ");
        }
        unpackagedBufferpart_ -= unpackagedSize_;

        AVPacket avpkt;
        av_init_packet(&avpkt);
        avpkt.data = unpackagedBufferpart_;
        avpkt.size = unpackagedSize_;
        avpkt.pts = millis - initTime_;
        avpkt.stream_index = 0;
        av_write_frame(context_, &avpkt);
        av_free_packet(&avpkt);
        gotUnpackagedFrame_ = 0;
        unpackagedSize_ = 0;
        unpackagedBufferpart_ = unpackagedBuffer_;

      }
    }
    return 0;
  }
Example #11
int get_embedded_picture(State **ps, AVPacket *pkt) {
	printf("get_embedded_picture\n");
	int i = 0;
	int got_packet = 0;
	AVFrame *frame = NULL;
	
	State *state = *ps;

	if (!state || !state->pFormatCtx) {
		return FAILURE;
	}

    // read the format headers
    if (state->pFormatCtx->iformat->read_header(state->pFormatCtx) < 0) {
    	printf("Could not read the format header\n");
    	return FAILURE;
    }

    // find the first attached picture, if available
    for (i = 0; i < state->pFormatCtx->nb_streams; i++) {
        if (state->pFormatCtx->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC) {
        	printf("Found album art\n");
        	*pkt = state->pFormatCtx->streams[i]->attached_pic;
        	
        	// Is this a packet from the video stream?
        	if (pkt->stream_index == state->video_stream) {
        		int codec_id = state->video_st->codec->codec_id;
        		
        		// If the image isn't already in a supported format convert it to one
        		if (!is_supported_format(codec_id)) {
        			int got_frame = 0;
        			
                    av_init_packet(pkt);
        			
   			        frame = avcodec_alloc_frame();
        			    	
   			        if (!frame) {
   			        	break;
        			}
   			        
        			if (avcodec_decode_video2(state->video_st->codec, frame, &got_frame, pkt) <= 0) {
        				break;
        			}

        			// Did we get a video frame?
        			if (got_frame) {
        				AVPacket packet;
        	            av_init_packet(&packet);
        	            packet.data = NULL;
        	            packet.size = 0;
        				convert_image(state->video_st->codec, frame, &packet, &got_packet);
        				*pkt = packet;
        				break;
        			}
        		} else {
                	av_init_packet(pkt);
                	pkt->data = state->pFormatCtx->streams[i]->attached_pic.data;
                	pkt->size = state->pFormatCtx->streams[i]->attached_pic.size;
        			
        			got_packet = 1;
        			break;
        		}
        	}
        }
    }

	av_free(frame);

	if (got_packet) {
		return SUCCESS;
	} else {
		return FAILURE;
	}
}
Example #12
static void
audio_process_audio(audio_decoder_t *ad, media_buf_t *mb)
{
  const audio_class_t *ac = ad->ad_ac;
  AVFrame *frame = ad->ad_frame;
  media_pipe_t *mp = ad->ad_mp;
  media_queue_t *mq = &mp->mp_audio;
  int r;
  int got_frame;
  AVPacket avpkt;
  int offset = 0;

  if(mb->mb_skip || mb->mb_stream != mq->mq_stream) 
    return;

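  // mb_cw == NULL means the media buffer already holds raw 16-bit PCM;
  // otherwise the data must be decoded through the stream's codec context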
  while(offset < mb->mb_size) {

    if(mb->mb_cw == NULL) {
      frame->sample_rate = mb->mb_rate;
      frame->format = AV_SAMPLE_FMT_S16;
      switch(mb->mb_channels) {
      case 1:
	frame->channel_layout = AV_CH_LAYOUT_MONO;
	frame->nb_samples = mb->mb_size / 2;
	break;
      case 2:
	frame->channel_layout = AV_CH_LAYOUT_STEREO;
	frame->nb_samples = mb->mb_size / 4;
	break;
      default:
	abort();
      }
      frame->data[0] = mb->mb_data;
      frame->linesize[0] = 0;
      r = mb->mb_size;
      got_frame = 1;

    } else {

      media_codec_t *mc = mb->mb_cw;

      AVCodecContext *ctx = mc->ctx;

      if(mc->codec_id != ad->ad_in_codec_id) {
	AVCodec *codec = avcodec_find_decoder(mc->codec_id);
	TRACE(TRACE_DEBUG, "audio", "Codec changed to %s",
	      codec ? codec->name : "???");
	ad->ad_in_codec_id = mc->codec_id;

	audio_cleanup_spdif_muxer(ad);
  
	if(ac->ac_check_passthru != NULL && codec != NULL &&
	   ac->ac_check_passthru(ad, mc->codec_id)) {

	  audio_setup_spdif_muxer(ad, codec, mq);
	}
      }

      av_init_packet(&avpkt);
      avpkt.data = mb->mb_data + offset;
      avpkt.size = mb->mb_size - offset;

      if(ad->ad_spdif_muxer != NULL) {
	av_write_frame(ad->ad_spdif_muxer, &avpkt);
	avio_flush(ad->ad_spdif_muxer->pb);
	ad->ad_pts = mb->mb_pts;
	ad->ad_epoch = mb->mb_epoch;
	return;
      }

      if(ctx == NULL) {

	AVCodec *codec = avcodec_find_decoder(mc->codec_id);
	assert(codec != NULL); // Checked in libav.c

	ctx = mc->ctx = avcodec_alloc_context3(codec);

	if(ad->ad_stereo_downmix)
	  ctx->request_channels = 2;

	if(avcodec_open2(mc->ctx, codec, NULL) < 0) {
	  av_freep(&mc->ctx);
	  return;
	}
      }

      r = avcodec_decode_audio4(ctx, frame, &got_frame, &avpkt);
      if(r < 0)
	return;

      if(frame->sample_rate == 0) {
	frame->sample_rate = ctx->sample_rate;

	if(frame->sample_rate == 0 && mb->mb_cw->fmt_ctx)
	  frame->sample_rate = mb->mb_cw->fmt_ctx->sample_rate;
    
	if(frame->sample_rate == 0)
	  return;
      }

      if(frame->channel_layout == 0) {
	switch(ctx->channels) {
	case 1:
	  frame->channel_layout = AV_CH_LAYOUT_MONO;
	  break;
	case 2:
	  frame->channel_layout = AV_CH_LAYOUT_STEREO;
	  break;
	default:
	  return;
	}
      }

      if(mp->mp_stats)
	mp_set_mq_meta(mq, ctx->codec, ctx);

    }

    if(offset == 0 && mb->mb_pts != AV_NOPTS_VALUE) {
        
      int od = 0, id = 0;
          
      if(ad->ad_avr != NULL) {
	od = avresample_available(ad->ad_avr) *
	  1000000LL / ad->ad_out_sample_rate;
	id = avresample_get_delay(ad->ad_avr) *
	  1000000LL / frame->sample_rate;
      }
      ad->ad_pts = mb->mb_pts - od - id;
      ad->ad_epoch = mb->mb_epoch;
      //        printf("od=%-20d id=%-20d PTS=%-20ld oPTS=%-20ld\n",
      // od, id, mb->mb_pts, pts);
        
      if(mb->mb_drive_clock)
	mp_set_current_time(mp, mb->mb_pts - ad->ad_delay,
			    mb->mb_epoch, mb->mb_delta);
    }

    offset += r;

    if(got_frame) {

      if(frame->sample_rate    != ad->ad_in_sample_rate ||
	 frame->format         != ad->ad_in_sample_format ||
	 frame->channel_layout != ad->ad_in_channel_layout) {
          
	ad->ad_in_sample_rate    = frame->sample_rate;
	ad->ad_in_sample_format  = frame->format;
	ad->ad_in_channel_layout = frame->channel_layout;

	ac->ac_reconfig(ad);

	if(ad->ad_avr == NULL)
	  ad->ad_avr = avresample_alloc_context();
	else
	  avresample_close(ad->ad_avr);
          
	av_opt_set_int(ad->ad_avr, "in_sample_fmt",
		       ad->ad_in_sample_format, 0);
	av_opt_set_int(ad->ad_avr, "in_sample_rate", 
		       ad->ad_in_sample_rate, 0);
	av_opt_set_int(ad->ad_avr, "in_channel_layout",
		       ad->ad_in_channel_layout, 0);

	av_opt_set_int(ad->ad_avr, "out_sample_fmt",
		       ad->ad_out_sample_format, 0);
	av_opt_set_int(ad->ad_avr, "out_sample_rate",
		       ad->ad_out_sample_rate, 0);
	av_opt_set_int(ad->ad_avr, "out_channel_layout",
		       ad->ad_out_channel_layout, 0);
          
	char buf1[128];
	char buf2[128];

	av_get_channel_layout_string(buf1, sizeof(buf1), 
				     -1, ad->ad_in_channel_layout);
	av_get_channel_layout_string(buf2, sizeof(buf2), 
				     -1, ad->ad_out_channel_layout);

	TRACE(TRACE_DEBUG, "Audio",
	      "Converting from [%s %dHz %s] to [%s %dHz %s]",
	      buf1, ad->ad_in_sample_rate,
	      av_get_sample_fmt_name(ad->ad_in_sample_format),
	      buf2, ad->ad_out_sample_rate,
	      av_get_sample_fmt_name(ad->ad_out_sample_format));

	if(avresample_open(ad->ad_avr)) {
	  TRACE(TRACE_ERROR, "AudioQueue", "Unable to open resampler");
	  avresample_free(&ad->ad_avr);
	}

	if(ac->ac_set_volume != NULL) {
	  prop_set(mp->mp_prop_ctrl, "canAdjustVolume", PROP_SET_INT, 1);
	  ac->ac_set_volume(ad, ad->ad_vol_scale);
	}
      }
      if(ad->ad_avr != NULL)
	avresample_convert(ad->ad_avr, NULL, 0, 0,
			   frame->data, frame->linesize[0],
			   frame->nb_samples);
    }
  }
}
Example #13
static int avi_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIContext *avi = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned char tag[5];
    unsigned int flags=0;
    const int stream_index= pkt->stream_index;
    AVIStream *avist= s->streams[stream_index]->priv_data;
    AVCodecContext *enc= s->streams[stream_index]->codec;
    int size= pkt->size;

//    av_log(s, AV_LOG_DEBUG, "%"PRId64" %d %d\n", pkt->dts, avi->packet_count[stream_index], stream_index);
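    // pad with empty packets so the dts stays in step with the packet count
    // for audio streams without a fixed block size (block_align == 0)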
    while(enc->block_align==0 && pkt->dts != AV_NOPTS_VALUE && pkt->dts > avist->packet_count){
        AVPacket empty_packet;

        av_init_packet(&empty_packet);
        empty_packet.size= 0;
        empty_packet.data= NULL;
        empty_packet.stream_index= stream_index;
        avi_write_packet(s, &empty_packet);
//        av_log(s, AV_LOG_DEBUG, "dup %"PRId64" %d\n", pkt->dts, avi->packet_count[stream_index]);
    }
    avist->packet_count++;

    // Make sure to put an OpenDML chunk when the file size exceeds the limits
    if (pb->seekable &&
        (avio_tell(pb) - avi->riff_start > AVI_MAX_RIFF_SIZE)) {

        avi_write_ix(s);
        ff_end_tag(pb, avi->movi_list);

        if (avi->riff_id == 1)
            avi_write_idx1(s);

        ff_end_tag(pb, avi->riff_start);
        avi->movi_list = avi_start_new_riff(s, pb, "AVIX", "movi");
    }

    avi_stream2fourcc(tag, stream_index, enc->codec_type);
    if(pkt->flags&AV_PKT_FLAG_KEY)
        flags = 0x10;
    if (enc->codec_type == AVMEDIA_TYPE_AUDIO) {
       avist->audio_strm_length += size;
    }

    if (s->pb->seekable) {
        AVIIndex* idx = &avist->indexes;
        int cl = idx->entry / AVI_INDEX_CLUSTER_SIZE;
        int id = idx->entry % AVI_INDEX_CLUSTER_SIZE;
        if (idx->ents_allocated <= idx->entry) {
            idx->cluster = av_realloc(idx->cluster, (cl+1)*sizeof(void*));
            if (!idx->cluster)
                return -1;
            idx->cluster[cl] = av_malloc(AVI_INDEX_CLUSTER_SIZE*sizeof(AVIIentry));
            if (!idx->cluster[cl])
                return -1;
            idx->ents_allocated += AVI_INDEX_CLUSTER_SIZE;
        }

        idx->cluster[cl][id].flags = flags;
        idx->cluster[cl][id].pos = avio_tell(pb) - avi->movi_list;
        idx->cluster[cl][id].len = size;
        idx->entry++;
    }

    avio_write(pb, tag, 4);
    avio_wl32(pb, size);
    avio_write(pb, pkt->data, size);
    if (size & 1)
        avio_w8(pb, 0);

    avio_flush(pb);
    return 0;
}
Example #14
int dc_video_encoder_encode(VideoOutputFile *video_output_file, VideoScaledData *video_scaled_data)
{
	VideoDataNode *video_data_node;
	int ret;

	AVCodecContext *video_codec_ctx = video_output_file->codec_ctx;

	//FIXME: deadlock when pressing 'q' with BigBuckBunny_640x360.m4v
	ret = dc_consumer_lock(&video_output_file->consumer, &video_scaled_data->circular_buf);
	if (ret < 0) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Video encoder got an end of buffer!\n"));
		return -2;
	}

	if (video_scaled_data->circular_buf.size > 1)
		dc_consumer_unlock_previous(&video_output_file->consumer, &video_scaled_data->circular_buf);

	video_data_node = (VideoDataNode*)dc_consumer_consume(&video_output_file->consumer, &video_scaled_data->circular_buf);

	/*
	 * Set PTS (method 1)
	 */
	if (!video_output_file->use_source_timing) {
		video_data_node->vframe->pts = video_codec_ctx->frame_number;
	}

	/* Encoding video */
	{
		int got_packet = 0;
		AVPacket pkt;
		av_init_packet(&pkt);
		pkt.data = video_output_file->vbuf;
		pkt.size = video_output_file->vbuf_size;
		pkt.pts = pkt.dts = video_data_node->vframe->pkt_dts = video_data_node->vframe->pkt_pts = video_data_node->vframe->pts;
		video_data_node->vframe->pict_type = 0;

#ifdef LIBAV_ENCODE_OLD
		if (!video_output_file->segment_started)
			video_data_node->vframe->pict_type = FF_I_TYPE;

		video_output_file->encoded_frame_size = avcodec_encode_video(video_codec_ctx, video_output_file->vbuf, video_output_file->vbuf_size, video_data_node->vframe);
		got_packet = video_output_file->encoded_frame_size>=0 ? 1 : 0;
#else
		//this is correct but unfortunately doesn't work with some versions of FFMPEG (output is just grey video ...)

		if (!video_output_file->segment_started)
			video_data_node->vframe->pict_type = AV_PICTURE_TYPE_I;

		video_output_file->encoded_frame_size = avcodec_encode_video2(video_codec_ctx, &pkt, video_data_node->vframe, &got_packet);
#endif

		//this is not true with libav !
#ifndef GPAC_USE_LIBAV
		if (video_output_file->encoded_frame_size >= 0)
			video_output_file->encoded_frame_size = pkt.size;
#else
		if (got_packet)
			video_output_file->encoded_frame_size = pkt.size;
#endif
		if (video_output_file->encoded_frame_size >= 0) {
			if (got_packet) {
				video_codec_ctx->coded_frame->pts = video_codec_ctx->coded_frame->pkt_pts = pkt.pts;
				video_codec_ctx->coded_frame->pkt_dts = pkt.dts;
				video_codec_ctx->coded_frame->key_frame = (pkt.flags & AV_PKT_FLAG_KEY) ? 1 : 0;
			}
		}
	}

	dc_consumer_advance(&video_output_file->consumer);

	if (video_scaled_data->circular_buf.size == 1)
		dc_consumer_unlock_previous(&video_output_file->consumer, &video_scaled_data->circular_buf);

	if (video_output_file->encoded_frame_size < 0) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Error occured while encoding video frame.\n"));
		return -1;
	}

	GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[DashCast] Video %s Frame TS "LLU" encoded at UTC "LLU" ms\n", video_output_file->rep_id, /*video_data_node->source_number, */video_data_node->vframe->pts, gf_net_get_utc() ));

	/* if zero size, it means the image was buffered */
//	if (out_size > 0) {
//		av_init_packet(&pkt);
//		pkt.data = NULL;
//		pkt.size = 0;
//
//		if (video_codec_ctx->coded_frame->pts != AV_NOPTS_VALUE) {
//			pkt.pts = av_rescale_q(video_codec_ctx->coded_frame->pts,
//					video_codec_ctx->time_base, video_stream->time_base);
//		}
//
//
//		if (video_codec_ctx->coded_frame->key_frame)
//			pkt.flags |= AV_PKT_FLAG_KEY;
//
//		pkt.stream_index = video_stream->index;
//		pkt.data = video_output_file->vbuf;
//		pkt.size = out_size;
//
//		// write the compressed frame in the media file
//		if (av_interleaved_write_frame(video_output_file->av_fmt_ctx, &pkt)
//				!= 0) {
//			GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Writing frame is not successful\n"));
//			return -1;
//		}
//
//		av_free_packet(&pkt);
//
//	}

	return video_output_file->encoded_frame_size;
}
Example #15
CBaseDec::RetCode CFfmpegDec::Decoder(FILE *_in, int /*OutputFd*/, State* state, CAudioMetaData* _meta_data, time_t* time_played, unsigned int* secondsToSkip)
{
	in = _in;
	RetCode Status=OK;
	is_stream = fseek((FILE *)in, 0, SEEK_SET);

	if (!SetMetaData((FILE *)in, _meta_data, true)) {
		DeInit();
		Status=DATA_ERR;
		return Status;
	}

	AVCodecContext *c = avc->streams[best_stream]->codec;

	mutex.lock();
	int r = avcodec_open2(c, codec, NULL);
	mutex.unlock();
	if (r)
	{
		DeInit();
		Status=DATA_ERR;
		return Status;
	}

	SwrContext *swr = swr_alloc();
	if (!swr) {
		mutex.lock();
		avcodec_close(c);
		mutex.unlock();
		DeInit();
		Status=DATA_ERR;
		return Status;
	}

	mSampleRate = samplerate;
	mChannels = av_get_channel_layout_nb_channels(AV_CH_LAYOUT_STEREO);
	audioDecoder->PrepareClipPlay(mChannels, mSampleRate, 16, 1);

	AVFrame *frame = NULL;
	AVPacket rpacket;
	av_init_packet(&rpacket);
	c->channel_layout = c->channel_layout ? c->channel_layout : AV_CH_LAYOUT_STEREO;

	av_opt_set_int(swr, "in_channel_layout",	c->channel_layout,	0);
	//av_opt_set_int(swr, "out_channel_layout",	c->channel_layout,	0);
	av_opt_set_int(swr, "out_channel_layout",	AV_CH_LAYOUT_STEREO,	0);
	av_opt_set_int(swr, "in_sample_rate",		c->sample_rate,		0);
	av_opt_set_int(swr, "out_sample_rate",		c->sample_rate,		0);
	av_opt_set_sample_fmt(swr, "in_sample_fmt",	c->sample_fmt,          0);
	av_opt_set_sample_fmt(swr, "out_sample_fmt",   	AV_SAMPLE_FMT_S16,      0);

	if (( swr_init(swr)) < 0) {
		Status=DATA_ERR;
		return Status;
	}

	uint8_t *outbuf = NULL;
	int outsamples = 0;
	int outsamples_max = 0;

	int64_t pts = 0, start_pts = 0, next_skip_pts = 0;
	uint64_t skip = 0;
	int seek_flags = 0;

	do
	{
		int actSecsToSkip = *secondsToSkip;
		if (!is_stream && (actSecsToSkip || *state==FF || *state==REV) && avc->streams[best_stream]->time_base.num) {
			if (!next_skip_pts || pts >= next_skip_pts) {
				skip = avc->streams[best_stream]->time_base.den / avc->streams[best_stream]->time_base.num;
				if (actSecsToSkip)
					skip *= actSecsToSkip;
				if (*state == REV) {
					next_skip_pts = pts - skip;
					pts = next_skip_pts - skip/4;
					seek_flags = AVSEEK_FLAG_BACKWARD;
					if (pts < start_pts) {
						pts = start_pts;
						*state = PAUSE; 
					}
				} else {
					pts += skip;
					next_skip_pts = pts + skip/4;
					seek_flags = 0;
				}
				av_seek_frame(avc, best_stream, pts, seek_flags);
				// if a custom value was set we only jump once
				if (actSecsToSkip != 0) {
					*state=PLAY;
					*secondsToSkip = 0;
				}
			}
		}

		while(*state==PAUSE && !is_stream)
			usleep(10000);

		if (av_read_frame(avc, &rpacket)) {
			Status=DATA_ERR;
			break;
		}

		if (rpacket.stream_index != best_stream) {
			av_packet_unref(&rpacket);
			continue;
		}

		AVPacket packet = rpacket;
		while (packet.size > 0) {
			int got_frame = 0;
			if (!frame) {
				if (!(frame = av_frame_alloc())) {
					Status=DATA_ERR;
					break;
				}
			} else
				av_frame_unref(frame);

			int len = avcodec_decode_audio4(c, frame, &got_frame, &packet);
			if (len < 0) {
				// skip frame
				packet.size = 0;
				avcodec_flush_buffers(c);
				mutex.lock();
				avcodec_close(c);
				avcodec_open2(c, codec, NULL);
				mutex.unlock();
				continue;
			}
			if (got_frame && *state!=PAUSE) {
				int out_samples;
				outsamples = av_rescale_rnd(swr_get_delay(swr, c->sample_rate) + frame->nb_samples,
					c->sample_rate, c->sample_rate, AV_ROUND_UP);
				if (outsamples > outsamples_max) {
					av_free(outbuf);
					if (av_samples_alloc(&outbuf, &out_samples, mChannels, //c->channels,
								frame->nb_samples, AV_SAMPLE_FMT_S16, 1) < 0) {
						Status=WRITE_ERR;
						packet.size = 0;
						break;
					}
					outsamples_max = outsamples;
				}
				outsamples = swr_convert(swr, &outbuf, outsamples,
							(const uint8_t **) &frame->data[0], frame->nb_samples);
				int outbuf_size = av_samples_get_buffer_size(&out_samples, mChannels, //c->channels,
									  outsamples, AV_SAMPLE_FMT_S16, 1);

				if(audioDecoder->WriteClip((unsigned char*) outbuf, outbuf_size) != outbuf_size)
				{
					fprintf(stderr,"%s: PCM write error (%s).\n", ProgName, strerror(errno));
					Status=WRITE_ERR;
				}
				pts = av_frame_get_best_effort_timestamp(frame);
				if (!start_pts)
					start_pts = pts;
			}
			packet.size -= len;
			packet.data += len;
		}
		if (time_played && avc->streams[best_stream]->time_base.den)
			*time_played = (pts - start_pts) * avc->streams[best_stream]->time_base.num / avc->streams[best_stream]->time_base.den;
		av_packet_unref(&rpacket);
	} while (*state!=STOP_REQ && Status==OK);

	audioDecoder->StopClip();
	meta_data_valid = false;

	swr_free(&swr);
	av_free(outbuf);
	av_packet_unref(&rpacket);
	av_frame_free(&frame);
	avcodec_close(c);
	//av_free(avcc);

	DeInit();
	if (_meta_data->cover_temporary && !_meta_data->cover.empty()) {
		_meta_data->cover_temporary = false;
		unlink(_meta_data->cover.c_str());
	}
	return Status;
}
Example #16
AVFrame *FFmpeg_Input::get_frame( int stream_id ) {
  Debug(1, "Getting frame from stream %d", stream_id );

  int frameComplete = false;
  AVPacket packet;
  av_init_packet( &packet );
  AVFrame *frame = zm_av_frame_alloc();
  char errbuf[AV_ERROR_MAX_STRING_SIZE];

  while ( !frameComplete ) {
    int ret = av_read_frame( input_format_context, &packet );
    if ( ret < 0 ) {
      av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE);
      if (
          // Check if EOF.
          (ret == AVERROR_EOF || (input_format_context->pb && input_format_context->pb->eof_reached)) ||
          // Check for Connection failure.
          (ret == -110)
         ) {
        Info( "av_read_frame returned %s.", errbuf );
        return NULL;
      }
      Error( "Unable to read packet from stream %d: error %d \"%s\".", packet.stream_index, ret, errbuf );
      return NULL;
    }

    if ( (stream_id < 0 ) || ( packet.stream_index == stream_id ) ) {
      Debug(3,"Packet is for our stream (%d)", packet.stream_index );

      AVCodecContext *context = streams[packet.stream_index].context;

#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
      ret = avcodec_send_packet( context, &packet );
      if ( ret < 0 ) {
        av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE );
        Error( "Unable to send packet at frame %d: %s, continuing", streams[packet.stream_index].frame_count, errbuf );
        zm_av_packet_unref( &packet );
        continue;
      } else {
        Debug(1, "Success sending a packet");
      }

#if HAVE_AVUTIL_HWCONTEXT_H
      if ( hwaccel ) {
        ret = avcodec_receive_frame( context, hwFrame );
        if ( ret < 0 ) {
          av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE );
          Error( "Unable to receive frame %d: %s, continuing", streams[packet.stream_index].frame_count, errbuf );
          zm_av_packet_unref( &packet );
          continue;
        }
        ret = av_hwframe_transfer_data(frame, hwFrame, 0);
        if ( ret < 0 ) {
          av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE );
          Error( "Unable to transfer frame at frame %d: %s, continuing", streams[packet.stream_index].frame_count, errbuf );
          zm_av_packet_unref( &packet );
          continue;
        }
      } else {
#endif
        Debug(1, "Getting a frame?");
        ret = avcodec_receive_frame( context, frame );
        if ( ret < 0 ) {
          av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE );
          Error( "Unable to receive frame %d: %s, continuing", streams[packet.stream_index].frame_count, errbuf );
          zm_av_packet_unref( &packet );
          continue;
        }
#if HAVE_AVUTIL_HWCONTEXT_H
      }
#endif

      frameComplete = 1;
#else
      ret = zm_avcodec_decode_video(context, frame, &frameComplete, &packet);
      if ( ret < 0 ) {
        av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE);
        Error( "Unable to decode frame at frame %d: %s, continuing", streams[packet.stream_index].frame_count, errbuf );
        zm_av_packet_unref( &packet );
        continue;
      }
#endif
    } // end if it's the right stream

    zm_av_packet_unref( &packet );

  } // end while ! frameComplete
  return frame;

} //  end AVFrame *FFmpeg_Input::get_frame
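A minimal sketch of the send/receive decode loop that the newer branch of this example is built on, including the end-of-stream drain step the example omits. This is an assumption-level illustration, not ZoneMinder code: dec_ctx, fmt_ctx, frame, and pkt are placeholder names.

static void decode_all_sketch(AVCodecContext *dec_ctx, AVFormatContext *fmt_ctx)
{
    AVFrame *frame = av_frame_alloc();
    AVPacket pkt;
    av_init_packet(&pkt);

    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        if (avcodec_send_packet(dec_ctx, &pkt) >= 0) {
            // One packet can produce zero, one, or several frames.
            while (avcodec_receive_frame(dec_ctx, frame) >= 0) {
                /* consume frame here */
            }
        }
        av_packet_unref(&pkt);
    }

    // Drain: a NULL packet signals end of stream to the decoder.
    avcodec_send_packet(dec_ctx, NULL);
    while (avcodec_receive_frame(dec_ctx, frame) >= 0) {
        /* consume remaining buffered frames */
    }
    av_frame_free(&frame);
}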
Ejemplo n.º 17
0
void Utility::VideoLoader::loadP() {
	isRunning_=true;
	target_->setIsComplete(false);
	// Contains information about the stream
	AVFormatContext *formatContext = NULL;

	// Contains information about the codec
	AVCodecContext *codecContext = NULL;

	// The codec with which to decode the video
	AVCodec *codec = NULL;

	// Open video file
	// avformat_open_input(context, path, format, options)
	// format = NULL means autodetect
	if(!path_.isEmpty()
	        && avformat_open_input(&formatContext, path_.toUtf8(), NULL, NULL)!=0) {
		target_->setIsComplete(true);
		return;
	}

	// Retrieve stream information
	if(avformat_find_stream_info(formatContext, NULL)<0) {
		target_->setIsComplete(true);
		return;
	}

	// Print stream information
	// av_dump_format(formatContext, 0, path_.toUtf8(), 0);


	// Find the best video stream in context
	int videoStreamIndex = av_find_best_stream(formatContext, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
	if(videoStreamIndex < 0) { // av_find_best_stream returns a negative AVERROR code on failure
		target_->setIsComplete(true);
		return;
	}

	// Get a pointer to the codec context for the video stream
	codecContext = formatContext->streams[videoStreamIndex]->codec;

	// Find the decoder for the video stream
	codec = avcodec_find_decoder(codecContext->codec_id);
	if(codec == NULL) {
		target_->setIsComplete(true);
		return;
	}

	// Open codec
	if(avcodec_open2(codecContext, codec, &dict_) < 0) {
		target_->setIsComplete(true);
		return;
	}

	struct SwsContext      *sws_ctx = NULL;

	averageBitrate_=codecContext->bit_rate;
	codec_=QString(av_codec_get_codec_descriptor(codecContext)->name);
	if(codec_=="")
		codec_="N/A";

	sws_ctx =
	    sws_getContext
	    (
	        codecContext->width,
	        codecContext->height,
	        codecContext->pix_fmt,
	        codecContext->width,
	        codecContext->height,
	        AV_PIX_FMT_RGB24,
	        0,
	        0,
	        0,
	        0
	    );

	AVPacket packet;
	AVFrame *frame = NULL;
	frame = av_frame_alloc();

	AVFrame* rgbframe=NULL;
	uint8_t* buffer = NULL;
	int numbytes=avpicture_get_size(AV_PIX_FMT_RGB24, codecContext->width,codecContext->height);

	target_->setFps(codecContext->framerate.num);
	av_init_packet(&packet);
	packet.data = NULL;
	packet.size = 0;
	int gotPicture = 0;
	while(av_read_frame(formatContext, &packet) >= 0 && isRunning_) {
		avcodec_decode_video2(codecContext, frame, &gotPicture, &packet);

		if(gotPicture != 0) {
			rgbframe=av_frame_alloc();

			buffer=(uint8_t *)av_malloc(numbytes*sizeof(uint8_t));
			avpicture_fill((AVPicture *)rgbframe, buffer, AV_PIX_FMT_RGB24,codecContext->width,
			               codecContext->height);
			rgbframe->width=codecContext->width;
			rgbframe->height=codecContext->height;
			rgbframe->format=AV_PIX_FMT_RGB24;
			rgbframe->pkt_size=frame->pkt_size;

			sws_scale
			(
			    sws_ctx,
			    frame->data,
			    frame->linesize,
			    0,
			    codecContext->height,
			    rgbframe->data,
			    rgbframe->linesize
			);

			target_->appendFrame(rgbframe);
		}
		av_packet_unref(&packet); // release each demuxed packet to avoid leaking
	}

	packet.data=NULL;
	packet.size=0;

	while(isRunning_) {
		avcodec_decode_video2(codecContext, frame, &gotPicture, &packet);

		if(gotPicture == 0)
			break;

		rgbframe=av_frame_alloc();

		buffer=(uint8_t *)av_malloc(numbytes*sizeof(uint8_t));
		avpicture_fill((AVPicture *)rgbframe, buffer, AV_PIX_FMT_RGB24,codecContext->width,
		               codecContext->height);
		rgbframe->width=codecContext->width;
		rgbframe->height=codecContext->height;
		rgbframe->format=AV_PIX_FMT_RGB24;
		rgbframe->pkt_size=frame->pkt_size;

		sws_scale
		(
		    sws_ctx,
		    frame->data,
		    frame->linesize,
		    0,
		    codecContext->height,
		    rgbframe->data,
		    rgbframe->linesize
		);

		target_->appendFrame(rgbframe);
	}
	av_frame_unref(frame);
	av_frame_free(&frame);
	avcodec_close(codecContext);
	avformat_close_input(&formatContext);
	isRunning_=false;
	if(dict_) {
		av_dict_free(&dict_); // an AVDictionary must be freed with av_dict_free(), not free()
	}
	target_->setIsComplete(true);
}
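The loader above fills its RGB frames with avpicture_fill(), which is deprecated together with the rest of the AVPicture API in newer FFmpeg releases. A hedged sketch of the same allocation with the frame API, assuming the same codecContext as above (error handling elided):

	AVFrame *rgbframe = av_frame_alloc();
	rgbframe->format = AV_PIX_FMT_RGB24;
	rgbframe->width  = codecContext->width;
	rgbframe->height = codecContext->height;
	if (av_frame_get_buffer(rgbframe, 0) < 0) {
		av_frame_free(&rgbframe); // allocation failed; caller must check for NULL
	}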
Ejemplo n.º 18
0
void loop() {
  int64_t dts_shift = AV_NOPTS_VALUE;

  uint32_t buf_size = 10240;
  char *buf = (char *)malloc(buf_size);
  while(1) {
    uint32_t len;
    int idx = 0;
    int read_bytes = 0;
    if((read_bytes = read1(in_fd, &len, 4)) != 4) {
      if(read_bytes == 0) {
        _exit(0);
      }
      error("Can't read input length: %d", read_bytes);
    }
    len = ntohl(len);
    if(len > buf_size) {
      buf_size = len;
      free(buf);
      buf = (char *)malloc(buf_size);
    }

    if((read_bytes = read1(in_fd, buf, len)) != len) error("Can't read %d bytes from input: %d", len, read_bytes);
    int version = 0;
    ei_decode_version(buf, &idx, &version);
    int command_idx = idx;

    int arity = 0;
    if(ei_decode_tuple_header(buf, &idx, &arity) == -1) error("must pass tuple");


    int t = 0;
    int size = 0;
    ei_get_type(buf, &idx, &t, &size);
    if(t != ERL_ATOM_EXT) error("first element must be atom");
    char command[MAXATOMLEN+1];
    ei_decode_atom(buf, &idx, command); arity--;


    if(!strcmp(command, "ping")) {
      pong();
      continue;
    }
    if(!strcmp(command, "exit")) {
      return;
    }
    if(!strcmp(command, "init_input")) {
      if(arity != 3) error("Must provide 3 arguments to init_input command");
      char content[1024];
      char codec[1024];
      if(ei_decode_atom(buf, &idx, content) == -1) error("Must provide content as an atom");
      if(ei_decode_atom(buf, &idx, codec) == -1) error("Must provide codec as an atom");

      int decoder_config_len = 0;
      ei_get_type(buf, &idx, &t, &decoder_config_len);
      if(t != ERL_BINARY_EXT) error("decoder config must be a binary");
      uint8_t *decoder_config = av_mallocz(decoder_config_len + FF_INPUT_BUFFER_PADDING_SIZE);
      long bin_len = 0;
      ei_decode_binary(buf, &idx, decoder_config, &bin_len);

      Track *t = NULL;
      if(!strcmp(content, "video")) {
        t = &input_video;
      } else if(!strcmp(content, "audio")) {
        t = &input_audio;
      } else {
        error("Unknown media content: '%s'", content);
      }
      if(t->codec) error("Double initialization of media '%s'", content);

      t->codec = avcodec_find_decoder_by_name(codec);
      t->ctx = avcodec_alloc_context3(t->codec);
      if(!t->codec || !t->ctx) 
        error("Unknown %s decoder '%s'", content, codec);
      t->ctx->time_base = (AVRational){1, 90};
      t->ctx->extradata_size = decoder_config_len;
      t->ctx->extradata = decoder_config;
      if(avcodec_open2(t->ctx, t->codec, NULL) < 0)
        error("failed to open %s decoder", content);

      reply_atom("ready");
      continue;
    }

    if(!strcmp(command, "init_output")) {
      if(arity != 4) error("Must provide 4 arguments to init_output command");
      char content[1024];
      char codec[1024];
      if(ei_decode_atom(buf, &idx, content) == -1) error("Must provide content as an atom");
      if(ei_decode_atom(buf, &idx, codec) == -1) error("Must provide codec as an atom");

      long track_id = -1;
      if(ei_decode_long(buf, &idx, &track_id) == -1) error("track_id must be integer");
      if(track_id < 1 || track_id > MAX_OUTPUT_TRACKS+1) error("track_id must be from 1 to %d", MAX_OUTPUT_TRACKS+1);
      track_id--;

      Track *t = NULL;
      if(!strcmp(content, "audio")) {
        t = &output_audio[out_audio_count++];
      } else if(!strcmp(content, "video")) {
        t = &output_video[out_video_count++];
      } else {
        error("invalid_content '%s'", content);
      }
      t->track_id = track_id;

      t->codec = avcodec_find_encoder_by_name(codec);
      t->ctx = avcodec_alloc_context3(t->codec);
      if(!t->codec || !t->ctx) error("Unknown encoder '%s'", codec);

      AVCodecContext* ctx = t->ctx;
      AVDictionary *opts = NULL;


      int options_count = 0;
      if(ei_decode_list_header(buf, &idx, &options_count) < 0) error("options must be a proplist");
      while(options_count > 0) {
        int arity1 = 0;

        int t,s;
        ei_get_type(buf, &idx, &t, &s);
        if(t == ERL_NIL_EXT) {
          ei_skip_term(buf, &idx);
          break;
        }

        if(ei_decode_tuple_header(buf, &idx, &arity1) < 0) error("options must be a proper proplist");
        if(arity1 != 2) error("tuples in options proplist must be arity 2");

        char key[MAXATOMLEN];
        if(ei_decode_atom(buf, &idx, key) == 0) {

          if(!strcmp(key, "width")) {
            long w = 0;
            if(ei_decode_long(buf, &idx, &w) < 0) error("width must be integer");
            ctx->width = w;
            continue;
          }

          if(!strcmp(key, "height")) {
            long h = 0;
            if(ei_decode_long(buf, &idx, &h) < 0) error("height must be integer");
            ctx->height = h;
            continue;
          }

          if(!strcmp(key, "bitrate")) {
            long b = 0;
            if(ei_decode_long(buf, &idx, &b) < 0) error("bitrate must be integer");
            ctx->bit_rate = b;
            continue;
          }

          if(!strcmp(key, "sample_rate")) {
            long sr = 0;
            if(ei_decode_long(buf, &idx, &sr) < 0) error("sample_rate must be integer");
            ctx->sample_rate = sr;
            continue;
          }

          if(!strcmp(key, "channels")) {
            long ch = 0;
            if(ei_decode_long(buf, &idx, &ch) < 0) error("channels must be integer");
            ctx->channels = ch;
            continue;
          }

          fprintf(stderr, "Unknown key: '%s'\r\n", key);
          ei_skip_term(buf, &idx);
          continue;
        } else if(ei_decode_string(buf, &idx, key) == 0) {
          char value[MAXATOMLEN];
          if(ei_decode_string(buf, &idx, value) < 0) error("key-value must be strings");
          av_dict_set(&opts, key, value, 0);
        } else {
          error("Invalid options proplist");
        }
      }

      if(!strcmp(content, "video")) {
        ctx->pix_fmt = AV_PIX_FMT_YUV420P;
      }
      if(!strcmp(content, "audio")) {
        ctx->sample_fmt = AV_SAMPLE_FMT_S16;
        ctx->profile = FF_PROFILE_AAC_MAIN;
      }
      ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
      ctx->time_base = (AVRational){1,90};

      if(avcodec_open2(ctx, t->codec, &opts) < 0) error("failed to open %s encoder", content);

      AVPacket config;
      av_init_packet(&config); // start from a clean packet before filling it in
      config.dts = config.pts = 0;
      config.flags = CODEC_FLAG_GLOBAL_HEADER;
      config.data = ctx->extradata;
      config.size = ctx->extradata_size;
      reply_avframe(&config, t->codec);      
      continue;
    }

    if(!strcmp(command, "video_frame")) {
      idx = command_idx;
      struct video_frame *fr = read_video_frame(buf, &idx);

      AVPacket packet;
      av_new_packet(&packet, fr->body.size);
      memcpy(packet.data, fr->body.data, fr->body.size);
      packet.size = fr->body.size;
      packet.dts = fr->dts*90;
      packet.pts = fr->pts*90;
      packet.stream_index = fr->track_id;

      // if(packet_size != pkt_size) error("internal error in reading frame body");

      if(fr->content == frame_content_audio) {
        if(!input_audio.ctx) error("input audio uninitialized");

        AVFrame *decoded_frame = avcodec_alloc_frame();
        int got_output = 0;
        int ret = avcodec_decode_audio4(input_audio.ctx, decoded_frame, &got_output, &packet);
        if(got_output) {
          reply_atom("ok");
        } else {
          error("Got: %d, %d\r\n", ret, got_output);
        }
        free(fr);
        continue;
      }

      if(fr->content == frame_content_video) {
        if(!input_video.ctx) error("input video uninitialized");
        AVFrame *decoded_frame = avcodec_alloc_frame();
        int could_decode = 0;
        int ret = avcodec_decode_video2(input_video.ctx, decoded_frame, &could_decode, &packet);
        if(ret < 0) {
          error("failed to decode video");
        }
        if(could_decode) {
          decoded_frame->pts = av_frame_get_best_effort_timestamp(decoded_frame);
          int sent_config = 0;

          AVPacket pkt;
          av_init_packet(&pkt);
          pkt.data = NULL;
          pkt.size = 0;

          int could_encode = 0;

          if(out_video_count <= 0) error("trying to transcode uninitialized video");
          if(avcodec_encode_video2(output_video[0].ctx, &pkt, decoded_frame, &could_encode) != 0) 
            error("Failed to encode h264");

          if(could_encode) {
            if(dts_shift == AV_NOPTS_VALUE) {
              dts_shift = -pkt.dts;
            }
            pkt.dts += dts_shift;
            reply_avframe(&pkt, output_video[0].codec);
          } else if(!sent_config) {
            reply_atom("ok");
          }
          free(fr);
          continue;
        } else {
          reply_atom("ok");
          free(fr);
          continue;
        }
      }

      error("Unknown content");
    }

    // AVCodecContext
    // AVPacket
    // AVFrame



    char *s = (char *)malloc(1024);
    ei_s_print_term(&s, buf, &command_idx);
    error("Unknown command: %s", s);
  }
}
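One detail worth noting in the init_output branch above: avcodec_open2() removes the options it recognizes from the dictionary and leaves the rest behind, and the example never frees opts. A hedged sketch of inspecting and releasing the leftovers, using only documented AVDictionary calls:

    // Entries still in 'opts' after avcodec_open2() were not consumed by the encoder.
    AVDictionaryEntry *e = NULL;
    while ((e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)))
        fprintf(stderr, "unconsumed option %s = %s\r\n", e->key, e->value);
    av_dict_free(&opts);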
Ejemplo n.º 19
0
static void video_decode_example(const char *outfilename, const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c= NULL;
    int frame, got_picture, len;
    FILE *f;
    AVFrame *picture;
    uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
    char buf[1024];
    AVPacket avpkt;

    av_init_packet(&avpkt);

    /* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
    memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);

    printf("Video decoding\n");

    /* find the mpeg1 video decoder */
    codec = avcodec_find_decoder(CODEC_ID_MPEG1VIDEO);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    picture = avcodec_alloc_frame();

    if (codec->capabilities & CODEC_CAP_TRUNCATED)
        c->flags |= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */

    /* For some codecs, such as msmpeg4 and mpeg4, width and height
       MUST be initialized there because this information is not
       available in the bitstream. */

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* the codec gives us the frame size, in samples */

    f = fopen(filename, "rb");
    if (!f) {
        fprintf(stderr, "could not open %s\n", filename);
        exit(1);
    }

    frame = 0;
    for(;;) {
        avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
        if (avpkt.size == 0)
            break;

        /* NOTE1: some codecs are stream based (mpegvideo, mpegaudio)
           and this is the only method to use them because you cannot
           know the compressed data size before analysing it.

           BUT some other codecs (msmpeg4, mpeg4) are inherently frame
           based, so you must call them with all the data for one
           frame exactly. You must also initialize 'width' and
           'height' before initializing them. */

        /* NOTE2: some codecs allow the raw parameters (frame size,
           sample rate) to be changed at any frame. We handle this, so
           you should also take care of it */

        /* here, we use a stream based decoder (mpeg1video), so we
           feed decoder and see if it could decode a frame */
        avpkt.data = inbuf;
        while (avpkt.size > 0) {
            len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
            if (len < 0) {
                fprintf(stderr, "Error while decoding frame %d\n", frame);
                exit(1);
            }
            if (got_picture) {
                printf("saving frame %3d\n", frame);
                fflush(stdout);

                /* the picture is allocated by the decoder. no need to
                   free it */
                snprintf(buf, sizeof(buf), outfilename, frame);
                pgm_save(picture->data[0], picture->linesize[0],
                         c->width, c->height, buf);
                frame++;
            }
            avpkt.size -= len;
            avpkt.data += len;
        }
    }

    /* some codecs, such as MPEG, transmit the I and P frame with a
       latency of one frame. You must do the following to have a
       chance to get the last frame of the video */
    avpkt.data = NULL;
    avpkt.size = 0;
    len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
    if (got_picture) {
        printf("saving last frame %3d\n", frame);
        fflush(stdout);

        /* the picture is allocated by the decoder. no need to
           free it */
        snprintf(buf, sizeof(buf), outfilename, frame);
        pgm_save(picture->data[0], picture->linesize[0],
                 c->width, c->height, buf);
        frame++;
    }

    fclose(f);

    avcodec_close(c);
    av_free(c);
    av_free(picture);
    printf("\n");
}
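The final flush above makes a single decode call with an empty packet, which recovers at most one delayed frame. Codecs can buffer more than one frame of latency, so a more robust drain loops until got_picture stays zero. A hedged sketch in terms of the same variables as the example:

    avpkt.data = NULL;
    avpkt.size = 0;
    do {
        len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
        if (len < 0)
            break;
        if (got_picture) {
            /* save the frame exactly as in the loop above */
            frame++;
        }
    } while (got_picture);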
Ejemplo n.º 20
0
static int compute_crc_of_packets(AVFormatContext *fmt_ctx, int video_stream,
                                  AVCodecContext *ctx, AVFrame *fr, uint64_t ts_start, uint64_t ts_end, int no_seeking)
{
    int number_of_written_bytes;
    int got_frame = 0;
    int result;
    int end_of_stream = 0;
    int byte_buffer_size;
    uint8_t *byte_buffer;
    int64_t crc;
    AVPacket pkt;

    byte_buffer_size = av_image_get_buffer_size(ctx->pix_fmt, ctx->width, ctx->height, 16);
    byte_buffer = av_malloc(byte_buffer_size);
    if (!byte_buffer) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate buffer\n");
        return AVERROR(ENOMEM);
    }

    if (!no_seeking) {
        result = av_seek_frame(fmt_ctx, video_stream, ts_start, AVSEEK_FLAG_ANY);
        printf("Seeking to %"PRId64", computing crc for frames with pts < %"PRId64"\n", ts_start, ts_end);
        if (result < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error in seeking\n");
            return result;
        }
        avcodec_flush_buffers(ctx);
    }

    av_init_packet(&pkt);
    do {
        if (!end_of_stream)
            if (av_read_frame(fmt_ctx, &pkt) < 0)
                end_of_stream = 1;
        if (end_of_stream) {
            pkt.data = NULL;
            pkt.size = 0;
        }
        if (pkt.stream_index == video_stream || end_of_stream) {
            got_frame = 0;
            if ((pkt.pts == AV_NOPTS_VALUE) && (!end_of_stream)) {
                av_log(NULL, AV_LOG_ERROR, "Error: frames doesn't have pts values\n");
                return -1;
            }
            result = avcodec_decode_video2(ctx, fr, &got_frame, &pkt);
            if (result < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error decoding frame\n");
                return result;
            }
            if (got_frame) {
                number_of_written_bytes = av_image_copy_to_buffer(byte_buffer, byte_buffer_size,
                                          (const uint8_t* const *)fr->data, (const int*) fr->linesize,
                                          ctx->pix_fmt, ctx->width, ctx->height, 1);
                if (number_of_written_bytes < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Can't copy image to buffer\n");
                    return number_of_written_bytes;
                }
                if ((fr->pts > ts_end) && (!no_seeking))
                    break;
                crc = av_adler32_update(0, (const uint8_t*)byte_buffer, number_of_written_bytes);
                printf("%10"PRId64", 0x%08lx\n", fr->pts, crc);
                if (no_seeking) {
                    if (add_crc_to_array(crc, fr->pts) < 0)
                        return -1;
                }
                else {
                    if (compare_crc_in_array(crc, fr->pts) < 0)
                        return -1;
                }
            }
        }
        av_packet_unref(&pkt);
        av_init_packet(&pkt);
    } while ((!end_of_stream || got_frame) && (no_seeking || (fr->pts + av_frame_get_pkt_duration(fr) <= ts_end)));

    av_packet_unref(&pkt);
    av_freep(&byte_buffer);

    return 0;
}
Ejemplo n.º 21
0
static bool write_lavc(struct image_writer_ctx *ctx, mp_image_t *image, FILE *fp)
{
    bool success = false;
    AVFrame *pic = NULL;
    AVPacket pkt = {0};
    int got_output = 0;

    av_init_packet(&pkt);

    struct AVCodec *codec = avcodec_find_encoder(ctx->writer->lavc_codec);
    AVCodecContext *avctx = NULL;
    if (!codec)
        goto print_open_fail;
    avctx = avcodec_alloc_context3(codec);
    if (!avctx)
        goto print_open_fail;

    avctx->time_base = AV_TIME_BASE_Q;
    avctx->width = image->w;
    avctx->height = image->h;
    avctx->pix_fmt = imgfmt2pixfmt(image->imgfmt);
    if (avctx->pix_fmt == AV_PIX_FMT_NONE) {
        MP_ERR(ctx, "Image format %s not supported by lavc.\n",
               mp_imgfmt_to_name(image->imgfmt));
        goto error_exit;
    }
    if (ctx->writer->lavc_codec == AV_CODEC_ID_PNG) {
        avctx->compression_level = ctx->opts->png_compression;
        avctx->prediction_method = ctx->opts->png_filter;
    }

    if (avcodec_open2(avctx, codec, NULL) < 0) {
     print_open_fail:
        MP_ERR(ctx, "Could not open libavcodec encoder for saving images\n");
        goto error_exit;
    }

    pic = av_frame_alloc();
    if (!pic)
        goto error_exit;
    for (int n = 0; n < 4; n++) {
        pic->data[n] = image->planes[n];
        pic->linesize[n] = image->stride[n];
    }
    pic->format = avctx->pix_fmt;
    pic->width = avctx->width;
    pic->height = avctx->height;
    if (ctx->opts->tag_csp) {
        pic->color_primaries = mp_csp_prim_to_avcol_pri(image->params.primaries);
        pic->color_trc = mp_csp_trc_to_avcol_trc(image->params.gamma);
    }
    int ret = avcodec_encode_video2(avctx, &pkt, pic, &got_output);
    if (ret < 0)
        goto error_exit;

    fwrite(pkt.data, pkt.size, 1, fp);

    success = !!got_output;
error_exit:
    if (avctx)
        avcodec_close(avctx);
    av_free(avctx);
    av_frame_free(&pic);
    av_free_packet(&pkt);
    return success;
}
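avcodec_encode_video2() used above is deprecated in newer FFmpeg releases. A hedged sketch of the same single-image encode with the send/receive API, assuming the same avctx, pic, and fp as in the function above:

    AVPacket *opkt = av_packet_alloc();
    int r = avcodec_send_frame(avctx, pic);        // submit the picture
    if (r >= 0)
        r = avcodec_receive_packet(avctx, opkt);   // fetch the encoded packet
    if (r >= 0)
        fwrite(opkt->data, opkt->size, 1, fp);
    av_packet_free(&opkt);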
Ejemplo n.º 22
0
int decode_audio_file(ChromaprintContext *chromaprint_ctx, int16_t *buffer1, int16_t *buffer2, const char *file_name, int max_length, int *duration)
{
	int i, ok = 0, remaining, length, consumed, buffer_size, codec_ctx_opened = 0;
	AVFormatContext *format_ctx = NULL;
	AVCodecContext *codec_ctx = NULL;
	AVCodec *codec = NULL;
	AVStream *stream = NULL;
	AVPacket packet, packet_temp;
#ifdef HAVE_AV_AUDIO_CONVERT
	AVAudioConvert *convert_ctx = NULL;
#endif
	int16_t *buffer;

#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 2, 0)
	if (av_open_input_file(&format_ctx, file_name, NULL, 0, NULL) != 0) {
#else
	if (avformat_open_input(&format_ctx, file_name, NULL, NULL) != 0) {
#endif
		fprintf(stderr, "ERROR: couldn't open the file\n");
		goto done;
	}

	if (av_find_stream_info(format_ctx) < 0) {
		fprintf(stderr, "ERROR: couldn't find stream information in the file\n");
		goto done;
	}

	for (i = 0; i < format_ctx->nb_streams; i++) {
		codec_ctx = format_ctx->streams[i]->codec;
		if (codec_ctx && codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
			stream = format_ctx->streams[i];
			break;
		}
	}
	if (!stream) {
		fprintf(stderr, "ERROR: couldn't find any audio stream in the file\n");
		goto done;
	}

	codec = avcodec_find_decoder(codec_ctx->codec_id);
	if (!codec) {
		fprintf(stderr, "ERROR: unknown codec\n");
		goto done;
	}

	if (avcodec_open(codec_ctx, codec) < 0) {
		fprintf(stderr, "ERROR: couldn't open the codec\n");
		goto done;
	}
	codec_ctx_opened = 1;

	if (codec_ctx->channels <= 0) {
		fprintf(stderr, "ERROR: no channels found in the audio stream\n");
		goto done;
	}

	if (codec_ctx->sample_fmt != AV_SAMPLE_FMT_S16) {
#ifdef HAVE_AV_AUDIO_CONVERT
		convert_ctx = av_audio_convert_alloc(AV_SAMPLE_FMT_S16, codec_ctx->channels,
		                                     codec_ctx->sample_fmt, codec_ctx->channels, NULL, 0);
		if (!convert_ctx) {
			fprintf(stderr, "ERROR: couldn't create sample format converter\n");
			goto done;
		}
#else
		fprintf(stderr, "ERROR: unsupported sample format\n");
		goto done;
#endif
	}

	*duration = stream->time_base.num * stream->duration / stream->time_base.den;

	if (max_length == 0) {
		max_length = *duration; // duration is a pointer; fingerprint the whole file
	}

	av_init_packet(&packet);
	av_init_packet(&packet_temp);

	remaining = max_length * codec_ctx->channels * codec_ctx->sample_rate;
	chromaprint_start(chromaprint_ctx, codec_ctx->sample_rate, codec_ctx->channels);

	while (1) {
		if (av_read_frame(format_ctx, &packet) < 0) {
			break;
		}

		packet_temp.data = packet.data;
		packet_temp.size = packet.size;

		while (packet_temp.size > 0) {
			buffer_size = BUFFER_SIZE;
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(52, 23, 0)
			consumed = avcodec_decode_audio2(codec_ctx,
				buffer1, &buffer_size, packet_temp.data, packet_temp.size);
#else
			consumed = avcodec_decode_audio3(codec_ctx,
				buffer1, &buffer_size, &packet_temp);
#endif

			if (consumed < 0) {
				break;
			}

			packet_temp.data += consumed;
			packet_temp.size -= consumed;

			if (buffer_size <= 0) {
				if (buffer_size < 0) {
					fprintf(stderr, "WARNING: size returned from avcodec_decode_audioX is too small\n");
				}
				continue;
			}
			if (buffer_size > BUFFER_SIZE) {
				fprintf(stderr, "WARNING: size returned from avcodec_decode_audioX is too large\n");
				continue;
			}

#ifdef HAVE_AV_AUDIO_CONVERT
			if (convert_ctx) {
				const void *ibuf[6] = { buffer1 };
				void *obuf[6] = { buffer2 };
#if LIBAVUTIL_VERSION_INT < AV_VERSION_INT(51, 8, 0)
				int istride[6] = { av_get_bits_per_sample_format(codec_ctx->sample_fmt) / 8 };
#else
				int istride[6] = { av_get_bytes_per_sample(codec_ctx->sample_fmt) };
#endif
				int ostride[6] = { 2 };
				int len = buffer_size / istride[0];
				if (av_audio_convert(convert_ctx, obuf, ostride, ibuf, istride, len) < 0) {
					break;
				}
				buffer = buffer2;
				buffer_size = len * ostride[0];
			}
			else {
				buffer = buffer1;
			}
#else
			buffer = buffer1;
#endif

			length = MIN(remaining, buffer_size / 2);
			if (!chromaprint_feed(chromaprint_ctx, buffer, length)) {
				fprintf(stderr, "ERROR: fingerprint calculation failed\n");
				goto done;
			}

			if (max_length) {
				remaining -= length;
				if (remaining <= 0) {
					goto finish;
				}
			}
		}

		if (packet.data) {
			av_free_packet(&packet);
		}
	}

finish:
	if (!chromaprint_finish(chromaprint_ctx)) {
		fprintf(stderr, "ERROR: fingerprint calculation failed\n");
		goto done;
	}

	ok = 1;

done:
	if (codec_ctx_opened) {
		avcodec_close(codec_ctx);
	}
	if (format_ctx) {
		av_close_input_file(format_ctx);
	}
#ifdef HAVE_AV_AUDIO_CONVERT
	if (convert_ctx) {
		av_audio_convert_free(convert_ctx);
	}
#endif
	return ok;
}

int fpcalc_main(int argc, char **argv)
{
	int i, j, max_length = 120, num_file_names = 0, raw = 0, raw_fingerprint_size, duration;
	int16_t *buffer1, *buffer2;
	int32_t *raw_fingerprint;
	char *file_name, *fingerprint, **file_names;
	ChromaprintContext *chromaprint_ctx;
	int algo = CHROMAPRINT_ALGORITHM_DEFAULT;

	file_names = malloc(argc * sizeof(char *));
	for (i = 1; i < argc; i++) {
		char *arg = argv[i];
		if (!strcmp(arg, "-length") && i + 1 < argc) {
			max_length = atoi(argv[++i]);
		}
		else if (!strcmp(arg, "-version") || !strcmp(arg, "-v")) {
			printf("fpcalc version %s\n", chromaprint_get_version());
			return 0;
		}
		else if (!strcmp(arg, "-raw")) {
			raw = 1;
		}
		else if (!strcmp(arg, "-algo") && i + 1 < argc) {
			const char *v = argv[++i];
			if (!strcmp(v, "test1")) { algo = CHROMAPRINT_ALGORITHM_TEST1; }
			else if (!strcmp(v, "test2")) { algo = CHROMAPRINT_ALGORITHM_TEST2; }
			else if (!strcmp(v, "test3")) { algo = CHROMAPRINT_ALGORITHM_TEST3; }
			else if (!strcmp(v, "test4")) { algo = CHROMAPRINT_ALGORITHM_TEST4; }
			else {
				fprintf(stderr, "WARNING: unknown algorithm, using the default\n");
			}
		}
		else if (!strcmp(arg, "-set") && i + 1 < argc) {
      char *name = argv[++i];
      char *value = strchr(name, '=');
      if (!value && i + 1 < argc) {
        ++i;
      }
		}
		else {
			file_names[num_file_names++] = argv[i];
		}
	}

	if (!num_file_names) {
		printf("usage: %s [OPTIONS] FILE...\n\n", argv[0]);
		printf("Options:\n");
		printf("  -version      print version information\n");
		printf("  -length SECS  length of the audio data used for fingerprint calculation (default 120)\n");
		printf("  -raw          output the raw uncompressed fingerprint\n");
		printf("  -algo NAME    version of the fingerprint algorithm\n");
		return 2;
	}

	av_register_all();
	av_log_set_level(AV_LOG_ERROR);

	buffer1 = av_malloc(BUFFER_SIZE + 16);
	buffer2 = av_malloc(BUFFER_SIZE + 16);
	chromaprint_ctx = chromaprint_new(algo);

	for (i = 1; i < argc; i++) {
		char *arg = argv[i];
		if (!strcmp(arg, "-set") && i + 1 < argc) {
			char *name = argv[++i];
			char *value = strchr(name, '=');
			if (value) {
				*value++ = '\0';
				chromaprint_set_option(chromaprint_ctx, name, atoi(value));
			} else if (i + 1 < argc) {
				value = argv[++i];
				chromaprint_set_option(chromaprint_ctx, name, atoi(value));
			}
		}
	}

	for (i = 0; i < num_file_names; i++) {
		file_name = file_names[i];
		if (!decode_audio_file(chromaprint_ctx, buffer1, buffer2, file_name, max_length, &duration)) {
			fprintf(stderr, "ERROR: unable to calculate fingerprint for file %s, skipping\n", file_name);
			continue;
		}
		if (i > 0) {
			printf("\n");
		}
		printf("FILE=%s\n", file_name);
		printf("DURATION=%d\n", duration);
		if (raw) {
			if (!chromaprint_get_raw_fingerprint(chromaprint_ctx, (void **)&raw_fingerprint, &raw_fingerprint_size)) {
				fprintf(stderr, "ERROR: unable to calculate fingerprint for file %s, skipping\n", file_name);
				continue;
			}
			printf("FINGERPRINT=");
			for (j = 0; j < raw_fingerprint_size; j++) {
				printf("%d%s", raw_fingerprint[j], j + 1 < raw_fingerprint_size ? "," : "");
			}
			printf("\n");
			chromaprint_dealloc(raw_fingerprint);
		}
		else {
			if (!chromaprint_get_fingerprint(chromaprint_ctx, &fingerprint)) {
				fprintf(stderr, "ERROR: unable to calculate fingerprint for file %s, skipping\n", file_name);
				continue;
			}
			printf("FINGERPRINT=%s\n", fingerprint);
			chromaprint_dealloc(fingerprint);
		}
	}

	chromaprint_free(chromaprint_ctx);
	av_free(buffer1);
	av_free(buffer2);
	free(file_names);

	return 0;
}
Ejemplo n.º 23
0
void* decoding_thread(void* arg)
{
    ffdec_context *ffd_context = (ffdec_context*) arg;
    ffdec_reserved *ffd_reserved = (ffdec_reserved*) ffd_context->reserved;
    AVCodecContext *codec_context = ffd_context->codec_context;

    AVPacket packet;
    av_init_packet(&packet);
    packet.data = NULL;
    packet.size = 0; // ensure a defined size before the first read_callback check
    int got_frame;

    int decode_buffer_length = 4096;
    uint8_t decode_buffer[decode_buffer_length + FF_INPUT_BUFFER_PADDING_SIZE];
    memset(decode_buffer + decode_buffer_length, 0, FF_INPUT_BUFFER_PADDING_SIZE);

    AVFrame *frame = avcodec_alloc_frame();

    while (ffd_reserved->running)
    {
        if (ffd_reserved->read_callback) packet.size = ffd_reserved->read_callback(ffd_context,
                decode_buffer, decode_buffer_length, ffd_reserved->read_callback_arg);

        if (packet.size <= 0) break;

        packet.data = decode_buffer;

        while (ffd_reserved->running && packet.size > 0)
        {
            // reset the AVPacket
            av_init_packet(&packet);

            got_frame = 0;
            int decode_result = avcodec_decode_video2(codec_context, frame, &got_frame, &packet);

            if (decode_result < 0)
            {
                fprintf(stderr, "Error while decoding video\n");
                ffd_reserved->running = false;
                break;
            }

            if (got_frame)
            {
                if (ffd_reserved->frame_callback) ffd_reserved->frame_callback(
                        ffd_context, frame, ffd_reserved->frame_callback_arg);

                display_frame(ffd_context, frame);
            }

            packet.size -= decode_result;
            packet.data += decode_result;
        }
    }

    if (ffd_reserved->running)
    {
        // reset the AVPacket
        av_init_packet(&packet);
        packet.data = NULL;
        packet.size = 0;

        got_frame = 0;
        avcodec_decode_video2(codec_context, frame, &got_frame, &packet);

        if (got_frame)
        {
            if (ffd_reserved->frame_callback) ffd_reserved->frame_callback(
                    ffd_context, frame, ffd_reserved->frame_callback_arg);

            display_frame(ffd_context, frame);
        }
    }

    av_free(frame);
    frame = NULL;

    if (ffd_reserved->close_callback) ffd_reserved->close_callback(
            ffd_context, ffd_reserved->close_callback_arg);

    return 0;
}
Ejemplo n.º 24
0
HRESULT CTMReceiverOutputPin::FillBuffer(IMediaSample *pms)
{
	//TODO: Fill buffer with the decoded frames.
	CTMReceiverSrc* pFilter = (CTMReceiverSrc*)m_pFilter;
	AVPacket pkt, pktForRecord;
	AVPicture pic;
	BYTE *pData;
	long lDataLen;
	lDataLen = pms->GetSize();
	if (m_pData==NULL)
	{
		m_pData = new BYTE[lDataLen];
	}
	if(pFilter->m_queueBuffer.nb_packets<=0)
	{
		REFERENCE_TIME rtStart, rtStop, rtMediaStart, rtMediaStop;
		// The sample times are modified by the current rate.
		rtStart = static_cast<REFERENCE_TIME>(m_rtSampleTime);
		rtStop  = rtStart + static_cast<int>(m_rtAvgTimePerFrame );
		rtMediaStart = static_cast<REFERENCE_TIME>(m_rtPosition);
		rtMediaStop  = rtMediaStart + static_cast<int>(m_rtAvgTimePerFrame );
		pms->SetTime(&rtStart, &rtStop);
		pms->SetMediaTime(&rtMediaStart, &rtMediaStop);
		m_rtSampleTime = m_rtSampleTime + static_cast<int>(m_rtAvgTimePerFrame );
		m_rtPosition = m_rtPosition + m_rtAvgTimePerFrame;
		pms->SetSyncPoint(TRUE);
		Sleep(10);
		//char tmp[1024];
		//sprintf(tmp,"====================No Data!====================\n");
		//OutputDebugStringA(tmp);
		return S_OK;
	}
	av_init_packet(&pkt);
	int maxPktNum = m_bGetAvgFrameTime ? 12 : 7;
	while (pFilter->m_queueBuffer.nb_packets > maxPktNum)
	{
		for(int itmp=1; itmp<=5; itmp++)
		{
			CAutoLock lock(&m_csBuffer);
			pFilter->m_queueBuffer.Get(&pkt,1);
			av_free_packet(&pkt);
		}
		char tmp[1024];
		sprintf(tmp," ===================================Too Many Packets! Pop %d good Packet!\n",pFilter->m_queueBuffer.nb_packets);
		OutputDebugStringA(tmp);
	}

	{
		CAutoLock lock(&m_csBuffer);
		pFilter->m_queueBuffer.Get(&pkt, 1);
		if(pkt.flags & AV_PKT_FLAG_KEY)
		{
	/*char tmp[1024];
	sprintf(tmp,"Key Frame!\n");
	OutputDebugStringA(tmp);*/
		}
	}
	int ret = -1;
	//Record Video
	if(m_bRecordStatus == TRUE)
	{
		if(pkt.flags & AV_PKT_FLAG_KEY)
		{
			m_bFindKeyFrame = TRUE;
		}
		if(m_bFindKeyFrame)
		{
			av_init_packet(&pktForRecord);
			pktForRecord.size = pkt.size;
			pktForRecord.flags = pkt.flags;
			pktForRecord.pts = pts;
			pktForRecord.dts = pts;
			pktForRecord.data = new uint8_t[pktForRecord.size];
			memcpy(pktForRecord.data, pkt.data, pktForRecord.size);
			ret = av_interleaved_write_frame(m_fileSaverCtx, &pktForRecord);
			delete [] pktForRecord.data;
			pktForRecord.data = NULL;
			pktForRecord.size = 0;
			av_init_packet(&pktForRecord);
			av_free_packet(&pktForRecord);
			//pts += m_rtAvgTimePerFrame/1000*9;
			pts++;
		}
	}

	// BEFORE DECODE CB
	TMFrame beforeDecodeFrame;
	beforeDecodeFrame.data = (char *)pkt.data;
	beforeDecodeFrame.len = pkt.size;
	beforeDecodeFrame.decoded = FALSE;
	beforeDecodeFrame.error = FALSE;
	pFilter->CallBeforeDecodeCB(&beforeDecodeFrame);

	ret = -1;
	{
		CAutoLock lock(&m_csDecoder);
		if (m_pDecoder!=NULL)
		{			
			ret = m_pDecoder->DecodeFrame(&pic, m_pData, pkt.data, pkt.size);
		}		
	}

	// AFTER DECODE CB
	TMFrame afterDecodeFrame;
	afterDecodeFrame.data = (char *)pkt.data;
	afterDecodeFrame.len = pkt.size;
	afterDecodeFrame.decoded = TRUE;
	afterDecodeFrame.error = ret <= 0 ? TRUE : FALSE;
	// TODO: construct the pic
	for(int ptr_i=0; ptr_i<AV_NUM_DATA_POINTERS; ptr_i++)
	{
		afterDecodeFrame.pic.data[ptr_i] = pic.data[ptr_i];
		afterDecodeFrame.pic.linesize[ptr_i] = pic.linesize[ptr_i];
	}
	pFilter->CallAfterDecodeCB(&afterDecodeFrame);

	if(ret <=0)
	{
		char tmp[1024];
		sprintf(tmp," ===================================Decode BAD£¬rtSampleTime:%lld\n",m_rtSampleTime);
		OutputDebugStringA(tmp);
		REFERENCE_TIME rtStart, rtStop, rtMediaStart, rtMediaStop;
		// The sample times are modified by the current rate.
		rtStart = static_cast<REFERENCE_TIME>(m_rtSampleTime);
		rtStop  = rtStart + static_cast<int>(m_rtAvgTimePerFrame );
		rtMediaStart = static_cast<REFERENCE_TIME>(m_rtPosition);
		rtMediaStop  = rtMediaStart + static_cast<int>(m_rtAvgTimePerFrame );
		pms->SetTime(&rtStart, &rtStop);
		pms->SetMediaTime(&rtMediaStart, &rtMediaStop);
		m_rtSampleTime = rtStop;
		m_rtPosition = m_rtPosition + m_rtAvgTimePerFrame;
		pms->SetSyncPoint(TRUE);
		return S_OK;
	}


	pms->GetPointer(&pData);

	USES_CONVERSION;
	ZeroMemory(pData, lDataLen);	
	{
		CAutoLock cAutoLockShared(&m_cSharedState);	
		memcpy(pData,m_pData,lDataLen);
		//hack the 1920*1088, the last 8 line should be set to 0.
		if(pFilter->GetImageHeight() == 1088)
		{
			memset(pData, 0, pFilter->GetImageWidth()*8*sizeof(RGBQUAD));
		}
		//hack the 720*576, the first and last 2 lines should be set to 0.
		if(pFilter->GetImageHeight() == 576)
		{
			memset(pData, 0, pFilter->GetImageWidth()*2*sizeof(RGBQUAD));
			memset(pData + pFilter->GetImageWidth()*(pFilter->GetImageHeight()-2)*sizeof(RGBQUAD), 0, pFilter->GetImageWidth()*2*sizeof(RGBQUAD));
		}
		REFERENCE_TIME rtStart, rtStop, rtMediaStart, rtMediaStop;
		// The sample times are modified by the current rate.
		//rtStart = static_cast<REFERENCE_TIME>(m_rtSampleTime);
		if(m_rtFirstFrameTime == 0)
		{
			m_rtFirstFrameTime = pkt.pts ;
		}
		rtStart = (pkt.pts  - m_rtFirstFrameTime)*100/9*10 - 1000;
		if(rtStart > 0 && !m_bGetAvgFrameTime)
		{
			m_rtAvgTimePerFrame = rtStart - 0;
			m_bGetAvgFrameTime = TRUE;
		}
		//Guess FPS
		if(m_bGetAvgFrameTime && !m_bFPSGuessed)
		{
			CTMReceiverSrc *pFilter = (CTMReceiverSrc *)m_pFilter;
			AVCodecContext *pCodecCtx = pFilter->m_pFormatContext->streams[pFilter->m_videoStreamIndex]->codec;
			if(pCodecCtx->time_base.den > 0 && pCodecCtx->time_base.num > 0 && pCodecCtx->ticks_per_frame > 0 && m_bGetAvgFrameTime > 0)
			{
				FPS = pCodecCtx->time_base.den / (pCodecCtx->time_base.num * pCodecCtx->ticks_per_frame * m_bGetAvgFrameTime);
			}
			m_bFPSGuessed = TRUE;
		}
		rtStart = rtStart < m_rtPosition ? rtStart : m_rtPosition;
		rtStop  = rtStart + static_cast<int>(m_rtAvgTimePerFrame );
		rtMediaStart = static_cast<REFERENCE_TIME>(m_rtPosition);
		rtMediaStop  = rtMediaStart + static_cast<int>(m_rtAvgTimePerFrame );
		pms->SetTime(&rtStart, &rtStop);
		pms->SetMediaTime(&rtMediaStart, &rtMediaStop);
		m_rtSampleTime = rtStop;
		m_rtPosition = m_rtPosition + m_rtAvgTimePerFrame; 
		//char tmp[1024];
		//sprintf(tmp," Src Filter:Channel:%d__PTS:%lld__rtStart:%lld\n", pFilter->m_relatedChannel, pkt.pts, rtStart);
		//OutputDebugStringA(tmp);
	}
	pms->SetSyncPoint(TRUE);

//For debug
//char tmp2[1024];
//sprintf(tmp2,"Channel %d__Fill Buffer Finallly %d!\n", pFilter->m_relatedChannel, frame_count);
//OutputDebugStringA(tmp2);

	av_free_packet(&pkt);

	//CallBack
	//DecodeCallback decodeCB = NULL;
	//void *pCBParam = NULL;
	//HRESULT hr = pFilter->m_pConfigManager->GetDecodeCB(pFilter->m_relatedChannel, &decodeCB, &pCBParam);
	//if(SUCCEEDED(hr) && decodeCB != NULL)
	//{
	//	decodeCB(m_pData, lDataLen, pCBParam);
	//}
	return S_OK;
}
Ejemplo n.º 25
0
/**
 * Grab a frame from x11 (public device demuxer API).
 *
 * @param s1 Context from avformat core
 * @param pkt Packet holding the grabbed frame
 * @return frame size in bytes
 */
static int x11grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    X11GrabContext *s = s1->priv_data;
    Display *dpy      = s->dpy;
    XImage *image     = s->image;
    int x_off         = s->x_off;
    int y_off         = s->y_off;
    int follow_mouse  = s->follow_mouse;
    int screen, pointer_x, pointer_y, _, same_screen = 1;
    Window w, root;
    int64_t curtime, delay;
    struct timespec ts;

    /* Calculate the time of the next frame */
    s->time_frame += INT64_C(1000000);

    /* wait based on the frame rate */
    for (;;) {
        curtime = av_gettime();
        delay   = s->time_frame * av_q2d(s->time_base) - curtime;
        if (delay <= 0) {
            if (delay < INT64_C(-1000000) * av_q2d(s->time_base))
                s->time_frame += INT64_C(1000000);
            break;
        }
        ts.tv_sec  = delay / 1000000;
        ts.tv_nsec = (delay % 1000000) * 1000;
        nanosleep(&ts, NULL);
    }

    av_init_packet(pkt);
    pkt->data = image->data;
    pkt->size = s->frame_size;
    pkt->pts  = curtime;

    screen = DefaultScreen(dpy);
    root   = RootWindow(dpy, screen);

    if (follow_mouse || s->draw_mouse)
        same_screen = XQueryPointer(dpy, root, &w, &w,
                                    &pointer_x, &pointer_y, &_, &_, &_);

    if (follow_mouse && same_screen) {
        int screen_w, screen_h;

        screen_w = DisplayWidth(dpy, screen);
        screen_h = DisplayHeight(dpy, screen);
        if (follow_mouse == -1) {
            // follow the mouse, put it at center of grabbing region
            x_off += pointer_x - s->width / 2 - x_off;
            y_off += pointer_y - s->height / 2 - y_off;
        } else {
            // follow the mouse, but only move the grabbing region when mouse
            // reaches within certain pixels to the edge.
            if (pointer_x > x_off + s->width - follow_mouse)
                x_off += pointer_x - (x_off + s->width - follow_mouse);
            else if (pointer_x < x_off + follow_mouse)
                x_off -= (x_off + follow_mouse) - pointer_x;
            if (pointer_y > y_off + s->height - follow_mouse)
                y_off += pointer_y - (y_off + s->height - follow_mouse);
            else if (pointer_y < y_off + follow_mouse)
                y_off -= (y_off + follow_mouse) - pointer_y;
        }
        // adjust grabbing region position if it goes out of screen.
        s->x_off = x_off = FFMIN(FFMAX(x_off, 0), screen_w - s->width);
        s->y_off = y_off = FFMIN(FFMAX(y_off, 0), screen_h - s->height);

        if (s->show_region && s->region_win)
            XMoveWindow(dpy, s->region_win,
                        s->x_off - REGION_WIN_BORDER,
                        s->y_off - REGION_WIN_BORDER);
    }

    if (s->show_region && same_screen) {
        if (s->region_win) {
            XEvent evt = { .type = NoEventMask };
            // Clean up the events, and do the initial draw or redraw.
            while (XCheckMaskEvent(dpy, ExposureMask | StructureNotifyMask,
                                   &evt))
                ;
            if (evt.type)
                x11grab_draw_region_win(s);
        } else {
Ejemplo n.º 26
0
int AVFormatWriter::WriteVideoFrame(VideoFrame *frame)
{
    //AVCodecContext *c = m_videoStream->codec;

    uint8_t *planes[3];
    int len = frame->size;
    unsigned char *buf = frame->buf;
    int framesEncoded = m_framesWritten + m_bufferedVideoFrameTimes.size();

    planes[0] = buf;
    planes[1] = planes[0] + frame->width * frame->height;
    planes[2] = planes[1] + (frame->width * frame->height) /
        4; // (pictureFormat == PIX_FMT_YUV422P ? 2 : 4);

    m_picture->data[0] = planes[0];
    m_picture->data[1] = planes[1];
    m_picture->data[2] = planes[2];
    m_picture->linesize[0] = frame->width;
    m_picture->linesize[1] = frame->width / 2;
    m_picture->linesize[2] = frame->width / 2;
    m_picture->pts = framesEncoded + 1;
    m_picture->type = FF_BUFFER_TYPE_SHARED;

    if ((framesEncoded % m_keyFrameDist) == 0)
        m_picture->pict_type = AV_PICTURE_TYPE_I;
    else
        m_picture->pict_type = AV_PICTURE_TYPE_NONE;

    int got_pkt = 0;
    int ret = 0;

    m_bufferedVideoFrameTimes.push_back(frame->timecode);
    m_bufferedVideoFrameTypes.push_back(m_picture->pict_type);

    av_init_packet(m_pkt);
    m_pkt->data = (unsigned char *)m_videoOutBuf;
    m_pkt->size = len;

    {
        QMutexLocker locker(avcodeclock);
        ret = avcodec_encode_video2(m_videoStream->codec, m_pkt,
                                      m_picture, &got_pkt); 
    }

    if (ret < 0)
    {
        LOG(VB_RECORD, LOG_ERR, "avcodec_encode_video2() failed");
        return ret;
    }

    if (!got_pkt)
    {
        //LOG(VB_RECORD, LOG_DEBUG, QString("WriteVideoFrame(): Frame Buffered: cs: %1, mfw: %2, f->tc: %3, fn: %4, pt: %5").arg(m_pkt->size).arg(m_framesWritten).arg(frame->timecode).arg(frame->frameNumber).arg(m_picture->pict_type));
        return ret;
    }

    long long tc = frame->timecode;

    if (!m_bufferedVideoFrameTimes.isEmpty())
        tc = m_bufferedVideoFrameTimes.takeFirst();
    if (!m_bufferedVideoFrameTypes.isEmpty())
    {
        int pict_type = m_bufferedVideoFrameTypes.takeFirst();
        if (pict_type == AV_PICTURE_TYPE_I)
            m_pkt->flags |= AV_PKT_FLAG_KEY;
    }

    if (m_startingTimecodeOffset == -1)
        m_startingTimecodeOffset = tc - 1;
    tc -= m_startingTimecodeOffset;

    m_pkt->pts = tc * m_videoStream->time_base.den / m_videoStream->time_base.num / 1000;
    m_pkt->dts = AV_NOPTS_VALUE;
    m_pkt->stream_index= m_videoStream->index;

    //LOG(VB_RECORD, LOG_DEBUG, QString("WriteVideoFrame(): cs: %1, mfw: %2, pkt->pts: %3, tc: %4, fn: %5, pic->pts: %6, f->tc: %7, pt: %8").arg(m_pkt->size).arg(m_framesWritten).arg(m_pkt->pts).arg(tc).arg(frame->frameNumber).arg(m_picture->pts).arg(frame->timecode).arg(m_picture->pict_type));
    ret = av_interleaved_write_frame(m_ctx, m_pkt);
    if (ret != 0)
        LOG(VB_RECORD, LOG_ERR, LOC + "WriteVideoFrame(): "
                "av_interleaved_write_frame couldn't write Video");

    frame->timecode = tc + m_startingTimecodeOffset;
    m_framesWritten++;

    return 1;
}
Ejemplo n.º 27
0
JNIEXPORT jint Java_com_lijia_ffmpeg4android_HelloJni_getVoice(JNIEnv* env,
        jobject thiz) {  
    int flag = 0;  
    AVPacket aacpkt;  
    aacpkt.data = NULL;  
    aacpkt.size = 0;  
  
    const char *filename = "file:/sdcard/test.mp3";  
    av_init_packet(&aacpkt);
    av_register_all(); // register all available decoders

    AVFormatContext *pInFmtCtx = NULL; // container format context
    AVCodecContext *pInCodecCtx = NULL; // codec context

    pInFmtCtx = avformat_alloc_context();

    if (av_open_input_file(&pInFmtCtx, filename, NULL, 0, NULL) != 0) // probe the file format
        LOGE("av_open_input_file error\n");
    if (av_find_stream_info(pInFmtCtx) < 0) // read info about the audio/video streams in the file
        LOGE("av_find_stream_info error\n");  
    unsigned int j;  
    // Find the first audio stream  
  
    int audioStream = -1;  
    // Dump information about file onto standard error  
    dump_format(pInFmtCtx, 0, filename, 0);  
  
    // walk the streams in the format context and check each codec type;
    // CODEC_TYPE_AUDIO marks an audio stream
    for (j = 0; j < pInFmtCtx->nb_streams; j++) // find the audio stream
        if (pInFmtCtx->streams[j]->codec->codec_type == CODEC_TYPE_AUDIO) {
            audioStream = j;  
            break;  
        }  
    if (audioStream == -1) {  
        printf("input file has no audio stream\n");  
        return 0; // Didn't find a audio stream  
  
    }  
    LOGE("audio stream num: %d\n", audioStream);  
  
    pInCodecCtx = pInFmtCtx->streams[audioStream]->codec; // codec context of the audio stream
    AVCodec *pInCodec = NULL;  
    /* FILE *file3 = fopen("codec_private_data_size.txt","w");  
     for(int i = 0; i <200000; i++)  
     {  
     pInCodec = avcodec_find_decoder((CodecID)i);  
     if (pInCodec!=NULL)  
     {  
     fprintf(file3,"%s %d\n",pInCodec->name,pInCodec->priv_data_size );  
     }  
     }  
     fclose(file3);  
     system("pause");  
     */  
    pInCodec = avcodec_find_decoder(pInCodecCtx->codec_id); // look up the decoder for this codec ID
    if (pInCodec == NULL) {  
        printf("error no Codec found\n");  
        return -1; // Codec not found  
    }  
  
    // decoding also works with 'test' in place of pInCodecCtx; as the fields
    // below show, only a few key parameters are needed for decoding and resampling
    AVCodecContext *test = avcodec_alloc_context();
    test->bit_rate = pInCodecCtx->bit_rate; // used for resampling
    test->sample_rate = pInCodecCtx->sample_rate; // used for resampling
    test->channels = pInCodecCtx->channels; // used for resampling
    test->extradata = pInCodecCtx->extradata; // required when the codec has extradata
    test->extradata_size = pInCodecCtx->extradata_size; // required when the codec has extradata
    test->codec_type = CODEC_TYPE_AUDIO; // not strictly required
    test->block_align = pInCodecCtx->block_align; // required

    if (avcodec_open(test, pInCodec) < 0) // bind codec and context so the decode calls below can run
            {  
        printf("error avcodec_open failed.\n");  
        return -1; // Could not open codec  
  
    }  
  
    if (avcodec_open(pInCodecCtx, pInCodec) < 0) {  
        printf("error avcodec_open failed.\n");  
        return -1; // Could not open codec  
  
    }  
  
    static AVPacket packet;  
  
    LOGI(" bit_rate = %d \r\n", pInCodecCtx->bit_rate);  
    LOGI(" sample_rate = %d \r\n", pInCodecCtx->sample_rate);  
    LOGI(" channels = %d \r\n", pInCodecCtx->channels);  
    LOGI(" code_name = %s \r\n", pInCodecCtx->codec->name);  
    //LOGI("extra data size: %d :data%x %x %x %x\n",pInCodecCtx->extradata_size,pInCodecCtx->extradata[0]  
    // ,pInCodecCtx->extradata[1],pInCodecCtx->extradata[2],pInCodecCtx->extradata[3]);  
    LOGI(" block_align = %d\n", pInCodecCtx->block_align);  
    /********************************************************/  
    unsigned char *outbuf = NULL;  
    outbuf = (unsigned char *) malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE * 2);  
    int outsize;  
    outsize = AVCODEC_MAX_AUDIO_FRAME_SIZE * 2;  
    int len = 0;  
    int size = 0;  
  
    FILE *out_fp;  
  
    out_fp = fopen("out.dat", "wb+");  
    while (av_read_frame(pInFmtCtx, &aacpkt) >= 0) {  
        LOGI("***************************");  
  
        if (aacpkt.stream_index == audioStream) {  
            int declen = 0;  
            size = aacpkt.size;  
            while (aacpkt.size > 0) {  
                outsize = AVCODEC_MAX_AUDIO_FRAME_SIZE * 2;  
                memset(outbuf, 0, outsize);  
                len = avcodec_decode_audio3(pInCodecCtx, (int16_t *) outbuf,  
                        &outsize, &aacpkt);  
                if (len < 0) {  
                    printf("avcodec_decode_audio3 failed!\n");  
                    break;  
                }  
                if (outsize > 0) {  
                    fwrite(outbuf, 1, outsize, out_fp);  
                    printf("write %d bytes\n", outsize);  
                }  
                declen += len;  
                if (declen == size) {  
                    av_free_packet(&aacpkt);  
                    printf("packet decoded succeed!\n");  
                    break;  
                } else if (declen < size) {  
                    aacpkt.size -= len;  
                    aacpkt.data += len;  
                } else {  
                    printf("decode error!\n");  
                    break;  
                }  
  
            }  
  
        }  
  
    }  
  
    fclose(out_fp);  
    // Close the codec  
    avcodec_close(pInCodecCtx);  
  
    // Close the video file  
    av_close_input_file(pInFmtCtx);  
  
    return flag;  
  
}
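AVCODEC_MAX_AUDIO_FRAME_SIZE used above was removed from newer FFmpeg releases. A hedged sketch of sizing the output buffer from the stream parameters instead, on builds that provide av_samples_get_buffer_size() and assuming the same pInCodecCtx; note frame_size can be 0 for some codecs, so a fixed fallback is kept (192000 was the historical value of the removed constant):

    int out_bytes = av_samples_get_buffer_size(NULL,
                                               pInCodecCtx->channels,
                                               pInCodecCtx->frame_size,
                                               pInCodecCtx->sample_fmt, 1);
    unsigned char *outbuf = (unsigned char *) malloc(out_bytes > 0 ? out_bytes : 192000);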
Ejemplo n.º 28
0
int AVFormatWriter::WriteAudioFrame(unsigned char *buf, int fnum, long long &timecode)
{
#if HAVE_BIGENDIAN
    bswap_16_buf((short int*) buf, m_audioFrameSize, m_audioChannels);
#endif

    int got_packet = 0;
    int ret = 0;

    av_init_packet(m_audPkt);

    if (m_audioStream->codec->sample_fmt == AV_SAMPLE_FMT_FLT)
    {
        AudioOutputUtil::toFloat(FORMAT_S16, (void *)m_audioFltBuf, (void *)buf,
                                 m_audioFrameSize * 2 * m_audioChannels);
        m_audPicture->data[0] = (unsigned char *)m_audioFltBuf;
    }
    else
    {
        m_audPicture->data[0] = buf;
    }

    m_audPkt->data = m_audioOutBuf;
    m_audPkt->size = m_audioOutBufSize;

    m_audPicture->linesize[0] = m_audioFrameSize;
    m_audPicture->nb_samples = m_audioFrameSize;
    m_audPicture->format = m_audioStream->codec->sample_fmt;
    m_audPicture->extended_data = m_audPicture->data;

    m_bufferedAudioFrameTimes.push_back(timecode);

    {
        QMutexLocker locker(avcodeclock);
        ret = avcodec_encode_audio2(m_audioStream->codec, m_audPkt,
                                      m_audPicture, &got_packet);
    }

    if (ret < 0)
    {
        LOG(VB_RECORD, LOG_ERR, "avcodec_encode_audio2() failed");
        return ret;
    }

    if (!got_packet)
    {
        //LOG(VB_RECORD, LOG_ERR, QString("WriteAudioFrame(): Frame Buffered: cs: %1, mfw: %2, f->tc: %3, fn: %4").arg(m_audPkt->size).arg(m_framesWritten).arg(timecode).arg(fnum));
        return ret;
    }

    long long tc = timecode;

    if (m_bufferedAudioFrameTimes.size())
        tc = m_bufferedAudioFrameTimes.takeFirst();

    if (m_startingTimecodeOffset == -1)
        m_startingTimecodeOffset = tc - 1;
    tc -= m_startingTimecodeOffset;

    if (m_avVideoCodec)
        m_audPkt->pts = tc * m_videoStream->time_base.den / m_videoStream->time_base.num / 1000;
    else
        m_audPkt->pts = tc * m_audioStream->time_base.den / m_audioStream->time_base.num / 1000;

    m_audPkt->dts = AV_NOPTS_VALUE;
    m_audPkt->flags |= AV_PKT_FLAG_KEY;
    m_audPkt->stream_index = m_audioStream->index;

    //LOG(VB_RECORD, LOG_ERR, QString("WriteAudioFrame(): cs: %1, mfw: %2, pkt->pts: %3, tc: %4, fn: %5, f->tc: %6").arg(m_audPkt->size).arg(m_framesWritten).arg(m_audPkt->pts).arg(tc).arg(fnum).arg(timecode));

    ret = av_interleaved_write_frame(m_ctx, m_audPkt);
    if (ret != 0)
        LOG(VB_RECORD, LOG_ERR, LOC + "WriteAudioFrame(): "
                "av_interleaved_write_frame couldn't write Audio");
    timecode = tc + m_startingTimecodeOffset;

    return 1;
}
Ejemplo n.º 29
0
bool MediaEngine::stepVideo(int videoPixelMode, bool skipFrame) {
#ifdef USE_FFMPEG
	auto codecIter = m_pCodecCtxs.find(m_videoStream);
	AVCodecContext *m_pCodecCtx = codecIter == m_pCodecCtxs.end() ? 0 : codecIter->second;

	if (!m_pFormatCtx)
		return false;
	if (!m_pCodecCtx)
		return false;
	if ((!m_pFrame)||(!m_pFrameRGB))
		return false;

	updateSwsFormat(videoPixelMode);
	// TODO: Technically we could set this to frameWidth instead of m_desWidth for better perf.
	// Update the linesize for the new format too.  We started with the largest size, so it should fit.
	m_pFrameRGB->linesize[0] = getPixelFormatBytes(videoPixelMode) * m_desWidth;

	AVPacket packet;
	av_init_packet(&packet);
	int frameFinished;
	bool bGetFrame = false;
	while (!bGetFrame) {
		bool dataEnd = av_read_frame(m_pFormatCtx, &packet) < 0;
		// Even if we've read all frames, some may have been re-ordered frames at the end.
		// Still need to decode those, so keep calling avcodec_decode_video2().
		if (dataEnd || packet.stream_index == m_videoStream) {
			// avcodec_decode_video2() gives us the re-ordered frames with a NULL packet.
			if (dataEnd)
				av_free_packet(&packet);

			int result = avcodec_decode_video2(m_pCodecCtx, m_pFrame, &frameFinished, &packet);
			if (frameFinished) {
				if (!skipFrame) {
					sws_scale(m_sws_ctx, m_pFrame->data, m_pFrame->linesize, 0,
						m_pCodecCtx->height, m_pFrameRGB->data, m_pFrameRGB->linesize);
				}

				if (av_frame_get_best_effort_timestamp(m_pFrame) != AV_NOPTS_VALUE)
					m_videopts = av_frame_get_best_effort_timestamp(m_pFrame) + av_frame_get_pkt_duration(m_pFrame) - m_firstTimeStamp;
				else
					m_videopts += av_frame_get_pkt_duration(m_pFrame);
				bGetFrame = true;
			}
			if (result <= 0 && dataEnd) {
				// Sometimes, m_readSize is less than m_streamSize at the end, but not by much.
				// This is kinda a hack, but the ringbuffer would have to be prematurely empty too.
				m_isVideoEnd = !bGetFrame && (m_pdata->getQueueSize() == 0);
				if (m_isVideoEnd)
					m_decodingsize = 0;
				break;
			}
		}
		av_free_packet(&packet);
	}
	return bGetFrame;
#else
	// If video engine is not available, just add to the timestamp at least.
	m_videopts += 3003;
	return true;
#endif // USE_FFMPEG
}
Ejemplo n.º 30
0
ARCODECS_Manager_FFMPEGDecoder_t *ARCODECS_Manager_NewFFMPEGDecoder (eARCODECS_ERROR *error)
{
    /* -- Create a new FFMPEG decoder -- */
    ARCODECS_Manager_FFMPEGDecoder_t *ffmpegDecoder = NULL;
    eARCODECS_ERROR localError = ARCODECS_OK;
    
    ffmpegDecoder = calloc (1, sizeof(ARCODECS_Manager_FFMPEGDecoder_t));
    if (ffmpegDecoder == NULL)
    {
        localError = ARCODECS_ERROR_ALLOC;
    }
    /* No else: the FFMPEG Decoder is successfully allocated and initialized to "0" */
    if (localError == ARCODECS_OK)
    {
        /* register all the codecs */
        avcodec_register_all();
        
        //av_log_set_level(AV_LOG_WARNING);
        av_log_set_level(AV_LOG_QUIET);
        
        /* get the H264 decoder */
        ffmpegDecoder->codec = avcodec_find_decoder (AV_CODEC_ID_H264);
        if(ffmpegDecoder->codec == NULL)
        {
            localError = ARCODECS_ERROR_MANAGER_UNSUPPORTED_CODEC;
        }
        /* No else: the codec is successfully found */
    }
    /* No else: skipped by an error */
    if(localError == ARCODECS_OK)
    {
        ffmpegDecoder->codecCtx = avcodec_alloc_context3(ffmpegDecoder->codec);
        if(ffmpegDecoder->codecCtx == NULL)
        {
            localError = ARCODECS_ERROR_ALLOC;
        }
        /* No else : the ffmpeg context is successfully allocated */
    }
    /* No else: skipped by an error */
    if(localError == ARCODECS_OK)
    {
        /* initialize the codec context */
        ffmpegDecoder->codecCtx->pix_fmt = PIX_FMT_YUV420P;
        //ffmpegDecoder->codecCtx->pix_fmt = PIX_FMT_BGR24;
        ffmpegDecoder->codecCtx->skip_frame = AVDISCARD_DEFAULT;
        ffmpegDecoder->codecCtx->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
        ffmpegDecoder->codecCtx->skip_loop_filter = AVDISCARD_DEFAULT;
        ffmpegDecoder->codecCtx->workaround_bugs = FF_BUG_AUTODETECT;
        ffmpegDecoder->codecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
        ffmpegDecoder->codecCtx->codec_id = AV_CODEC_ID_H264;
        ffmpegDecoder->codecCtx->skip_idct = AVDISCARD_DEFAULT;
        
        if (avcodec_open2(ffmpegDecoder->codecCtx, ffmpegDecoder->codec, NULL) < 0)
        {
            localError = ARCODECS_ERROR_MANAGER_CODEC_OPENING;
        }
        /* No else: the codec is not open ; localError is set to ARCODECS_ERROR_MANAGER_CODEC_OPENING ; stops the processing */
    }
    /* No else: skipped by an error */
    if(localError == ARCODECS_OK)
    {
        ffmpegDecoder->decodedFrame = avcodec_alloc_frame();
        if (ffmpegDecoder->decodedFrame == NULL)
        {
            localError = ARCODECS_ERROR_ALLOC;
        }
        /* No else: the decodedFrame is not allocate ; localError is set to ARCODECS_ERROR_ALLOC ; stops the processing */
    }
    /* No else: skipped by an error */
    
    if(localError == ARCODECS_OK)
    {
        av_init_packet(&ffmpegDecoder->avpkt);
    }
    /* No else: skipped by an error */
    
    /* return error*/
    if(error != NULL)
    {
        *error = localError;
    }
    /* No else: the error is not returned */
    
    return ffmpegDecoder;
}
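A hedged usage sketch for the decoder this constructor returns, reusing the struct fields seen above (codecCtx, decodedFrame, avpkt); nalBuffer and nalSize are hypothetical inputs, and the decode call matches the FFmpeg era of the example:

    int got_frame = 0;
    decoder->avpkt.data = nalBuffer;  // hypothetical H.264 input buffer
    decoder->avpkt.size = nalSize;
    int len = avcodec_decode_video2(decoder->codecCtx, decoder->decodedFrame,
                                    &got_frame, &decoder->avpkt);
    if (len >= 0 && got_frame) {
        /* decoder->decodedFrame now holds a YUV420P picture */
    }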