Example #1
static demuxer_t* demux_open_lavf(demuxer_t *demuxer){
    AVDictionary *opts = NULL;
    AVFormatContext *avfc;
    AVDictionaryEntry *t = NULL;
    lavf_priv_t *priv= demuxer->priv;
    int i;
    char mp_filename[256]="mp:";

    stream_seek(demuxer->stream, 0);

    avfc = avformat_alloc_context();

    if (opt_cryptokey)
        parse_cryptokey(avfc, opt_cryptokey);
    if (user_correct_pts != 0)
        avfc->flags |= AVFMT_FLAG_GENPTS;
    if (index_mode == 0)
        avfc->flags |= AVFMT_FLAG_IGNIDX;

    if(opt_probesize) {
        if (av_opt_set_int(avfc, "probesize", opt_probesize, 0) < 0)
            mp_msg(MSGT_HEADER,MSGL_ERR, "demux_lavf, couldn't set option probesize to %u\n", opt_probesize);
    }
    if(opt_analyzeduration) {
        if (av_opt_set_int(avfc, "analyzeduration", opt_analyzeduration * AV_TIME_BASE, 0) < 0)
            mp_msg(MSGT_HEADER,MSGL_ERR, "demux_lavf, couldn't set option analyzeduration to %u\n", opt_analyzeduration);
    }

    if (rtsp_transport_http || rtsp_transport_tcp)
       av_dict_set(&opts, "rtsp_transport", rtsp_transport_http ? "http" : "tcp", 0);

    if(opt_avopt){
        if(av_dict_parse_string(&opts, opt_avopt, "=", ",", 0) < 0){
            mp_msg(MSGT_HEADER,MSGL_ERR, "Your options /%s/ look like gibberish to me pal\n", opt_avopt);
            av_dict_free(&opts);
            return NULL;
        }
    }

    if(demuxer->stream->url) {
        if (!strncmp(demuxer->stream->url, "ffmpeg://dummy://", 17))
            av_strlcpy(mp_filename, demuxer->stream->url + 17, sizeof(mp_filename));
        else if (!strncmp(demuxer->stream->url, "ffmpeg://", 9))
            av_strlcpy(mp_filename, demuxer->stream->url + 9, sizeof(mp_filename));
        else if (!strncmp(demuxer->stream->url, "rtsp://", 7))
            av_strlcpy(mp_filename, demuxer->stream->url, sizeof(mp_filename));
        else
            av_strlcat(mp_filename, demuxer->stream->url, sizeof(mp_filename));
    } else
        av_strlcat(mp_filename, "foobar.dummy", sizeof(mp_filename));

    if (!(priv->avif->flags & AVFMT_NOFILE)) {
        uint8_t *buffer = av_mallocz(BIO_BUFFER_SIZE);
        priv->pb = avio_alloc_context(buffer, BIO_BUFFER_SIZE, 0,
                                      demuxer, mp_read, NULL, mp_seek);
        priv->pb->read_seek = mp_read_seek;
        if (!demuxer->stream->end_pos || (demuxer->stream->flags & MP_STREAM_SEEK) != MP_STREAM_SEEK)
            priv->pb->seekable = 0;
        avfc->pb = priv->pb;
    }

    if(avformat_open_input(&avfc, mp_filename, priv->avif, &opts)<0){
        mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF_header: avformat_open_input() failed\n");
        av_dict_free(&opts);
        return NULL;
    }
    if (av_dict_count(opts)) {
        AVDictionaryEntry *e = NULL;
        int invalid = 0;
        while ((e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
            if (strcmp(e->key, "rtsp_transport")) {
                invalid++;
                mp_msg(MSGT_HEADER,MSGL_ERR,"Unknown option %s\n", e->key);
            }
        }
        if (invalid) {
            av_dict_free(&opts);
            return NULL;
        }
    }
    av_dict_free(&opts);

    priv->avfc= avfc;

    is_matroska_format = 0;

    if(avformat_find_stream_info(avfc, NULL) < 0){
        mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF_header: avformat_find_stream_info() failed\n");
    }

    /* Add metadata. */
    while((t = av_dict_get(avfc->metadata, "", t, AV_DICT_IGNORE_SUFFIX)))
        demux_info_add(demuxer, t->key, t->value);

    if(!strcmp("matroska,webm", priv->avif->name)) {
        is_matroska_format = 1;
    }

    for(i=0; i < avfc->nb_chapters; i++) {
        AVChapter *c = avfc->chapters[i];
        uint64_t start = av_rescale_q(c->start, c->time_base, (AVRational){1,1000});
        uint64_t end   = av_rescale_q(c->end, c->time_base, (AVRational){1,1000});
        t = av_dict_get(c->metadata, "title", NULL, 0);
        demuxer_add_chapter(demuxer, t ? t->value : NULL, start, end);
    }

    for(i=0; i<avfc->nb_streams; i++)
        handle_stream(demuxer, avfc, i);
    priv->nb_streams_last = avfc->nb_streams;

    if(avfc->nb_programs) {
        int p;
        for (p = 0; p < avfc->nb_programs; p++) {
            AVProgram *program = avfc->programs[p];
            t = av_dict_get(program->metadata, "title", NULL, 0);
            mp_msg(MSGT_HEADER,MSGL_INFO,"LAVF: Program %d %s\n", program->id, t ? t->value : "");
            mp_msg(MSGT_IDENTIFY, MSGL_V, "PROGRAM_ID=%d\n", program->id);
        }
    }

    mp_msg(MSGT_HEADER,MSGL_V,"LAVF: %d audio and %d video streams found\n",priv->audio_streams,priv->video_streams);
    mp_msg(MSGT_HEADER,MSGL_V,"LAVF: build %d\n", LIBAVFORMAT_BUILD);
    if(!priv->audio_streams) demuxer->audio->id=-2;  // nosound
//    else if(best_audio > 0 && demuxer->audio->id == -1) demuxer->audio->id=best_audio;
    if(!priv->video_streams){
        if(!priv->audio_streams){
            mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF: no audio or video headers found - broken file?\n");
            if (!priv->sub_streams)
                return NULL;
        }
        demuxer->video->id=-2; // audio-only
    } //else if (best_video > 0 && demuxer->video->id == -1) demuxer->video->id = best_video;

    return demuxer;
}
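Example #1 feeds libavformat through a custom AVIOContext whose mp_read/mp_seek callbacks live elsewhere in MPlayer. A rough sketch of what such callbacks must do; the names and the stdio-backed opaque pointer are assumptions, not MPlayer's actual code:

#include <stdio.h>
#include <errno.h>
#include <libavformat/avio.h>

// Hypothetical read callback: 'opaque' is whatever pointer was handed
// to avio_alloc_context(); here we assume a plain stdio stream.
static int my_read(void *opaque, uint8_t *buf, int buf_size)
{
    FILE *f = (FILE *)opaque;
    size_t n = fread(buf, 1, (size_t)buf_size, f);
    if (n == 0)
        return feof(f) ? AVERROR_EOF : AVERROR(errno);
    return (int)n;
}

// Hypothetical seek callback; AVSEEK_SIZE asks for the total stream size.
static int64_t my_seek(void *opaque, int64_t offset, int whence)
{
    FILE *f = (FILE *)opaque;
    if (whence == AVSEEK_SIZE)
        return -1; // size unknown; libavformat will probe sequentially
    if (fseeko(f, offset, whence & ~AVSEEK_FORCE) < 0)
        return AVERROR(errno);
    return ftello(f);
}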
Example #2
Result SoundSourceFFmpeg::tryOpen(const AudioSourceConfig& /*audioSrcCfg*/) {
    unsigned int i;
    AVDictionary *l_iFormatOpts = NULL;

    const QString localFileName(getLocalFileName());
    qDebug() << "New SoundSourceFFmpeg :" << localFileName;

    DEBUG_ASSERT(!m_pFormatCtx);
    m_pFormatCtx = avformat_alloc_context();

    if (m_pFormatCtx == NULL) {
        qDebug() << "SoundSourceFFmpeg::tryOpen: Can't allocate memory";
        return ERR;
    }

    // TODO() why is this required, shouldn't it be a runtime check
#if LIBAVCODEC_VERSION_INT < 3622144 // 55.69.0
    m_pFormatCtx->max_analyze_duration = 999999999;
#endif

    // libav replaces open() with ff_win32_open() which accepts a
    // Utf8 path
    // see: avformat/os_support.h
    // The old method defining an URL_PROTOCOL is deprecated
#if defined(_WIN32) && !defined(__MINGW32CE__)
    const QByteArray qBAFilename(
            avformat_version() >= ((52<<16)+(0<<8)+0) ?
            getLocalFileName().toUtf8() :
            getLocalFileName().toLocal8Bit());
#else
    const QByteArray qBAFilename(getLocalFileName().toLocal8Bit());
#endif

    // Open file and make m_pFormatCtx
    if (avformat_open_input(&m_pFormatCtx, qBAFilename.constData(), NULL,
                            &l_iFormatOpts) != 0) {
        qDebug() << "SoundSourceFFmpeg::tryOpen: cannot open" << localFileName;
        return ERR;
    }

    // TODO() why is this required, shouldn't it be a runtime check
#if LIBAVCODEC_VERSION_INT > 3544932 // 54.23.100
    av_dict_free(&l_iFormatOpts);
#endif

    // Retrieve stream information
    if (avformat_find_stream_info(m_pFormatCtx, NULL) < 0) {
        qDebug() << "SoundSourceFFmpeg::tryOpen: cannot read stream info from" << localFileName;
        return ERR;
    }

    //debug only (Enable if needed)
    //av_dump_format(m_pFormatCtx, 0, qBAFilename.constData(), false);

    // Find the first audio stream
    m_iAudioStream = -1;

    for (i = 0; i < m_pFormatCtx->nb_streams; i++)
        if (m_pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            m_iAudioStream = i;
            break;
        }

    if (m_iAudioStream == -1) {
        qDebug() <<
                 "SoundSourceFFmpeg::tryOpen: cannot find an audio stream: cannot open"
                 << localFileName;
        return ERR;
    }

    // Get a pointer to the codec context for the audio stream
    m_pCodecCtx = m_pFormatCtx->streams[m_iAudioStream]->codec;

    // Find the decoder for the audio stream
    if (!(m_pCodec = avcodec_find_decoder(m_pCodecCtx->codec_id))) {
        qDebug() << "SoundSourceFFmpeg::tryOpen: cannot find a decoder for" <<
                localFileName;
        return ERR;
    }

    if (avcodec_open2(m_pCodecCtx, m_pCodec, NULL)<0) {
        qDebug() << "SoundSourceFFmpeg::tryOpen: cannot open" << localFileName;
        return ERR;
    }

    m_pResample = new EncoderFfmpegResample(m_pCodecCtx);
    m_pResample->openMixxx(m_pCodecCtx->sample_fmt, AV_SAMPLE_FMT_FLT);

    setChannelCount(m_pCodecCtx->channels);
    setFrameRate(m_pCodecCtx->sample_rate);
    setFrameCount((qint64)round((double)((double)m_pFormatCtx->duration *
                                         (double)m_pCodecCtx->sample_rate) / (double)AV_TIME_BASE));

    qDebug() << "SoundSourceFFmpeg::tryOpen: Samplerate: " << getFrameRate() <<
             ", Channels: " <<
             getChannelCount() << "\n";
    if (getChannelCount() > 2) {
        qDebug() << "ffmpeg: No support for more than 2 channels!";
        return ERR;
    }

    return OK;
}
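This example reads codec parameters through stream->codec, which was deprecated in FFmpeg 3.1. A minimal sketch of the modern equivalent (not Mixxx's actual code) builds a fresh AVCodecContext from the stream's codecpar:

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

// Open a decoder for the given stream using AVCodecParameters.
static AVCodecContext *open_stream_decoder(AVFormatContext *fmt, int stream_idx)
{
    AVCodecParameters *par = fmt->streams[stream_idx]->codecpar;
    const AVCodec *dec = avcodec_find_decoder(par->codec_id);
    if (!dec)
        return NULL;
    AVCodecContext *ctx = avcodec_alloc_context3(dec);
    if (!ctx)
        return NULL;
    if (avcodec_parameters_to_context(ctx, par) < 0 ||
        avcodec_open2(ctx, dec, NULL) < 0) {
        avcodec_free_context(&ctx);
        return NULL;
    }
    return ctx;
}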
Example #3
static gpointer mpegts_demuxer_process_input(gpointer data)
{
    MpegTSDemuxer *demuxer = MPEGTS_DEMUXER(data);
    ParseAction action = PA_INIT;

#ifdef DEBUG_OUTPUT
    g_print("MpegTS: Entered process_input\n");
#endif

    while (demuxer->is_reading)
    {
        switch(action)
        {
        case PA_INIT:
            {
#ifdef DEBUG_OUTPUT
                g_print("MpegTS: action = PA_INIT\n");
#endif

                guchar      *io_buffer = (guchar*)av_malloc(BUFFER_SIZE);
                if (!io_buffer)
                {
                    post_error(demuxer, "LibAV input buffer alloc error", 0, GST_STREAM_ERROR_DEMUX);
                    return NULL;
                }

                AVIOContext *io_context = avio_alloc_context(io_buffer,            // buffer
                                                             BUFFER_SIZE,          // buffer size
                                                             0,                    // read only
                                                             demuxer,              // opaque reference
                                                             mpegts_demuxer_read_packet, // read callback
                                                             NULL,                 // write callback
                                                             mpegts_demuxer_seek); // seek callback

                if (!io_context)
                {
                    post_error(demuxer, "LibAV context alloc error", 0, GST_STREAM_ERROR_DEMUX);
                    return NULL;
                }

                demuxer->context = avformat_alloc_context();
                demuxer->context->pb = io_context;

                demuxer->adapter_limit_type = UNLIMITED;
                demuxer->adapter_limit_size = ADAPTER_LIMIT;

                AVInputFormat* iformat = av_find_input_format("mpegts");

                action = get_init_action(demuxer, avformat_open_input(&demuxer->context, "", iformat, NULL));

                if (action != PA_READ_FRAME)
                    break;

                action = get_init_action(demuxer, avformat_find_stream_info(demuxer->context, NULL));

                g_mutex_lock(&demuxer->lock);
                gint available = gst_adapter_available(demuxer->sink_adapter);
                demuxer->adapter_limit_type = LIMITED;
                gst_adapter_flush(demuxer->sink_adapter, available > demuxer->offset ? demuxer->offset : available);
                demuxer->flush_adapter = TRUE;
                demuxer->offset = 0;
                g_cond_signal(&demuxer->del_cond);
                g_mutex_unlock(&demuxer->lock);

                mpegts_demuxer_check_streams(demuxer);
            }
            break;

        case PA_READ_FRAME:
            //            g_print("action = PA_READ_FRAME, is_eos=%s, is_flushing=%s\n", BV(demuxer->is_eos), BV(demuxer->is_flushing));
            action = mpegts_demuxer_read_frame(demuxer);
            break;

        case PA_STOP:
#ifdef DEBUG_OUTPUT
            g_print("MpegTS: action = PA_STOP\n");
#endif
            demuxer->is_reading = FALSE;

            if (demuxer->context)
            {
                av_free(demuxer->context->pb->buffer);
                av_free(demuxer->context->pb);
                avformat_free_context(demuxer->context);
                demuxer->context = NULL;
            }
            break;

        default:
            break;
        }
    }

#ifdef DEBUG_OUTPUT
    g_print("MpegTS: Exiting process_input\n");
#endif

    return NULL;
}
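The PA_STOP branch frees the AVIOContext with a bare av_free() and uses avformat_free_context() even though the context was opened with avformat_open_input(). A sketch of the cleanup order current FFmpeg documents for custom I/O (the helper name is ours):

#include <libavformat/avformat.h>

static void close_custom_demuxer(AVFormatContext **fmt)
{
    if (!*fmt)
        return;
    AVIOContext *pb = (*fmt)->pb;  // we own this (AVFMT_FLAG_CUSTOM_IO)
    avformat_close_input(fmt);     // closes streams, frees *fmt, leaves pb to us
    if (pb) {
        av_freep(&pb->buffer);     // the buffer passed to avio_alloc_context()
        avio_context_free(&pb);    // replaces the bare av_free(pb) used above
    }
}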
Example #4
/*
* Return buffer with frame encoded as JPEG (via saveToJpeg)
*/
int CBaseOperation::GetFrame(PCHAR inputFilename, char *buffer, int bufferSize, int width, int height, int64_t timestamp){
	int error = 0;
	if (timestamp < 0){
		error = 1;
		return error;
	}

	av_register_all();
	clock_t startClock = clock();
	AVFormatContext *fmtCtx = avformat_alloc_context();
	AVCodecContext* codecCtx{ nullptr };
	SwsContext *imgconvertCtx{ nullptr };
	AVFrame *frame = av_frame_alloc();
	AVFrame *frame_rgb = av_frame_alloc();
	int gotFrame = 0;
	uint8_t* avpicBuffer{ nullptr };
	if (avformat_open_input(&fmtCtx, inputFilename, nullptr, nullptr) != 0){
		error = 2;
		goto cleanUp;
	}

	if (avformat_find_stream_info(fmtCtx, nullptr) < 0){
		error = 2;
		goto cleanUp;
	}

	auto videoStreamIndex = av_find_best_stream(fmtCtx, AVMEDIA_TYPE_VIDEO, -1, -1, nullptr, 0);
	if (videoStreamIndex < 0){
		error = 2;
		goto cleanUp;
	}

	codecCtx = fmtCtx->streams[videoStreamIndex]->codec;
	if (avcodec_open2(codecCtx, avcodec_find_decoder(codecCtx->codec_id), nullptr) < 0)
	{
		error = 2;
		goto cleanUp;
	}
	AVPacket packet;
	av_init_packet(&packet);
	timestamp = av_rescale_q(fmtCtx->start_time + timestamp * 1000, AV_TIME_BASE_Q, fmtCtx->streams[videoStreamIndex]->time_base);
	auto numBytes = avpicture_get_size(AV_PIX_FMT_RGB32, codecCtx->width, codecCtx->height);
	avpicBuffer = static_cast<uint8_t *>(av_malloc(numBytes*sizeof(uint8_t)));
	imgconvertCtx = sws_getContext(codecCtx->width, codecCtx->height, codecCtx->pix_fmt, width, height, AV_PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL);
	avpicture_fill(reinterpret_cast<AVPicture *>(frame_rgb), avpicBuffer, AV_PIX_FMT_RGB32, width, height);

	clock_t endClock = clock() - startClock;
	_RPT1(0, "OpenTime: %i\n", endClock);
	startClock = clock();
	if (0 > av_seek_frame(fmtCtx, videoStreamIndex, timestamp, AVSEEK_FLAG_BACKWARD))
	{
		error = 1;
		goto cleanUp;
	}
	while (!gotFrame && av_read_frame(fmtCtx, &packet) >= 0){
		if (packet.stream_index == videoStreamIndex)
			if (0 > avcodec_decode_video2(codecCtx, frame, &gotFrame, &packet)){
				error = 2;
				av_packet_unref(&packet);
				goto cleanUp;
			}
		av_packet_unref(&packet);
	}
	endClock = clock() - startClock;
	_RPT1(0, "DecodeTime: %i\n", endClock);
	if (gotFrame){
		sws_scale(imgconvertCtx, static_cast<const uint8_t* const*>(frame->data), frame->linesize, 0, codecCtx->height, frame_rgb->data, frame_rgb->linesize);
		startClock = clock();
		error = saveToJpeg(frame_rgb, width, height, buffer, bufferSize);
		endClock = clock() - startClock;
		_RPT1(0, "SaveToJpgTime: %i\n", endClock);
		goto cleanUp;
	}
cleanUp:
	if (avpicBuffer)
		av_free(avpicBuffer);
	if (codecCtx)
		avcodec_close(codecCtx);
	if (imgconvertCtx)
		sws_freeContext(imgconvertCtx);
	av_frame_free(&frame);
	av_frame_free(&frame_rgb);
	if (fmtCtx){
		avformat_close_input(&fmtCtx);
		avformat_free_context(fmtCtx);
	}
	return error;
}
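avcodec_decode_video2() used in the read loop above was deprecated in FFmpeg 3.1 and later removed. A simplified sketch of the same frame-grabbing loop on the send/receive API (names are ours, not the original project's):

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

// Read packets until one complete frame has been decoded into 'frame'.
static int decode_one_frame(AVFormatContext *fmt, AVCodecContext *dec,
                            int stream_idx, AVFrame *frame)
{
    AVPacket pkt;
    while (av_read_frame(fmt, &pkt) >= 0) {
        if (pkt.stream_index != stream_idx) {
            av_packet_unref(&pkt);
            continue;
        }
        int ret = avcodec_send_packet(dec, &pkt);
        av_packet_unref(&pkt);
        if (ret < 0)
            return ret;
        ret = avcodec_receive_frame(dec, frame);
        if (ret == 0)
            return 0;                // got a complete frame
        if (ret != AVERROR(EAGAIN))
            return ret;              // real decoder error
        // EAGAIN: decoder needs more input, keep reading
    }
    return AVERROR_EOF;
}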
Example #5
int decode_thread(void *arg) {

    VideoState *is = (VideoState *)arg;
    AVFormatContext *pFormatCtx = NULL;
    AVPacket pkt1, *packet = &pkt1;

    AVDictionary *io_dict = NULL;
    AVIOInterruptCB callback;

    int video_index = -1;
    int audio_index = -1;
    int i;

    is->videoStream = -1;
    is->audioStream = -1;
    is->audio_need_resample = 0;

    global_video_state = is;
    // will interrupt blocking functions if we quit!
    callback.callback = decode_interrupt_cb;
    callback.opaque = is;

    if(avio_open2(&is->io_context, is->filename, 0, &callback, &io_dict)) {
        fprintf(stderr, "Unable to open I/O for %s\n", is->filename);
        return -1;
    }

    // Open video file
    if(avformat_open_input(&pFormatCtx, is->filename, NULL, NULL) != 0) {
        return -1;    // Couldn't open file
    }

    is->pFormatCtx = pFormatCtx;

    // Retrieve stream information
    if(avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        return -1;    // Couldn't find stream information
    }

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, is->filename, 0);

    // Find the first video stream

    for(i = 0; i < pFormatCtx->nb_streams; i++) {
        if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
                video_index < 0) {
            video_index = i;
        }

        if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
                audio_index < 0) {
            audio_index = i;
        }
    }

    if(audio_index >= 0) {
        stream_component_open(is, audio_index);
    }

    if(video_index >= 0) {
        stream_component_open(is, video_index);
    }

    if(is->videoStream < 0 && is->audioStream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        goto fail;
    }

#ifdef __RESAMPLER__

    if( audio_index >= 0
            && pFormatCtx->streams[audio_index]->codec->sample_fmt != AV_SAMPLE_FMT_S16) {
        is->audio_need_resample = 1;
        is->pResampledOut = NULL;
        is->pSwrCtx = NULL;

        printf("Configure resampler: ");

#ifdef __LIBAVRESAMPLE__
        printf("libAvResample\n");
        is->pSwrCtx = avresample_alloc_context();
#endif

#ifdef __LIBSWRESAMPLE__
        printf("libSwResample\n");
        is->pSwrCtx = swr_alloc();
#endif

        // Some MP3/WAV files don't report a channel layout, so assume
        // they are stereo, not 5.1
        if (pFormatCtx->streams[audio_index]->codec->channel_layout == 0
                && pFormatCtx->streams[audio_index]->codec->channels == 2) {
            pFormatCtx->streams[audio_index]->codec->channel_layout = AV_CH_LAYOUT_STEREO;

        } else if (pFormatCtx->streams[audio_index]->codec->channel_layout == 0
                   && pFormatCtx->streams[audio_index]->codec->channels == 1) {
            pFormatCtx->streams[audio_index]->codec->channel_layout = AV_CH_LAYOUT_MONO;

        } else if (pFormatCtx->streams[audio_index]->codec->channel_layout == 0
                   && pFormatCtx->streams[audio_index]->codec->channels == 0) {
            pFormatCtx->streams[audio_index]->codec->channel_layout = AV_CH_LAYOUT_STEREO;
            pFormatCtx->streams[audio_index]->codec->channels = 2;
        }

        av_opt_set_int(is->pSwrCtx, "in_channel_layout",
                       pFormatCtx->streams[audio_index]->codec->channel_layout, 0);
        av_opt_set_int(is->pSwrCtx, "in_sample_fmt",
                       pFormatCtx->streams[audio_index]->codec->sample_fmt, 0);
        av_opt_set_int(is->pSwrCtx, "in_sample_rate",
                       pFormatCtx->streams[audio_index]->codec->sample_rate, 0);

        av_opt_set_int(is->pSwrCtx, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
        av_opt_set_int(is->pSwrCtx, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
        av_opt_set_int(is->pSwrCtx, "out_sample_rate", 44100, 0);

#ifdef __LIBAVRESAMPLE__

        if (avresample_open(is->pSwrCtx) < 0) {
#else

        if (swr_init(is->pSwrCtx) < 0) {
#endif
            fprintf(stderr, " ERROR!! From sample rate: %d Hz, sample format: %s\n",
                    pFormatCtx->streams[audio_index]->codec->sample_rate,
                    av_get_sample_fmt_name(pFormatCtx->streams[audio_index]->codec->sample_fmt));
            fprintf(stderr, "         To 44100 Sample format: s16\n");
            is->audio_need_resample = 0;
            is->pSwrCtx = NULL;
        }

    }

#endif

    // main decode loop

    for(;;) {
        if(is->quit) {
            break;
        }

        // seek stuff goes here
        if(is->audioq.size > MAX_AUDIOQ_SIZE ||
                is->videoq.size > MAX_VIDEOQ_SIZE) {
            SDL_Delay(10);
            continue;
        }

        if(av_read_frame(is->pFormatCtx, packet) < 0) {
            if(is->pFormatCtx->pb->error == 0) {
                SDL_Delay(100); /* no error; wait for user input */
                continue;

            } else {
                break;
            }
        }

        // Is this a packet from the video stream?
        if(packet->stream_index == is->videoStream) {
            packet_queue_put(&is->videoq, packet);

        } else if(packet->stream_index == is->audioStream) {
            packet_queue_put(&is->audioq, packet);

        } else {
            av_free_packet(packet);
        }
    }

    /* all done - wait for it */
    while(!is->quit) {
        SDL_Delay(100);
    }

fail: {
        SDL_Event event;
        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}

const char g_szClassName[] = "myWindowClass";

// Step 4: the Window Procedure
LRESULT CALLBACK WndProc(HWND hwnd, UINT msg, WPARAM wParam, LPARAM lParam)
{
    switch(msg)
    {
    case WM_CLOSE:
        DestroyWindow(hwnd);
        break;
    case WM_DESTROY:
        PostQuitMessage(0);
        break;
    default:
        return DefWindowProc(hwnd, msg, wParam, lParam);
    }
    return 0;
}

sem_t mutex;
HWND hwnd;

static void * window_manager(void * data) {
    printf("Window manager has been started!\n");
    WNDCLASSEX wc;

    MSG Msg;

    //Step 1: Registering the Window Class
    wc.cbSize        = sizeof(WNDCLASSEX);
    wc.style         = 0;
    wc.lpfnWndProc   = WndProc;
    wc.cbClsExtra    = 0;
    wc.cbWndExtra    = 0;
    //wc.hInstance     = hInstance;
    wc.hIcon         = LoadIcon(NULL, IDI_APPLICATION);
    wc.hCursor       = LoadCursor(NULL, IDC_ARROW);
    wc.hbrBackground = (HBRUSH)(COLOR_WINDOW+1);
    wc.lpszMenuName  = NULL;
    wc.lpszClassName = g_szClassName;
    wc.hIconSm       = LoadIcon(NULL, IDI_APPLICATION);

    if(!RegisterClassEx(&wc))
    {
        MessageBox(NULL, "Window Registration Failed!", "Error!",
                   MB_ICONEXCLAMATION | MB_OK);
        return 0;
    }

    // Step 2: Creating the Window
    hwnd = CreateWindowEx(
               WS_EX_CLIENTEDGE,
               g_szClassName,
               "The title of my window",
               WS_OVERLAPPEDWINDOW,
               CW_USEDEFAULT, CW_USEDEFAULT, 240, 120,
               NULL, NULL, NULL, NULL);
    sem_post(&mutex);
    if(hwnd == NULL)
    {
        MessageBox(NULL, "Window Creation Failed!", "Error!",
                   MB_ICONEXCLAMATION | MB_OK);
        return 0;
    } else {
        printf("window hwnd = %d\n", hwnd);
    }

    ShowWindow(hwnd, TRUE);
    UpdateWindow(hwnd);

    // Step 3: The Message Loop
    while(GetMessage(&Msg, NULL, 0, 0) > 0)
    {
        TranslateMessage(&Msg);
        DispatchMessage(&Msg);
    }

    return (void *)Msg.wParam;
}
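When only libswresample is available, the block of av_opt_set_int() calls above can be collapsed into a single call. A sketch (the helper name is ours; note swr_alloc_set_opts() was itself superseded by swr_alloc_set_opts2() in FFmpeg 5.1):

#include <libswresample/swresample.h>

// Builds a resampler to the fixed stereo/s16/44100 output used above.
static SwrContext *make_resampler(int64_t in_layout,
                                  enum AVSampleFormat in_fmt, int in_rate)
{
    SwrContext *swr = swr_alloc_set_opts(NULL,
            AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, 44100, // output
            in_layout, in_fmt, in_rate,                    // input (from codec ctx)
            0, NULL);
    if (swr && swr_init(swr) < 0)
        swr_free(&swr);
    return swr; // NULL on failure; caller falls back to unresampled audio
}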
Example #6
bool FeMedia::internal_open( sf::Texture *outt )
{
	if ( avformat_find_stream_info( m_format_ctx, NULL ) < 0 )
	{
		std::cerr << "Error finding stream information in input file: "
					<< m_format_ctx->filename << std::endl;
		return false;
	}

	if ( m_type & Audio )
	{
		int stream_id( -1 );
		AVCodec *dec;
		stream_id = av_find_best_stream( m_format_ctx, AVMEDIA_TYPE_AUDIO,
											-1, -1, &dec, 0 );

		if ( stream_id >= 0 )
		{
			m_format_ctx->streams[stream_id]->codec->request_sample_fmt = AV_SAMPLE_FMT_S16;

			if ( avcodec_open2( m_format_ctx->streams[stream_id]->codec,
										dec, NULL ) < 0 )
			{
				std::cerr << "Could not open audio decoder for file: "
						<< m_format_ctx->filename << std::endl;
			}
			else
			{
				m_audio = new FeAudioImp();
				m_audio->stream_id = stream_id;
				m_audio->codec_ctx = m_format_ctx->streams[stream_id]->codec;
				m_audio->codec = dec;

				//
				// TODO: Fix buffer sizing, we allocate way
				// more than we use
				//
				m_audio->buffer = (sf::Int16 *)av_malloc(
					MAX_AUDIO_FRAME_SIZE
					+ FF_INPUT_BUFFER_PADDING_SIZE
					+ m_audio->codec_ctx->sample_rate );

				sf::SoundStream::initialize(
					m_audio->codec_ctx->channels,
					m_audio->codec_ctx->sample_rate );

				sf::SoundStream::setLoop( false );

#ifndef DO_RESAMPLE
				if ( m_audio->codec_ctx->sample_fmt != AV_SAMPLE_FMT_S16 )
				{
					std::cerr << "Warning: Attract-Mode was compiled without an audio resampler (libswresample or libavresample)." << std::endl
						<< "The audio format in " << name << " appears to need resampling.  It will likely sound like garbage." << std::endl;
				}
#endif
			}
		}
	}

	if ( m_type & Video )
	{
		int stream_id( -1 );
		AVCodec *dec;
		stream_id = av_find_best_stream( m_format_ctx, AVMEDIA_TYPE_VIDEO,
					-1, -1, &dec, 0 );

		if ( stream_id < 0 )
		{
			std::cout << "No video stream found, file: "
				<< m_format_ctx->filename << std::endl;
		}
		else
		{
			m_format_ctx->streams[stream_id]->codec->workaround_bugs = FF_BUG_AUTODETECT;

			// Note also: http://trac.ffmpeg.org/ticket/4404
			m_format_ctx->streams[stream_id]->codec->thread_count=1;

			if ( avcodec_open2( m_format_ctx->streams[stream_id]->codec,
										dec, NULL ) < 0 )
			{
				std::cerr << "Could not open video decoder for file: "
					<< m_format_ctx->filename << std::endl;
			}
			else
			{
				m_video = new FeVideoImp( this );
				m_video->stream_id = stream_id;
				m_video->codec_ctx = m_format_ctx->streams[stream_id]->codec;
				m_video->codec = dec;
				m_video->time_base = sf::seconds(
						av_q2d(m_format_ctx->streams[stream_id]->time_base) );

				m_video->display_texture = outt;
				m_video->display_texture->create( m_video->codec_ctx->width,
						m_video->codec_ctx->height );
				m_video->preload();
			}
		}
	}

	if ( (!m_video) && (!m_audio) )
		return false;

	return true;
}
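Regarding the buffer-sizing TODO in the audio branch: libavutil can compute the exact byte count for a decoded frame instead of the MAX_AUDIO_FRAME_SIZE over-allocation. A sketch, assuming the decoder honours the requested AV_SAMPLE_FMT_S16:

#include <libavutil/samplefmt.h>

// Exact buffer size in bytes for nb_samples of interleaved s16 audio.
static int s16_buffer_bytes(int channels, int nb_samples)
{
    return av_samples_get_buffer_size(NULL, channels, nb_samples,
                                      AV_SAMPLE_FMT_S16, 0);
}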
Example #7
bool MediaEngine::openContext() {
#ifdef USE_FFMPEG
	InitFFmpeg();

	if (m_pFormatCtx || !m_pdata)
		return false;
	m_mpegheaderReadPos = 0;
	m_decodingsize = 0;

	u8* tempbuf = (u8*)av_malloc(m_bufSize);

	m_pFormatCtx = avformat_alloc_context();
	m_pIOContext = avio_alloc_context(tempbuf, m_bufSize, 0, (void*)this, _MpegReadbuffer, NULL, 0);
	m_pFormatCtx->pb = m_pIOContext;

	// Open video file
	if (avformat_open_input((AVFormatContext**)&m_pFormatCtx, NULL, NULL, NULL) != 0)
		return false;

	if (avformat_find_stream_info(m_pFormatCtx, NULL) < 0)
		return false;

	if (m_videoStream >= (int)m_pFormatCtx->nb_streams) {
		WARN_LOG_REPORT(ME, "Bad video stream %d", m_videoStream);
		m_videoStream = -1;
	}

	if (m_videoStream == -1) {
		// Find the first video stream
		for(int i = 0; i < (int)m_pFormatCtx->nb_streams; i++) {
			if(m_pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
				m_videoStream = i;
				break;
			}
		}
		if(m_videoStream == -1)
			return false;
	}

	// Get a pointer to the codec context for the video stream
	m_pCodecCtx = m_pFormatCtx->streams[m_videoStream]->codec;

	// Find the decoder for the video stream
	AVCodec *pCodec = avcodec_find_decoder(m_pCodecCtx->codec_id);
	if(pCodec == NULL)
		return false;

	// Open codec
	AVDictionary *optionsDict = 0;
	if(avcodec_open2(m_pCodecCtx, pCodec, &optionsDict)<0)
		return false; // Could not open codec

	setVideoDim();
	m_audioContext = AT3Create();
	m_isVideoEnd = false;
	m_noAudioData = false;
	m_mpegheaderReadPos++;
	av_seek_frame(m_pFormatCtx, m_videoStream, 0, 0);
#endif // USE_FFMPEG
	return true;
}
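openContext() relies on a custom read callback (_MpegReadbuffer) so libavformat pulls MPEG data from memory rather than from a file. A sketch of a minimal memory-backed read callback in the same spirit (the struct and names are assumptions, not PPSSPP's code):

#include <string.h>
#include <libavformat/avio.h>

typedef struct {
    const uint8_t *data;
    size_t size, pos;
} MemReader;

// Copies up to buf_size bytes from the in-memory buffer into 'buf'.
static int mem_read(void *opaque, uint8_t *buf, int buf_size)
{
    MemReader *m = (MemReader *)opaque;
    size_t left = m->size - m->pos;
    size_t n = (size_t)buf_size < left ? (size_t)buf_size : left;
    if (n == 0)
        return AVERROR_EOF;
    memcpy(buf, m->data + m->pos, n);
    m->pos += n;
    return (int)n;
}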
Example #8
int open_aplayer(const char *filename, struct aplayer_t *player) {
    // creates a new format (file container) context to be
    // used to detect the kind of file in use
    AVFormatContext *container = avformat_alloc_context();
    if(avformat_open_input(&container, filename, NULL, NULL) < 0) {
        WARN("Could not open file");
    }
    if(avformat_find_stream_info(container, NULL) < 0) {
        WARN("Could not find file info");
    }

#ifdef _DEBUG
    // dumps the format information to the standard output
    // this should print information on the container file
    av_dump_format(container, 0, filename, false);
#endif

    // starts the (audio) stream id with an invalid value and then
    // iterates over the complete set of stream to find one that complies
    // with the audio "interface"
    int stream_id = -1;
    for(unsigned int index = 0; index < container->nb_streams; index++) {
        // retrieves the current stream and checks if the
        // codec type is of type audio, in case it's not
        // continues the loop (nothing to be done)
        AVStream *stream = container->streams[index];
        if(stream->codec->codec_type != AVMEDIA_TYPE_AUDIO) { continue; }

        // sets the current index as the stream identifier
        // to be used from now on
        stream_id = index;
        break;
    }
    if(stream_id == -1) { WARN("Could not find Audio Stream"); }

    // retrieves the codec context associated with the audio stream
    // that was just discovered
    AVCodecContext *codec_ctx = container->streams[stream_id]->codec;

    // tries to find the codec for the current codec context and
    // opens it for the current execution
    AVCodec *codec = avcodec_find_decoder(codec_ctx->codec_id);
    if(codec == NULL) { WARN("Cannot find codec"); }
    if(avcodec_open2(codec_ctx, codec, NULL) < 0) { WARN("Codec cannot be opened"); }

    // initializes the ao structure creating the device associated
    // with the created structures this is going to be used
    ao_device *device = init_ao(codec_ctx);

    // allocates the buffer to be used in the packet for the
    // unpacking of the various packets
    uint8_t buffer[AVCODEC_MAX_AUDIO_FRAME_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];

    // creates the packet structure to be used and initializes
    // it, this is going to be the payload for each iteration
    // then sets its data and size
    av_init_packet(&player->packet);
    player->packet.data = buffer;
    player->packet.size = AVCODEC_MAX_AUDIO_FRAME_SIZE + FF_INPUT_BUFFER_PADDING_SIZE;

    // allocates a new frame structure to be used for the audio
    // frames in iteration
    AVFrame *frame = avcodec_alloc_frame();

    // updates the player structure with all the attributes that
    // were retrieved for the current context
    player->device = device;
    player->stream_id = stream_id;
    player->frame = frame;
    player->container = container;
    player->codec_ctx = codec_ctx;

    return 0;
}
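init_ao() is not shown in this example; with libao it would boil down to opening a live output device matching the codec's sample parameters. A hypothetical sketch, not the author's actual implementation:

#include <string.h>
#include <ao/ao.h>
#include <libavcodec/avcodec.h>

// Hypothetical init_ao(): open a libao live device matching the codec.
static ao_device *init_ao_sketch(AVCodecContext *ctx)
{
    ao_initialize();
    ao_sample_format fmt;
    memset(&fmt, 0, sizeof(fmt));
    fmt.bits = 16;                 // assumes decoding to 16-bit PCM
    fmt.rate = ctx->sample_rate;
    fmt.channels = ctx->channels;
    fmt.byte_format = AO_FMT_NATIVE;
    return ao_open_live(ao_default_driver_id(), &fmt, NULL);
}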
Example #9
int FfmpegCamera::OpenFfmpeg() {

    Debug ( 2, "OpenFfmpeg called." );

    mOpenStart = time(NULL);
    mIsOpening = true;

    // Open the input, not necessarily a file
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 4, 0)
    Debug ( 1, "Calling av_open_input_file" );
    if ( av_open_input_file( &mFormatContext, mPath.c_str(), NULL, 0, NULL ) !=0 )
#else
    Debug ( 1, "Calling avformat_open_input" );

    mFormatContext = avformat_alloc_context( );
    mFormatContext->interrupt_callback.callback = FfmpegInterruptCallback;
    mFormatContext->interrupt_callback.opaque = this;

    if ( avformat_open_input( &mFormatContext, mPath.c_str(), NULL, NULL ) !=0 )
#endif
    {
        mIsOpening = false;
        Error( "Unable to open input %s due to: %s", mPath.c_str(), strerror(errno) );
        return -1;
    }

    mIsOpening = false;
    Debug ( 1, "Opened input" );

    // Locate stream info from avformat_open_input
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 4, 0)
    Debug ( 1, "Calling av_find_stream_info" );
    if ( av_find_stream_info( mFormatContext ) < 0 )
#else
    Debug ( 1, "Calling avformat_find_stream_info" );
    if ( avformat_find_stream_info( mFormatContext, 0 ) < 0 )
#endif
        Fatal( "Unable to find stream info from %s due to: %s", mPath.c_str(), strerror(errno) );
    
    Debug ( 1, "Got stream info" );

    // Find first video stream present
    mVideoStreamId = -1;
    for (unsigned int i=0; i < mFormatContext->nb_streams; i++ )
    {
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,2,1)
        if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
#else
        if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO )
#endif
        {
            mVideoStreamId = i;
            break;
        }
    }
    if ( mVideoStreamId == -1 )
        Fatal( "Unable to locate video stream in %s", mPath.c_str() );

    Debug ( 1, "Found video stream" );

    mCodecContext = mFormatContext->streams[mVideoStreamId]->codec;

    // Try and get the codec from the codec context
    if ( (mCodec = avcodec_find_decoder( mCodecContext->codec_id )) == NULL )
        Fatal( "Can't find codec for video stream from %s", mPath.c_str() );

    Debug ( 1, "Found decoder" );

    // Open the codec
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 7, 0)
    Debug ( 1, "Calling avcodec_open" );
    if ( avcodec_open( mCodecContext, mCodec ) < 0 )
#else
    Debug ( 1, "Calling avcodec_open2" );
    if ( avcodec_open2( mCodecContext, mCodec, 0 ) < 0 )
#endif
        Fatal( "Unable to open codec for video stream from %s", mPath.c_str() );

    Debug ( 1, "Opened codec" );

    // Allocate space for the native video frame
    mRawFrame = avcodec_alloc_frame();

    // Allocate space for the converted video frame
    mFrame = avcodec_alloc_frame();
    
    if(mRawFrame == NULL || mFrame == NULL)
        Fatal( "Unable to allocate frame for %s", mPath.c_str() );

    Debug ( 1, "Allocated frames" );
    
    int pSize = avpicture_get_size( imagePixFormat, width, height );
    if( (unsigned int)pSize != imagesize) {
        Fatal("Image size mismatch. Required: %d Available: %d",pSize,imagesize);
    }

    Debug ( 1, "Validated imagesize" );
    
#if HAVE_LIBSWSCALE
    Debug ( 1, "Calling sws_isSupportedInput" );
    if(!sws_isSupportedInput(mCodecContext->pix_fmt)) {
        Fatal("swscale does not support the codec format: %c%c%c%c",(mCodecContext->pix_fmt)&0xff,((mCodecContext->pix_fmt>>8)&0xff),((mCodecContext->pix_fmt>>16)&0xff),((mCodecContext->pix_fmt>>24)&0xff));
    }
Example #10
bool VideoReaderUnit::OpenStreams(StreamSet* set) {
  // Setup FFMPEG.
  if (!ffmpeg_initialized_) {
    ffmpeg_initialized_ = true;
    av_register_all();
  }

  // Open video file.
  AVFormatContext* format_context = nullptr;
  if (avformat_open_input (&format_context, video_file_.c_str(), nullptr, nullptr) != 0) {
    LOG(ERROR) << "Could not open file: " << video_file_;
    return false;
  }

  if (avformat_find_stream_info(format_context, nullptr) < 0) {
    LOG(ERROR) << video_file_ << " is not a valid movie file.";
    return false;
  }

  // Get video stream index.
  video_stream_idx_ = -1;

  for (uint i = 0; i < format_context->nb_streams; ++i) {
    if (format_context->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
      video_stream_idx_ = i;
      break;
    }
  }

  if (video_stream_idx_ < 0) {
    LOG(ERROR) << "Could not find video stream in " << video_file_;
    return false;
  }

  AVCodecContext* codec_context = format_context->streams[video_stream_idx_]->codec;
  AVCodec* codec = avcodec_find_decoder (codec_context->codec_id);

  if (!codec) {
    LOG(ERROR) << "Unsupported codec for file " << video_file_;
    return false;
  }

  if (avcodec_open2(codec_context, codec, nullptr) < 0) {
    LOG(ERROR) << "Could not open codec";
    return false;
  }

  AVStream* av_stream = format_context->streams[video_stream_idx_];
  fps_ = av_q2d(av_stream->avg_frame_rate);
  LOG(INFO) << "Frame rate: " << fps_;

  // If av_q2d wasn't able to figure out the frame rate, default to 24.
  if (fps_ != fps_) {
    LOG(WARNING) << "Can't figure out frame rate - Defaulting to 24";
    fps_ = 24;
  }

  // Limit to meaningful values. Sometimes avg_frame_rate.* holds garbage.
  if (fps_ < 5) {
    LOG(WARNING) << "Capping video fps_ of " << fps_ << " to " << 5;
    fps_ = 5;
  }

  if (fps_ > 60) {
    LOG(WARNING) << "Capping video fps_ of " << fps_ << " to " << 60;
    fps_ = 60;
  }

  bytes_per_pixel_ = PixelFormatToNumChannels(options_.pixel_format);
  frame_width_ = codec_context->width;
  frame_height_ = codec_context->height;

  switch (options_.downscale) {
    case VideoReaderOptions::DOWNSCALE_NONE:
      output_width_ = frame_width_;
      output_height_ = frame_height_;
      downscale_factor_ = 1.0f;
      break;

    case VideoReaderOptions::DOWNSCALE_BY_FACTOR:
      if (options_.downscale_factor > 1.0f) {
        LOG(ERROR) << "Only downscaling is supported.";
        return false;
      }

      downscale_factor_ = options_.downscale_factor;
      output_width_ = std::ceil(frame_width_ * downscale_factor_);
      output_height_ = std::ceil(frame_height_ * downscale_factor_);
      break;

    case VideoReaderOptions::DOWNSCALE_TO_MIN_SIZE:
      downscale_factor_ = std::max(options_.downscale_size * (1.0f / frame_width_),
                                   options_.downscale_size * (1.0f / frame_height_));
      // Cap to downscaling.
      downscale_factor_ = std::min(1.0f, downscale_factor_);
      output_width_ = std::ceil(frame_width_ * downscale_factor_);
      output_height_ = std::ceil(frame_height_ * downscale_factor_);
      break;

    case VideoReaderOptions::DOWNSCALE_TO_MAX_SIZE:
      downscale_factor_ = std::min(options_.downscale_size * (1.0f / frame_width_),
                                   options_.downscale_size * (1.0f / frame_height_));
      // Cap to downscaling.
      downscale_factor_ = std::min(1.0f, downscale_factor_);
      output_width_ = std::ceil(frame_width_ * downscale_factor_);
      output_height_ = std::ceil(frame_height_ * downscale_factor_);
      break;
  }

  if (downscale_factor_ != 1.0) {
    LOG(INFO) << "Downscaling by factor " << downscale_factor_
              << " from " << frame_width_ << ", " << frame_height_
              << " to " << output_width_ << ", " << output_height_;
  }

  // Force even resolutions.
  output_width_ += output_width_ % 2;
  output_width_step_ = output_width_ * bytes_per_pixel_;

  // Pad width_step to be a multiple of 4.
  if (output_width_step_ % 4 != 0) {
    output_width_step_ += 4 - output_width_step_ % 4;
    DCHECK_EQ(output_width_step_ % 4, 0);
  }

  // Save some info for later use.
  codec_ = codec;
  codec_context_ = codec_context;
  format_context_ = format_context;

  // Allocate temporary structures.
  frame_yuv_ = av_frame_alloc();
  frame_bgr_ = av_frame_alloc();

  if (!frame_yuv_ || !frame_bgr_) {
    LOG(ERROR) << "Could not allocate AVFrames.";
    return false;
  }

  int pix_fmt;
  switch (options_.pixel_format) {
    case PIXEL_FORMAT_RGB24:
      pix_fmt = PIX_FMT_RGB24;
      break;
    case PIXEL_FORMAT_BGR24:
      pix_fmt = PIX_FMT_BGR24;
      break;
    case PIXEL_FORMAT_ARGB32:
      pix_fmt = PIX_FMT_ARGB;
      break;
    case PIXEL_FORMAT_ABGR32:
      pix_fmt = PIX_FMT_ABGR;
      break;
    case PIXEL_FORMAT_RGBA32:
      pix_fmt = PIX_FMT_RGBA;
      break;
    case PIXEL_FORMAT_BGRA32:
      pix_fmt = PIX_FMT_BGRA;
      break;
    case PIXEL_FORMAT_YUV422:
      pix_fmt = PIX_FMT_YUYV422;
      break;
    case PIXEL_FORMAT_LUMINANCE:
      pix_fmt = PIX_FMT_GRAY8;
      break;
    default:
      LOG(ERROR) << "Unsupported pixel format.";
      return false;
  }

  uint8_t* bgr_buffer = (uint8_t*)av_malloc(avpicture_get_size((::PixelFormat)pix_fmt,
                                                               output_width_,
                                                               output_height_));

  avpicture_fill((AVPicture*)frame_bgr_,
                 bgr_buffer,
                 (::PixelFormat)pix_fmt,
                 output_width_,
                 output_height_);

  // Setup SwsContext for color conversion.
  sws_context_ = sws_getContext(frame_width_,
                                frame_height_,
                                codec_context_->pix_fmt,
                                output_width_,
                                output_height_,
                                (::PixelFormat)pix_fmt,
                                SWS_BICUBIC,
                                nullptr,
                                nullptr,
                                nullptr);
  if(!sws_context_) {
    LOG(ERROR) << "Could not setup SwsContext for color conversion.";
    return false;
  }

  current_pos_ = 0;
  used_as_root_ = set->empty();
  VideoStream* vid_stream = new VideoStream(output_width_,
                                            output_height_,
                                            output_width_step_,
                                            fps_,
                                            options_.pixel_format,
                                            options_.stream_name);

  vid_stream->set_original_width(frame_width_);
  vid_stream->set_original_height(frame_height_);

  set->push_back(shared_ptr<VideoStream>(vid_stream));
  frame_num_ = 0;
  return true;
}
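The fps detection above trusts avg_frame_rate and then patches up NaN and out-of-range values by hand. Newer libavformat offers av_guess_frame_rate(), which combines several timing sources; a sketch with the same 24 fps fallback (the helper name is ours):

#include <libavformat/avformat.h>

static double guess_fps(AVFormatContext *fmt, AVStream *st)
{
    AVRational r = av_guess_frame_rate(fmt, st, NULL);
    return (r.num && r.den) ? av_q2d(r) : 24.0;  // same 24 fps fallback
}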
Example #11
// --------------------------------------------------------------------------
// ARDrone::initVideo()
// Description  : Initialize video.
// Return value : SUCCESS: 1  FAILURE: 0
// --------------------------------------------------------------------------
int ARDrone::initVideo(void)
{
    // AR.Drone 2.0
    if (version.major == ARDRONE_VERSION_2) {
        // Open the IP address and port
        char filename[256];
        sprintf(filename, "tcp://%s:%d", ip, ARDRONE_VIDEO_PORT);
        if (avformat_open_input(&pFormatCtx, filename, NULL, NULL) < 0) {
            CVDRONE_ERROR("avformat_open_input() was failed. (%s, %d)\n", __FILE__, __LINE__);
            return 0;
        }

        // Retrieve and dump stream information
        avformat_find_stream_info(pFormatCtx, NULL);
        av_dump_format(pFormatCtx, 0, filename, 0);

        // Find the decoder for the video stream
        pCodecCtx = pFormatCtx->streams[0]->codec;
        AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
        if (pCodec == NULL) {
            CVDRONE_ERROR("avcodec_find_decoder() was failed. (%s, %d)\n", __FILE__, __LINE__);
            return 0;
        }

        // Open codec
        if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
            CVDRONE_ERROR("avcodec_open2() was failed. (%s, %d)\n", __FILE__, __LINE__);
            return 0;
        }

        // Allocate video frames and a buffer
        pFrame = avcodec_alloc_frame();
        pFrameBGR = avcodec_alloc_frame();
        bufferBGR = (uint8_t*)av_mallocz(avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height) * sizeof(uint8_t));

        // Assign appropriate parts of buffer to image planes in pFrameBGR
        avpicture_fill((AVPicture*)pFrameBGR, bufferBGR, PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);

        // Convert it to BGR
        pConvertCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_BGR24, SWS_SPLINE, NULL, NULL, NULL);
    }
    // AR.Drone 1.0
    else {
        // Open the IP address and port
        if (!sockVideo.open(ip, ARDRONE_VIDEO_PORT)) {
            CVDRONE_ERROR("UDPSocket::open(port=%d) was failed. (%s, %d)\n", ARDRONE_VIDEO_PORT, __FILE__, __LINE__);
            return 0;
        }

        // Set codec
        //pCodecCtx = avcodec_alloc_context();
        pCodecCtx=avcodec_alloc_context3(NULL);
        pCodecCtx->width = 320;
        pCodecCtx->height = 240;

        // Allocate a buffer
        bufferBGR = (uint8_t*)av_mallocz(avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height));
    }

    // Allocate an IplImage
    img = cvCreateImage(cvSize(pCodecCtx->width, (pCodecCtx->height == 368) ? 360 : pCodecCtx->height), IPL_DEPTH_8U, 3);
    if (!img) {
        CVDRONE_ERROR("cvCreateImage() was failed. (%s, %d)\n", __FILE__, __LINE__);
        return 0;
    }

    // Clear the image
    cvZero(img);

    // Create a mutex
    mutexVideo = new pthread_mutex_t;
    pthread_mutex_init(mutexVideo, NULL);

    // Create a thread
    threadVideo = new pthread_t;
    if (pthread_create(threadVideo, NULL, runVideo, this) != 0) {
        CVDRONE_ERROR("pthread_create() was failed. (%s, %d)\n", __FILE__, __LINE__);
        return 0;
    }

    return 1;
}
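avpicture_get_size() and avpicture_fill() used here were deprecated in FFmpeg 3.0 in favour of the imgutils API. A sketch of the equivalent allocation (the helper name is ours):

#include <libavutil/imgutils.h>
#include <libavutil/frame.h>
#include <libavutil/mem.h>

// Allocate a BGR24 buffer and attach it to the frame's data planes.
static uint8_t *attach_bgr_buffer(AVFrame *frame, int width, int height)
{
    int size = av_image_get_buffer_size(AV_PIX_FMT_BGR24, width, height, 1);
    uint8_t *buf = (uint8_t *)av_mallocz(size);
    if (buf)
        av_image_fill_arrays(frame->data, frame->linesize, buf,
                             AV_PIX_FMT_BGR24, width, height, 1);
    return buf; // caller owns; free with av_free()
}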
Example #12
int main(int argc, char** argv)
{
	AVFormatContext* avfmt_ctx = NULL;
	int video_stream;
	enum AVCodecID video_codec;

	if (argc < 2)
	{
		fprintf(stderr, "Usage: %s filename\n", argv[0]);
		exit(EXIT_FAILURE);
	}

	char *filename = argv[1];

	av_register_all();

	if (avformat_open_input(&avfmt_ctx, filename, NULL, NULL) < 0)
	{
		fprintf(stderr, "Could not open source file %s\n", filename);
		exit(1);
	}

	if (avformat_find_stream_info(avfmt_ctx, NULL) < 0)
	{
		fprintf(stderr, "Could not find stream information\n");
		avformat_close_input(&avfmt_ctx);
		exit(1);
	}

	video_stream = av_find_best_stream(avfmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
	if (video_stream < 0)
	{
		fprintf(stderr, "Could not find video stream in input file\n");
		avformat_close_input(&avfmt_ctx);
		exit(1);
	}

	video_codec = avfmt_ctx->streams[video_stream]->codec->codec_id;

	if (video_codec != AV_CODEC_ID_MPEG1VIDEO && video_codec != AV_CODEC_ID_MPEG2VIDEO)
	{
		fprintf(stderr, "Can't handle codec %s\n", avcodec_get_name(video_codec));
		avformat_close_input(&avfmt_ctx);
		exit(1);
	}

	AVPacket pkt;
	av_init_packet(&pkt);
	pkt.data = NULL;
	pkt.size = 0;

	struct mpeg_t mpeg;
	memset(&mpeg, 0, sizeof(mpeg));
	if (video_codec == AV_CODEC_ID_MPEG1VIDEO)
		mpeg.type = MPEG1;
	else
		mpeg.type = MPEG2;

	if (!ve_open())
		err(EXIT_FAILURE, "Can't open VE");

	struct frame_buffers_t frame_buffers = { NULL, NULL, NULL };

	unsigned int disp_frame = 0, gop_offset = 0, gop_frames = 0, last_gop = 0;
	struct frame_t *frames[RING_BUFFER_SIZE];
	memset(frames, 0, sizeof(frames));

	// activate MPEG engine
	writel(ve_get_regs() + 0x00, 0x00130000);

	printf("Playing now... press Enter for next frame!\n");

	while (av_read_frame(avfmt_ctx, &pkt) >= 0)
	{
		mpeg.data = pkt.data;
		mpeg.len = pkt.size;
		mpeg.pos = 0;

		if (pkt.stream_index == video_stream && parse_mpeg(&mpeg))
		{
			// create output buffer
			frame_buffers.output = frame_new(mpeg.width, mpeg.height, COLOR_YUV420);
			if (!frame_buffers.backward)
				frame_buffers.backward = frame_ref(frame_buffers.output);
			if (!frame_buffers.forward)
				frame_buffers.forward = frame_ref(frame_buffers.output);

			// decode frame
			decode_mpeg(&frame_buffers, &mpeg);

			// simple frame reordering (not safe, only for testing)
			// count frames
			if (mpeg.gop > last_gop)
			{
				last_gop = mpeg.gop;
				gop_offset += gop_frames;
				gop_frames = 0;
			}
			gop_frames++;

			// save frame in ringbuffer
			if (frames[(gop_offset + mpeg.temporal_reference) % RING_BUFFER_SIZE] != NULL)
			{
				printf("Buffer overrun!\n");
				frame_unref(frames[(gop_offset + mpeg.temporal_reference) % RING_BUFFER_SIZE]);
			}
			frames[(gop_offset + mpeg.temporal_reference) % RING_BUFFER_SIZE] = frame_buffers.output;

			// if we decoded a displayable frame, show it
			if (frames[disp_frame % RING_BUFFER_SIZE] != NULL)
			{
				frame_show(frames[disp_frame % RING_BUFFER_SIZE]);
				frame_unref(frames[(disp_frame - 2) % RING_BUFFER_SIZE]);
				frames[(disp_frame - 2) % RING_BUFFER_SIZE] = NULL;
				disp_frame++;
				getchar();
			}

		}
		av_free_packet(&pkt);
	}

	// stop MPEG engine
	writel(ve_get_regs() + 0x0, 0x00130007);

	// show left over frames
	while (disp_frame < gop_offset + gop_frames && frames[disp_frame % RING_BUFFER_SIZE] != NULL)
	{
		frame_show(frames[disp_frame % RING_BUFFER_SIZE]);
		frame_unref(frames[(disp_frame - 2) % RING_BUFFER_SIZE]);
		frames[(disp_frame - 2) % RING_BUFFER_SIZE] = NULL;
		disp_frame++;
		getchar();
	}

	disp_close();

	frame_unref(frames[(disp_frame - 2) % RING_BUFFER_SIZE]);
	frame_unref(frames[(disp_frame - 1) % RING_BUFFER_SIZE]);

	frame_unref(frame_buffers.forward);
	frame_unref(frame_buffers.backward);

	ve_close();

	avformat_close_input(&avfmt_ctx);

	return 0;
}
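One portability note: av_free_packet() in the read loop was deprecated in libavcodec 57 and later removed; av_packet_unref() is the drop-in replacement, so on current FFmpeg the loop tail becomes:

		av_packet_unref(&pkt); // drop-in replacement for av_free_packet(&pkt)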
Example #13
	VideoStream::VideoStream(const std::string& filename, unsigned int numFrameBuffered, GLenum minFilter, GLenum magFilter, GLenum sWrapping, GLenum tWrapping, int maxLevel)
	 : __ReadOnly_ComponentLayout(declareLayout(numFrameBuffered)), InputDevice(declareLayout(numFrameBuffered), "Reader"), idVideoStream(0), readFrameCount(0), timeStampFrameRate(1.0f), timeStampOffset(0), timeStampOfLastFrameRead(0), endReached(false),
	   pFormatCtx(NULL), pCodecCtx(NULL), pCodec(NULL), pFrame(NULL), pFrameRGB(NULL), buffer(NULL), pSWSCtx(NULL), idCurrentBufferForWritting(0)
	{
		#ifdef __USE_PBO__
			#ifdef __VIDEO_STREAM_VERBOSE__
				std::cout << "VideoStream::VideoStream - Using PBO for uploading data to the GPU." << std::endl;
			#endif
			pbo = NULL;
		#else
			#ifdef __VIDEO_STREAM_VERBOSE__
				std::cout << "VideoStream::VideoStream - Using standard method HdlTexture::write for uploading data to the GPU." << std::endl;
			#endif
		#endif

		int retCode = 0;

		// Open stream :
		//DEPRECATED IN libavformat : retCode = av_open_input_file(&pFormatCtx, filename.c_str(), NULL, 0, NULL)!=0);
		retCode = avformat_open_input(&pFormatCtx, filename.c_str(), NULL, NULL);

		if(retCode!=0)
			throw Exception("VideoStream::VideoStream - Failed to open stream (at av_open_input_file).", __FILE__, __LINE__);

		// Find stream information :
		//DEPRECATED : retCode = av_find_stream_info(pFormatCtx);
		retCode = avformat_find_stream_info(pFormatCtx, NULL);

		if(retCode<0)
			throw Exception("VideoStream::VideoStream - Failed to open stream (at av_find_stream_info).", __FILE__, __LINE__);

		// Walk through pFormatCtx->nb_streams to find a/the first video stream :
		for(idVideoStream=0; idVideoStream<pFormatCtx->nb_streams; idVideoStream++)
			//DEPRECATED : if(pFormatCtx->streams[idVideoStream]->codec->codec_type==CODEC_TYPE_VIDEO)
			if(pFormatCtx->streams[idVideoStream]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
				break;

		if(idVideoStream>=pFormatCtx->nb_streams)
			throw Exception("VideoStream::VideoStream - Failed to find video stream (at streams[idVideoStream]->codec->codec_type==CODEC_TYPE_VIDEO).", __FILE__, __LINE__);

		// Get a pointer to the codec context for the video stream :
		pCodecCtx = pFormatCtx->streams[idVideoStream]->codec;

		// Find the decoder for the video stream :
		pCodec = avcodec_find_decoder(pCodecCtx->codec_id);

		if(pCodec==NULL)
			throw Exception("VideoStream::VideoStream - No suitable codec found (at avcodec_find_decoder).", __FILE__, __LINE__);

		// Open codec :
		//DEPRECATED : retCode = avcodec_open(pCodecCtx, pCodec);
		retCode = avcodec_open2(pCodecCtx, pCodec, NULL);

		if(retCode<0)
			throw Exception("VideoStream::VideoStream - Could not open codec (at avcodec_open).", __FILE__, __LINE__);

		// Get the framerate :
		/*float timeUnit_sec = static_cast<float>(pCodecCtx->time_base.num)/static_cast<float>(pCodecCtx->time_base.den);
		frameRate = 1.0f/(pCodecCtx->timeUnit_sec;*/

		timeStampFrameRate = static_cast<float>(pFormatCtx->streams[idVideoStream]->time_base.den)/static_cast<float>(pFormatCtx->streams[idVideoStream]->time_base.num);

		// Get the duration :
		duration_sec = pFormatCtx->duration / AV_TIME_BASE;

		#ifdef __VIDEO_STREAM_VERBOSE__
			std::cout << "VideoStream::VideoStream" << std::endl;
			std::cout << "                         - Frame rate : " << timeStampFrameRate << " frames per second (for time stamps)" << std::endl;
			std::cout << "                         - Duration   : " << duration_sec << " second(s)" << std::endl;
		#endif

		// Allocate video frame :
		pFrame = avcodec_alloc_frame();

		// Allocate an AVFrame structure :
		pFrameRGB = avcodec_alloc_frame();

		if(pFrameRGB==NULL)
			throw Exception("VideoStream::VideoStream - Failed to open stream (at avcodec_alloc_frame).", __FILE__, __LINE__);

		// Determine required buffer size and allocate buffer :
		bufferSizeBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
		buffer = (uint8_t *)av_malloc(bufferSizeBytes*sizeof(uint8_t));

		#ifdef __VIDEO_STREAM_VERBOSE__
			std::cout << "VideoStream::VideoStream - Frame size : " << pCodecCtx->width << "x" << pCodecCtx->height << std::endl;
		#endif

		if(buffer==NULL)
			throw Exception("VideoStream::VideoStream - Unable to allocate video frame buffer.", __FILE__, __LINE__);

		// Assign appropriate parts of buffer to image planes in pFrameRGB (Note that pFrameRGB is an AVFrame, but AVFrame is a superset of AVPicture) :
		avpicture_fill( (AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);

		// Initialize libswscale :
		pSWSCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_RGB24, SWS_POINT, NULL, NULL, NULL);

		// Create format :
		HdlTextureFormat frameFormat(pCodecCtx->width, pCodecCtx->height, GL_RGB, GL_UNSIGNED_BYTE, minFilter, magFilter, sWrapping, tWrapping, 0, maxLevel);

		// Create the texture :
		for(unsigned int i=0; i<numFrameBuffered; i++)
		{
			//old : addOutputPort("output" + to_string(i));
			textureBuffers.push_back( new HdlTexture(frameFormat) );

			// YOU MUST WRITE ONCE IN THE TEXTURE BEFORE USING PBO::copyToTexture ON IT.
			// We are also doing this to prevent reading from an empty (not-yet-allocated) texture.
			textureBuffers.back()->fill(0);

			// Set links :
			setTextureLink(textureBuffers.back(), i);
		}

		#ifdef __USE_PBO__
			// Create PBO for uploading data to GPU :
			pbo = new HdlPBO(frameFormat, GL_PIXEL_UNPACK_BUFFER_ARB,GL_STREAM_DRAW_ARB);
		#endif

		// Finish by forcing read of first frame :
		readNextFrame();
	}
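The constructor derives timeStampFrameRate as ticks-per-second from the stream time base. Going the other way, converting a frame's timestamp to seconds, is the common companion operation; a sketch (the helper name is ours):

#include <libavformat/avformat.h>

static double pts_to_seconds(const AVStream *st, int64_t pts)
{
    if (pts == AV_NOPTS_VALUE)
        return -1.0;                      // no timestamp on this frame
    return pts * av_q2d(st->time_base);  // ticks * seconds-per-tick
}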
Example #14
/*!
  Allocates and initializes the parameters depending on the video and the desired color type.
  Once the stream is opened, it is possible to get the video encoding framerate with getFramerate(),
  and the dimensions of the images using getWidth() and getHeight().
  
  \param filename : Path to the video which has to be read.
  \param colortype : Desired color map used to open the video.
  The parameter can take two values : COLORED and GRAY_SCALED.
  
  \return It returns true if the parameters could be initialized. Else it returns false.
*/
bool vpFFMPEG::openStream(const char *filename, vpFFMPEGColorType colortype)
{
  this->color_type = colortype;
  
  av_register_all();
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53,0,0) // libavformat 52.84.0
  if (av_open_input_file (&pFormatCtx, filename, NULL, 0, NULL) != 0)
#else
  if (avformat_open_input (&pFormatCtx, filename, NULL, NULL) != 0) // libavformat 53.4.0
#endif
  {
    vpTRACE("Couldn't open file ");
    return false;
  }

#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53,21,0) // libavformat 53.21.0
  if (av_find_stream_info (pFormatCtx) < 0)
#else 
  if (avformat_find_stream_info (pFormatCtx, NULL) < 0)
#endif
      return false;
  
  videoStream = 0;
  bool found_codec = false;
  
  /*
  * Detect streams types
  */
  for (unsigned int i = 0; i < pFormatCtx->nb_streams; i++)
  {
#if LIBAVUTIL_VERSION_INT < AV_VERSION_INT(51,0,0)
    if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO) // avutil 50.33.0
#else
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) // avutil 51.9.1
#endif
    {
      videoStream = i;
      //std::cout << "rate: " << pFormatCtx->streams[i]->r_frame_rate.num << " " << pFormatCtx->streams[i]->r_frame_rate.den << std::endl;
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(55,12,0)
      framerate_stream =  pFormatCtx->streams[i]->r_frame_rate.num;
      framerate_stream /= pFormatCtx->streams[i]->r_frame_rate.den;
#else
      framerate_stream =  pFormatCtx->streams[i]->avg_frame_rate.num;
      framerate_stream /= pFormatCtx->streams[i]->avg_frame_rate.den;
#endif
      found_codec= true;
      break;
    }
  }

  if (found_codec)
  {
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);

    if (pCodec == NULL)
    {
      vpTRACE("unsuported codec");
      return false;		// Codec not found
    }
    
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(53,35,0) // libavcodec 53.35.0
    if (avcodec_open (pCodecCtx, pCodec) < 0)
#else
    if (avcodec_open2 (pCodecCtx, pCodec, NULL) < 0)
#endif
    {
      vpTRACE("Could not open codec");
      return false;		// Could not open codec
    }

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,34,0)
    pFrame = avcodec_alloc_frame();
#else
    pFrame = av_frame_alloc(); // libavcodec 55.34.1
#endif

    if (color_type == vpFFMPEG::COLORED)
    {
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,34,0)
      pFrameRGB=avcodec_alloc_frame();
#else
      pFrameRGB=av_frame_alloc(); // libavcodec 55.34.1
#endif
    
      if (pFrameRGB == NULL)
        return false;
      
      numBytes = avpicture_get_size (PIX_FMT_RGB24,pCodecCtx->width,pCodecCtx->height);
    }
    
    else if (color_type == vpFFMPEG::GRAY_SCALED)
    {
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,34,0)
      pFrameGRAY=avcodec_alloc_frame();
#else
      pFrameGRAY=av_frame_alloc(); // libavcodec 55.34.1
#endif
    
      if (pFrameGRAY == NULL)
        return false;
      
      numBytes = avpicture_get_size (PIX_FMT_GRAY8,pCodecCtx->width,pCodecCtx->height);
    }  

    /*
     * Determine required buffer size and allocate buffer
     */
    width = pCodecCtx->width ;
    height = pCodecCtx->height ;
    buffer = (uint8_t *) malloc ((unsigned int)(sizeof (uint8_t)) * (unsigned int)numBytes);
  }
  else
  {
    vpTRACE("Didn't find a video stream");
    return false;
  }
  
  if (color_type == vpFFMPEG::COLORED)
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
  
  else if (color_type == vpFFMPEG::GRAY_SCALED)
    avpicture_fill((AVPicture *)pFrameGRAY, buffer, PIX_FMT_GRAY8, pCodecCtx->width, pCodecCtx->height);
  
  streamWasOpen = true;

  return true;
}
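
A side note on the #if LIBAV*_VERSION_INT guards that openStream() repeats at every call site: they are the usual way to keep one source tree building against several FFmpeg/libav releases. As a minimal sketch (assuming only the frame-allocation API changed between the releases in question), the guard can be folded into one compat helper so the call sites stay clean:

#include <libavcodec/avcodec.h>

/* avcodec_alloc_frame() was superseded by av_frame_alloc() in
 * libavcodec 55.34; this mirrors the guards used in openStream(). */
static AVFrame *compat_frame_alloc(void)
{
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,34,0)
    return avcodec_alloc_frame();
#else
    return av_frame_alloc();
#endif
}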
Example #15
0
	//----------------------------------------------------------------------------------------------------
	bool EEMusic::AsyncLoadMusic(const char* _fileName)
	{
		AVFormatContext *formatContext = NULL;
		int streamIndex = -1;
		AVCodecContext *codecContext = NULL;
		AVCodec *codec = NULL;

		// open file
		if (avformat_open_input(&formatContext, _fileName, NULL, NULL) < 0)
		{
			return false;
		}

		// find stream info
		if (avformat_find_stream_info(formatContext, NULL) < 0)
		{
			//unable to find stream info
			avformat_close_input(&formatContext);
			return false;
		}
		// find the stream
		if ((streamIndex = av_find_best_stream(formatContext, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0)) < 0)
		{
			avformat_close_input(&formatContext);
			return false;
		}

		// find decoder
		codecContext = formatContext->streams[streamIndex]->codec;
		codec = avcodec_find_decoder(codecContext->codec_id);
		if (!codec)
		{
			avformat_close_input(&formatContext);
			return false;
		}
		// open codec
		if (avcodec_open2(codecContext, codec, NULL) < 0)
		{
			avformat_close_input(&formatContext);
			return false;
		}

		int channels = codecContext->channels;
		int bitsPerSample = av_get_bits_per_sample(av_get_pcm_codec(codecContext->sample_fmt, -1));
		int bytesPerSample = bitsPerSample / 8;
		int samplesPerSec = codecContext->sample_rate;
		int blockAlign = bytesPerSample * channels;
		int avgBytesPerSec = samplesPerSec * blockAlign;
		// m_totalBytes = (int)((double)formatContext->duration / AV_TIME_BASE * avgBytesPerSec);
		// m_totalSamples = (int)((double)formatContext->duration / AV_TIME_BASE * samplesPerSec);
		// m_totalTime = formatContext->duration / (double)AV_TIME_BASE;

		if (bitsPerSample == 32)
			m_format.wFormatTag = WAVE_FORMAT_IEEE_FLOAT;
		else
			m_format.wFormatTag = WAVE_FORMAT_PCM;
		m_format.nChannels = channels;
		m_format.nSamplesPerSec = samplesPerSec;
		m_format.nAvgBytesPerSec = avgBytesPerSec;
		m_format.nBlockAlign = blockAlign;
		m_format.wBitsPerSample = bitsPerSample;
		m_format.cbSize = 0;
		if (FAILED(s_XAudio2->CreateSourceVoice(&m_sourceVoice, &m_format, 0, XAUDIO2_DEFAULT_FREQ_RATIO, &m_musicCallBack)))
			return false;

		// todo: keep the handle of the thread
		m_loader = new boost::thread([&, bytesPerSample, formatContext, streamIndex, codecContext, codec] () mutable ->bool {
			AVPacket *packet = new AVPacket;
			av_init_packet(packet);
			AVFrame	*frame = av_frame_alloc();
			uint32_t len = 0;
			int got;
			while (av_read_frame(formatContext, packet) >= 0)
			{
				if (packet->stream_index == streamIndex)
				{
					if (avcodec_decode_audio4(codecContext, frame, &got, packet) < 0)
					{
						printf("Error in decoding audio frame.\n");
						av_free_packet(packet);
						continue;
					}
					if (got > 0)
					{
						//int size = *frame->linesize;
						int size = frame->nb_samples * bytesPerSample;
						if (m_data.empty())
							m_data.push_back(std::pair<int, std::string>(0, std::string()));
						else
							m_data.push_back(std::pair<int, std::string>(m_data.back().first + m_data.back().second.size(), std::string()));
						std::string& data = m_data.back().second;
						if (av_sample_fmt_is_planar(codecContext->sample_fmt))
						{
							data.resize(size * 2);
							int index = 0;
							for (int i = 0; i < size; i += bytesPerSample)
							{
								for (int j = 0; j < bytesPerSample; ++j)
								{
									data[index++] = (char)frame->data[0][i + j];
								}
								for (int j = 0; j < bytesPerSample; ++j)
								{
									data[index++] = (char)frame->data[1][i + j];
								}
							}
							len += size * 2;
						}
						else
						{
							data.resize(size);
							memcpy((&data[0]), frame->data[0], size);
							len += size;
						}
						try
						{
							PushBuffer(EEMusicCell(&m_data.back()));
							if (EE_MUSIC_NO_BUFFER == m_state)
							{
								SubmitBuffer();
								SubmitBuffer();
								SubmitBuffer();
							}
							EEThreadSleep(1);
						}
						catch (boost::thread_interrupted&)
						{
							avcodec_close(codecContext);
							avformat_close_input(&formatContext);
							return false;
						}
					}
				}
				av_free_packet(packet);
			}
			m_totalBytes = len;
			m_totalSamples = len / m_format.nBlockAlign;
			m_totalTime = (double)m_totalSamples / m_format.nSamplesPerSec;

			av_frame_free(&frame);
			avcodec_close(codecContext);
			avformat_close_input(&formatContext);

			return true;
		});

		return true;
	}
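
The planar branch of the loader thread above interleaves the two channel planes by hand before the samples reach XAudio2. That inner loop, isolated as a standalone sketch (names and sizes are hypothetical, supplied by the caller):

#include <stdint.h>

/* Interleave a 2-channel planar frame into one packed buffer:
 * bytes_per_sample bytes from the left plane, then the same from
 * the right plane, repeated for every sample. */
static void interleave_stereo(uint8_t *dst, const uint8_t *left,
                              const uint8_t *right,
                              int plane_size, int bytes_per_sample)
{
    int i, j, out = 0;
    for (i = 0; i < plane_size; i += bytes_per_sample) {
        for (j = 0; j < bytes_per_sample; j++)
            dst[out++] = left[i + j];
        for (j = 0; j < bytes_per_sample; j++)
            dst[out++] = right[i + j];
    }
}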
Example #16
0
int main(int argc, char *argv[])
{

    AVFormatContext *pFormatCtx = NULL;
    int i, videoStream;
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec;
    AVFrame *pFrame;
    AVFrame *pFrameCropped;
    AVFrame *pFrameRGB;
    struct SwsContext * pSwsCtx;
    AVPacket packet;
    int frameFinished;
    int numBytes;
    int numBytesCroped;
    uint8_t *buffer;

    AVDictionary * p_options = NULL;
    AVInputFormat * p_in_fmt = NULL;

    pFile = fopen("screencap.out", "wb");
    if (pFile == NULL)
        return 0;

    // Register all formats and codecs
    av_register_all();
    avcodec_register_all();
    avdevice_register_all();

    av_dict_set(&p_options, "framerate", "60", 0);
    av_dict_set(&p_options, "video_size", "1920x1080", 0);
    av_dict_set(&p_options, "qscale", "1", 0);
    p_in_fmt = av_find_input_format("x11grab");

    // Open video file
    if (avformat_open_input(&pFormatCtx, ":0.0", p_in_fmt, &p_options) != 0)
    {
        printf("cannot open input file!\n");
        return -1; // Couldn't open file
    }

    // Retrieve stream information
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, ":0.0", 0);

    // Find the first video stream
    videoStream = -1;
    for (i = 0; i < pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            videoStream = i;
            break;
        }
    if (videoStream == -1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL)
    {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }
    // Open codec
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame = avcodec_alloc_frame();

    int crop_x = 0, crop_y = 0, crop_h = 1080, crop_w = 1920;
    pFrameCropped = avcodec_alloc_frame();

    if (pFrameCropped == NULL)
        return -1;

    // Allocate an AVFrame structure
    pFrameRGB = avcodec_alloc_frame();
    if (pFrameRGB == NULL)
        return -1;

    // Determine required buffer size and allocate buffer
    numBytes = avpicture_get_size(AV_PIX_FMT_YUV420P, crop_w, crop_h);
    buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));

    // Assign appropriate parts of buffer to image planes in pFrameRGB
    // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
    // of AVPicture
    avpicture_fill((AVPicture *) pFrameRGB, buffer, AV_PIX_FMT_YUV420P, crop_w, crop_h);

    pSwsCtx = sws_getContext(crop_w, crop_h, pCodecCtx->pix_fmt, crop_w, crop_h, AV_PIX_FMT_YUV420P, SWS_FAST_BILINEAR,
        NULL, NULL, NULL);

    if (pSwsCtx == NULL)
    {
        fprintf(stderr, "Cannot initialize the sws context\n");
        return -1;
    }

    // Read frames and save first five frames to disk
    i = 0;
    FILE* fp = fopen("encodec.mpg", "wb");
    while (av_read_frame(pFormatCtx, &packet) >= 0)
    {
        // Is this a packet from the video stream?
        if (packet.stream_index == videoStream)
        { // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

            // Did we get a video frame?
            if (frameFinished)
            {
                sws_scale(pSwsCtx, (const uint8_t * const *) pFrame->data, pFrame->linesize, 0, crop_h, pFrameRGB->data,
                    pFrameRGB->linesize);
                int y, x;
                /* Y */
                for (y = 0; y < crop_h; y++)
                {
                    for (x = 0; x < crop_w; x++)
                    {
                        //fwrite(pFrameRGB->data[0] + y * pFrameRGB->linesize[0] + x, sizeof(uint8_t), 1, fp);
                    }
                }
                /* Cb and Cr */
                for (y = 0; y < crop_h / 2; y++)
                {
                    for (x = 0; x < crop_w / 2; x++)
                    {
                        //fwrite(pFrameRGB->data[1] + y * pFrameRGB->linesize[1] + x, sizeof(uint8_t), 1, fp);
                        //fwrite(pFrameRGB->data[2] + y * pFrameRGB->linesize[2] + x, sizeof(uint8_t), 1, fp);
                    }
                }

                video_encode_example(pFrameRGB, fp);

                // Save the frame to disk
                if (++i >= 100)
                    break;
            }
        }

        av_free_packet(&packet);
    }

    fclose(fp);
    printf("Frames read %d\n", i);
    // Free the RGB image
    av_free(buffer);
    av_free(pFrameRGB);

    // Free the YUV frame
    av_free(pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    avformat_close_input(&pFormatCtx);

    // Close file
    fclose(pFile);
    return 0;
}
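
One thing the x11grab example above does not do is check which options the demuxer actually consumed: avformat_open_input() removes recognised keys from the dictionary, so leftovers point at typos or misplaced keys ("qscale", for instance, is an encoding option and is likely to be left behind by a grab device). A small sketch of that check:

#include <stdio.h>
#include <libavformat/avformat.h>
#include <libavutil/dict.h>

/* Report options that avformat_open_input() left unconsumed. */
static void warn_unused_options(const AVDictionary *opts)
{
    const AVDictionaryEntry *e = NULL;
    while ((e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)))
        fprintf(stderr, "option '%s' was not consumed\n", e->key);
}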
Example #17
0
int main(int argc, char *argv[]) {
  AVFormatContext *pFormatCtx = NULL;
  int             i, videoStream, audioStream;
  AVCodecContext  *pCodecCtx = NULL;
  AVCodec         *pCodec = NULL;
  AVFrame         *pFrame = NULL; 
  AVPacket        packet;
  int             frameFinished;
  //float           aspect_ratio;
  
  AVCodecContext  *aCodecCtx = NULL;
  AVCodec         *aCodec = NULL;

  SDL_Overlay     *bmp = NULL;
  SDL_Surface     *screen = NULL;
  SDL_Rect        rect;
  SDL_Event       event;
  SDL_AudioSpec   wanted_spec, spec;

  struct SwsContext   *sws_ctx            = NULL;
  AVDictionary        *videoOptionsDict   = NULL;
  AVDictionary        *audioOptionsDict   = NULL;

  if(argc < 2) {
    fprintf(stderr, "Usage: test <file>\n");
    exit(1);
  }
  // Register all formats and codecs
  av_register_all();
  
  if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
    fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
    exit(1);
  }

  // Open video file
  if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
    return -1; // Couldn't open file
  
  // Retrieve stream information
  if(avformat_find_stream_info(pFormatCtx, NULL)<0)
    return -1; // Couldn't find stream information
  
  // Dump information about file onto standard error
  av_dump_format(pFormatCtx, 0, argv[1], 0);
  
  // Find the first video stream
  videoStream=-1;
  audioStream=-1;
  for(i=0; i<pFormatCtx->nb_streams; i++) {
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO &&
       videoStream < 0) {
      videoStream=i;
    }
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO &&
       audioStream < 0) {
      audioStream=i;
    }
  }
  if(videoStream==-1)
    return -1; // Didn't find a video stream
  if(audioStream==-1)
    return -1;
   
  aCodecCtx=pFormatCtx->streams[audioStream]->codec;
  // Set audio settings from codec info
  wanted_spec.freq = aCodecCtx->sample_rate;
  wanted_spec.format = AUDIO_S16SYS;
  wanted_spec.channels = aCodecCtx->channels;
  wanted_spec.silence = 0;
  wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
  wanted_spec.callback = audio_callback;
  wanted_spec.userdata = aCodecCtx;
  
  if(SDL_OpenAudio(&wanted_spec, &spec) < 0) {
    fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
    return -1;
  }
  aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
  if(!aCodec) {
    fprintf(stderr, "Unsupported codec!\n");
    return -1;
  }
  avcodec_open2(aCodecCtx, aCodec, &audioOptionsDict);

  // audio_st = pFormatCtx->streams[index]
  packet_queue_init(&audioq);
  SDL_PauseAudio(0);

  // Get a pointer to the codec context for the video stream
  pCodecCtx=pFormatCtx->streams[videoStream]->codec;
  
  // Find the decoder for the video stream
  pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
  if(pCodec==NULL) {
    fprintf(stderr, "Unsupported codec!\n");
    return -1; // Codec not found
  }
  // Open codec
  if(avcodec_open2(pCodecCtx, pCodec, &videoOptionsDict)<0)
    return -1; // Could not open codec
  
  // Allocate video frame
  pFrame=av_frame_alloc();

  // Make a screen to put our video

#ifndef __DARWIN__
        screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
#else
        screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0);
#endif
  if(!screen) {
    fprintf(stderr, "SDL: could not set video mode - exiting\n");
    exit(1);
  }
  
  // Allocate a place to put our YUV image on that screen
  bmp = SDL_CreateYUVOverlay(pCodecCtx->width,
				 pCodecCtx->height,
				 SDL_YV12_OVERLAY,
				 screen);
  sws_ctx =
    sws_getContext
    (
        pCodecCtx->width,
        pCodecCtx->height,
        pCodecCtx->pix_fmt,
        pCodecCtx->width,
        pCodecCtx->height,
        PIX_FMT_YUV420P,
        SWS_BILINEAR,
        NULL,
        NULL,
        NULL
    );


  // Read frames and save first five frames to disk
  i=0;
  while(av_read_frame(pFormatCtx, &packet)>=0) {
    // Is this a packet from the video stream?
    if(packet.stream_index==videoStream) {
      // Decode video frame
      avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, 
			   &packet);
      
      // Did we get a video frame?
      if(frameFinished) {
	SDL_LockYUVOverlay(bmp);

	AVPicture pict;
	pict.data[0] = bmp->pixels[0];
	pict.data[1] = bmp->pixels[2];
	pict.data[2] = bmp->pixels[1];

	pict.linesize[0] = bmp->pitches[0];
	pict.linesize[1] = bmp->pitches[2];
	pict.linesize[2] = bmp->pitches[1];

	// Convert the image into YUV format that SDL uses
    sws_scale
    (
        sws_ctx, 
        (uint8_t const * const *)pFrame->data, 
        pFrame->linesize, 
        0,
        pCodecCtx->height,
        pict.data,
        pict.linesize
    );
	
	SDL_UnlockYUVOverlay(bmp);
	
	rect.x = 0;
	rect.y = 0;
	rect.w = pCodecCtx->width;
	rect.h = pCodecCtx->height;
	SDL_DisplayYUVOverlay(bmp, &rect);
      }
      // free the video packet whether or not a complete frame came out
      av_free_packet(&packet);
    } else if(packet.stream_index==audioStream) {
      packet_queue_put(&audioq, &packet);
    } else {
      av_free_packet(&packet);
    }
    // Poll for quit events so the window stays responsive
    SDL_PollEvent(&event);
    switch(event.type) {
    case SDL_QUIT:
      quit = 1;
      SDL_Quit();
      exit(0);
      break;
    default:
      break;
    }

  }

  // Free the YUV frame
  av_free(pFrame);
  
  // Close the codec
  avcodec_close(pCodecCtx);
  
  // Close the video file
  avformat_close_input(&pFormatCtx);
  
  return 0;
}
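
The audio side of the player above hinges on wanted_spec.callback: SDL pulls audio by invoking that function whenever the device buffer runs low. A stub with the expected signature (the real audio_callback() drains the packet queue instead of writing silence):

#include <string.h>
#include <SDL.h>

/* SDL audio callback shape: fill 'stream' with exactly 'len' bytes. */
static void audio_callback_stub(void *userdata, Uint8 *stream, int len)
{
    (void)userdata;                  /* the codec context in the player above */
    memset(stream, 0, (size_t)len);  /* silence; a player copies decoded PCM */
}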
int set_data_source(State **ps, const char* path, const char* headers) {
	printf("set_data_source\n");
	int audio_index = -1;
	int video_index = -1;
	int i;

	State *state = *ps;
	
	if (state && state->pFormatCtx) {
		avformat_close_input(&state->pFormatCtx);
	}

	if (!state) {
		state = av_mallocz(sizeof(State));
	}

	state->pFormatCtx = NULL;
	state->audio_stream = -1;
	state->video_stream = -1;
	state->audio_st = NULL;
	state->video_st = NULL;
	
	char duration[30] = "0";

    printf("Path: %s\n", path);

    AVDictionary *options = NULL;
    av_dict_set(&options, "icy", "1", 0);
    av_dict_set(&options, "user-agent", "FFmpegMediaMetadataRetriever", 0);
    
    if (headers) {
        av_dict_set(&options, "headers", headers, 0);
    }
    
    if (avformat_open_input(&state->pFormatCtx, path, NULL, &options) != 0) {
	    printf("Metadata could not be retrieved\n");
		*ps = NULL;
    	return FAILURE;
    }

	if (avformat_find_stream_info(state->pFormatCtx, NULL) < 0) {
	    printf("Metadata could not be retrieved\n");
	    avformat_close_input(&state->pFormatCtx);
		*ps = NULL;
    	return FAILURE;
	}

	get_duration(state->pFormatCtx, duration);
	av_dict_set(&state->pFormatCtx->metadata, DURATION, duration, 0);
	
	get_shoutcast_metadata(state->pFormatCtx);

	//av_dump_format(state->pFormatCtx, 0, path, 0);
	
    // Find the first audio and video stream
	for (i = 0; i < state->pFormatCtx->nb_streams; i++) {
		if (state->pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0) {
			video_index = i;
		}

		if (state->pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0) {
			audio_index = i;
		}

		set_codec(state->pFormatCtx, i);
	}

	/*if (audio_index >= 0) {
		stream_component_open(state, audio_index);
	}*/

	if (video_index >= 0) {
		stream_component_open(state, video_index);
	}

	/*if(state->video_stream < 0 || state->audio_stream < 0) {
	    avformat_close_input(&state->pFormatCtx);
		*ps = NULL;
		return FAILURE;
	}*/

    set_rotation(state);
    
	/*printf("Found metadata\n");
	AVDictionaryEntry *tag = NULL;
	while ((tag = av_dict_get(state->pFormatCtx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
    	printf("Key %s: \n", tag->key);
    	printf("Value %s: \n", tag->value);
    }*/
	
	*ps = state;
	return SUCCESS;
}
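
set_data_source() hands its dictionary to avformat_open_input(), which consumes the entries it recognises; the caller is still responsible for freeing whatever remains. The pattern condensed into a sketch (helper name hypothetical):

#include <libavformat/avformat.h>

/* Open a URL with optional HTTP headers; the dictionary is freed
 * whether or not the open succeeds. */
static int open_with_headers(AVFormatContext **ctx, const char *url,
                             const char *headers)
{
    AVDictionary *opts = NULL;
    int ret;

    if (headers)
        av_dict_set(&opts, "headers", headers, 0);
    ret = avformat_open_input(ctx, url, NULL, &opts);
    av_dict_free(&opts);
    return ret;
}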
Example #19
0
int open_input_file(int check_yuv)
{
	AVStream *stream = NULL;
	AVCodecContext *codecCtx = NULL;
	AVCodec *dec = NULL;
	int ret;
	int streamIdx = 0;
	unsigned int i;

	/*
	*	reads file header and stores information
	*	about the file format in the AVFormatContext structure
	*	@param fmt, options If NULL auto-detect file format,
	*						buffer size, and format options
	*/


	ret = avformat_open_input(&inFmtCtx, file_input, NULL, NULL);
	if (ret < 0)
	{
		av_log(NULL, AV_LOG_ERROR, "Cannot open input file [%s]\n", file_input);
		return ret;
	}
/*
	if (check_yuv == 1){
		printf("it is yuvfile\n");
		printf("So you must input media size\n");
		printf("width : ");
		scanf("%d", &(inFmtCtx)->streams[streamIdx]->codec->width);
		printf("height : ");
		scanf("%d", &(inFmtCtx)->streams[streamIdx]->codec->height);

	}
*/
	(inFmtCtx)->streams[streamIdx]->codec->width = 352;
	(inFmtCtx)->streams[streamIdx]->codec->height = 288;
	av_log(NULL, AV_LOG_INFO, "File [%s] Open Success\n", file_input);
	av_log(NULL, AV_LOG_DEBUG, "Format: %s\n", inFmtCtx->iformat->name);


	//retrieve stream information
	ret = avformat_find_stream_info(inFmtCtx, NULL);
	if (ret < 0)
	{
		av_log(NULL, AV_LOG_ERROR, "Cannot find stream information");
		return ret;
	}

	av_log(NULL, AV_LOG_INFO, "Get Stream Information Success\n");

	for (i = 0; i < inFmtCtx->nb_streams; i++)
	{
		stream = inFmtCtx->streams[i];
		codecCtx = stream->codec;

		if (codecCtx->codec_type == AVMEDIA_TYPE_VIDEO)
		{
			streamIdx = i;
			//find decoder
			dec = avcodec_find_decoder(codecCtx->codec_id);
			if (dec == NULL)
			{
				av_log(NULL, AV_LOG_ERROR, "Could not find [%s] codec\n", av_get_media_type_string(codecCtx->codec_type));
				return AVERROR(EINVAL);
			}

			ret = avcodec_open2(codecCtx, dec, NULL);
			if (ret < 0)
			{
				av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
				return ret;
			}
		}
	}

	clip_width = inFmtCtx->streams[streamIdx]->codec->width;
	clip_height = inFmtCtx->streams[streamIdx]->codec->height;
	time_base_den = inFmtCtx->streams[streamIdx]->codec->time_base.den;
	time_base_num = inFmtCtx->streams[streamIdx]->codec->time_base.num;

	//debugging function
	av_dump_format(inFmtCtx, 0, file_input, 0);

	return 0;
}
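
open_input_file() patches the geometry into the codec context after opening, which only works because the rawvideo demuxer tolerates it. Passing the geometry as demuxer options is the cleaner route; a sketch using the same hard-coded CIF values as above:

#include <libavformat/avformat.h>

/* Open a headerless YUV file by telling the rawvideo demuxer its
 * frame size and pixel format up front. */
static int open_raw_yuv(AVFormatContext **ctx, const char *path)
{
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "video_size", "352x288", 0);
    av_dict_set(&opts, "pixel_format", "yuv420p", 0);
    ret = avformat_open_input(ctx, path,
                              av_find_input_format("rawvideo"), &opts);
    av_dict_free(&opts);
    return ret;
}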
int main(int argc, char *argv[])
{
    AVCodecContext* video_dec_ctx = NULL;
    AVCodec* video_dec = NULL;
    AVPacket pkt;
    AVFrame *frame = NULL;
    int read_eos = 0;
    int decode_count = 0;
    int render_count = 0;
    int video_stream_index = -1, i;
    uint8_t *frame_copy = NULL;
    FILE *dump_yuv = NULL;

    // parse command line parameters
    process_cmdline(argc, argv);
    if (!input_file) {
        ERROR("no input file specified\n");
        return -1;
    }

    // libav* init
    av_register_all();

    // open input file
    AVFormatContext* pFormat = NULL;
    if (avformat_open_input(&pFormat, input_file, NULL, NULL) < 0) {
        ERROR("fail to open input file: %s by avformat\n", input_file);
        return -1;
    }
    if (avformat_find_stream_info(pFormat, NULL) < 0) {
        ERROR("fail to find out stream info\n");
        return -1;
    }
    av_dump_format(pFormat,0,input_file,0);

    // find out video stream
    for (i = 0; i < pFormat->nb_streams; i++) {
        if (pFormat->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_dec_ctx = pFormat->streams[i]->codec;
            video_stream_index = i;
            break;
        }
    }
    ASSERT(video_dec_ctx && video_stream_index>=0);

    // open video codec
    video_dec = avcodec_find_decoder(video_dec_ctx->codec_id);
    video_dec_ctx->coder_type = render_mode ? render_mode -1 : render_mode; // specify output frame type
    if (avcodec_open2(video_dec_ctx, video_dec, NULL) < 0) {
        ERROR("fail to open codec\n");
        return -1;
    }

    // decode frames one by one
    av_init_packet(&pkt);
    while (1) {
        if(read_eos == 0 && av_read_frame(pFormat, &pkt) < 0) {
            read_eos = 1;
        }
        if (read_eos) {
            pkt.data = NULL;
            pkt.size = 0;
        }

        if (pkt.stream_index == video_stream_index) {
            if (!frame) // allocate once; avcodec_decode_video2() reuses the frame
                frame = av_frame_alloc();
            int got_picture = 0,ret = 0;
            ret = avcodec_decode_video2(video_dec_ctx, frame, &got_picture, &pkt);
            if (ret < 0) { // decode fail (or decode finished)
                DEBUG("exit ...\n");
                break;
            }

            if (read_eos && ret>=0 && !got_picture) {
                DEBUG("ret=%d, exit ...\n", ret);
                break; // eos has been processed
            }

            decode_count++;
            if (got_picture) {
                switch (render_mode) {
                case 0: // dump raw video frame to disk file
                case 1: { // draw raw frame data as texture
                    // assumed I420 format
                    int height[3] = {video_dec_ctx->height, video_dec_ctx->height/2, video_dec_ctx->height/2};
                    int width[3] = {video_dec_ctx->width, video_dec_ctx->width/2, video_dec_ctx->width/2};
                    int plane, row;

                    if (render_mode == 0) {
                        if (!dump_yuv) {
                            char out_file[256];
                            sprintf(out_file, "./dump_%dx%d.I420", video_dec_ctx->width, video_dec_ctx->height);
                            dump_yuv = fopen(out_file, "ab");
                            if (!dump_yuv) {
                                ERROR("fail to create file for dumped yuv data\n");
                                return -1;
                            }
                        }
                        for (plane=0; plane<3; plane++) {
                            for (row = 0; row<height[plane]; row++)
                                fwrite(frame->data[plane]+ row*frame->linesize[plane], width[plane], 1, dump_yuv);
                        }
                    } else {
                        // glTexImage2D  doesn't handle pitch, make a copy of video data
                        frame_copy = malloc(video_dec_ctx->height * video_dec_ctx->width * 3 / 2);
                        unsigned char* ptr = frame_copy;

                        for (plane=0; plane<3; plane++) {
                            for (row=0; row<height[plane]; row++) {
                                memcpy(ptr, frame->data[plane]+row*frame->linesize[plane], width[plane]);
                                ptr += width[plane];
                            }
                        }

                        drawVideo((uintptr_t)frame_copy, 0, video_dec_ctx->width, video_dec_ctx->height, 0);
                        free(frame_copy); // release the copy so it is not leaked on every frame
                        frame_copy = NULL;
                    }
                }
                    break;
                case 2: // draw video frame as texture with drm handle
                case 3: // draw video frame as texture with dma_buf handle
                    drawVideo((uintptr_t)frame->data[0], render_mode -1, video_dec_ctx->width, video_dec_ctx->height, (uintptr_t)frame->data[1]);
                    break;
                default:
                    break;
                }
                render_count++;
            }
        }
    }

    if (frame)
        av_frame_free(&frame);
    if (frame_copy)
        free(frame_copy);
    if (dump_yuv)
        fclose(dump_yuv);
    deinit_egl();
    PRINTF("decode %s ok, decode_count=%d, render_count=%d\n", input_file, decode_count, render_count);

    return 0;
}
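
The read loop above switches to an empty packet once av_read_frame() fails; that is the drain protocol of the old decode API, where the decoder keeps returning buffered pictures until got_picture stays zero. Isolated as a sketch:

#include <libavcodec/avcodec.h>

/* Drain buffered pictures from a decoder after end of stream. */
static void drain_decoder(AVCodecContext *ctx, AVFrame *frame)
{
    AVPacket pkt;
    int got = 1;

    av_init_packet(&pkt);
    pkt.data = NULL; /* empty packet signals end of stream */
    pkt.size = 0;
    while (got && avcodec_decode_video2(ctx, frame, &got, &pkt) >= 0) {
        /* when got != 0, 'frame' holds one more buffered picture */
    }
}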
Example #21
0
int CBaseOperation::GetFrameCollection(FGetImageCallback fgetImageCollectionCallback, PCHAR inputFilename, int width, int height, int64_t startTimestamp, int step, int count)
{
	int error = 0;
	if (fgetImageCollectionCallback == nullptr){
		error = 2;
		return error;
	}
	if (startTimestamp < 0){
		error = 1;
		return error;
	}
	av_register_all();
	int64_t stepTimestamp = 0;
	AVFormatContext *fmtCtx = avformat_alloc_context();
	AVCodecContext* codecCtx{ nullptr };
	SwsContext *imgconvertCtx{ nullptr };
	AVFrame *frame = av_frame_alloc();
	AVFrame *frame_rgb = av_frame_alloc();
	int gotFrame = 0;
	uint8_t* avpicBuffer{ nullptr };
	if (avformat_open_input(&fmtCtx, inputFilename, nullptr, nullptr) != 0){
		error = 2;
		goto cleanUp;
	}

	if (avformat_find_stream_info(fmtCtx, nullptr) < 0){
		error = 2;
		goto cleanUp;
	}

	auto videoStreamIndex = av_find_best_stream(fmtCtx, AVMEDIA_TYPE_VIDEO, -1, -1, nullptr, 0);
	if (videoStreamIndex < 0){
		error = 2;
		goto cleanUp;
	}

	codecCtx = fmtCtx->streams[videoStreamIndex]->codec;
	if (avcodec_open2(codecCtx, avcodec_find_decoder(codecCtx->codec_id), nullptr) < 0)
	{
		error = 2;
		goto cleanUp;
	}
	AVPacket packet;
	av_init_packet(&packet);
	startTimestamp = (fmtCtx->start_time + startTimestamp * 1000) * av_q2d(fmtCtx->streams[videoStreamIndex]->time_base);
	step = step * 1000 * av_q2d(fmtCtx->streams[videoStreamIndex]->time_base); // converting step from ms to stream timebase;
	auto numBytes = avpicture_get_size(AV_PIX_FMT_RGB32, codecCtx->width, codecCtx->height);
	avpicBuffer = static_cast<uint8_t *>(av_malloc(numBytes*sizeof(uint8_t)));
	imgconvertCtx = sws_getContext(codecCtx->width, codecCtx->height, codecCtx->pix_fmt, width, height, AV_PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL);
	avpicture_fill(reinterpret_cast<AVPicture *>(frame_rgb), avpicBuffer, AV_PIX_FMT_RGB32, width, height);

	if (0 > av_seek_frame(fmtCtx, videoStreamIndex, startTimestamp, AVSEEK_FLAG_FRAME))
	{
		error = 1;
		goto cleanUp;
	}

	for (int i = 0; i < count; i++){
		stepTimestamp = startTimestamp + step*i;

		while (!gotFrame){
			if (av_read_frame(fmtCtx, &packet) < 0)
				break;
			double delta = (double)packet.pts / (double)stepTimestamp;
			if (delta > 0.9 && delta < 1.1){
				if (packet.stream_index == videoStreamIndex)
					if (0 > avcodec_decode_video2(codecCtx, frame, &gotFrame, &packet)){
						error = 2;
						av_packet_unref(&packet);
						goto cleanUp;
					}
			}
			av_packet_unref(&packet);

		}
		sws_scale(imgconvertCtx, static_cast<const uint8_t* const*>(frame->data), frame->linesize, 0, codecCtx->height, frame_rgb->data, frame_rgb->linesize);
		error = saveToJpeg(fgetImageCollectionCallback, frame_rgb, width, height);
		if (error > 0)
			return error;
		gotFrame = 0;
	}
	goto cleanUp;
cleanUp:
	if (avpicBuffer)
		av_free(avpicBuffer);
	if (codecCtx)
		avcodec_close(codecCtx);
	if (imgconvertCtx)
		sws_freeContext(imgconvertCtx);
	av_frame_free(&frame);
	av_frame_free(&frame_rgb);
	if (fmtCtx){
		avformat_close_input(&fmtCtx);
		avformat_free_context(fmtCtx);
	}
	return error;
}
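
GetFrameCollection() converts its millisecond offsets with av_q2d() and plain arithmetic; av_rescale_q() performs the same conversion without the intermediate floating point. A sketch of the equivalent seek:

#include <libavformat/avformat.h>
#include <libavutil/mathematics.h>

/* Seek to a position given in milliseconds, rescaled into the
 * stream's own time base. */
static int seek_to_ms(AVFormatContext *ctx, int stream_index, int64_t ms)
{
    AVRational ms_tb = { 1, 1000 };
    int64_t ts = av_rescale_q(ms, ms_tb,
                              ctx->streams[stream_index]->time_base);
    return av_seek_frame(ctx, stream_index, ts, AVSEEK_FLAG_BACKWARD);
}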
Example #22
0
int OpenDemux( vlc_object_t *p_this )
{
    demux_t       *p_demux = (demux_t*)p_this;
    demux_sys_t   *p_sys;
    AVProbeData   pd = { };
    AVInputFormat *fmt = NULL;
    int64_t       i_start_time = -1;
    bool          b_can_seek;
    char         *psz_url;
    const uint8_t *peek;
    int           error;

    /* Init Probe data */
    pd.buf_size = vlc_stream_Peek( p_demux->s, &peek, 2048 + 213 );
    if( pd.buf_size <= 0 )
    {
        msg_Warn( p_demux, "cannot peek" );
        return VLC_EGENERIC;
    }

    pd.buf = malloc( pd.buf_size + AVPROBE_PADDING_SIZE );
    if( unlikely(pd.buf == NULL) )
        return VLC_ENOMEM;

    memcpy( pd.buf, peek, pd.buf_size );
    memset( pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE );

    if( p_demux->psz_file )
        psz_url = strdup( p_demux->psz_file );
    else
    {
        if( asprintf( &psz_url, "%s://%s", p_demux->psz_access,
                      p_demux->psz_location ) == -1)
            psz_url = NULL;
    }

    if( psz_url != NULL )
        msg_Dbg( p_demux, "trying url: %s", psz_url );

    pd.filename = psz_url;

    vlc_stream_Control( p_demux->s, STREAM_CAN_SEEK, &b_can_seek );

    vlc_init_avformat(p_this);

    /* Guess format */
    char *psz_format = var_InheritString( p_this, "avformat-format" );
    if( psz_format )
    {
        if( (fmt = av_find_input_format(psz_format)) )
            msg_Dbg( p_demux, "forcing format: %s", fmt->name );
        free( psz_format );
    }

    if( fmt == NULL )
        fmt = av_probe_input_format( &pd, 1 );

    free( pd.buf );

    if( fmt == NULL )
    {
        msg_Dbg( p_demux, "couldn't guess format" );
        free( psz_url );
        return VLC_EGENERIC;
    }

    if( !p_demux->obj.force )
    {
        static const char ppsz_blacklist[][16] = {
            /* Don't handle MPEG unless forced */
            "mpeg", "vcd", "vob", "mpegts",
            /* libavformat's redirector won't work */
            "redir", "sdp",
            /* Don't handle subtitles format */
            "ass", "srt", "microdvd",
            /* No timestamps at all */
            "hevc", "h264",
            ""
        };

        for( int i = 0; *ppsz_blacklist[i]; i++ )
        {
            if( !strcmp( fmt->name, ppsz_blacklist[i] ) )
            {
                free( psz_url );
                return VLC_EGENERIC;
            }
        }
    }

    /* Don't trigger false alarms on bin files */
    if( !p_demux->obj.force && !strcmp( fmt->name, "psxstr" ) )
    {
        int i_len;

        if( !p_demux->psz_file )
        {
            free( psz_url );
            return VLC_EGENERIC;
        }

        i_len = strlen( p_demux->psz_file );
        if( i_len < 4 )
        {
            free( psz_url );
            return VLC_EGENERIC;
        }

        if( strcasecmp( &p_demux->psz_file[i_len - 4], ".str" ) &&
            strcasecmp( &p_demux->psz_file[i_len - 4], ".xai" ) &&
            strcasecmp( &p_demux->psz_file[i_len - 3], ".xa" ) )
        {
            free( psz_url );
            return VLC_EGENERIC;
        }
    }

    msg_Dbg( p_demux, "detected format: %s", fmt->name );

    /* Fill p_demux fields */
    p_demux->pf_demux = Demux;
    p_demux->pf_control = Control;
    p_demux->p_sys = p_sys = xmalloc( sizeof( demux_sys_t ) );
    p_sys->ic = 0;
    p_sys->fmt = fmt;
    p_sys->i_tk = 0;
    p_sys->tk = NULL;
    p_sys->tk_pcr = NULL;
    p_sys->i_ssa_order = 0;
    TAB_INIT( p_sys->i_attachments, p_sys->attachments);
    p_sys->p_title = NULL;

    /* Create I/O wrapper */
    unsigned char * p_io_buffer = av_malloc( AVFORMAT_IOBUFFER_SIZE );
    if( !p_io_buffer )
    {
        free( psz_url );
        CloseDemux( p_this );
        return VLC_ENOMEM;
    }

    p_sys->ic = avformat_alloc_context();
    if( !p_sys->ic )
    {
        av_free( p_io_buffer );
        free( psz_url );
        CloseDemux( p_this );
        return VLC_ENOMEM;
    }

    AVIOContext *pb = p_sys->ic->pb = avio_alloc_context( p_io_buffer,
        AVFORMAT_IOBUFFER_SIZE, 0, p_demux, IORead, NULL, IOSeek );
    if( !pb )
    {
        av_free( p_io_buffer );
        free( psz_url );
        CloseDemux( p_this );
        return VLC_ENOMEM;
    }

    p_sys->ic->pb->seekable = b_can_seek ? AVIO_SEEKABLE_NORMAL : 0;
    error = avformat_open_input(&p_sys->ic, psz_url, p_sys->fmt, NULL);

    if( error < 0 )
    {
        msg_Err( p_demux, "Could not open %s: %s", psz_url,
                 vlc_strerror_c(AVUNERROR(error)) );
        av_free( p_io_buffer );
        av_free( pb );
        p_sys->ic = NULL;
        free( psz_url );
        CloseDemux( p_this );
        return VLC_EGENERIC;
    }
    free( psz_url );

    char *psz_opts = var_InheritString( p_demux, "avformat-options" );
    AVDictionary *options[p_sys->ic->nb_streams ? p_sys->ic->nb_streams : 1];
    options[0] = NULL;
    unsigned int nb_streams = p_sys->ic->nb_streams;
    for (unsigned i = 1; i < nb_streams; i++)
        options[i] = NULL;
    if (psz_opts) {
        vlc_av_get_options(psz_opts, &options[0]);
        for (unsigned i = 1; i < nb_streams; i++) {
            av_dict_copy(&options[i], options[0], 0);
        }
        free(psz_opts);
    }
    vlc_avcodec_lock(); /* avformat calls avcodec behind our back!!! */
    error = avformat_find_stream_info( p_sys->ic, options );
    /* FIXME: what if nb_streams change after that call? */
    vlc_avcodec_unlock();
    AVDictionaryEntry *t = NULL;
    while ((t = av_dict_get(options[0], "", t, AV_DICT_IGNORE_SUFFIX))) {
        msg_Err( p_demux, "Unknown option \"%s\"", t->key );
    }
    av_dict_free(&options[0]);
    for (unsigned i = 1; i < nb_streams; i++) {
        av_dict_free(&options[i]);
    }

    if( error < 0 )
    {
        msg_Warn( p_demux, "Could not find stream info: %s",
                  vlc_strerror_c(AVUNERROR(error)) );
    }

    for( unsigned i = 0; i < p_sys->ic->nb_streams; i++ )
    {
        AVStream *s = p_sys->ic->streams[i];
        const AVCodecParameters *cp = s->codecpar;
        es_out_id_t  *es = NULL;
        es_format_t es_fmt;
        const char *psz_type = "unknown";

        /* Do not use the cover art as a stream */
        if( s->disposition == AV_DISPOSITION_ATTACHED_PIC )
        {
            TAB_APPEND( p_sys->i_tk, p_sys->tk, NULL );
            continue;
        }

        vlc_fourcc_t fcc = GetVlcFourcc( cp->codec_id );
        switch( cp->codec_type )
        {
        case AVMEDIA_TYPE_AUDIO:
            es_format_Init( &es_fmt, AUDIO_ES, fcc );
            es_fmt.i_original_fourcc = CodecTagToFourcc( cp->codec_tag );
            es_fmt.i_bitrate = cp->bit_rate;
            es_fmt.audio.i_channels = cp->channels;
            es_fmt.audio.i_rate = cp->sample_rate;
            es_fmt.audio.i_bitspersample = cp->bits_per_coded_sample;
            es_fmt.audio.i_blockalign = cp->block_align;
            psz_type = "audio";

            if(cp->codec_id == AV_CODEC_ID_AAC_LATM)
            {
                es_fmt.i_original_fourcc = VLC_FOURCC('L','A','T','M');
                es_fmt.b_packetized = false;
            }
            else if(cp->codec_id == AV_CODEC_ID_AAC &&
                    strstr(p_sys->fmt->long_name, "raw ADTS AAC"))
            {
                es_fmt.i_original_fourcc = VLC_FOURCC('A','D','T','S');
                es_fmt.b_packetized = false;
            }
            break;

        case AVMEDIA_TYPE_VIDEO:
            es_format_Init( &es_fmt, VIDEO_ES, fcc );
            es_fmt.i_original_fourcc = CodecTagToFourcc( cp->codec_tag );

            es_fmt.video.i_bits_per_pixel = cp->bits_per_coded_sample;
            /* Special case for raw video data */
            if( cp->codec_id == AV_CODEC_ID_RAWVIDEO )
            {
                msg_Dbg( p_demux, "raw video, pixel format: %i", cp->format );
                if( GetVlcChroma( &es_fmt.video, cp->format ) != VLC_SUCCESS)
                {
                    msg_Err( p_demux, "was unable to find a FourCC match for raw video" );
                }
                else
                    es_fmt.i_codec = es_fmt.video.i_chroma;
            }
            /* We need this for the h264 packetizer */
            else if( cp->codec_id == AV_CODEC_ID_H264 && ( p_sys->fmt == av_find_input_format("flv") ||
                p_sys->fmt == av_find_input_format("matroska") || p_sys->fmt == av_find_input_format("mp4") ) )
                es_fmt.i_original_fourcc = VLC_FOURCC( 'a', 'v', 'c', '1' );

            es_fmt.video.i_width = cp->width;
            es_fmt.video.i_height = cp->height;
            es_fmt.video.i_visible_width = es_fmt.video.i_width;
            es_fmt.video.i_visible_height = es_fmt.video.i_height;

            get_rotation(&es_fmt, s);

# warning FIXME: implement palette transmission
            psz_type = "video";
            es_fmt.video.i_frame_rate = s->codec->time_base.num;
            es_fmt.video.i_frame_rate_base = s->codec->time_base.den * __MAX( s->codec->ticks_per_frame, 1 );
            es_fmt.video.i_sar_num = s->sample_aspect_ratio.num;
            if (s->sample_aspect_ratio.num > 0)
                es_fmt.video.i_sar_den = s->sample_aspect_ratio.den;
            else
                es_fmt.video.i_sar_den = 0;
            break;

        case AVMEDIA_TYPE_SUBTITLE:
            es_format_Init( &es_fmt, SPU_ES, fcc );
            es_fmt.i_original_fourcc = CodecTagToFourcc( cp->codec_tag );
            if( strncmp( p_sys->ic->iformat->name, "matroska", 8 ) == 0 &&
                cp->codec_id == AV_CODEC_ID_DVD_SUBTITLE &&
                cp->extradata != NULL &&
                cp->extradata_size > 0 )
            {
                char *psz_start;
                char *psz_buf = malloc( cp->extradata_size + 1);
                if( psz_buf != NULL )
                {
                    memcpy( psz_buf, cp->extradata , cp->extradata_size );
                    psz_buf[cp->extradata_size] = '\0';

                    psz_start = strstr( psz_buf, "size:" );
                    if( psz_start &&
                        vobsub_size_parse( psz_start,
                                           &es_fmt.subs.spu.i_original_frame_width,
                                           &es_fmt.subs.spu.i_original_frame_height ) == VLC_SUCCESS )
                    {
                        msg_Dbg( p_demux, "original frame size: %dx%d",
                                 es_fmt.subs.spu.i_original_frame_width,
                                 es_fmt.subs.spu.i_original_frame_height );
                    }
                    else
                    {
                        msg_Warn( p_demux, "reading original frame size failed" );
                    }

                    psz_start = strstr( psz_buf, "palette:" );
                    if( psz_start &&
                        vobsub_palette_parse( psz_start, &es_fmt.subs.spu.palette[1] ) == VLC_SUCCESS )
                    {
                        es_fmt.subs.spu.palette[0] = SPU_PALETTE_DEFINED;
                        msg_Dbg( p_demux, "vobsub palette read" );
                    }
                    else
                    {
                        msg_Warn( p_demux, "reading original palette failed" );
                    }
                    free( psz_buf );
                }
            }

            psz_type = "subtitle";
            break;

        default:
            es_format_Init( &es_fmt, UNKNOWN_ES, 0 );
            es_fmt.i_original_fourcc = CodecTagToFourcc( cp->codec_tag );
#ifdef HAVE_AVUTIL_CODEC_ATTACHMENT
            if( cp->codec_type == AVMEDIA_TYPE_ATTACHMENT )
            {
                input_attachment_t *p_attachment;

                psz_type = "attachment";
                if( cp->codec_id == AV_CODEC_ID_TTF )
                {
                    AVDictionaryEntry *filename = av_dict_get( s->metadata, "filename", NULL, 0 );
                    if( filename && filename->value )
                    {
                        p_attachment = vlc_input_attachment_New(
                                filename->value, "application/x-truetype-font",
                                NULL, cp->extradata, (int)cp->extradata_size );
                        if( p_attachment )
                            TAB_APPEND( p_sys->i_attachments, p_sys->attachments,
                                        p_attachment );
                    }
                }
                else msg_Warn( p_demux, "unsupported attachment type (%u) in avformat demux", cp->codec_id );
            }
            else
#endif
            {
                if( cp->codec_type == AVMEDIA_TYPE_DATA )
                    psz_type = "data";

                msg_Warn( p_demux, "unsupported track type (%u:%u) in avformat demux", cp->codec_type, cp->codec_id );
            }
            break;
        }

        AVDictionaryEntry *language = av_dict_get( s->metadata, "language", NULL, 0 );
        if ( language && language->value )
            es_fmt.psz_language = strdup( language->value );

        if( s->disposition & AV_DISPOSITION_DEFAULT )
            es_fmt.i_priority = ES_PRIORITY_SELECTABLE_MIN + 1000;

#ifdef HAVE_AVUTIL_CODEC_ATTACHMENT
        if( cp->codec_type != AVMEDIA_TYPE_ATTACHMENT )
#endif
        if( cp->codec_type != AVMEDIA_TYPE_DATA )
        {
            const bool    b_ogg = !strcmp( p_sys->fmt->name, "ogg" );
            const uint8_t *p_extra = cp->extradata;
            unsigned      i_extra  = cp->extradata_size;

            if( cp->codec_id == AV_CODEC_ID_THEORA && b_ogg )
            {
                unsigned pi_size[3];
                const void *pp_data[3];
                unsigned i_count;
                for( i_count = 0; i_count < 3; i_count++ )
                {
                    if( i_extra < 2 )
                        break;
                    pi_size[i_count] = GetWBE( p_extra );
                    pp_data[i_count] = &p_extra[2];
                    if( i_extra < pi_size[i_count] + 2 )
                        break;

                    p_extra += 2 + pi_size[i_count];
                    i_extra -= 2 + pi_size[i_count];
                }
                if( i_count > 0 && xiph_PackHeaders( &es_fmt.i_extra, &es_fmt.p_extra,
                                                     pi_size, pp_data, i_count ) )
                {
                    es_fmt.i_extra = 0;
                    es_fmt.p_extra = NULL;
                }
            }
            else if( cp->codec_id == AV_CODEC_ID_SPEEX && b_ogg )
            {
                const uint8_t p_dummy_comment[] = {
                    0, 0, 0, 0,
                    0, 0, 0, 0,
                };
                unsigned pi_size[2];
                const void *pp_data[2];

                pi_size[0] = i_extra;
                pp_data[0] = p_extra;

                pi_size[1] = sizeof(p_dummy_comment);
                pp_data[1] = p_dummy_comment;

                if( pi_size[0] > 0 && xiph_PackHeaders( &es_fmt.i_extra, &es_fmt.p_extra,
                                                        pi_size, pp_data, 2 ) )
                {
                    es_fmt.i_extra = 0;
                    es_fmt.p_extra = NULL;
                }
            }
            else if( cp->codec_id == AV_CODEC_ID_OPUS )
            {
                const uint8_t p_dummy_comment[] = {
                    'O', 'p', 'u', 's',
                    'T', 'a', 'g', 's',
                    0, 0, 0, 0, /* Vendor String length */
                                /* Vendor String */
                    0, 0, 0, 0, /* User Comment List Length */

                };
                unsigned pi_size[2];
                const void *pp_data[2];

                pi_size[0] = i_extra;
                pp_data[0] = p_extra;

                pi_size[1] = sizeof(p_dummy_comment);
                pp_data[1] = p_dummy_comment;

                if( pi_size[0] > 0 && xiph_PackHeaders( &es_fmt.i_extra, &es_fmt.p_extra,
                                                        pi_size, pp_data, 2 ) )
                {
                    es_fmt.i_extra = 0;
                    es_fmt.p_extra = NULL;
                }
            }
            else if( cp->extradata_size > 0 )
            {
                es_fmt.p_extra = malloc( i_extra );
                if( es_fmt.p_extra )
                {
                    es_fmt.i_extra = i_extra;
                    memcpy( es_fmt.p_extra, p_extra, i_extra );
                }
            }
            es = es_out_Add( p_demux->out, &es_fmt );
            if( s->disposition & AV_DISPOSITION_DEFAULT )
                es_out_Control( p_demux->out, ES_OUT_SET_ES_DEFAULT, es );
            es_format_Clean( &es_fmt );

            msg_Dbg( p_demux, "adding es: %s codec = %4.4s (%d)",
                     psz_type, (char*)&fcc, cp->codec_id  );
        }
        TAB_APPEND( p_sys->i_tk, p_sys->tk, es );
    }
    p_sys->tk_pcr = xcalloc( p_sys->i_tk, sizeof(*p_sys->tk_pcr) );

    if( p_sys->ic->start_time != (int64_t)AV_NOPTS_VALUE )
        i_start_time = p_sys->ic->start_time * 1000000 / AV_TIME_BASE;

    msg_Dbg( p_demux, "AVFormat(%s %s) supported stream", AVPROVIDER(LIBAVFORMAT), LIBAVFORMAT_IDENT );
    msg_Dbg( p_demux, "    - format = %s (%s)",
             p_sys->fmt->name, p_sys->fmt->long_name );
    msg_Dbg( p_demux, "    - start time = %"PRId64, i_start_time );
    msg_Dbg( p_demux, "    - duration = %"PRId64,
             ( p_sys->ic->duration != (int64_t)AV_NOPTS_VALUE ) ?
             p_sys->ic->duration * 1000000 / AV_TIME_BASE : -1 );

    if( p_sys->ic->nb_chapters > 0 )
    {
        p_sys->p_title = vlc_input_title_New();
        p_sys->p_title->i_length = p_sys->ic->duration * 1000000 / AV_TIME_BASE;
    }

    for( unsigned i = 0; i < p_sys->ic->nb_chapters; i++ )
    {
        seekpoint_t *s = vlc_seekpoint_New();

        AVDictionaryEntry *title = av_dict_get( p_sys->ic->metadata, "title", NULL, 0);
        if( title && title->value )
        {
            s->psz_name = strdup( title->value );
            EnsureUTF8( s->psz_name );
            msg_Dbg( p_demux, "    - chapter %d: %s", i, s->psz_name );
        }
        s->i_time_offset = p_sys->ic->chapters[i]->start * 1000000 *
            p_sys->ic->chapters[i]->time_base.num /
            p_sys->ic->chapters[i]->time_base.den -
            (i_start_time != -1 ? i_start_time : 0 );
        TAB_APPEND( p_sys->p_title->i_seekpoint, p_sys->p_title->seekpoint, s );
    }

    ResetTime( p_demux, 0 );
    return VLC_SUCCESS;
}
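
OpenDemux() allocates one AVDictionary per stream before avformat_find_stream_info(), because that call expects an array with exactly nb_streams entries, each of which the caller frees afterwards. The allocation pattern on its own (helper name hypothetical):

#include <libavformat/avformat.h>
#include <libavutil/mem.h>

/* Run avformat_find_stream_info() with the same options copied into
 * every stream's dictionary; all dictionaries are freed afterwards. */
static int find_info_with_options(AVFormatContext *ic, const AVDictionary *tmpl)
{
    unsigned n = ic->nb_streams ? ic->nb_streams : 1, i;
    AVDictionary **opts = av_calloc(n, sizeof(*opts));
    int ret;

    if (!opts)
        return AVERROR(ENOMEM);
    for (i = 0; i < n; i++)
        av_dict_copy(&opts[i], tmpl, 0);
    ret = avformat_find_stream_info(ic, opts);
    for (i = 0; i < n; i++)
        av_dict_free(&opts[i]);
    av_free(opts);
    return ret;
}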
Example #23
0
AVFormatContext *
fa_libav_open_format(AVIOContext *avio, const char *url,
		     char *errbuf, size_t errlen, const char *mimetype,
                     int probe_size, int max_analyze_duration,
		     int fps_probe_frames)
{
  AVInputFormat *fmt = NULL;
  AVFormatContext *fctx;
  int err;

  avio_seek(avio, 0, SEEK_SET);
  if(mimetype != NULL) {
    int i;

    for(i = 0; i < sizeof(mimetype2fmt) / sizeof(mimetype2fmt[0]); i++) {
      if(!strcasecmp(mimetype, mimetype2fmt[i].mimetype)) {
	fmt = av_find_input_format(mimetype2fmt[i].fmt);
	break;
      }
    }
    if(fmt == NULL)
      TRACE(TRACE_DEBUG, "probe", "%s: Don't know mimetype %s, probing instead",
	    url, mimetype);
  }

  if(fmt == NULL) {
    if((err = av_probe_input_buffer(avio, &fmt, url, NULL, 0, probe_size)) != 0)
      return fa_libav_open_error(errbuf, errlen,
				 "Unable to probe file", err);

    if(fmt == NULL) {
      snprintf(errbuf, errlen, "Unknown file format");
      return NULL;
    }
    TRACE(TRACE_DEBUG, "probe", "%s: Probed as %s", url, fmt->name);
  }

  fctx = avformat_alloc_context();
  fctx->pb = avio;
  if(max_analyze_duration != -1)
    fctx->max_analyze_duration = max_analyze_duration;

  if((err = avformat_open_input(&fctx, url, fmt, NULL)) != 0) {
    if(mimetype != NULL) {
      TRACE(TRACE_DEBUG, "libav",
            "Unable to open using mimetype %s, retrying with probe",
            mimetype);
      return fa_libav_open_format(avio, url, errbuf, errlen, NULL, probe_size,
                                  max_analyze_duration, fps_probe_frames);
    }
    return fa_libav_open_error(errbuf, errlen,
			       "Unable to open file as input format", err);
  }

  if(fps_probe_frames != -1)
    fctx->fps_probe_size = fps_probe_frames;

  if((err = avformat_find_stream_info(fctx, NULL)) < 0) {
    avformat_close_input(&fctx);
    if(mimetype != NULL) {
      TRACE(TRACE_DEBUG, "libav",
            "Unable to find stream info using mimetype %s, retrying with probe",
            mimetype);
      return fa_libav_open_format(avio, url, errbuf, errlen, NULL, probe_size,
                                  max_analyze_duration, fps_probe_frames);
    }
    return fa_libav_open_error(errbuf, errlen,
			       "Unable to handle file contents", err);
  }

  return fctx;
}
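
fa_libav_open_format() falls back from its mimetype table to content probing. The probe-then-open half, reduced to its core (assuming 'avio' is an AVIOContext already rewound to offset 0, and 0 selects the default probe size):

#include <libavformat/avformat.h>

/* Probe a custom AVIOContext and open it as a demuxer. */
static AVFormatContext *open_probed(AVIOContext *avio, const char *url)
{
    AVInputFormat *fmt = NULL;
    AVFormatContext *ctx;

    if (av_probe_input_buffer(avio, &fmt, url, NULL, 0, 0) < 0)
        return NULL;
    ctx = avformat_alloc_context();
    if (!ctx)
        return NULL;
    ctx->pb = avio;
    if (avformat_open_input(&ctx, url, fmt, NULL) < 0)
        return NULL; /* avformat_open_input() frees ctx on failure */
    return ctx;
}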
Example #24
0
int ConvertH264toTS(int fileNumber)
{
	AVFormatContext* inputFormatContext = NULL;
	AVFormatContext* outputFormatContext = NULL;
	AVStream* inStream = NULL;
	AVStream* outStream = NULL;
	AVOutputFormat* outFormat = NULL;
	AVCodec* outCodec = NULL;
	AVPacket pkt;
	AVPacket outpkt;
	char inputFile[200], outputFile[200];
	unsigned int i, inStreamIndex = 0;
	int fps, pts = 0, last_pts = 0;
	int64_t inputEndTime;

	// initialize ffmpeg libraries
	av_register_all();
	
	// Open input file
	sprintf(inputFile, "VIDEO%d.h264", fileNumber);
	if(avformat_open_input(&inputFormatContext, inputFile, NULL, NULL) != 0)
	{
		printf("\nopen %s file error!!!!!!!\n", inputFile);
		return -1;
	}

	// Find input file's stream info
	if((avformat_find_stream_info(inputFormatContext, NULL)) < 0)
	{
		printf("\nfind stream info error!!!!!!!\n");
		return -1;
	}
	else
	{
		// printf("found inputfile's stream info\n");
	}

	// Dump information about the input file onto strerr
	av_dump_format(inputFormatContext, 0, inputFile, 0);

	for(i = 0; i < inputFormatContext->nb_streams; i++)
	{
		if(inputFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
		{
			inStreamIndex = i;
            inStream = inputFormatContext->streams[i];
            // printf("found stream!!!! inStreamIndex : %d\n", inStreamIndex);

            break;
		}
	}
	
	// if the input file (h264) has no duration info, deduce it from the stream's duration and time_base
	if(inputFormatContext->duration == AV_NOPTS_VALUE)
	{
        if(inStreamIndex != -1 && inputFormatContext->streams[inStreamIndex])
        {	
            if(inputFormatContext->streams[inStreamIndex]->duration != AV_NOPTS_VALUE)
            {
                inputEndTime = (inputFormatContext->streams[inStreamIndex]->duration)/(inputFormatContext->streams[inStreamIndex]->time_base.den/inputFormatContext->streams[inStreamIndex]->time_base.num);
            }
        }
    }
    else
        inputEndTime = (inputFormatContext->duration)/(AV_TIME_BASE);

    // calculate frames per second (fps); the following code doesn't work properly
    // because a raw h264 file doesn't carry context information such as fps or duration,
    // so fps is set arbitrarily to 30 to match the h264 input file.
    // if(inStreamIndex != -1 && inputFormatContext->streams[inStreamIndex])
    // {
    //     if(inputFormatContext->streams[inStreamIndex]->r_frame_rate.num != AV_NOPTS_VALUE && inputFormatContext->streams[inStreamIndex]->r_frame_rate.den != 0)
    //     {
    //         fps =  (inputFormatContext->streams[inStreamIndex]->r_frame_rate.num)/ (inputFormatContext->streams[inStreamIndex]->r_frame_rate.den);
    //     }
    // }
    // else
    // {
    //     fps = 30;
    // }
    fps = 30;

	// Create outputFile and allocate output format
	sprintf(outputFile, "VIDEO%d.ts", fileNumber);
	outFormat = av_guess_format(NULL, outputFile, NULL);
	if(outFormat == NULL) // ts format doesn't exist
	{
		printf("output file format doesn't exist");
		return -1;
	}
	else // ts format exists
	{
		outputFormatContext = avformat_alloc_context();
		if(outputFormatContext != NULL)
		{
			outputFormatContext->oformat = outFormat;
			snprintf(outputFormatContext->filename, sizeof(outputFormatContext->filename), "%s", outputFile); // record the output filename on the context
		}
		else
		{
			printf("outputFormatContext allocation error");
			return -1;
		}
	}

	// Add video stream to output format
	if((outFormat->video_codec != 0) && (inStream != NULL))
	{
		outCodec = avcodec_find_encoder(outFormat->video_codec);
		if(outCodec == NULL)
		{
			printf("could not find vid encoder");
			return -1;
		}
		else
		{
			printf("found out vid encoder : %s\n", outCodec->name);
			outStream = avformat_new_stream(outputFormatContext, outCodec);
            if(NULL == outStream)
            {
            	printf("failed to allocated output vid strm");
            	return -1;
            }
            else
            {	 // avcodec_copy_context() return 0 when ok
				if(avcodec_copy_context(outStream->codec, inputFormatContext->streams[inStreamIndex]->codec) != 0)
				{
					printf("Failed to copy context");
					return -1;
				}
				else
				{
					// time_base is used to calculate when to decode and show the frame
					outStream->sample_aspect_ratio.den = outStream->codec->sample_aspect_ratio.den;
                    outStream->sample_aspect_ratio.num = inStream->codec->sample_aspect_ratio.num;
                    outStream->codec->codec_id = inStream->codec->codec_id;
                    outStream->codec->time_base.num = 2;
                    outStream->codec->time_base.den = fps * (inStream->codec->ticks_per_frame);
                    outStream->time_base.num = 1;
                    outStream->time_base.den = 1000;
                    outStream->r_frame_rate.num = fps;
                    outStream->r_frame_rate.den = 1;
                    outStream->avg_frame_rate.num = fps;
                    outStream->avg_frame_rate.den = 1;
				}
			}
		}
	}
	else
		printf("stream context outputting fail !!!!!!!!!!!!!!!!\n");

	// in avformat.h, #define AVFMT_NOFILE 0x0001.
	// Demuxer will use avio_open, no opened file should be provided by the caller.
	if(!(outFormat->flags & AVFMT_NOFILE))
	{
		if (avio_open2(&outputFormatContext->pb, outputFile, AVIO_FLAG_WRITE, NULL, NULL) < 0) 
		{
			printf("Could Not Open File ");
			return -1;
		}
		// else
		// 	printf("avio_open2 success!!!\n");
	}

    // Write the stream header, if any.
	if (avformat_write_header(outputFormatContext, NULL) < 0)
	{
		printf("Error Occurred While Writing Header ");
		return -1;
	}
	// else: header written successfully. (No dangling "else" here, so the
	// read/write loop below runs unconditionally.)

	// Now, in a while loop, read frames using av_read_frame() and write them to the
	// output format using av_interleaved_write_frame(). To cap the number of frames,
	// a condition such as (m_num_frames-- > 0) could be added to the loop condition.
	while(av_read_frame(inputFormatContext, &pkt) >= 0)
	{
		if(pkt.stream_index == inStreamIndex)
		{
			// av_rescale_q(pkt.pts, inStream->time_base, inStream->codec->time_base);
			// av_rescale_q(pkt.dts, inStream->time_base, inStream->codec->time_base);
			
			av_init_packet(&outpkt);
			
			if(pkt.pts != AV_NOPTS_VALUE) // AV_NOPTS_VALUE means undefined timestamp value
			{
				if(last_pts == pts)
				{
					pts++;
					last_pts = pts;
				}
				
				outpkt.pts = pts;   
			}
			else // pkt.pts is undefined
				outpkt.pts = AV_NOPTS_VALUE;

			if(pkt.dts == AV_NOPTS_VALUE) // if pkt's dts value is undefined
				outpkt.dts = AV_NOPTS_VALUE;
			// if pkt's dts value is defined with a value
			else
				outpkt.dts = pts;

			outpkt.data = pkt.data;
			outpkt.size = pkt.size;
			outpkt.stream_index = pkt.stream_index;
			outpkt.flags |= AV_PKT_FLAG_KEY; // #define AV_PKT_FLAG_KEY 0x0001, which means the packet contains a keyframe
			last_pts = pts;

			if(av_interleaved_write_frame(outputFormatContext, &outpkt) < 0)
				printf("failed video write\n");
			else
			{
				// printf("video write ok!!!!\n");
				outStream->codec->frame_number++;
			}
			
			av_free_packet(&outpkt);
			av_free_packet(&pkt);
		}
	}

	// Finally write trailer and clean up everything
	av_write_trailer(outputFormatContext);
	
	// free the memory
	if(inStream && inStream->codec)
		avcodec_close(inStream->codec);
	if(inputFormatContext)
		avformat_close_input(&inputFormatContext);
	if(outStream && outStream->codec)
		avcodec_close(outStream->codec);
	if(outputFormatContext)
	{
		// avformat_close_input() is for demuxers; an output context is torn down
		// with avio_closep() plus avformat_free_context()
		if (!(outFormat->flags & AVFMT_NOFILE))
			avio_closep(&outputFormatContext->pb);
		avformat_free_context(outputFormatContext);
		outputFormatContext = NULL;
	}
	
	return 0;
}
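A note on the timestamp handling above: the manual pts counter only works because
every packet is also flagged as a keyframe and the output time base is fixed at
1/1000. The commented-out av_rescale_q() calls hint at the more general approach:
rescale each packet's timing from the input stream's time base to the output
stream's. A minimal sketch, reusing the pkt/inStream/outStream variables from the
example above:

	// Sketch: derive output timestamps by rescaling instead of counting.
	// AV_ROUND_PASS_MINMAX passes AV_NOPTS_VALUE through unchanged.
	pkt.pts = av_rescale_q_rnd(pkt.pts, inStream->time_base, outStream->time_base,
	                           (enum AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
	pkt.dts = av_rescale_q_rnd(pkt.dts, inStream->time_base, outStream->time_base,
	                           (enum AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
	pkt.duration = av_rescale_q(pkt.duration, inStream->time_base, outStream->time_base);
	pkt.pos = -1; // let the muxer work out byte positions itself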
Example #25
0
int decode_thread(void *arg) {

  VideoState *is = (VideoState *)arg;
  AVFormatContext *pFormatCtx = NULL;
  AVPacket pkt1, *packet = &pkt1;

  int video_index = -1;
  int audio_index = -1;
  int i;

  AVDictionary *io_dict = NULL;
  AVIOInterruptCB callback;

  is->videoStream=-1;
  is->audioStream=-1;

  global_video_state = is;
  // will interrupt blocking functions if we quit!
  callback.callback = decode_interrupt_cb;
  callback.opaque = is;
  if (avio_open2(&is->io_context, is->filename, 0, &callback, &io_dict))
  {
    fprintf(stderr, "Unable to open I/O for %s\n", is->filename);
    return -1;
  }

  // Open video file
  if(avformat_open_input(&pFormatCtx, is->filename, NULL, NULL)!=0)
    return -1; // Couldn't open file

  is->pFormatCtx = pFormatCtx;
  
  // Retrieve stream information
  if(avformat_find_stream_info(pFormatCtx, NULL)<0)
    return -1; // Couldn't find stream information
  
  // Dump information about file onto standard error
  av_dump_format(pFormatCtx, 0, is->filename, 0);
  
  // Find the first video stream

  for(i=0; i<pFormatCtx->nb_streams; i++) {
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO &&
       video_index < 0) {
      video_index=i;
    }
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO &&
       audio_index < 0) {
      audio_index=i;
    }
  }
  if(audio_index >= 0) {
    stream_component_open(is, audio_index);
  }
  if(video_index >= 0) {
    stream_component_open(is, video_index);
  }   

  if(is->videoStream < 0 || is->audioStream < 0) {
    fprintf(stderr, "%s: could not open codecs\n", is->filename);
    goto fail;
  }

  // main decode loop

  for(;;) {
    if(is->quit) {
      break;
    }
    // seek stuff goes here
    if(is->audioq.size > MAX_AUDIOQ_SIZE ||
       is->videoq.size > MAX_VIDEOQ_SIZE) {
      SDL_Delay(10);
      continue;
    }
    if(av_read_frame(is->pFormatCtx, packet) < 0) {
      if(is->pFormatCtx->pb->error == 0) {
        SDL_Delay(100); /* no error; wait for user input */
        continue;
      } else {
        break;
      }
    }
    // Is this a packet from the video stream?
    if(packet->stream_index == is->videoStream) {
      packet_queue_put(&is->videoq, packet);
    } else if(packet->stream_index == is->audioStream) {
      packet_queue_put(&is->audioq, packet);
    } else {
      av_free_packet(packet);
    }
  }
  /* all done - wait for it */
  while(!is->quit) {
    SDL_Delay(100);
  }

 fail:
  if(1){
    SDL_Event event;
    event.type = FF_QUIT_EVENT;
    event.user.data1 = is;
    SDL_PushEvent(&event);
  }
  return 0;
}
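This decode_thread() comes from the well-known SDL/FFmpeg tutorial and leans on a
PacketQueue type and packet_queue_put()/packet_queue_get() helpers defined earlier
in that tutorial. For context, a minimal sketch of the enqueue side, assuming an
AVPacketList chain guarded by SDL primitives:

typedef struct PacketQueue {
  AVPacketList *first_pkt, *last_pkt;
  int nb_packets;
  int size; // sum of pkt.size, compared against MAX_*Q_SIZE above
  SDL_mutex *mutex;
  SDL_cond *cond;
} PacketQueue;

int packet_queue_put(PacketQueue *q, AVPacket *pkt) {
  AVPacketList *pkt1;
  if(av_dup_packet(pkt) < 0) // make sure the packet owns its data
    return -1;
  pkt1 = av_malloc(sizeof(AVPacketList));
  if(!pkt1)
    return -1;
  pkt1->pkt = *pkt;
  pkt1->next = NULL;

  SDL_LockMutex(q->mutex);
  if(!q->last_pkt)
    q->first_pkt = pkt1;
  else
    q->last_pkt->next = pkt1;
  q->last_pkt = pkt1;
  q->nb_packets++;
  q->size += pkt1->pkt.size;
  SDL_CondSignal(q->cond); // wake a consumer blocked in packet_queue_get()
  SDL_UnlockMutex(q->mutex);
  return 0;
}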
Example #26
0
int main(int argc, const char *argv[]) {
  char buf[256];
  unsigned int i;
  int r;

  AVFormatContext *format_ctx;
  AVStream *stream;
  AVCodecContext *codec_ctx;
  AVCodec *codec;

  /* Initialize libav. */
  av_register_all();
  av_log_set_level(AV_LOG_QUIET);

  /* Open stdin. */
  format_ctx = avformat_alloc_context();
  if (format_ctx == NULL)
    return 1;

  r = avformat_open_input(&format_ctx, "pipe:0", NULL, NULL);
  if (r < 0)
    return 1;

  r = avformat_find_stream_info(format_ctx, NULL);
  if (r < 0)
    return 1;

  printf("{\n  \"streams\": [");

  /* Dump information for each stream. */
  for (i = 0; i < format_ctx->nb_streams; i++) {
    stream = format_ctx->streams[i];
    codec_ctx = stream->codec;

    codec = avcodec_find_decoder(codec_ctx->codec_id);
    if (codec == NULL)
      return -1;

    r = avcodec_open2(codec_ctx, codec, NULL);
    if (r < 0)
      return -1;

    /* Open the stream's JSON object. */
    printf(i == 0 ? "\n    {" : ",\n    {");
    printf("\n      \"index\": %d", stream->index);

    switch (codec_ctx->codec_type) {
    case AVMEDIA_TYPE_VIDEO:    printf(",\n      \"type\": \"video\"");    break;
    case AVMEDIA_TYPE_AUDIO:    printf(",\n      \"type\": \"audio\"");    break;
    case AVMEDIA_TYPE_SUBTITLE: printf(",\n      \"type\": \"subtitle\""); break;
    default:                    printf(",\n      \"type\": \"unknown\"");
    }

    printf(",\n      \"codec\": \"%s\"", codec->name);
    printf(",\n      \"start_time\": %f", stream->start_time > 0 ? stream->start_time * av_q2d(stream->time_base) : 0.0);
    printf(",\n      \"duration\": %f", stream->duration > 0 ? stream->duration * av_q2d(stream->time_base) : 0.0);

    /* Video-specific fields. */
    if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
      printf(",\n      \"width\": %d", codec_ctx->width);
      printf(",\n      \"height\": %d", codec_ctx->height);
      printf(",\n      \"bit_rate\": %d", codec_ctx->bit_rate);
      printf(",\n      \"frames\": %lld", stream->nb_frames);
      printf(",\n      \"frame_rate\": %f", stream->nb_frames > 0 ? av_q2d(stream->avg_frame_rate) : 0.0);

      if (codec_ctx->pix_fmt != AV_PIX_FMT_NONE) {
        printf(",\n      \"pixel_format\": \"%s\"", av_get_pix_fmt_name(codec_ctx->pix_fmt));
      } else {
        printf(",\n      \"pixel_format\": \"unknown\"");
      }
    }

    /* Audio-specific fields. */
    if (codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
      printf(",\n      \"channels\": %d", codec_ctx->channels);

      if (codec_ctx->channel_layout != 0) { // 0 means the layout is unknown
        av_get_channel_layout_string(&buf[0], 256, codec_ctx->channels, codec_ctx->channel_layout);
        printf(",\n      \"channel_layout\": \"%s\"", buf);
      } else {
        printf(",\n      \"channel_layout\": \"unknown\"");
      }

      printf(",\n      \"bit_rate\": %d", codec_ctx->bit_rate);
      printf(",\n      \"sample_rate\": %d", codec_ctx->sample_rate);

      if (codec_ctx->sample_fmt != AV_SAMPLE_FMT_NONE) {
        printf(",\n      \"sample_format\": \"%s\"", av_get_sample_fmt_name(codec_ctx->sample_fmt));
      }
    }

    /* Close the stream's JSON object. */
    printf("\n    }");
  }

  printf("\n  ]\n}\n");

  /* Close the input. */
  avformat_close_input(&format_ctx);

  return 0;
}
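One caveat with this probe: each stream's decoder is opened with avcodec_open2()
but never closed before the program exits. That is harmless for a one-shot tool,
but a longer-lived caller would want a cleanup pass along these lines before
closing the input:

  /* Sketch: release each decoder opened in the loop above. */
  for (i = 0; i < format_ctx->nb_streams; i++)
    avcodec_close(format_ctx->streams[i]->codec);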
//------------------------------------------------------------------------------
void videoDecodingThread(ThreadInfo* p_threadInfo)
{
    // Read ThreadInfo struct, then delete it
    FFmpegVideoPlayer* videoPlayer = p_threadInfo->videoPlayer;
    VideoInfo& videoInfo = videoPlayer->getVideoInfo();
    boost::mutex* playerMutex = p_threadInfo->playerMutex;
    boost::condition_variable* playerCondVar = p_threadInfo->playerCondVar;
    boost::mutex* decodeMutex = p_threadInfo->decodingMutex;
    boost::condition_variable* decodeCondVar = p_threadInfo->decodingCondVar;
    bool isLoop = p_threadInfo->isLoop;
    staticOgreLog = videoPlayer->getLog();
    delete p_threadInfo;
    
    // Initialize FFmpeg  
    av_register_all();
    av_log_set_callback(log_callback);
    av_log_set_level(AV_LOG_WARNING);
    
    // Initialize video decoding, filling the VideoInfo
    // Open the input file
    AVFormatContext* formatContext = NULL;
    const char* name = videoPlayer->getVideoFilename().c_str();
    if (avformat_open_input(&formatContext, name, NULL, NULL) < 0) 
    {
        videoInfo.error = "Could not open input: ";
        videoInfo.error.append(videoPlayer->getVideoFilename());
        playerCondVar->notify_all();
        return;
    }
    
    // Read stream information
    if (avformat_find_stream_info(formatContext, NULL) < 0) 
    {
        videoInfo.error = "Could not find stream information.";
        playerCondVar->notify_all();
        return;
    }
    
    // Get streams
    // Audio stream
    AVStream* audioStream = NULL;
    AVCodecContext* audioCodecContext = NULL;
    int audioStreamIndex = -1;
    if (!openCodecContext(formatContext, AVMEDIA_TYPE_AUDIO, videoInfo, audioStreamIndex)) 
    {
        // The error itself is set by openCodecContext
        playerCondVar->notify_all();
        return;
    }
    audioStream = formatContext->streams[audioStreamIndex];
    audioCodecContext = audioStream->codec;
    
    // Video stream
    AVStream* videoStream = NULL;
    AVCodecContext* videoCodecContext = NULL;
    int videoStreamIndex = -1;
    if (!openCodecContext(formatContext, AVMEDIA_TYPE_VIDEO, videoInfo, videoStreamIndex)) 
    {
        // The error itself is set by openCodecContext
        playerCondVar->notify_all();
        return;
    }
    videoStream = formatContext->streams[videoStreamIndex];
    videoCodecContext = videoStream->codec;
    
    // Dump information
    av_dump_format(formatContext, 0, videoPlayer->getVideoFilename().c_str(), 0);
    
    // Store useful information in VideoInfo struct
    double timeBase = ((double)audioStream->time_base.num) / (double)audioStream->time_base.den;
    videoInfo.audioDuration = audioStream->duration * timeBase;
    videoInfo.audioSampleRate = audioCodecContext->sample_rate;
    videoInfo.audioBitRate = audioCodecContext->bit_rate;
    videoInfo.audioNumChannels = 
            videoInfo.audioNumChannels > 0 ? videoInfo.audioNumChannels : audioCodecContext->channels;
    
    timeBase = ((double)videoStream->time_base.num) / (double)videoStream->time_base.den;
    videoInfo.videoDuration = videoStream->duration * timeBase;
    videoInfo.videoWidth = videoCodecContext->width;
    videoInfo.videoHeight = videoCodecContext->height;
    
    // If a duration is below 0 seconds, something is very fishy.
    // Use the format duration instead; it's the best guess we have.
    if (videoInfo.audioDuration < 0.0)
    {
        videoInfo.audioDuration = ((double)formatContext->duration) / AV_TIME_BASE;
    }
    if (videoInfo.videoDuration < 0.0)
    {
        videoInfo.videoDuration = ((double)formatContext->duration) / AV_TIME_BASE;
    }
 
    // Store the longer of both durations. This is what determines when looped videos
    // will begin anew
    videoInfo.longerDuration = videoInfo.videoDuration > videoInfo.audioDuration ? 
                                videoInfo.videoDuration : videoInfo.audioDuration;
            
    // Wake up video player
    videoInfo.infoFilled = true;
    playerCondVar->notify_all();
    
    // Initialize packet, set data to NULL, let the demuxer fill it
    AVPacket packet;
    av_init_packet(&packet);
    packet.data = NULL;
    packet.size = 0;
    
    // Initialize SWS context
    SwsContext* swsContext = NULL;
    swsContext = sws_getCachedContext(swsContext,
                                videoInfo.videoWidth, videoInfo.videoHeight, videoCodecContext->pix_fmt, 
                                videoInfo.videoWidth, videoInfo.videoHeight, PIX_FMT_RGBA, 
                                SWS_BICUBIC, NULL, NULL, NULL);
    
    // Create destination picture
    AVFrame* destPic = avcodec_alloc_frame();
    avpicture_alloc((AVPicture*)destPic, PIX_FMT_RGBA, videoInfo.videoWidth, videoInfo.videoHeight);
    
    // Get the correct target channel layout
    uint64_t targetChannelLayout;
    // Keep the source layout
    if (audioCodecContext->channels == videoInfo.audioNumChannels)
    {
        targetChannelLayout = audioCodecContext->channel_layout;
    }
    // Or determine a new one
    else
    {
        switch (videoInfo.audioNumChannels)
        {
            case 1:
                targetChannelLayout = AV_CH_LAYOUT_MONO;
                break;
                
            case 2:
                targetChannelLayout = AV_CH_LAYOUT_STEREO;
                break;
                
            default:
                targetChannelLayout = audioCodecContext->channel_layout;
                break;
        }
    }
    
    // Initialize SWR context
    SwrContext* swrContext = swr_alloc_set_opts(NULL, 
                targetChannelLayout, AV_SAMPLE_FMT_FLT, audioCodecContext->sample_rate,
                audioCodecContext->channel_layout, audioCodecContext->sample_fmt, audioCodecContext->sample_rate, 
                0, NULL);
    int result = swr_init(swrContext);
    if (result != 0) 
    {
        videoInfo.error = "Could not initialize swr context: " + boost::lexical_cast<std::string>(result);
        playerCondVar->notify_all();
        return;
    }
    
    // Create destination sample buffer
    uint8_t** destBuffer = NULL;
    int destBufferLinesize;
    av_samples_alloc_array_and_samples( &destBuffer,
                                        &destBufferLinesize,
                                        videoInfo.audioNumChannels,
                                        2048,
                                        AV_SAMPLE_FMT_FLT,
                                        0);
    
    // Main decoding loop
    // Read the input file frame by frame
    AVFrame* frame = NULL;
    while (av_read_frame(formatContext, &packet) >= 0) 
    {
        // Only start decoding when at least one of the buffers is not full
        while (videoPlayer->getVideoBufferIsFull() && videoPlayer->getAudioBufferIsFull())
        {
            boost::unique_lock<boost::mutex> lock(*decodeMutex);
            boost::chrono::steady_clock::time_point const timeOut = 
                boost::chrono::steady_clock::now() + boost::chrono::milliseconds((int)videoPlayer->getBufferTarget() * 1000);
            decodeCondVar->wait_until(lock, timeOut);
            
            if (videoInfo.decodingAborted)
            {
                break;
            }
        }
            
        // Break if the decoding was aborted
        if (videoInfo.decodingAborted)
        {
            break;
        }
        
        // Initialize frame
        if (!frame) 
        {
            if (!(frame = avcodec_alloc_frame())) 
            {
                videoInfo.error = "Out of memory.";
                return;
            }
        } 
        else
        {
            avcodec_get_frame_defaults(frame);
        }
        
        // Decode the packet
        AVPacket orig_pkt = packet;
        do 
        {
            int decoded = 0;
            if (packet.stream_index == audioStreamIndex)
            {
                decoded = decodeAudioPacket(packet, audioCodecContext, audioStream, frame, swrContext,
                                            destBuffer, destBufferLinesize, videoPlayer, videoInfo, isLoop);
            }
            else if (packet.stream_index == videoStreamIndex)
            {
                decoded = decodeVideoPacket(packet, videoCodecContext, videoStream, frame, swsContext, 
                                            (AVPicture*)destPic, videoPlayer, videoInfo, isLoop);
            }
            else
            {
                // This means we have a stream that is neither our video nor our audio stream.
                // Just skip the packet.
                break;
            }
            
            // decoded will be negative on an error
            if (decoded < 0)
            {
                // The error itself is set by the decode functions
                playerCondVar->notify_all();
                return;
            }
            
            // Increment data pointer, subtract from size
            packet.data += decoded;
            packet.size -= decoded;
        } while (packet.size > 0);
        
        av_free_packet(&orig_pkt);
    }
    
    // We're done. Close everything
    avcodec_free_frame(&frame);
    avpicture_free((AVPicture*)destPic);
    avcodec_free_frame(&destPic);
    avcodec_close(videoCodecContext);
    avcodec_close(audioCodecContext);
    sws_freeContext(swsContext);
    av_freep(&destBuffer[0]);
    av_freep(&destBuffer); // also free the array of channel pointers itself
    swr_free(&swrContext);
    avformat_close_input(&formatContext);
    
    videoInfo.audioDuration = videoInfo.audioDecodedDuration;
    videoInfo.decodingDone = videoInfo.decodingAborted ? false : true;
}
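decodeVideoPacket() and decodeAudioPacket() are helpers defined elsewhere in this
player, so their exact signatures here are an assumption. A sketch of what the
video side plausibly looks like with the old decode API used above, with the
hand-off to FFmpegVideoPlayer elided:

int decodeVideoPacket(AVPacket* packet, AVCodecContext* codecContext,
                      AVStream* stream, AVFrame* frame,
                      SwsContext* swsContext, AVPicture* destPic)
{
    // Decode one packet; gotFrame tells us whether a complete frame came out.
    int gotFrame = 0;
    int decoded = avcodec_decode_video2(codecContext, frame, &gotFrame, packet);
    if (decoded < 0)
        return decoded; // the caller treats negative values as errors

    if (gotFrame)
    {
        // Convert the decoded frame to RGBA into the destination picture.
        sws_scale(swsContext, frame->data, frame->linesize,
                  0, codecContext->height, destPic->data, destPic->linesize);
        // ... copy destPic plus the frame's timestamp into the player's buffer ...
    }
    return decoded; // bytes consumed from the packet
}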
Example #28
0
int hardsubx_process_data(struct lib_hardsubx_ctx *ctx)
{
	// Get the required media attributes and initialize structures
	av_register_all();
	
	if(avformat_open_input(&ctx->format_ctx, ctx->inputfile[0], NULL, NULL)!=0)
	{
		fatal (EXIT_READ_ERROR, "Error reading input file!\n");
	}

	if(avformat_find_stream_info(ctx->format_ctx, NULL)<0)
	{
		fatal (EXIT_READ_ERROR, "Error reading input stream!\n");
	}

	// Important call in order to determine media information using ffmpeg
	// TODO: Handle multiple inputs
	av_dump_format(ctx->format_ctx, 0, ctx->inputfile[0], 0);
	

	ctx->video_stream_id = -1;
	for(int i = 0; i < ctx->format_ctx->nb_streams; i++)
	{
		if(ctx->format_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
		{
			ctx->video_stream_id = i;
			break;
		}
	}
	if(ctx->video_stream_id == -1)
	{
		fatal (EXIT_READ_ERROR, "Video Stream not found!\n");
	}

	ctx->codec_ctx = ctx->format_ctx->streams[ctx->video_stream_id]->codec;
	ctx->codec = avcodec_find_decoder(ctx->codec_ctx->codec_id);
	if(ctx->codec == NULL)
	{
		fatal (EXIT_READ_ERROR, "Input codec is not supported!\n");
	}

	if(avcodec_open2(ctx->codec_ctx, ctx->codec, &ctx->options_dict) < 0)
	{
		fatal (EXIT_READ_ERROR, "Error opening input codec!\n");
	}

	ctx->frame = av_frame_alloc();
	ctx->rgb_frame = av_frame_alloc();
	if(!ctx->frame || !ctx->rgb_frame)
	{
		fatal(EXIT_NOT_ENOUGH_MEMORY, "Not enough memory to initialize frame!");
	}

	int frame_bytes = av_image_get_buffer_size(AV_PIX_FMT_RGB24, ctx->codec_ctx->width, ctx->codec_ctx->height, 16);
	ctx->rgb_buffer = (uint8_t *)av_malloc(frame_bytes*sizeof(uint8_t));
	
	ctx->sws_ctx = sws_getContext(
			ctx->codec_ctx->width,
			ctx->codec_ctx->height,
			ctx->codec_ctx->pix_fmt,
			ctx->codec_ctx->width,
			ctx->codec_ctx->height,
			AV_PIX_FMT_RGB24,
			SWS_BILINEAR,
			NULL,NULL,NULL
		);

	av_image_fill_arrays(ctx->rgb_frame->data, ctx->rgb_frame->linesize, ctx->rgb_buffer, AV_PIX_FMT_RGB24, ctx->codec_ctx->width, ctx->codec_ctx->height, 1);

	// Pass on the processing context to the appropriate functions
	struct encoder_ctx *enc_ctx;
	enc_ctx = init_encoder(&ccx_options.enc_cfg);
	
	mprint("Beginning burned-in subtitle detection...\n");
	hardsubx_process_frames_linear(ctx, enc_ctx);

	dinit_encoder(&enc_ctx, 0); //TODO: Replace 0 with end timestamp

	// Free the allocated memory for frame processing
	av_free(ctx->rgb_buffer);
	av_frame_free(&ctx->rgb_frame); // frames from av_frame_alloc() need av_frame_free()
	av_frame_free(&ctx->frame);
	avcodec_close(ctx->codec_ctx);
	avformat_close_input(&ctx->format_ctx);

	return 0; // the function is declared int, so return a status
}
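hardsubx_process_frames_linear() does the actual frame-by-frame work and is
defined elsewhere in CCExtractor; the loop below is only a sketch of its general
shape, with the subtitle OCR step elided:

	/* Sketch: pull packets, decode video frames, convert them to RGB24
	 * using the sws context prepared above, then OCR each RGB frame. */
	AVPacket packet;
	while(av_read_frame(ctx->format_ctx, &packet) >= 0)
	{
		if(packet.stream_index == ctx->video_stream_id)
		{
			int got_frame = 0;
			avcodec_decode_video2(ctx->codec_ctx, ctx->frame, &got_frame, &packet);
			if(got_frame)
			{
				sws_scale(ctx->sws_ctx, (const uint8_t * const *)ctx->frame->data,
						ctx->frame->linesize, 0, ctx->codec_ctx->height,
						ctx->rgb_frame->data, ctx->rgb_frame->linesize);
				/* ... run OCR over ctx->rgb_frame here ... */
			}
		}
		av_packet_unref(&packet);
	}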
void cVideo::ShowPicture(const char *fname, const char *)
{
	lt_info("%s(%s)\n", __func__, fname);
	if (access(fname, R_OK))
		return;

	unsigned int i;
	int stream_id = -1;
	int got_frame = 0;
	int len;
	AVFormatContext *avfc = NULL;
	AVCodecContext *c = NULL;
	AVCodec *codec;
	AVFrame *frame, *rgbframe;
	AVPacket avpkt;

	if (avformat_open_input(&avfc, fname, NULL, NULL) < 0) {
		lt_info("%s: Could not open file %s\n", __func__, fname);
		return;
	}

	if (avformat_find_stream_info(avfc, NULL) < 0) {
		lt_info("%s: Could not find file info %s\n", __func__, fname);
		goto out_close;
	}
	for (i = 0; i < avfc->nb_streams; i++) {
		if (avfc->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			stream_id = i;
			break;
		}
	}
	if (stream_id < 0)
		goto out_close;
	c = avfc->streams[stream_id]->codec;
	codec = avcodec_find_decoder(c->codec_id);
	if (!codec || avcodec_open2(c, codec, NULL) < 0) {
		lt_info("%s: Could not find/open the codec, id 0x%x\n", __func__, c->codec_id);
		goto out_close;
	}
	frame = av_frame_alloc();
	rgbframe = av_frame_alloc();
	if (!frame || !rgbframe) {
		lt_info("%s: Could not allocate video frame\n", __func__);
		goto out_free;
	}
	av_init_packet(&avpkt);
	if (av_read_frame(avfc, &avpkt) < 0) {
		lt_info("%s: av_read_frame < 0\n", __func__);
		goto out_free;
	}
	len = avcodec_decode_video2(c, frame, &got_frame, &avpkt);
	if (len < 0) {
		lt_info("%s: avcodec_decode_video2 %d\n", __func__, len);
		av_free_packet(&avpkt);
		goto out_free;
	}
	if (avpkt.size > len)
		lt_info("%s: WARN: pkt->size %d != len %d\n", __func__, avpkt.size, len);
	if (got_frame) {
		unsigned int need = avpicture_get_size(PIX_FMT_RGB32, c->width, c->height);
		struct SwsContext *convert = sws_getContext(c->width, c->height, c->pix_fmt,
							    c->width, c->height, PIX_FMT_RGB32,
							    SWS_BICUBIC, 0, 0, 0);
		if (!convert)
			lt_info("%s: ERROR setting up SWS context\n", __func__);
		else {
			buf_m.lock();
			SWFramebuffer *f = &buffers[buf_in];
			if (f->size() < need)
				f->resize(need);
			avpicture_fill((AVPicture *)rgbframe, &(*f)[0], PIX_FMT_RGB32,
					c->width, c->height);
			sws_scale(convert, frame->data, frame->linesize, 0, c->height,
					rgbframe->data, rgbframe->linesize);
			sws_freeContext(convert);
			f->width(c->width);
			f->height(c->height);
			f->pts(AV_NOPTS_VALUE);
			AVRational a = av_guess_sample_aspect_ratio(avfc, avfc->streams[stream_id], frame);
			f->AR(a);
			buf_in++;
			buf_in %= VDEC_MAXBUFS;
			buf_num++;
			if (buf_num > (VDEC_MAXBUFS - 1)) {
				lt_debug("%s: buf_num overflow\n", __func__);
				buf_out++;
				buf_out %= VDEC_MAXBUFS;
				buf_num--;
			}
			buf_m.unlock();
		}
	}
	av_free_packet(&avpkt);
 out_free:
	avcodec_close(c);
	av_frame_free(&frame);
	av_frame_free(&rgbframe);
 out_close:
	avformat_close_input(&avfc);
	lt_debug("%s(%s) end\n", __func__, fname);
}
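A side note on the decode call: avcodec_decode_video2(), as used in ShowPicture(),
is deprecated in FFmpeg 3.1 and later in favour of the send/receive pair. The
single-frame decode above would look roughly like this with the newer API:

	/* Sketch: one-shot decode via avcodec_send_packet()/avcodec_receive_frame().
	 * Error handling trimmed; both calls return 0 on success. */
	if (avcodec_send_packet(c, &avpkt) == 0 &&
	    avcodec_receive_frame(c, frame) == 0)
		got_frame = 1;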
int FfmpegCamera::OpenFfmpeg() {

    Debug ( 2, "OpenFfmpeg called." );

    mOpenStart = time(NULL);
    mIsOpening = true;

    // Open the input, not necessarily a file
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 4, 0)
    Debug ( 1, "Calling av_open_input_file" );
    if ( av_open_input_file( &mFormatContext, mPath.c_str(), NULL, 0, NULL ) !=0 )
#else
    // Handle options
    AVDictionary *opts = 0;
    StringVector opVect = split(Options(), ",");
    
    // Set transport method as specified by method field, rtpUni is default
    if ( Method() == "rtpMulti" )
    	opVect.push_back("rtsp_transport=udp_multicast");
    else if ( Method() == "rtpRtsp" )
        opVect.push_back("rtsp_transport=tcp");
    else if ( Method() == "rtpRtspHttp" )
        opVect.push_back("rtsp_transport=http");
    
  	Debug(2, "Number of Options: %d",opVect.size());
    for (size_t i=0; i<opVect.size(); i++)
    {
    	StringVector parts = split(opVect[i],"=");
    	if (parts.size() > 1) {
    		parts[0] = trimSpaces(parts[0]);
    		parts[1] = trimSpaces(parts[1]);
    	    if ( av_dict_set(&opts, parts[0].c_str(), parts[1].c_str(), 0) == 0 ) {
    	        Debug(2, "set option %d '%s' to '%s'", i,  parts[0].c_str(), parts[1].c_str());
    	    }
    	    else
    	    {
    	        Warning( "Error trying to set option %d '%s' to '%s'", i, parts[0].c_str(), parts[1].c_str() );
    	    }
    		  
    	}
    }    
	Debug ( 1, "Calling avformat_open_input" );

    mFormatContext = avformat_alloc_context( );
    mFormatContext->interrupt_callback.callback = FfmpegInterruptCallback;
    mFormatContext->interrupt_callback.opaque = this;

    if ( avformat_open_input( &mFormatContext, mPath.c_str(), NULL, &opts ) !=0 )
#endif
    {
        mIsOpening = false;
        Error( "Unable to open input %s due to: %s", mPath.c_str(), strerror(errno) );
        return -1;
    }

    mIsOpening = false;
    Debug ( 1, "Opened input" );

    Info( "Stream open %s", mPath.c_str() );
    startTime=av_gettime();//FIXME here or after find_Stream_info
    
    //FIXME can speed up initial analysis but need sensible parameters...
    //mFormatContext->probesize = 32;
    //mFormatContext->max_analyze_duration = 32;
    // Locate stream info from avformat_open_input
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 4, 0)
    Debug ( 1, "Calling av_find_stream_info" );
    if ( av_find_stream_info( mFormatContext ) < 0 )
#else
    Debug ( 1, "Calling avformat_find_stream_info" );
    if ( avformat_find_stream_info( mFormatContext, 0 ) < 0 )
#endif
        Fatal( "Unable to find stream info from %s due to: %s", mPath.c_str(), strerror(errno) );
    
    Info( "Find stream info complete %s", mPath.c_str() );
    Debug ( 1, "Got stream info" );

    // Find first video stream present
    mVideoStreamId = -1;
    for (unsigned int i=0; i < mFormatContext->nb_streams; i++ )
    {
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,2,1)
        if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
#else
        if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO )
#endif
		{
			mVideoStreamId = i;
			break;
		}
        if(mAudioStreamId == -1) //FIXME best way to copy all other streams?
        {
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,2,1)
		    if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO )
#else
		    if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO )
#endif
		    {
                mAudioStreamId = i;
		    }
        }
    }
    if ( mVideoStreamId == -1 )
        Fatal( "Unable to locate video stream in %s", mPath.c_str() );

    Debug ( 1, "Found video stream" );

    mCodecContext = mFormatContext->streams[mVideoStreamId]->codec;

    // Try and get the codec from the codec context
    if ( (mCodec = avcodec_find_decoder( mCodecContext->codec_id )) == NULL )
        Fatal( "Can't find codec for video stream from %s", mPath.c_str() );

    Debug ( 1, "Found decoder" );

    // Open the codec
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 7, 0)
    Debug ( 1, "Calling avcodec_open" );
    if ( avcodec_open( mCodecContext, mCodec ) < 0 )
#else
    Debug ( 1, "Calling avcodec_open2" );
    if ( avcodec_open2( mCodecContext, mCodec, 0 ) < 0 )
#endif
        Fatal( "Unable to open codec for video stream from %s", mPath.c_str() );

    Debug ( 1, "Opened codec" );

    // Allocate space for the native video frame
    mRawFrame = avcodec_alloc_frame();

    // Allocate space for the converted video frame
    mFrame = avcodec_alloc_frame();
    
    if(mRawFrame == NULL || mFrame == NULL)
        Fatal( "Unable to allocate frame for %s", mPath.c_str() );

    Debug ( 1, "Allocated frames" );
    
    int pSize = avpicture_get_size( imagePixFormat, width, height );
    if( (unsigned int)pSize != imagesize) {
        Fatal("Image size mismatch. Required: %d Available: %d",pSize,imagesize);
    }

    Debug ( 1, "Validated imagesize" );
    
#if HAVE_LIBSWSCALE
    Debug ( 1, "Calling sws_isSupportedInput" );
    if(!sws_isSupportedInput(mCodecContext->pix_fmt)) {
        Fatal("swscale does not support the codec format: %c%c%c%c",(mCodecContext->pix_fmt)&0xff,((mCodecContext->pix_fmt>>8)&0xff),((mCodecContext->pix_fmt>>16)&0xff),((mCodecContext->pix_fmt>>24)&0xff));
    }