Example #1
0
bool CFFmpegImage::LoadImageFromMemory(unsigned char* buffer, unsigned int bufSize,
                                      unsigned int width, unsigned int height)
{
  
  uint8_t* fbuffer = (uint8_t*)av_malloc(FFMPEG_FILE_BUFFER_SIZE);
  if (!fbuffer)
  {
    CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate FFMPEG_FILE_BUFFER_SIZE");
    return false;
  }
  MemBuffer buf;
  buf.data = buffer;
  buf.size = bufSize;
  buf.pos = 0;

  AVIOContext* ioctx = avio_alloc_context(fbuffer, FFMPEG_FILE_BUFFER_SIZE, 0, &buf,
                                          mem_file_read, NULL, mem_file_seek);

  if (!ioctx)
  {
    av_free(fbuffer);
    CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate AVIOContext");
    return false;
  }

  AVFormatContext* fctx = avformat_alloc_context();
  if (!fctx)
  {
    av_free(ioctx->buffer);
    av_free(ioctx);
    CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate AVFormatContext");
    return false;
  }

  fctx->pb = ioctx;
  ioctx->max_packet_size = FFMPEG_FILE_BUFFER_SIZE;

  if (avformat_open_input(&fctx, "", NULL, NULL) < 0)
  {
    avformat_close_input(&fctx);
    FreeIOCtx(ioctx);
    return false;
  }

  AVCodecContext* codec_ctx = fctx->streams[0]->codec;
  AVCodec* codec = avcodec_find_decoder(codec_ctx->codec_id);
  if (avcodec_open2(codec_ctx, codec, NULL) < 0)
  {
    avformat_close_input(&fctx);
    FreeIOCtx(ioctx);
    return false;
  }

  AVPacket pkt;
  AVFrame* frame = av_frame_alloc();
  av_read_frame(fctx, &pkt);
  int frame_decoded;
  int ret = avcodec_decode_video2(codec_ctx, frame, &frame_decoded, &pkt);
  if (ret < 0)
    CLog::Log(LOGDEBUG, "Error [%d] while decoding frame: %s\n", ret, strerror(AVERROR(ret)));

  if (frame_decoded != 0)
  {
    av_frame_free(&m_pFrame);
    m_pFrame = av_frame_clone(frame);

    if (m_pFrame)
    {
      m_height = m_pFrame->height;
      m_width = m_pFrame->width;
    }    
    else
    {
      CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate a picture data buffer");
      frame_decoded = 0;
    }
  }
  else
    CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not decode a frame");

  av_frame_free(&frame);
  av_free_packet(&pkt);
  avcodec_close(codec_ctx);
  avformat_close_input(&fctx);
  FreeIOCtx(ioctx);

  return (frame_decoded != 0);
}
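Note on Example #1: the avio_alloc_context() call relies on two custom callbacks, mem_file_read and mem_file_seek, that are not shown. The sketch below is a plausible implementation assuming MemBuffer is the simple {data, size, pos} struct the calling code suggests; it is not the project's actual code.

// Hedged sketch, not from the original file: plausible mem_file_read /
// mem_file_seek callbacks for a MemBuffer {data, size, pos} struct.
#include <algorithm>
#include <cstring>
extern "C" {
#include <libavformat/avio.h>
}

struct MemBuffer   // assumed layout, matching the field accesses above
{
  uint8_t* data;
  int64_t size;
  int64_t pos;
};

static int mem_file_read(void* opaque, uint8_t* buf, int buf_size)
{
  MemBuffer* mem = static_cast<MemBuffer*>(opaque);
  int64_t remaining = mem->size - mem->pos;
  if (remaining <= 0)
    return AVERROR_EOF;                       // tell libavformat we are done
  int copied = static_cast<int>(std::min<int64_t>(buf_size, remaining));
  memcpy(buf, mem->data + mem->pos, copied);
  mem->pos += copied;
  return copied;
}

static int64_t mem_file_seek(void* opaque, int64_t offset, int whence)
{
  MemBuffer* mem = static_cast<MemBuffer*>(opaque);
  if (whence == AVSEEK_SIZE)                  // libavformat asks for the stream size
    return mem->size;
  int64_t base = (whence == SEEK_CUR) ? mem->pos
               : (whence == SEEK_END) ? mem->size : 0;
  int64_t newpos = base + offset;
  if (newpos < 0 || newpos > mem->size)
    return AVERROR(EINVAL);
  mem->pos = newpos;
  return newpos;
}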
Example #2
0
int openAVDumping(void* window, bool video_opengl, char* dumpfile, int sf) {

    if (tasflags.framerate <= 0) {
        debuglog(LCF_DUMP | LCF_ERROR, "Not supporting non deterministic timer");
        return 1;
    }

    start_frame = sf;
    accum_samples = 0;

    int width, height;
    AVPixelFormat pixfmt = initVideoCapture(window, video_opengl, &width, &height);
    if (pixfmt == AV_PIX_FMT_NONE) {
        debuglog(LCF_DUMP | LCF_ERROR, "Unable to initialize video capture");
        return 1;
    }

    /* Initialize AVCodec and AVFormat libraries */
    av_register_all();

    /* Initialize AVOutputFormat */
    outputFormat = av_guess_format(NULL, dumpfile, NULL);
    if (!outputFormat) {
        debuglog(LCF_DUMP | LCF_ERROR, "Could not find suitable output format for file ", dumpfile);
        return 1;
    }

    /* Initialize AVFormatContext */

    formatContext = avformat_alloc_context();
    if (!formatContext) {
        debuglog(LCF_DUMP | LCF_ERROR, "Could not initialize AVFormatContext");
        return 1;
    }
    formatContext->oformat = outputFormat;

    /*** Create video stream ***/

    /* Initialize video AVCodec */

    AVCodec *video_codec = NULL;
    AVCodecID codec_id = AV_CODEC_ID_MPEG4;
    //int codec_id = AV_CODEC_ID_H264;
    video_codec = avcodec_find_encoder(codec_id);
    if (!video_codec) {
        debuglog(LCF_DUMP | LCF_ERROR, "Video codec not found");
        return 1;
    }
    outputFormat->video_codec = codec_id;

    /* Initialize video stream */

    video_st = avformat_new_stream(formatContext, video_codec);
    if (!video_st) {
        debuglog(LCF_DUMP | LCF_ERROR, "Could not initialize video AVStream");
        return 1;
    }

    /* Fill video stream parameters */
    video_st->id = formatContext->nb_streams - 1;
    video_st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    video_st->codec->codec_id = codec_id;

    video_st->codec->bit_rate = 400000;
    video_st->codec->width = width;
    video_st->codec->height = height;
    video_st->time_base = (AVRational){1,static_cast<int>(tasflags.framerate)};
    video_st->codec->time_base = (AVRational){1,static_cast<int>(tasflags.framerate)};
    video_st->codec->gop_size = 10; /* emit one intra frame every ten frames */
    video_st->codec->max_b_frames = 1;
    video_st->codec->pix_fmt = AV_PIX_FMT_YUV420P;

    /* Some formats want stream headers to be separate. */
    if (formatContext->oformat->flags & AVFMT_GLOBALHEADER)
        video_st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

    /* Use a preset for h264 */
    if (codec_id == AV_CODEC_ID_H264)
        av_opt_set(video_st->codec->priv_data, "preset", "slow", 0);

    /* Open the codec */
    if (avcodec_open2(video_st->codec, video_codec, NULL) < 0) {
        debuglog(LCF_DUMP | LCF_ERROR, "Could not open video codec");
        return 1;
    }
    
    /*** Create audio stream ***/

    /* Initialize audio AVCodec */

    AVCodec *audio_codec = NULL;
    AVCodecID audio_codec_id = AV_CODEC_ID_PCM_S16LE;
    //AVCodecID audio_codec_id = AV_CODEC_ID_VORBIS;
    audio_codec = avcodec_find_encoder(audio_codec_id);
    if (!audio_codec) {
        debuglog(LCF_DUMP | LCF_ERROR, "Audio codec not found");
        return 1;
    }

    /* Initialize audio stream */

    audio_st = avformat_new_stream(formatContext, audio_codec);
    if (!audio_st) {
        debuglog(LCF_DUMP | LCF_ERROR, "Could not initialize video AVStream");
        return 1;
    }

    /* Fill audio stream parameters */

    audio_st->id = formatContext->nb_streams - 1;
    audio_st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    if (audiocontext.outBitDepth == 8)
        audio_st->codec->sample_fmt = AV_SAMPLE_FMT_U8;
    else if (audiocontext.outBitDepth == 16)
        audio_st->codec->sample_fmt = AV_SAMPLE_FMT_S16;
    else {
        debuglog(LCF_DUMP | LCF_ERROR, "Unknown audio format");
        return 1;
    }
    audio_st->codec->bit_rate = 64000;
    audio_st->codec->sample_rate = audiocontext.outFrequency;
    audio_st->codec->channels = audiocontext.outNbChannels;

    /* Some formats want stream headers to be separate. */

    if (formatContext->oformat->flags & AVFMT_GLOBALHEADER)
        audio_st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

    /* Open the codec */
    if (avcodec_open2(audio_st->codec, audio_codec, NULL) < 0) {
        debuglog(LCF_DUMP | LCF_ERROR, "Could not open audio codec");
        return 1;
    }

    /* Initialize video AVFrame */

    video_frame = av_frame_alloc();
    if (!video_frame) {
        debuglog(LCF_DUMP | LCF_ERROR, "Could not allocate AVFrame");
        return 1;
    }
    video_frame->format = video_st->codec->pix_fmt;
    video_frame->width  = video_st->codec->width;
    video_frame->height = video_st->codec->height;

    /* Initialize audio AVFrame */
    audio_frame = av_frame_alloc();

    /* Allocate the image buffer inside the AVFrame */

    int ret = av_image_alloc(video_frame->data, video_frame->linesize, video_st->codec->width, video_st->codec->height, video_st->codec->pix_fmt, 32);
    if (ret < 0) {
        debuglog(LCF_DUMP | LCF_ERROR, "Could not allocate raw picture buffer");
        return 1;
    }


    /* Initialize swscale context for pixel format conversion */

    toYUVctx = sws_getContext(video_frame->width, video_frame->height,  
                              pixfmt,
                              video_frame->width, video_frame->height, 
                              AV_PIX_FMT_YUV420P,
                              SWS_LANCZOS | SWS_ACCURATE_RND, NULL,NULL,NULL);

    if (toYUVctx == NULL) {
        debuglog(LCF_DUMP | LCF_ERROR, "Could not allocate swscale context");
        return 1;
    }

    /* Print information about the input and output streams */
    threadState.setOwnCode(true); // We protect the following code because it performs IO that we hook
    av_dump_format(formatContext, 0, dumpfile, 1);
    
    /* Set up output file */
    if (avio_open(&formatContext->pb, dumpfile, AVIO_FLAG_WRITE) < 0) {
        threadState.setOwnCode(false);
        debuglog(LCF_DUMP | LCF_ERROR, "Could not open video file");
        return 1;
    }

    /* Write header */
    if (avformat_write_header(formatContext, NULL) < 0) {
        threadState.setOwnCode(false);
        debuglog(LCF_DUMP | LCF_ERROR, "Could not write header");
        return 1;
    }

    threadState.setOwnCode(false);
    return 0;
}
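openAVDumping() above has no visible counterpart that finalizes the file. Below is a minimal teardown sketch, assuming the same globals (formatContext, video_st, audio_st, video_frame, audio_frame, toYUVctx) and the same deprecated stream->codec API; the real project likely flushes the encoders before writing the trailer.

/* Hedged sketch: matching teardown for openAVDumping(), using the globals it fills. */
static void closeAVDumping(void)
{
    /* Finalize the container (index, trailer) */
    av_write_trailer(formatContext);

    /* Close the codecs opened with avcodec_open2() */
    avcodec_close(video_st->codec);
    avcodec_close(audio_st->codec);

    /* Free the frame buffers and the pixel-format converter */
    av_freep(&video_frame->data[0]);   /* buffer from av_image_alloc() */
    av_frame_free(&video_frame);
    av_frame_free(&audio_frame);
    sws_freeContext(toYUVctx);

    /* Close the output file and free the muxer context */
    avio_close(formatContext->pb);
    avformat_free_context(formatContext);
}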
Example #3
0
static void init_fps(int bf, int audio_preroll, int fps)
{
    AVStream *st;
    int iobuf_size = force_iobuf_size ? force_iobuf_size : sizeof(iobuf);
    ctx = avformat_alloc_context();
    if (!ctx)
        exit(1);
    ctx->oformat = av_guess_format(format, NULL, NULL);
    if (!ctx->oformat)
        exit(1);
    ctx->pb = avio_alloc_context(iobuf, iobuf_size, AVIO_FLAG_WRITE, NULL, NULL, io_write, NULL);
    if (!ctx->pb)
        exit(1);
    ctx->pb->write_data_type = io_write_data_type;
    ctx->flags |= AVFMT_FLAG_BITEXACT;

    st = avformat_new_stream(ctx, NULL);
    if (!st)
        exit(1);
    st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codecpar->codec_id = AV_CODEC_ID_H264;
    st->codecpar->width = 640;
    st->codecpar->height = 480;
    st->time_base.num = 1;
    st->time_base.den = 30;
    st->codecpar->extradata_size = sizeof(h264_extradata);
    st->codecpar->extradata = av_mallocz(st->codecpar->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!st->codecpar->extradata)
        exit(1);
    memcpy(st->codecpar->extradata, h264_extradata, sizeof(h264_extradata));
    video_st = st;

    st = avformat_new_stream(ctx, NULL);
    if (!st)
        exit(1);
    st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codecpar->codec_id = AV_CODEC_ID_AAC;
    st->codecpar->sample_rate = 44100;
    st->codecpar->channels = 2;
    st->time_base.num = 1;
    st->time_base.den = 44100;
    st->codecpar->extradata_size = sizeof(aac_extradata);
    st->codecpar->extradata = av_mallocz(st->codecpar->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!st->codecpar->extradata)
        exit(1);
    memcpy(st->codecpar->extradata, aac_extradata, sizeof(aac_extradata));
    audio_st = st;

    if (avformat_write_header(ctx, &opts) < 0)
        exit(1);
    av_dict_free(&opts);

    frames = 0;
    gop_size = 30;
    duration = video_st->time_base.den / fps;
    audio_duration = 1024LL * audio_st->time_base.den / audio_st->codecpar->sample_rate;
    if (audio_preroll)
        audio_preroll = 2048LL * audio_st->time_base.den / audio_st->codecpar->sample_rate;

    bframes = bf;
    video_dts = bframes ? -duration : 0;
    audio_dts = -audio_preroll;
}
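The init_fps() harness above only sets up the muxer; such a test typically follows up by pushing fabricated packets through it. A simplified sketch under that assumption (the payload, the helper name, and the pts handling are invented):

/* Hedged sketch: feed one fabricated video packet to the muxer set up above. */
static void mux_one_video_frame(int keyframe)
{
    AVPacket pkt;
    uint8_t payload[8] = { 0 };        /* dummy bitstream data */

    av_init_packet(&pkt);
    pkt.data = payload;
    pkt.size = sizeof(payload);
    pkt.stream_index = video_st->index;
    pkt.dts = video_dts;
    pkt.pts = video_dts + (bframes ? duration : 0); /* simplified reordering */
    pkt.duration = duration;
    pkt.flags = keyframe ? AV_PKT_FLAG_KEY : 0;

    if (av_write_frame(ctx, &pkt) < 0)
        exit(1);

    video_dts += duration;
    frames++;
}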
Example #4
0
bool FormatContext::open(const QString &_url, const QString &param)
{
    static const QStringList disabledDemuxers {
        "ass",
        "tty", //txt files
        "srt",
    };

    const QByteArray scheme = Functions::getUrlScheme(_url).toUtf8();
    if (scheme.isEmpty() || scheme == "sftp")
        return false;

    const Settings &settings = QMPlay2Core.getSettings();

    artistWithTitle = !settings.getBool("HideArtistMetadata");

    bool limitedLength = false;
    qint64 oggOffset = -1, oggSize = -1;
    int oggTrack = -1;
    QString url;

    if (param.startsWith("CUE:")) //For CUE files
    {
        const QStringList splitted = param.split(':');
        if (splitted.count() != 3)
            return false;
        bool ok1 = false, ok2 = false;
        startTime = splitted[1].toDouble(&ok1);
        lengthToPlay = splitted[2].toDouble(&ok2);
        if (!ok1 || !ok2 || startTime < 0.0 || (!qFuzzyCompare(lengthToPlay, -1.0) && lengthToPlay <= 0.0))
            return false;
        if (lengthToPlay > 0.0)
            lengthToPlay -= startTime;
        limitedLength = true;
    }
    else if (param.startsWith("OGG:")) //For chained OGG files
    {
        const QStringList splitted = param.split(':');
        if (splitted.count() != 4)
            return false;
        oggTrack = splitted[1].toInt();
        oggOffset = splitted[2].toLongLong();
        oggSize = splitted[3].toLongLong();
        if (oggTrack <= 0 || oggOffset < 0 || (oggSize != -1 && oggSize <= 0))
            return false;
    }

    AVInputFormat *inputFmt = nullptr;
    if (scheme == "file")
        isLocal = true;
    else
    {
        inputFmt = av_find_input_format(scheme);
        if (inputFmt)
            url = _url.right(_url.length() - scheme.length() - 3);
        isLocal = false;
    }

    AVDictionary *options = nullptr;
    if (!inputFmt)
    {
        url = Functions::prepareFFmpegUrl(_url, options);
        if (!isLocal && reconnectStreamed)
            av_dict_set(&options, "reconnect_streamed", "1", 0);
    }

    formatCtx = avformat_alloc_context();
    formatCtx->interrupt_callback.callback = (int(*)(void *))interruptCB;
    formatCtx->interrupt_callback.opaque = &abortCtx->isAborted;

    if (oggOffset >= 0)
    {
        oggHelper = new OggHelper(url, oggTrack, oggSize, formatCtx->interrupt_callback);
        if (!oggHelper->pb)
            return false;
        formatCtx->pb = oggHelper->pb;
        av_dict_set(&options, "skip_initial_bytes", QString::number(oggOffset).toLatin1(), 0);
    }

    // Useful, e.g. CUVID decoder needs valid PTS
    formatCtx->flags |= AVFMT_FLAG_GENPTS;

    OpenFmtCtxThr *openThr = new OpenFmtCtxThr(formatCtx, url.toUtf8(), inputFmt, options, abortCtx);
    formatCtx = openThr->getFormatCtx();
    openThr->drop();
    if (!formatCtx || disabledDemuxers.contains(name()))
        return false;

    if (name().startsWith("image2") || name().endsWith("_pipe"))
    {
        if (!settings.getBool("StillImages"))
            return false;
        stillImage = true;
    }

    if (name() == "mp3")
        formatCtx->flags |= AVFMT_FLAG_FAST_SEEK; //This should be set before "avformat_open_input", but it seems to work for MP3...

    if (avformat_find_stream_info(formatCtx, nullptr) < 0)
        return false;

    isStreamed = !isLocal && formatCtx->duration <= 0; //QMPLAY2_NOPTS_VALUE is negative

#ifdef QMPlay2_libavdevice
    forceCopy = name().contains("v4l2"); //Workaround for v4l2 - if many buffers are referenced demuxer doesn't produce proper timestamps (FFmpeg BUG?).
#else
    forceCopy = false;
#endif

    if (!limitedLength && (startTime = formatCtx->start_time / (double)AV_TIME_BASE) < 0.0)
        startTime = 0.0;

    if (limitedLength && lengthToPlay < 0.0)
    {
        lengthToPlay = length() - startTime;
        if (lengthToPlay <= 0.0)
            return false;
    }

    index_map.resize(formatCtx->nb_streams);
    streamsTS.resize(formatCtx->nb_streams);
    streamsOffset.resize(formatCtx->nb_streams);
    nextDts.resize(formatCtx->nb_streams);
    for (unsigned i = 0; i < formatCtx->nb_streams; ++i)
    {
        fixFontsAttachment(formatCtx->streams[i]);
        StreamInfo *streamInfo = getStreamInfo(formatCtx->streams[i]);
        if (!streamInfo)
            index_map[i] = -1;
        else
        {
            index_map[i] = streamsInfo.count();
            streamsInfo += streamInfo;
        }
        if (!fixMkvAss && formatCtx->streams[i]->codecpar->codec_id == AV_CODEC_ID_ASS && !strncasecmp(formatCtx->iformat->name, "matroska", 8))
            fixMkvAss = true;
        formatCtx->streams[i]->event_flags = 0;
        streams += formatCtx->streams[i];

        streamsTS[i] = 0.0;
    }
    if (streamsInfo.isEmpty())
        return false;

    isOneStreamOgg = (name() == "ogg" && streamsInfo.count() == 1); //Workaround for OGG network streams

    if (isStreamed && streamsInfo.count() == 1 && streamsInfo.at(0)->type == QMPLAY2_TYPE_SUBTITLE && formatCtx->pb && avio_size(formatCtx->pb) > 0)
        isStreamed = false; //Allow subtitles streams to be non-streamed if size is known

    formatCtx->event_flags = 0;

    packet = av_packet_alloc();

    if (lengthToPlay > 0.0)
        return seek(0.0, false);
    return true;
}
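FormatContext::open() above installs interruptCB through a function-pointer cast, with &abortCtx->isAborted as the opaque argument. A minimal sketch of a compatible callback, assuming the flag behaves like a bool (not the project's actual implementation):

// Hedged sketch: interrupt callback compatible with the cast above.
static int interruptCB(bool* isAborted)
{
    // A non-zero return makes blocking libavformat calls abort early.
    return (isAborted && *isAborted) ? 1 : 0;
}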
Example #5
0
static int applehttp_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    AppleHTTPContext *c = s->priv_data;
    int ret = 0, i, j, stream_offset = 0;

    if ((ret = parse_playlist(c, s->filename, NULL, s->pb)) < 0)
        goto fail;

    if (c->n_variants == 0) {
        av_log(NULL, AV_LOG_WARNING, "Empty playlist\n");
        ret = AVERROR_EOF;
        goto fail;
    }
    /* If the playlist only contained variants, parse each individual
     * variant playlist. */
    if (c->n_variants > 1 || c->variants[0]->n_segments == 0) {
        for (i = 0; i < c->n_variants; i++) {
            struct variant *v = c->variants[i];
            if ((ret = parse_playlist(c, v->url, v, NULL)) < 0)
                goto fail;
        }
    }

    if (c->variants[0]->n_segments == 0) {
        av_log(NULL, AV_LOG_WARNING, "Empty playlist\n");
        ret = AVERROR_EOF;
        goto fail;
    }

    /* If this isn't a live stream, calculate the total duration of the
     * stream. */
    if (c->variants[0]->finished) {
        int64_t duration = 0;
        for (i = 0; i < c->variants[0]->n_segments; i++)
            duration += c->variants[0]->segments[i]->duration;
        s->duration = duration * AV_TIME_BASE;
    }

    /* Open the demuxer for each variant */
    for (i = 0; i < c->n_variants; i++) {
        struct variant *v = c->variants[i];
        AVInputFormat *in_fmt = NULL;
        char bitrate_str[20];
        if (v->n_segments == 0)
            continue;

        if (!(v->ctx = avformat_alloc_context())) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        v->index  = i;
        v->needed = 1;
        v->parent = s;

        /* If this is a live stream with more than 3 segments, start at the
         * third last segment. */
        v->cur_seq_no = v->start_seq_no;
        if (!v->finished && v->n_segments > 3)
            v->cur_seq_no = v->start_seq_no + v->n_segments - 3;

        v->read_buffer = av_malloc(INITIAL_BUFFER_SIZE);
        ffio_init_context(&v->pb, v->read_buffer, INITIAL_BUFFER_SIZE, 0, v,
                          read_data, NULL, NULL);
        v->pb.seekable = 0;
        ret = av_probe_input_buffer(&v->pb, &in_fmt, v->segments[0]->url,
                                    NULL, 0, 0);
        if (ret < 0)
            goto fail;
        v->ctx->pb       = &v->pb;
        ret = avformat_open_input(&v->ctx, v->segments[0]->url, in_fmt, NULL);
        if (ret < 0)
            goto fail;
        v->stream_offset = stream_offset;
        snprintf(bitrate_str, sizeof(bitrate_str), "%d", v->bandwidth);
        /* Create new AVStreams for each stream in this variant */
        for (j = 0; j < v->ctx->nb_streams; j++) {
            AVStream *st = av_new_stream(s, i);
            if (!st) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            avcodec_copy_context(st->codec, v->ctx->streams[j]->codec);
            if (v->bandwidth)
                av_dict_set(&st->metadata, "variant_bitrate", bitrate_str,
                                 0);
        }
        stream_offset += v->ctx->nb_streams;
    }

    c->first_packet = 1;

    return 0;
fail:
    free_variant_list(c);
    return ret;
}
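applehttp_read_header() bails out through free_variant_list(), which is not shown. A rough sketch of what it has to clean up, inferred only from the fields the function visibly touches; the real demuxer frees more per-variant state (pending packets, open connections, ...).

/* Hedged sketch of free_variant_list(), inferred from the fields used above. */
static void free_variant_list(AppleHTTPContext *c)
{
    int i, j;
    for (i = 0; i < c->n_variants; i++) {
        struct variant *v = c->variants[i];
        for (j = 0; j < v->n_segments; j++)
            av_free(v->segments[j]);
        av_freep(&v->segments);
        /* Probing may have swapped the AVIOContext buffer, so free what it
         * currently owns rather than the original read_buffer pointer. */
        av_freep(&v->pb.buffer);
        if (v->ctx) {
            v->ctx->pb = NULL;           /* pb points at the embedded v->pb */
            av_close_input_file(v->ctx); /* old-API equivalent of avformat_close_input() */
        }
        av_free(v);
    }
    av_freep(&c->variants);
    c->n_variants = 0;
}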
Example #6
0
bool CFFmpegImage::Initialize(unsigned char* buffer, unsigned int bufSize)
{
  uint8_t* fbuffer = (uint8_t*)av_malloc(FFMPEG_FILE_BUFFER_SIZE);
  if (!fbuffer)
  {
    CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate FFMPEG_FILE_BUFFER_SIZE");
    return false;
  }
  m_buf.data = buffer;
  m_buf.size = bufSize;
  m_buf.pos = 0;

  m_ioctx = avio_alloc_context(fbuffer, FFMPEG_FILE_BUFFER_SIZE, 0, &m_buf,
    mem_file_read, NULL, mem_file_seek);

  if (!m_ioctx)
  {
    av_free(fbuffer);
    CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate AVIOContext");
    return false;
  }

  m_fctx = avformat_alloc_context();
  if (!m_fctx)
  {
    FreeIOCtx(&m_ioctx);
    CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate AVFormatContext");
    return false;
  }

  m_fctx->pb = m_ioctx;
  m_ioctx->max_packet_size = FFMPEG_FILE_BUFFER_SIZE;

  // Some clients have PNGs saved as JPEG, or ask us for PNG but supply JPEG.
  // MythTV throws all mimetypes away and asks us with application/octet-stream,
  // so this is a poor man's fallback to at least identify PNG / JPEG / TIFF.
  bool is_jpeg = (bufSize > 2 && buffer[0] == 0xFF && buffer[1] == 0xD8 && buffer[2] == 0xFF);
  bool is_png = (bufSize > 3 && buffer[1] == 'P' && buffer[2] == 'N' && buffer[3] == 'G');
  bool is_tiff = (bufSize > 2 && buffer[0] == 'I' && buffer[1] == 'I' && buffer[2] == '*');

  AVInputFormat* inp = nullptr;
  if (is_jpeg)
    inp = av_find_input_format("jpeg_pipe");
  else if (m_strMimeType == "image/apng")
    inp = av_find_input_format("apng");
  else if (is_png)
    inp = av_find_input_format("png_pipe");
  else if (is_tiff)
    inp = av_find_input_format("tiff_pipe");
  else if (m_strMimeType == "image/jp2")
    inp = av_find_input_format("j2k_pipe");
  else if (m_strMimeType == "image/webp")
    inp = av_find_input_format("webp_pipe");
  // brute force parse if above check already failed
  else if (m_strMimeType == "image/jpeg" || m_strMimeType == "image/jpg")
    inp = av_find_input_format("jpeg_pipe");
  else if (m_strMimeType == "image/png")
    inp = av_find_input_format("png_pipe");
  else if (m_strMimeType == "image/tiff")
    inp = av_find_input_format("tiff_pipe");
  else if (m_strMimeType == "image/gif")
    inp = av_find_input_format("gif");

  if (avformat_open_input(&m_fctx, NULL, inp, NULL) < 0)
  {
    CLog::Log(LOGERROR, "Could not find suitable input format: %s", m_strMimeType.c_str());
    avformat_close_input(&m_fctx);
    FreeIOCtx(&m_ioctx);
    return false;
  }

  AVCodecContext* codec_ctx = m_fctx->streams[0]->codec;
  AVCodec* codec = avcodec_find_decoder(codec_ctx->codec_id);
  if (avcodec_open2(codec_ctx, codec, NULL) < 0)
  {
    avformat_close_input(&m_fctx);
    FreeIOCtx(&m_ioctx);
    return false;
  }

  return true;
}
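Example #6 (and the related examples above) calls a FreeIOCtx() helper that is not shown. A minimal sketch matching the FreeIOCtx(&m_ioctx) call: both the AVIO buffer and the context must be released, because avformat_close_input() does not own a custom AVIOContext.

// Hedged sketch of the FreeIOCtx() helper (signature assumed from the
// FreeIOCtx(&m_ioctx) call site above).
static void FreeIOCtx(AVIOContext** ioctx)
{
  av_freep(&(*ioctx)->buffer);  // the fbuffer handed to avio_alloc_context()
  av_freep(ioctx);              // the AVIOContext itself
}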
Example #7
0
int EncoderFfmpegCore::initEncoder(int bitrate, int samplerate) {

#ifndef avformat_alloc_output_context2
    qDebug() << "EncoderFfmpegCore::initEncoder: Old Style initialization";
    m_pEncodeFormatCtx = avformat_alloc_context();
#endif

    m_lBitrate = bitrate * 1000;
    m_lSampleRate = samplerate;

#if LIBAVCODEC_VERSION_INT > 3544932
    if (m_SCcodecId == AV_CODEC_ID_MP3) {
#else
    if (m_SCcodecId == CODEC_ID_MP3) {
#endif // LIBAVCODEC_VERSION_INT > 3544932
        qDebug() << "EncoderFfmpegCore::initEncoder: Codec MP3";
#ifdef avformat_alloc_output_context2
        avformat_alloc_output_context2(&m_pEncodeFormatCtx, NULL, NULL, "output.mp3");
#else
        m_pEncoderFormat = av_guess_format(NULL, "output.mp3", NULL);
#endif // avformat_alloc_output_context2

#if LIBAVCODEC_VERSION_INT > 3544932
    } else if (m_SCcodecId == AV_CODEC_ID_AAC) {
#else
    } else if (m_SCcodecId == CODEC_ID_AAC) {
#endif // LIBAVCODEC_VERSION_INT > 3544932
        qDebug() << "EncoderFfmpegCore::initEncoder: Codec M4A";
#ifdef avformat_alloc_output_context2
        avformat_alloc_output_context2(&m_pEncodeFormatCtx, NULL, NULL, "output.m4a");
#else
        m_pEncoderFormat = av_guess_format(NULL, "output.m4a", NULL);
#endif // avformat_alloc_output_context2

    } else {
        qDebug() << "EncoderFfmpegCore::initEncoder: Codec OGG/Vorbis";
#ifdef avformat_alloc_output_context2
        avformat_alloc_output_context2(&m_pEncodeFormatCtx, NULL, NULL, "output.ogg");
        m_pEncodeFormatCtx->oformat->audio_codec=AV_CODEC_ID_VORBIS;
#else
        m_pEncoderFormat = av_guess_format(NULL, "output.ogg", NULL);
#if LIBAVCODEC_VERSION_INT > 3544932
        m_pEncoderFormat->audio_codec=AV_CODEC_ID_VORBIS;
#else
        m_pEncoderFormat->audio_codec=CODEC_ID_VORBIS;
#endif // LIBAVCODEC_VERSION_INT > 3544932
#endif // avformat_alloc_output_context2
    }

#ifdef avformat_alloc_output_context2
    m_pEncoderFormat = m_pEncodeFormatCtx->oformat;
#else
    m_pEncodeFormatCtx->oformat = m_pEncoderFormat;
#endif // avformat_alloc_output_context2

    m_pEncoderAudioStream = addStream(m_pEncodeFormatCtx, &m_pEncoderAudioCodec,
                                      m_pEncoderFormat->audio_codec);

    openAudio(m_pEncoderAudioCodec, m_pEncoderAudioStream);

    // qDebug() << "jepusti";

    return 0;
}
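initEncoder() above delegates stream creation to addStream(), which is not shown here. The sketch below is a guess at what it does, modeled on FFmpeg's muxing example and the old stream->codec API used throughout this file; the field choices are assumptions, not Mixxx's actual code.

// Hedged sketch of addStream(); the real helper may configure more fields.
AVStream* EncoderFfmpegCore::addStream(AVFormatContext* formatctx,
                                       AVCodec** codec, AVCodecID codec_id) {
    *codec = avcodec_find_encoder(codec_id);
    if (!*codec) {
        qDebug() << "addStream: no encoder for" << avcodec_get_name(codec_id);
        return NULL;
    }
    AVStream* stream = avformat_new_stream(formatctx, *codec);
    if (!stream) {
        qDebug() << "addStream: could not allocate stream";
        return NULL;
    }
    stream->id = formatctx->nb_streams - 1;

    AVCodecContext* c = stream->codec;     // old API, consistent with this file
    c->sample_fmt = AV_SAMPLE_FMT_FLT;     // real code should pick a format the encoder supports
    c->bit_rate = m_lBitrate;
    c->sample_rate = m_lSampleRate;
    c->channels = 2;

    // Some formats want stream headers to be separate
    if (formatctx->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;
    return stream;
}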

// Private methods

int EncoderFfmpegCore::writeAudioFrame(AVFormatContext *formatctx,
                                       AVStream *stream) {
    AVCodecContext *l_SCodecCtx = NULL;
    AVPacket l_SPacket;
    AVFrame *l_SFrame = avcodec_alloc_frame();
    int l_iGotPacket;
    int l_iRet;
#ifdef av_make_error_string
    char l_strErrorBuff[256];
#endif // av_make_error_string

    av_init_packet(&l_SPacket);
    l_SPacket.size = 0;
    l_SPacket.data = NULL;

    // Calculate correct DTS for FFMPEG
    m_lDts = round(((double)m_lRecordedBytes / (double)44100 / (double)2. *
                    (double)m_pEncoderAudioStream->time_base.den));
    m_lPts = m_lDts;

    l_SCodecCtx = stream->codec;
#ifdef av_make_error_string
    memset(l_strErrorBuff, 0x00, 256);
#endif // av_make_error_string

    l_SFrame->nb_samples = m_iAudioInputFrameSize;
    // Mixxx uses float (32 bit) samples..
    l_SFrame->format = AV_SAMPLE_FMT_FLT;
#ifndef __FFMPEGOLDAPI__
    l_SFrame->channel_layout = l_SCodecCtx->channel_layout;
#endif // __FFMPEGOLDAPI__

    l_iRet = avcodec_fill_audio_frame(l_SFrame,
                                      l_SCodecCtx->channels,
                                      AV_SAMPLE_FMT_FLT,
                                      (const uint8_t *)m_pFltSamples,
                                      m_iFltAudioCpyLen,
                                      1);

    if (l_iRet != 0) {
#ifdef av_make_error_string
        qDebug() << "Can't fill FFMPEG frame: error " << l_iRet << "String '" <<
                 av_make_error_string(l_strErrorBuff, 256, l_iRet) << "'" <<
                 m_iFltAudioCpyLen;
#endif // av_make_error_string
        qDebug() << "Can't refill 1st FFMPEG frame!";
        return -1;
    }

    // If we have something other than AV_SAMPLE_FMT_FLT we have to convert it
    // to something the codec accepts.
    if (l_SCodecCtx->sample_fmt != AV_SAMPLE_FMT_FLT) {

        reSample(l_SFrame);
        // After we have converted our samples to the destination format we
        // must re-allocate l_SFrame; it's easier this way.
#if LIBAVCODEC_VERSION_INT > 3544932
        avcodec_free_frame(&l_SFrame);
#else
        av_free(l_SFrame);
#endif // LIBAVCODEC_VERSION_INT > 3544932
        l_SFrame = NULL;
        l_SFrame = avcodec_alloc_frame();
        l_SFrame->nb_samples = m_iAudioInputFrameSize;
        l_SFrame->format = l_SCodecCtx->sample_fmt;
#ifndef __FFMPEGOLDAPI__
        l_SFrame->channel_layout = m_pEncoderAudioStream->codec->channel_layout;
#endif // __FFMPEGOLDAPI__

        l_iRet = avcodec_fill_audio_frame(l_SFrame, l_SCodecCtx->channels,
                                          l_SCodecCtx->sample_fmt,
                                          (const uint8_t *)m_pResample->getBuffer(),
                                          m_iAudioCpyLen,
                                          1);

        if (l_iRet != 0) {
#ifdef av_make_error_string
            qDebug() << "Can't refill FFMPEG frame: error " << l_iRet << "String '" <<
                     av_make_error_string(l_strErrorBuff, 256,
                                          l_iRet) << "'" <<  m_iAudioCpyLen <<
                     " " <<  av_samples_get_buffer_size(
                         NULL, 2,
                         m_iAudioInputFrameSize,
                         m_pEncoderAudioStream->codec->sample_fmt,
                         1) << " " << m_pOutSize;
#endif // av_make_error_string
            qDebug() << "Can't refill 2nd FFMPEG frame!";
            return -1;
        }
    }

    //qDebug() << "!!" << l_iRet;
    l_iRet = avcodec_encode_audio2(l_SCodecCtx, &l_SPacket, l_SFrame,
                                   &l_iGotPacket);

    if (l_iRet < 0) {
        qDebug() << "Error encoding audio frame";
        return -1;
    }

    if (!l_iGotPacket) {
        // qDebug() << "No packet! Can't encode audio!!";
        return -1;
    }

    l_SPacket.stream_index = stream->index;

    // Calculate DTS/PTS and give it to FFmpeg;
    // then codecs like Ogg/Vorbis work correctly.
    l_SPacket.dts = m_lDts;
    l_SPacket.pts = m_lDts;

    // Sometimes den is zero, and dividing by zero is
    // undefined, so guard against it.
    if (m_pEncoderAudioStream->pts.den == 0) {
        qDebug() << "Time hack!";
        m_pEncoderAudioStream->pts.den = 1;
    }

    // Write the compressed frame to the media file.
    l_iRet = av_interleaved_write_frame(formatctx, &l_SPacket);

    if (l_iRet != 0) {
        qDebug() << "Error while writing audio frame";
        return -1;
    }

    av_free_packet(&l_SPacket);
    av_destruct_packet(&l_SPacket);
    av_free(l_SFrame);

    return 0;
}
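writeAudioFrame() above uses avcodec_encode_audio2(), which newer FFmpeg releases deprecate. For reference, a sketch of the equivalent send/receive pattern available since FFmpeg 3.1 (the helper name is invented, not part of the original encoder):

// Hedged sketch: send/receive replacement for avcodec_encode_audio2().
static int encodeFrame(AVCodecContext* ctx, AVFrame* frame, AVPacket* pkt,
                       int* gotPacket) {
    *gotPacket = 0;
    int ret = avcodec_send_frame(ctx, frame);   // frame == NULL flushes the encoder
    if (ret < 0)
        return ret;
    ret = avcodec_receive_packet(ctx, pkt);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        return 0;                               // no packet available yet
    if (ret < 0)
        return ret;
    *gotPacket = 1;
    return 0;
}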
Example #8
0
int
scan_metadata_ffmpeg(char *file, struct media_file_info *mfi)
{
  AVFormatContext *ctx;
  AVDictionary *options;
  const struct metadata_map *extra_md_map;
  struct http_icy_metadata *icy_metadata;
#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
  enum AVCodecID codec_id;
  enum AVCodecID video_codec_id;
  enum AVCodecID audio_codec_id;
#else
  enum CodecID codec_id;
  enum CodecID video_codec_id;
  enum CodecID audio_codec_id;
#endif
  AVStream *video_stream;
  AVStream *audio_stream;
  char *path;
  int mdcount;
  int i;
  int ret;

  ctx = NULL;
  options = NULL;
  path = strdup(file);

#if LIBAVFORMAT_VERSION_MAJOR >= 54 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR >= 3)
# ifndef HAVE_FFMPEG
  // Without this, libav is slow to probe some internet streams
  if (mfi->data_kind == DATA_KIND_HTTP)
    {
      ctx = avformat_alloc_context();
      ctx->probesize = 64000;
    }
# endif

  if (mfi->data_kind == DATA_KIND_HTTP)
    {
      free(path);
      ret = http_stream_setup(&path, file);
      if (ret < 0)
	return -1;

      av_dict_set(&options, "icy", "1", 0);
      mfi->artwork = ARTWORK_HTTP;
    }

  ret = avformat_open_input(&ctx, path, NULL, &options);

  if (options)
    av_dict_free(&options);
#else
  ret = av_open_input_file(&ctx, path, NULL, 0, NULL);
#endif
  if (ret != 0)
    {
      DPRINTF(E_WARN, L_SCAN, "Cannot open media file '%s': %s\n", path, err2str(ret));

      free(path);
      return -1;
    }

  free(path);

#if LIBAVFORMAT_VERSION_MAJOR >= 54 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR >= 3)
  ret = avformat_find_stream_info(ctx, NULL);
#else
  ret = av_find_stream_info(ctx);
#endif
  if (ret < 0)
    {
      DPRINTF(E_WARN, L_SCAN, "Cannot get stream info of '%s': %s\n", path, err2str(ret));

#if LIBAVFORMAT_VERSION_MAJOR >= 54 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR >= 21)
      avformat_close_input(&ctx);
#else
      av_close_input_file(ctx);
#endif
      return -1;
    }

#if 0
  /* Dump input format as determined by ffmpeg */
  av_dump_format(ctx, 0, file, 0);
#endif

  DPRINTF(E_DBG, L_SCAN, "File has %d streams\n", ctx->nb_streams);

  /* Extract codec IDs, check for video */
#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
  video_codec_id = AV_CODEC_ID_NONE;
  video_stream = NULL;

  audio_codec_id = AV_CODEC_ID_NONE;
  audio_stream = NULL;
#else
  video_codec_id = CODEC_ID_NONE;
  video_stream = NULL;

  audio_codec_id = CODEC_ID_NONE;
  audio_stream = NULL;
#endif

  for (i = 0; i < ctx->nb_streams; i++)
    {
      switch (ctx->streams[i]->codec->codec_type)
	{
#if LIBAVCODEC_VERSION_MAJOR >= 53 || (LIBAVCODEC_VERSION_MAJOR == 52 && LIBAVCODEC_VERSION_MINOR >= 64)
	  case AVMEDIA_TYPE_VIDEO:
#else
	  case CODEC_TYPE_VIDEO:
#endif
#if LIBAVFORMAT_VERSION_MAJOR >= 55 || (LIBAVFORMAT_VERSION_MAJOR == 54 && LIBAVFORMAT_VERSION_MINOR >= 6)
	    if (ctx->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC)
	      {
		DPRINTF(E_DBG, L_SCAN, "Found embedded artwork (stream %d)\n", i);
		mfi->artwork = ARTWORK_EMBEDDED;

		break;
	      }
#endif
	    // We treat these as audio no matter what
	    if (mfi->compilation || (mfi->media_kind & (MEDIA_KIND_PODCAST | MEDIA_KIND_AUDIOBOOK)))
	      break;

	    if (!video_stream)
	      {
		DPRINTF(E_DBG, L_SCAN, "File has video (stream %d)\n", i);

		mfi->has_video = 1;
		video_stream = ctx->streams[i];
		video_codec_id = video_stream->codec->codec_id;
	      }
	    break;

#if LIBAVCODEC_VERSION_MAJOR >= 53 || (LIBAVCODEC_VERSION_MAJOR == 52 && LIBAVCODEC_VERSION_MINOR >= 64)
	  case AVMEDIA_TYPE_AUDIO:
#else
	  case CODEC_TYPE_AUDIO:
#endif
	    if (!audio_stream)
	      {
		audio_stream = ctx->streams[i];
		audio_codec_id = audio_stream->codec->codec_id;
	      } 
	    break;

	  default:
	    break;
	}
    }

#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
  if (audio_codec_id == AV_CODEC_ID_NONE)
#else
  if (audio_codec_id == CODEC_ID_NONE)
#endif
    {
      DPRINTF(E_DBG, L_SCAN, "File has no audio streams, discarding\n");

#if LIBAVFORMAT_VERSION_MAJOR >= 54 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR >= 21)
      avformat_close_input(&ctx);
#else
      av_close_input_file(ctx);
#endif
      return -1;
    }

  /* Common media information */
  if (ctx->duration > 0)
    mfi->song_length = ctx->duration / (AV_TIME_BASE / 1000); /* ms */

  if (ctx->bit_rate > 0)
    mfi->bitrate = ctx->bit_rate / 1000;
  else if (ctx->duration > AV_TIME_BASE) /* guesstimate */
    mfi->bitrate = ((mfi->file_size * 8) / (ctx->duration / AV_TIME_BASE)) / 1000;

  DPRINTF(E_DBG, L_SCAN, "Duration %d ms, bitrate %d kbps\n", mfi->song_length, mfi->bitrate);

  /* Try to extract ICY metadata if http stream */
  if (mfi->data_kind == DATA_KIND_HTTP)
    {
      icy_metadata = http_icy_metadata_get(ctx, 0);
      if (icy_metadata && icy_metadata->name)
	{
	  DPRINTF(E_DBG, L_SCAN, "Found ICY metadata, name is '%s'\n", icy_metadata->name);

	  if (mfi->title)
	    free(mfi->title);
	  if (mfi->artist)
	    free(mfi->artist);
	  if (mfi->album_artist)
	    free(mfi->album_artist);

	  mfi->title = strdup(icy_metadata->name);
	  mfi->artist = strdup(icy_metadata->name);
	  mfi->album_artist = strdup(icy_metadata->name);
	}
      if (icy_metadata && icy_metadata->description)
	{
	  DPRINTF(E_DBG, L_SCAN, "Found ICY metadata, description is '%s'\n", icy_metadata->description);

	  if (mfi->album)
	    free(mfi->album);

	  mfi->album = strdup(icy_metadata->description);
	}
      if (icy_metadata && icy_metadata->genre)
	{
	  DPRINTF(E_DBG, L_SCAN, "Found ICY metadata, genre is '%s'\n", icy_metadata->genre);

	  if (mfi->genre)
	    free(mfi->genre);

	  mfi->genre = strdup(icy_metadata->genre);
	}
      if (icy_metadata)
	http_icy_metadata_free(icy_metadata, 0);
    }

  /* Get some more information on the audio stream */
  if (audio_stream)
    {
      if (audio_stream->codec->sample_rate != 0)
	mfi->samplerate = audio_stream->codec->sample_rate;

      /* Try sample format first */
#if LIBAVUTIL_VERSION_MAJOR >= 52 || (LIBAVUTIL_VERSION_MAJOR == 51 && LIBAVUTIL_VERSION_MINOR >= 4)
      mfi->bits_per_sample = 8 * av_get_bytes_per_sample(audio_stream->codec->sample_fmt);
#elif LIBAVCODEC_VERSION_MAJOR >= 53
      mfi->bits_per_sample = av_get_bits_per_sample_fmt(audio_stream->codec->sample_fmt);
#else
      mfi->bits_per_sample = av_get_bits_per_sample_format(audio_stream->codec->sample_fmt);
#endif
      if (mfi->bits_per_sample == 0)
	{
	  /* Try codec */
	  mfi->bits_per_sample = av_get_bits_per_sample(audio_codec_id);
	}

      DPRINTF(E_DBG, L_SCAN, "samplerate %d, bps %d\n", mfi->samplerate, mfi->bits_per_sample);
    }

  /* Check codec */
  extra_md_map = NULL;
  codec_id = (mfi->has_video) ? video_codec_id : audio_codec_id;
  switch (codec_id)
    {
#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
      case AV_CODEC_ID_AAC:
#else
      case CODEC_ID_AAC:
#endif
	DPRINTF(E_DBG, L_SCAN, "AAC\n");
	mfi->type = strdup("m4a");
	mfi->codectype = strdup("mp4a");
	mfi->description = strdup("AAC audio file");
	break;

#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
      case AV_CODEC_ID_ALAC:
#else
      case CODEC_ID_ALAC:
#endif
	DPRINTF(E_DBG, L_SCAN, "ALAC\n");
	mfi->type = strdup("m4a");
	mfi->codectype = strdup("alac");
	mfi->description = strdup("Apple Lossless audio file");
	break;

#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
      case AV_CODEC_ID_FLAC:
#else
      case CODEC_ID_FLAC:
#endif
	DPRINTF(E_DBG, L_SCAN, "FLAC\n");
	mfi->type = strdup("flac");
	mfi->codectype = strdup("flac");
	mfi->description = strdup("FLAC audio file");

	extra_md_map = md_map_vorbis;
	break;

#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
      case AV_CODEC_ID_APE:
#else
      case CODEC_ID_APE:
#endif
	DPRINTF(E_DBG, L_SCAN, "APE\n");
	mfi->type = strdup("ape");
	mfi->codectype = strdup("ape");
	mfi->description = strdup("Monkey's audio");
	break;

#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
      case AV_CODEC_ID_MUSEPACK7:
      case AV_CODEC_ID_MUSEPACK8:
#else
      case CODEC_ID_MUSEPACK7:
      case CODEC_ID_MUSEPACK8:
#endif
	DPRINTF(E_DBG, L_SCAN, "Musepack\n");
	mfi->type = strdup("mpc");
	mfi->codectype = strdup("mpc");
	mfi->description = strdup("Musepack audio file");
	break;

#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
      case AV_CODEC_ID_MPEG4: /* Video */
      case AV_CODEC_ID_H264:
#else
      case CODEC_ID_MPEG4: /* Video */
      case CODEC_ID_H264:
#endif
	DPRINTF(E_DBG, L_SCAN, "MPEG4 video\n");
	mfi->type = strdup("m4v");
	mfi->codectype = strdup("mp4v");
	mfi->description = strdup("MPEG-4 video file");

	extra_md_map = md_map_tv;
	break;

#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
      case AV_CODEC_ID_MP3:
#else
      case CODEC_ID_MP3:
#endif
	DPRINTF(E_DBG, L_SCAN, "MP3\n");
	mfi->type = strdup("mp3");
	mfi->codectype = strdup("mpeg");
	mfi->description = strdup("MPEG audio file");

	extra_md_map = md_map_id3;
	break;

#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
      case AV_CODEC_ID_VORBIS:
#else
      case CODEC_ID_VORBIS:
#endif
	DPRINTF(E_DBG, L_SCAN, "VORBIS\n");
	mfi->type = strdup("ogg");
	mfi->codectype = strdup("ogg");
	mfi->description = strdup("Ogg Vorbis audio file");

	extra_md_map = md_map_vorbis;
	break;

#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
      case AV_CODEC_ID_WMAV1:
      case AV_CODEC_ID_WMAV2:
      case AV_CODEC_ID_WMAVOICE:
#else
      case CODEC_ID_WMAV1:
      case CODEC_ID_WMAV2:
      case CODEC_ID_WMAVOICE:
#endif
	DPRINTF(E_DBG, L_SCAN, "WMA Voice\n");
	mfi->type = strdup("wma");
	mfi->codectype = strdup("wmav");
	mfi->description = strdup("WMA audio file");
	break;

#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
      case AV_CODEC_ID_WMAPRO:
#else
      case CODEC_ID_WMAPRO:
#endif
	DPRINTF(E_DBG, L_SCAN, "WMA Pro\n");
	mfi->type = strdup("wmap");
	mfi->codectype = strdup("wma");
	mfi->description = strdup("WMA audio file");
	break;

#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
      case AV_CODEC_ID_WMALOSSLESS:
#else
      case CODEC_ID_WMALOSSLESS:
#endif
	DPRINTF(E_DBG, L_SCAN, "WMA Lossless\n");
	mfi->type = strdup("wma");
	mfi->codectype = strdup("wmal");
	mfi->description = strdup("WMA audio file");
	break;

#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
      case AV_CODEC_ID_PCM_S16LE ... AV_CODEC_ID_PCM_F64LE:
#else
      case CODEC_ID_PCM_S16LE ... CODEC_ID_PCM_F64LE:
#endif
	if (strcmp(ctx->iformat->name, "aiff") == 0)
	  {
	    DPRINTF(E_DBG, L_SCAN, "AIFF\n");
	    mfi->type = strdup("aif");
	    mfi->codectype = strdup("aif");
	    mfi->description = strdup("AIFF audio file");
	    break;
	  }
	else if (strcmp(ctx->iformat->name, "wav") == 0)
	  {
	    DPRINTF(E_DBG, L_SCAN, "WAV\n");
	    mfi->type = strdup("wav");
	    mfi->codectype = strdup("wav");
	    mfi->description = strdup("WAV audio file");
	    break;
	  }
	/* WARNING: will fallthrough to default case, don't move */
	/* FALLTHROUGH */

      default:
	DPRINTF(E_DBG, L_SCAN, "Unknown codec 0x%x (video: %s), format %s (%s)\n",
		codec_id, (mfi->has_video) ? "yes" : "no", ctx->iformat->name, ctx->iformat->long_name);
	mfi->type = strdup("unkn");
	mfi->codectype = strdup("unkn");
	if (mfi->has_video)
	  {
	    mfi->description = strdup("Unknown video file format");
	    extra_md_map = md_map_tv;
	  }
	else
	  mfi->description = strdup("Unknown audio file format");
	break;
    }

  mdcount = 0;

  if ((!ctx->metadata) && (!audio_stream->metadata)
      && (!video_stream || !video_stream->metadata))
    {
      DPRINTF(E_WARN, L_SCAN, "ffmpeg reports no metadata\n");

      goto skip_extract;
    }

  if (extra_md_map)
    {
      ret = extract_metadata(mfi, ctx, audio_stream, video_stream, extra_md_map);
      mdcount += ret;

      DPRINTF(E_DBG, L_SCAN, "Picked up %d tags with extra md_map\n", ret);
    }

  ret = extract_metadata(mfi, ctx, audio_stream, video_stream, md_map_generic);
  mdcount += ret;

  DPRINTF(E_DBG, L_SCAN, "Picked up %d tags with generic md_map, %d tags total\n", ret, mdcount);

  /* fix up TV metadata */
  if (mfi->media_kind == 10)
    {
      /* I have no idea why this is, but iTunes reports a media kind of 64 for stik==10 (?!) */
      mfi->media_kind = MEDIA_KIND_TVSHOW;
    }
  /* Unspecified video files are "Movies", media_kind 2 */
  else if (mfi->has_video == 1)
    {
      mfi->media_kind = MEDIA_KIND_MOVIE;
    }

 skip_extract:
#if LIBAVFORMAT_VERSION_MAJOR >= 54 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR >= 21)
  avformat_close_input(&ctx);
#else
  av_close_input_file(ctx);
#endif

  if (mdcount == 0)
    DPRINTF(E_WARN, L_SCAN, "ffmpeg/libav could not extract any metadata\n");

  /* Just in case there's no title set ... */
  if (mfi->title == NULL)
    mfi->title = strdup(mfi->fname);

  /* All done */

  return 0;
}
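The err2str() used in the log calls above is not shown; it is presumably a thin wrapper around av_strerror(), roughly like the sketch below (note the static buffer is not thread-safe).

/* Hedged sketch of err2str(): av_strerror() with a fallback message. */
static const char *
err2str(int errnum)
{
  static char errbuf[64];

  if (av_strerror(errnum, errbuf, sizeof(errbuf)) < 0)
    snprintf(errbuf, sizeof(errbuf), "error %d", errnum);

  return errbuf;
}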
Example #9
0
// Returns -1 if parsing fails, -2 if there is no blocking recommendation; any other value means a blocking recommendation is available
int content_analysis2(char* videofile, int mn)
{
	AVFormatContext *pFormatCtx=NULL;
	AVCodecContext *pCodecCtx=NULL;
	AVCodec *pCodec=NULL;
	AVFrame *pFrame=NULL;
	AVFrame *pFrameYUV=NULL;
	struct SwsContext *img_convert_ctx;
	AVPacket packet;
	uint8_t *buffer=NULL;
	uint8_t *base_buffer=NULL;
	int base_flag = 1;
	int frameFinished;
	int videoStream = 0;
	int matchFrameNum = 0;

	//
	//unsigned char* yuvdata = NULL;
	int numBytes;
	int total_frame = 0;
	int width,height,linesize;
	int video_id=0,hit_id=0;
	uint8_t* ptr = NULL;



	int keynum = 0;
	int skipframe = 0;
	int frameRate = 0;
	int framecount = 0;

	//av_log_set_flags(AV_LOG_SKIP_REPEATED);

	//av_log_set_level(AV_LOG_DEBUG);
	avcodec_register_all();
	av_register_all();
	avformat_network_init();

	pFormatCtx = avformat_alloc_context();
	//pFormatCtx->interrupt_callback.callback =  decode_interrupt_cb;
	if(avformat_open_input(&pFormatCtx,videofile,NULL,NULL) < 0){
		//if(av_open_input_file(&pFormatCtx, videofile, NULL, 0, NULL) < 0){
		fprintf(stderr, "Couldn't Open video file %s\n",videofile);
		return -1;
	}

	if(avformat_find_stream_info(pFormatCtx,NULL)<0){
	//	av_close_input_file(pFormatCtx);
		fprintf(stderr, "av_find_stream_info error\n");
		return -1; // Couldn't open file
	}

	//== find video stream
	int i;
	videoStream=-1;
	for(i=0;i<pFormatCtx->nb_streams;i++){
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
			videoStream=i;
			break;
		}
	}

	if(videoStream==-1){
	//	av_close_input_file(pFormatCtx);
		fprintf(stderr, "Didn't find a video stream and pFormatCtx->nb_streams is %d\n",pFormatCtx->nb_streams);
		return -1; // Didn't find a video stream
	}

	pCodecCtx=pFormatCtx->streams[videoStream]->codec;
	if(pCodecCtx==NULL){
	//	av_close_input_file(pFormatCtx);

		fprintf(stderr, "Codec not found\n");
		return -1; // Codec not found
	}
	printf("%d\n",pCodecCtx->codec_id);

	// 	if(pCodecCtx->codec_id == CODEC_ID_H263)
	// 	{
	// 		pCodecCtx->codec_id = CODEC_ID_H264;
	// 	}

	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL){
		if(pCodecCtx!=NULL)
			avcodec_close(pCodecCtx);
	//	if(pFormatCtx!=NULL)
	//		av_close_input_file(pFormatCtx);
		return -1;
	}



	//  	if(pCodecCtx->codec_id != CODEC_ID_MPEG4 && pCodecCtx->codec_id!=CODEC_ID_MJPEG)
	//  	{
	// 		if(pCodec->capabilities&CODEC_CAP_TRUNCATED){
	// 			pCodecCtx->flags|=CODEC_FLAG_TRUNCATED;
	// 		}
	// 	}

	if(pCodecCtx->pix_fmt == PIX_FMT_NONE)
	{
		//avcodec_close(pCodecCtx);
		if(pCodecCtx!=NULL)
			avcodec_close(pCodecCtx);
	//	if(pFormatCtx!=NULL)
	//		av_close_input_file(pFormatCtx);
		return -1; // Could not open codec
	}

	//== open decoder
	if(avcodec_open2(pCodecCtx,pCodec, NULL)<0){
		if(pCodecCtx!=NULL)
			avcodec_close(pCodecCtx);
	//	if(pFormatCtx!=NULL)
	//		av_close_input_file(pFormatCtx);
		return -1; // Could not open codec
	}

	// 	int frame_width = (pCodecCtx->width/4)*4;
	// 	int frame_height = (pCodecCtx->height/4)*4;
	int frame_width = pCodecCtx->width;
	int frame_height = pCodecCtx->height;
	int frame_pix_fmt = pCodecCtx->pix_fmt;

	pFrame = avcodec_alloc_frame();
	if(pFrame== NULL)
	{
		avcodec_close(pCodecCtx);
	//	av_close_input_file(pFormatCtx);
		fprintf(stderr, "pFrame==NULL\n");
		return -1;
	}

	pFrameYUV=avcodec_alloc_frame();
	if(pFrameYUV==NULL){
		av_free(pFrame);
		avcodec_close(pCodecCtx);
		fprintf(stderr, "pFrameYUV==NULL\n");
		return -1;
	}

	numBytes=avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width,pCodecCtx->height);

	buffer=(uint8_t*)calloc(numBytes,sizeof(uint8_t));
	base_buffer=(uint8_t*)calloc(numBytes,sizeof(uint8_t));
	if(buffer == NULL)
	{
		av_free(pFrame);
	//	av_close_input_file(pFormatCtx);
		return -1;
	}
	avpicture_fill((AVPicture *)pFrameYUV, buffer, PIX_FMT_YUV420P,pCodecCtx->width, pCodecCtx->height);
	img_convert_ctx = sws_getContext(pCodecCtx->width,pCodecCtx->height,pCodecCtx->pix_fmt,
		pCodecCtx->width,pCodecCtx->height,PIX_FMT_YUV420P,
		SWS_BICUBIC,NULL,NULL,NULL);


	if (mn==0) mn=0x7fffffff;
	int FlagIFrame = 0;
	while(av_read_frame(pFormatCtx,&packet)>=0)
	{
		//printf("I Frame = %d streamid = %d\n",packet.flags,packet.stream_index);
		if(packet.stream_index==videoStream/*&&packet.flags*/)
		{
			//avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,packet.data, packet.size);
			// 			if(packet.pos > 1600000)
			// 				printf("%d\n",packet.pos);
			// 			printf("%d\n",packet.pos);
			// 			if(packet.flags != 1)
			// 				continue;
			int nRet = avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,&packet);



			if(nRet < 0)
			{
				av_free_packet(&packet);
				continue;
			}
			// 			if(pFrame->pict_type != AV_PICTURE_TYPE_I)
			// 				continue;
			if(frameFinished)
			{

				if(frame_width   != pFrame->width  ||
					frame_height  != pFrame->height ||
					frame_pix_fmt != pFrame->format)
				{
					printf("error\n");
					av_free_packet(&packet);
					break;
				}


				int offset = 0;
				width = pCodecCtx->width;
				height = pCodecCtx->height;
				if(frame_height!=height||frame_width!=width)
				{
					av_free_packet(&packet);
					break;
				}
				sws_scale(img_convert_ctx,(const uint8_t* const*)pFrame->data,pFrame->linesize,0,pCodecCtx->height,pFrameYUV->data,pFrameYUV->linesize);
				// 				printf("%d*%d\n",frame_width,frame_height);
				// 				printf("%d*%d\n",pCodecCtx->width,pCodecCtx->height);
				int mi;
				for( mi=0;mi<3;mi++) {
					ptr = pFrameYUV->data[mi];
					linesize = pFrameYUV->linesize[mi];
					if (mi == 1) {
						width = (width+1)/2;
						height = (height+1)/2;
					}
					int j;
					for( j=0;j<height;j++) {
						memcpy(buffer+offset,ptr,width*sizeof(uint8_t));
						ptr += linesize;
						offset += width;
					}
				}

				if(base_flag == 1)
				{
					memcpy(base_buffer,buffer,numBytes);
					base_flag = 0;
					continue;
				}
				skipframe++;
				//roi_rect roiRect = trim_blacl_edge(buffer,pCodecCtx->width, pCodecCtx->height);
				int ret = calc_frame_diff(base_buffer,buffer,pCodecCtx->width, pCodecCtx->height);
				if(ret>10||skipframe>30)
				{
					base_flag = 1;
					total_frame++;
					printf("decode frame: %d %d\n",total_frame,ret);
					char outfilename[32];
					sprintf(outfilename,"bmp\\frame-%d-%d.bmp",total_frame,ret);

					calc_subtitle_diff(buffer,pCodecCtx->width, pCodecCtx->height);
					//SaveFrame(buffer, pCodecCtx->width, pCodecCtx->height, video_id, outfilename);
					saveYUV2BMP(buffer,pCodecCtx->width, pCodecCtx->height,outfilename);
					skipframe = 0;
				}

				//video_id = feature_match(buffer,pCodecCtx->width, pCodecCtx->height);
				//SaveFrame(buffer, pCodecCtx->width, pCodecCtx->height, video_id, videofile);
				if (video_id > 0) {
					printf("match frame: %d, %d, %d\n", hit_id, video_id, total_frame);
					//SaveFrame(buffer, pCodecCtx->width, pCodecCtx->height, video_id,videofile);
					//	hitID[i] = video_id;
					hit_id++;
				}

				//total_frame++;
				if (hit_id > mn) {
					av_free_packet(&packet);
					break;
				}


				if(total_frame>2000&&!FlagIFrame)
				{
					FlagIFrame = 1;
				}
			}
		}
		av_free_packet(&packet);

	}

	printf("decode frame:%d %d\n",hit_id,total_frame);



	if(pFrame!=NULL)
		av_free(pFrame);
	if(pFrameYUV!=NULL)
		av_free(pFrameYUV);
	if(buffer!=NULL)
		free(buffer);

	// 	if (pCodecCtx)
	// 		avcodec_close(pCodecCtx);
	// 	avformat_close_input(&fmt_ctx);
	//
	// 	if(pCodecCtx!=NULL)
	// 		avcodec_close(pCodecCtx);
	if(pFormatCtx!=NULL)
		avformat_close_input(&pFormatCtx);

	return hit_id;

}
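content_analysis2() above relies on calc_frame_diff() to decide when the picture has changed enough to take a new reference frame. One plausible metric is a mean absolute difference over the Y plane of the packed YUV420P buffers built in the loop; the real analysis code may use something else entirely.

#include <stdint.h>
#include <stdlib.h>

/* Hedged sketch of calc_frame_diff(): mean absolute luma difference (0..255). */
int calc_frame_diff(uint8_t *prev, uint8_t *cur, int width, int height)
{
	int64_t sum = 0;
	int i, npixels = width * height;   /* the Y plane comes first in the packed buffer */

	if (npixels <= 0)
		return 0;
	for (i = 0; i < npixels; i++)
		sum += abs((int)prev[i] - (int)cur[i]);

	return (int)(sum / npixels);
}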
Example #10
0
void VideoWriter::initialize(QString filename)
{

    filename = filename + QString(".mp4");
    qDebug() << "Initializing writing to " << filename;

    vFilename = &filename;
    if ((width == -1) || (height == -1)) {
        // Haven't received any frames yet, so we don't know what size they are...
        waitingToInitialize = true;
    }
    else {
        //qDebug() << "Thread for initialize: " << QThread::currentThreadId();
        // Write header here
        // FFMPEG initialization
        av_register_all(); // initialize libavcodec, and register all codecs and formats
        fmt = av_guess_format("mp4", NULL, NULL);
        if (!fmt) {
            qCritical() << "Error initializing fmt.";
        }
        fmt->video_codec = (AVCodecID) videoEncoder->currentData().value<unsigned int>();

        /* allocate the output media context */
        oc = avformat_alloc_context();
        if (!oc)
            qCritical() << "Error allocating output media context.";

        qDebug() << "Output context allocated";

        oc->oformat = fmt;
        qsnprintf(oc->filename, sizeof(oc->filename), "%s", vFilename->toLocal8Bit().data());

        /* add the video stream to the container and initialize the codecs */
        video_st = NULL;

        AVCodec *codec = avcodec_find_encoder(fmt->video_codec); // should add error checking
        video_st = avformat_new_stream(oc, codec); // should add error checking

        AVCodecContext *c = video_st->codec;
        c->codec_id = fmt->video_codec;
        c->codec_type = AVMEDIA_TYPE_VIDEO;

        /* put sample parameters */
        c->width = width;
        c->height = height; // resolution must be a multiple of two
        c->pix_fmt = PIX_FMT_YUV420P;
        c->time_base = timeBase; // frames per second

        // Stuff to try to force high quality encoding
        if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            c->qmin = 1; // low is better, so frame-by-frame force high quality
            c->qmax = 2; // set this to 1 for highest quality
            /* just for testing, we also get rid of B frames */
            //c->max_b_frames = 0;
        }
        else if (c->codec_id == AV_CODEC_ID_H264) {
            av_opt_set(c->priv_data, "preset", "veryfast",0); // ultrafast and superfast also work
            //c->qmax = 18;
            //c->qmin = 18;
        }
        // some formats want stream headers to be separate
        if (oc->oformat->flags & AVFMT_GLOBALHEADER)
            c->flags |= CODEC_FLAG_GLOBAL_HEADER;

        //av_dump_format(oc, 0, vFilename->toLocal8Bit().data(), 1);

        /* now that all the parameters are set, we can open the
           video codec and allocate the necessary encode buffers */
        /* open the codec */
        int err = avcodec_open2(c, codec, NULL); // should error check

        video_outbuf = NULL;
        if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
            /* allocate output buffer */
            video_outbuf_size = 1000000;
            video_outbuf = (uint8_t*)malloc(video_outbuf_size);
        }

        picture = avcodec_alloc_frame(); // should error check
        if (!picture) {
            qCritical() << "Error allocating picture frame.";
        }
        int size = avpicture_get_size(c->pix_fmt, width, height);
        picture_buf = (uint8_t*)av_malloc(size); // should error check
        if (picture_buf == NULL) {
            qCritical() << "Error allocating memory for picture buffer.";
        }
        avpicture_fill((AVPicture *)picture, picture_buf, c->pix_fmt, width, height);

        /* open the output file, if needed */
        if (avio_open(&oc->pb, vFilename->toLocal8Bit().data(), AVIO_FLAG_WRITE) < 0) {
            qDebug() << "Could not open " << vFilename;
        }

        /* write the stream header, if any */
        avformat_write_header(oc, NULL);

        // Open subtitles here

        // Set up frame conversion from RGB24 to YUV420P
        sws_ctx = sws_getContext(c->width, c->height, PIX_FMT_RGB24,
                                 c->width, c->height, c->pix_fmt,
                                 SWS_FAST_BILINEAR | SWS_CPU_CAPS_SSE2 | SWS_CPU_CAPS_MMX2,
                                 NULL, NULL, NULL);
        if (!sws_ctx) {
            qCritical() << "Error allocating SWS context.";
        }
        tmp_picture = avcodec_alloc_frame();
        if (!tmp_picture) {
            qCritical() << "Error allocating tmp_picture frame.";
        }

        waitingToInitialize = false;
        qDebug() << "Video Initialized emitted";

        // Should check here for errors above
    }


}
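initialize() above only sets up the writer. The sketch below shows how a frame could flow through the objects it creates (RGB24 in, YUV420P out, encode, write); member names oc, video_st, picture, tmp_picture and sws_ctx come from the code above, while writeFrame itself is an invented name, not the project's actual method.

// Hedged sketch, not the project's actual method.
void VideoWriter::writeFrame(uint8_t* rgbData, int64_t frameIndex)
{
    AVCodecContext *c = video_st->codec;

    // Wrap the incoming RGB24 buffer and convert it into 'picture' (YUV420P)
    avpicture_fill((AVPicture *)tmp_picture, rgbData, PIX_FMT_RGB24,
                   c->width, c->height);
    sws_scale(sws_ctx, tmp_picture->data, tmp_picture->linesize,
              0, c->height, picture->data, picture->linesize);
    picture->pts = frameIndex;

    // Encode and write (old pre-send/receive API, matching initialize())
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;
    int gotPacket = 0;
    if (avcodec_encode_video2(c, &pkt, picture, &gotPacket) < 0) {
        qCritical() << "Error encoding frame.";
        return;
    }
    if (gotPacket) {
        if (pkt.pts != AV_NOPTS_VALUE)
            pkt.pts = av_rescale_q(pkt.pts, c->time_base, video_st->time_base);
        if (pkt.dts != AV_NOPTS_VALUE)
            pkt.dts = av_rescale_q(pkt.dts, c->time_base, video_st->time_base);
        pkt.stream_index = video_st->index;
        av_interleaved_write_frame(oc, &pkt);
        av_free_packet(&pkt);
    }
}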
Example #11
0
bool CFFmpegImage::LoadImageFromMemory(unsigned char* buffer, unsigned int bufSize,
                                       unsigned int width, unsigned int height)
{

    uint8_t* fbuffer = (uint8_t*)av_malloc(FFMPEG_FILE_BUFFER_SIZE);
    if (!fbuffer)
    {
        CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate FFMPEG_FILE_BUFFER_SIZE");
        return false;
    }
    MemBuffer buf;
    buf.data = buffer;
    buf.size = bufSize;
    buf.pos = 0;

    AVIOContext* ioctx = avio_alloc_context(fbuffer, FFMPEG_FILE_BUFFER_SIZE, 0, &buf,
                                            mem_file_read, NULL, mem_file_seek);

    if (!ioctx)
    {
        av_free(fbuffer);
        CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate AVIOContext");
        return false;
    }

    AVFormatContext* fctx = avformat_alloc_context();
    if (!fctx)
    {
        av_free(ioctx->buffer);
        av_free(ioctx);
        CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate AVFormatContext");
        return false;
    }

    fctx->pb = ioctx;
    ioctx->max_packet_size = FFMPEG_FILE_BUFFER_SIZE;

    // Some clients have PNGs saved as JPEG, or ask us for PNG but supply JPEG.
    // MythTV throws all mimetypes away and asks us with application/octet-stream,
    // so this is a poor man's fallback to at least identify PNG / JPEG / TIFF.
    bool is_jpeg = (bufSize > 2 && buffer[0] == 0xFF && buffer[1] == 0xD8 && buffer[2] == 0xFF);
    bool is_png = (bufSize > 3 && buffer[1] == 'P' && buffer[2] == 'N' && buffer[3] == 'G');
    bool is_tiff = (bufSize > 2 && buffer[0] == 'I' && buffer[1] == 'I' && buffer[2] == '*');

    AVInputFormat* inp = nullptr;
    if (is_jpeg)
        inp = av_find_input_format("jpeg_pipe");
    else if (is_png)
        inp = av_find_input_format("png_pipe");
    else if (is_tiff)
        inp = av_find_input_format("tiff_pipe");
    else if (m_strMimeType == "image/jp2")
        inp = av_find_input_format("j2k_pipe");
    else if (m_strMimeType == "image/webp")
        inp = av_find_input_format("webp_pipe");
    // brute force parse if above check already failed
    else if (m_strMimeType == "image/jpeg" || m_strMimeType == "image/jpg")
        inp = av_find_input_format("jpeg_pipe");
    else if (m_strMimeType == "image/png")
        inp = av_find_input_format("png_pipe");
    else if (m_strMimeType == "image/tiff")
        inp = av_find_input_format("tiff_pipe");

    if (avformat_open_input(&fctx, "", inp, NULL) < 0)
    {
        CLog::Log(LOGERROR, "Could not find suitable input format: %s", m_strMimeType.c_str());
        avformat_close_input(&fctx);
        FreeIOCtx(ioctx);
        return false;
    }

    AVCodecContext* codec_ctx = fctx->streams[0]->codec;
    AVCodec* codec = avcodec_find_decoder(codec_ctx->codec_id);
    if (avcodec_open2(codec_ctx, codec, NULL) < 0)
    {
        avformat_close_input(&fctx);
        FreeIOCtx(ioctx);
        return false;
    }

    AVPacket pkt;
    AVFrame* frame = av_frame_alloc();
    av_read_frame(fctx, &pkt);
    int frame_decoded;
    int ret = avcodec_decode_video2(codec_ctx, frame, &frame_decoded, &pkt);
    if (ret < 0)
        CLog::Log(LOGDEBUG, "Error [%d] while decoding frame: %s\n", ret, strerror(AVERROR(ret)));

    if (frame_decoded != 0)
    {
        av_frame_free(&m_pFrame);
        m_pFrame = av_frame_clone(frame);

        if (m_pFrame)
        {
            m_height = m_pFrame->height;
            m_width = m_pFrame->width;
            m_originalWidth = m_width;
            m_originalHeight = m_height;

            const AVPixFmtDescriptor* pixDescriptor = av_pix_fmt_desc_get(static_cast<AVPixelFormat>(m_pFrame->format));
            if (pixDescriptor && ((pixDescriptor->flags & (AV_PIX_FMT_FLAG_ALPHA | AV_PIX_FMT_FLAG_PAL)) != 0))
                m_hasAlpha = true;
        }
        else
        {
            CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate a picture data buffer");
            frame_decoded = 0;
        }
    }
    else
        CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not decode a frame");

    av_frame_free(&frame);
    av_free_packet(&pkt);
    avcodec_close(codec_ctx);
    avformat_close_input(&fctx);
    FreeIOCtx(ioctx);

    return (frame_decoded != 0);
}
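Example #11 registers two custom AVIO callbacks, mem_file_read and mem_file_seek, that are not reproduced above. Below is a minimal sketch of what such callbacks typically look like, assuming the MemBuffer struct filled in by LoadImageFromMemory (data pointer, total size, current position); the actual Kodi implementation may differ in detail.

// Read at most buf_size bytes from the in-memory buffer into buf.
static int mem_file_read(void *opaque, uint8_t *buf, int buf_size)
{
    MemBuffer *mbuf = static_cast<MemBuffer*>(opaque);
    int64_t unread = mbuf->size - mbuf->pos;
    if (unread <= 0)
        return AVERROR_EOF;                 // nothing left to read
    if (buf_size > unread)
        buf_size = static_cast<int>(unread);
    memcpy(buf, mbuf->data + mbuf->pos, buf_size);
    mbuf->pos += buf_size;
    return buf_size;
}

// Reposition inside the in-memory buffer; AVSEEK_SIZE reports the total size.
static int64_t mem_file_seek(void *opaque, int64_t pos, int whence)
{
    MemBuffer *mbuf = static_cast<MemBuffer*>(opaque);
    if (whence == AVSEEK_SIZE)
        return mbuf->size;
    whence &= ~AVSEEK_FORCE;
    if (whence == SEEK_SET)
        mbuf->pos = pos;
    else if (whence == SEEK_CUR)
        mbuf->pos += pos;
    else if (whence == SEEK_END)
        mbuf->pos = mbuf->size + pos;
    return mbuf->pos;
}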
Example #12
0
int main(int argc, char** argv) {
	if (argc != 2)
		fprintf(stderr, "usage: %s webm-file\n", argv[0]), exit(1);
	
	char errmsg[512];
	// Switch stdin to non-blocking IO to test out a non-blocking av_read_frame()
	if ( fcntl(0, F_SETFL, fcntl(0, F_GETFL, NULL) | O_NONBLOCK) == -1 )
		perror("fcntl"), exit(1);
	
	av_register_all();
	
	AVInputFormat* webm_fmt = av_find_input_format("webm");
	AVFormatContext* demuxer = avformat_alloc_context();
	demuxer->flags |= AVFMT_FLAG_NONBLOCK;
	int error = avformat_open_input(&demuxer, argv[1], webm_fmt, NULL);
	//int error = avformat_open_input(&demuxer, "pipe:0", webm_fmt, NULL);
	if (error < 0)
		fprintf(stderr, "avformat_open_input(): %s\n", av_make_error_string(errmsg, sizeof(errmsg), error)), exit(1);
	
	printf("found %d streams:\n", demuxer->nb_streams);
	for(size_t i = 0; i < demuxer->nb_streams; i++) {
		AVStream* stream = demuxer->streams[i];
		printf("%d: time base %d/%d, codec: %s, extradata: %p, %d bytes\n",
			stream->index, stream->time_base.num, stream->time_base.den,
			stream->codec->codec_name, stream->codec->extradata, stream->codec->extradata_size);
		switch (stream->codec->codec_type) {
			case AVMEDIA_TYPE_VIDEO:
				printf("   video, w: %d, h: %d, sar: %d/%d, %dx%d\n",
					stream->codec->width, stream->codec->height, stream->sample_aspect_ratio.num, stream->sample_aspect_ratio.den,
					stream->codec->width * stream->sample_aspect_ratio.num / stream->sample_aspect_ratio.den, stream->codec->height);
				break;
			case AVMEDIA_TYPE_AUDIO:
				printf("   audio, %d channels, sampel rate: %d, bits per sample: %d\n",
					stream->codec->channels, stream->codec->sample_rate, stream->codec->bits_per_coded_sample);
				break;
			default:
				break;
		}
	}
	
	AVPacket packet;
	int ret =0;
	while (true) {
		ret = av_read_frame(demuxer, &packet);
		if (ret == AVERROR(EAGAIN)) {
			printf("sleep\n");
			struct timespec duration = {0, 250 * 1000000};
			nanosleep(&duration, NULL);
			continue;
		} else if (ret != 0) {
			break;
		}
		
		if (packet.flags & AV_PKT_FLAG_KEY && packet.stream_index == 0)
			printf("keyframe: stream %d, pts: %lu, dts: %lu, duration: %d, buf: %p\n", packet.stream_index, packet.pts, packet.dts, packet.duration, packet.buf);
		
		av_free_packet(&packet);
	}
	
	avformat_close_input(&demuxer);
	
	return 0;
}
Example #13
0
int main(int argc, char* argv[])
{
	AVFormatContext* pFormatCtx;
	AVOutputFormat* fmt;
	AVStream* video_st;
	AVCodecContext* pCodecCtx;
	AVCodec* pCodec;
	AVPacket pkt;
	uint8_t* picture_buf;
	AVFrame* pFrame;
	int picture_size;
	int y_size;
	int framecnt=0;
	//FILE *in_file = fopen("src01_480x272.yuv", "rb");	//Input raw YUV data 
	FILE *in_file = fopen("../ds_480x272.yuv", "rb");   //Input raw YUV data
	int in_w=480,in_h=272;                              //Input data's width and height
	int framenum=100;                                   //Frames to encode
	//const char* out_file = "src01.h264";              //Output Filepath 
	//const char* out_file = "src01.ts";
	//const char* out_file = "src01.hevc";
	const char* out_file = "ds.h264";

	av_register_all();
	//Method1.
	pFormatCtx = avformat_alloc_context();
	//Guess Format
	fmt = av_guess_format(NULL, out_file, NULL);
	pFormatCtx->oformat = fmt;
	
	//Method 2.
	//avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, out_file);
	//fmt = pFormatCtx->oformat;


	//Open output URL
	if (avio_open(&pFormatCtx->pb,out_file, AVIO_FLAG_READ_WRITE) < 0){
		printf("Failed to open output file! \n");
		return -1;
	}

	video_st = avformat_new_stream(pFormatCtx, 0);
	if (video_st==NULL){
		return -1;
	}
	video_st->time_base.num = 1; 
	video_st->time_base.den = 25;  

	//Parameters that must be set
	pCodecCtx = video_st->codec;
	//pCodecCtx->codec_id =AV_CODEC_ID_HEVC;
	pCodecCtx->codec_id = fmt->video_codec;
	pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
	pCodecCtx->pix_fmt = PIX_FMT_YUV420P;
	pCodecCtx->width = in_w;  
	pCodecCtx->height = in_h;
	pCodecCtx->time_base.num = 1;  
	pCodecCtx->time_base.den = 25;  
	pCodecCtx->bit_rate = 400000;  
	pCodecCtx->gop_size=250;
	//H264
	//pCodecCtx->me_range = 16;
	//pCodecCtx->max_qdiff = 4;
	//pCodecCtx->qcompress = 0.6;
	pCodecCtx->qmin = 10;
	pCodecCtx->qmax = 51;

	//Optional Param
	pCodecCtx->max_b_frames=3;

	// Set Option
	AVDictionary *param = 0;
	//H.264
	if(pCodecCtx->codec_id == AV_CODEC_ID_H264) {
		av_dict_set(&param, "preset", "slow", 0);
		av_dict_set(&param, "tune", "zerolatency", 0);
		//av_dict_set(&param, "profile", "main", 0);
	}
	//H.265
	if(pCodecCtx->codec_id == AV_CODEC_ID_H265){
		av_dict_set(&param, "preset", "ultrafast", 0);
		av_dict_set(&param, "tune", "zero-latency", 0);
	}

	//Show some Information
	av_dump_format(pFormatCtx, 0, out_file, 1);

	pCodec = avcodec_find_encoder(pCodecCtx->codec_id);
	if (!pCodec){
		printf("Can not find encoder! \n");
		return -1;
	}
	if (avcodec_open2(pCodecCtx, pCodec,&param) < 0){
		printf("Failed to open encoder! \n");
		return -1;
	}


	pFrame = av_frame_alloc();
	picture_size = avpicture_get_size(pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
	picture_buf = (uint8_t *)av_malloc(picture_size);
	avpicture_fill((AVPicture *)pFrame, picture_buf, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);

	//Write File Header
	avformat_write_header(pFormatCtx,NULL);

	av_new_packet(&pkt,picture_size);

	y_size = pCodecCtx->width * pCodecCtx->height;

	for (int i=0; i<framenum; i++){
		//Read raw YUV data
		if (fread(picture_buf, 1, y_size*3/2, in_file) <= 0){
			printf("Failed to read raw data! \n");
			return -1;
		}else if(feof(in_file)){
			break;
		}
		pFrame->data[0] = picture_buf;              // Y
		pFrame->data[1] = picture_buf+ y_size;      // U 
		pFrame->data[2] = picture_buf+ y_size*5/4;  // V
		//PTS
		pFrame->pts=i;
		int got_picture=0;
		//Encode
		int ret = avcodec_encode_video2(pCodecCtx, &pkt,pFrame, &got_picture);
		if(ret < 0){
			printf("Failed to encode! \n");
			return -1;
		}
		if (got_picture==1){
			printf("Succeed to encode frame: %5d\tsize:%5d\n",framecnt,pkt.size);
			framecnt++;
			pkt.stream_index = video_st->index;
			ret = av_write_frame(pFormatCtx, &pkt);
			av_free_packet(&pkt);
		}
	}
	//Flush Encoder
	int ret = flush_encoder(pFormatCtx,0);
	if (ret < 0) {
		printf("Flushing encoder failed\n");
		return -1;
	}

	//Write file trailer
	av_write_trailer(pFormatCtx);

	//Clean
	if (video_st){
		avcodec_close(video_st->codec);
		av_free(pFrame);
		av_free(picture_buf);
	}
	avio_close(pFormatCtx->pb);
	avformat_free_context(pFormatCtx);

	fclose(in_file);

	return 0;
}
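Example #13 calls flush_encoder(), which is not shown. A possible implementation, sketched with the same pre-4.x API the example uses: keep feeding a NULL frame to avcodec_encode_video2() until the encoder has no more delayed packets, writing each one to the muxer.

// Sketch: drain packets buffered inside the encoder before writing the trailer.
int flush_encoder(AVFormatContext *fmt_ctx, unsigned int stream_index)
{
    int ret, got_frame;
    AVPacket enc_pkt;
    if (!(fmt_ctx->streams[stream_index]->codec->codec->capabilities & CODEC_CAP_DELAY))
        return 0;                              // encoder has no delayed frames
    while (1) {
        enc_pkt.data = NULL;
        enc_pkt.size = 0;
        av_init_packet(&enc_pkt);
        ret = avcodec_encode_video2(fmt_ctx->streams[stream_index]->codec,
                                    &enc_pkt, NULL, &got_frame);
        if (ret < 0)
            break;
        if (!got_frame) {                      // nothing left, flushing finished
            ret = 0;
            break;
        }
        printf("Flush Encoder: Succeed to encode 1 frame!\tsize:%5d\n", enc_pkt.size);
        ret = av_write_frame(fmt_ctx, &enc_pkt);
        if (ret < 0)
            break;
    }
    return ret;
}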
Example #14
0
void VideoWriterThread::open()
{
    av_register_all(); // TODO: make sure this is only done once. 
//    av_log_set_level(AV_LOG_DEBUG);
#if LIBAVFORMAT_VERSION_MAJOR > 52
    m_pOutputFormat = av_guess_format(0, m_sFilename.c_str(), 0);
#else
    m_pOutputFormat = guess_format(0, m_sFilename.c_str(), 0);
#endif
    m_pOutputFormat->video_codec = CODEC_ID_MJPEG;

#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(52, 24, 0)
    m_pOutputFormatContext = avformat_alloc_context();
#else
    m_pOutputFormatContext = av_alloc_format_context();
#endif
    m_pOutputFormatContext->oformat = m_pOutputFormat;

    strncpy(m_pOutputFormatContext->filename, m_sFilename.c_str(),
            sizeof(m_pOutputFormatContext->filename));

    if (m_pOutputFormat->video_codec != CODEC_ID_NONE) {
        setupVideoStream();
    }
#if LIBAVFORMAT_VERSION_MAJOR < 52
    av_set_parameters(m_pOutputFormatContext, NULL);
#endif

    float muxMaxDelay = 0.7;
    m_pOutputFormatContext->max_delay = int(muxMaxDelay * AV_TIME_BASE);

//    av_dump_format(m_pOutputFormatContext, 0, m_sFilename.c_str(), 1);

    openVideoCodec();

    m_pVideoBuffer = NULL;
    if (!(m_pOutputFormatContext->oformat->flags & AVFMT_RAWPICTURE)) {
        m_pVideoBuffer = (unsigned char*)(av_malloc(VIDEO_BUFFER_SIZE));
    }

    if (!(m_pOutputFormat->flags & AVFMT_NOFILE)) {
#if LIBAVCODEC_VERSION_INT > AV_VERSION_INT(53, 8, 0)
        int retVal = avio_open(&m_pOutputFormatContext->pb, m_sFilename.c_str(),
                URL_WRONLY);
#else
        int retVal = url_fopen(&m_pOutputFormatContext->pb, m_sFilename.c_str(),
                URL_WRONLY);
#endif
        if (retVal < 0) {
            throw Exception(AVG_ERR_VIDEO_INIT_FAILED, 
                    string("Could not open output file: '") + m_sFilename + "'");
        }
    }

    m_pFrameConversionContext = sws_getContext(m_Size.x, m_Size.y, 
            ::PIX_FMT_RGB32, m_Size.x, m_Size.y, STREAM_PIXEL_FORMAT, 
            SWS_BILINEAR, NULL, NULL, NULL);

    m_pConvertedFrame = createFrame(STREAM_PIXEL_FORMAT, m_Size);

#if LIBAVFORMAT_VERSION_MAJOR > 52
    avformat_write_header(m_pOutputFormatContext, 0);
#else
    av_write_header(m_pOutputFormatContext);
#endif
}
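The createFrame() helper used above is not part of the excerpt. Below is a sketch based on how it is called (a pixel format plus a size with x/y members); the IntPoint type and the exact libavg signature are assumptions.

// Sketch: allocate an AVFrame plus a data buffer for the given format and size.
AVFrame* VideoWriterThread::createFrame(::PixelFormat pixelFormat, IntPoint size)
{
    AVFrame* pPicture = avcodec_alloc_frame();
    int memNeeded = avpicture_get_size(pixelFormat, size.x, size.y);
    unsigned char* pPictureData = static_cast<unsigned char*>(av_malloc(memNeeded));
    avpicture_fill((AVPicture*)pPicture, pPictureData, pixelFormat, size.x, size.y);
    return pPicture;
}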
Example #15
0
void * thread_routine(void *arg)
{
	struct mypara *recv_para = (struct mypara *)arg;  // received parameter data
	AVFormatContext	*pFormatCtx;
	int				i, videoindex;
	AVCodecContext	*pCodecCtx;
	AVCodec			*pCodec;
	AVFrame	*pFrame, *pFrameYUV;
	unsigned char *out_buffer;
	AVPacket *packet;
	int y_size;
	int ret, got_picture;
	struct SwsContext *img_convert_ctx;

	//char filepath[]="bigbuckbunny_480x272.h265";
	char filepath[] = "rtsp://192.168.131.4/0";
	//SDL---------------------------
	int screen_w = 0, screen_h = 0;
	SDL_Window *screen;
	SDL_Renderer* sdlRenderer;
	SDL_Texture* sdlTexture;
	SDL_Rect sdlRect, sdlRect_tmp;

	FILE *fp_yuv;

	//av_register_all();
	//avformat_network_init();
	pFormatCtx = avformat_alloc_context();

	if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0){
		printf("Couldn't open input stream.\n");
		return NULL;
	}
	if (avformat_find_stream_info(pFormatCtx, NULL) < 0){
		printf("Couldn't find stream information.\n");
		return NULL;
	}
	videoindex = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++)
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO){
			videoindex = i;
			break;
		}
	if (videoindex == -1){
		printf("Didn't find a video stream.\n");
		return NULL;
	}

	pCodecCtx = pFormatCtx->streams[videoindex]->codec;
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL){
		printf("Codec not found.\n");
		return NULL;
	}
	//pthread_mutex_lock(&mutex);
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0){
		printf("Could not open codec.\n");
		return NULL;
	}
	//pthread_mutex_unlock(&mutex);

	pFrame = av_frame_alloc();
	pFrameYUV = av_frame_alloc();
	out_buffer = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1));
	av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer,
		AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);

	packet = (AVPacket *)av_malloc(sizeof(AVPacket));
	//Output Info-----------------------------
	printf("--------------- File Information ----------------\n");
	av_dump_format(pFormatCtx, 0, filepath, 0);
	printf("-------------------------------------------------\n");
	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
		pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

#if OUTPUT_YUV420P 
	fp_yuv = fopen("output.yuv", "wb+");
#endif  

	//if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {  
	//	printf( "Could not initialize SDL - %s\n", SDL_GetError()); 
	//	return -1;
	//} 

	screen_w = pCodecCtx->width;
	screen_h = pCodecCtx->height;
	//SDL 2.0 Support for multiple windows
	//screen = SDL_CreateWindow("Simplest ffmpeg player's Window", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
	//	screen_w*2, screen_h,
	//	SDL_WINDOW_OPENGL);

	screen = (*recv_para).screen; //get the screen
	if (!screen) {
		printf("SDL: could not create window - exiting:%s\n", SDL_GetError());
		return NULL;
	}

	//sdlRenderer = SDL_CreateRenderer(screen, -1, 0);  
	sdlRenderer = (*recv_para).sdlRenderer;//get the sdlRenderer
	//IYUV: Y + U + V  (3 planes)
	//YV12: Y + V + U  (3 planes)
	pthread_mutex_lock(&mutex);
	sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING, pCodecCtx->width, pCodecCtx->height);
	pthread_mutex_unlock(&mutex);


	//temp sdlRect for render copy
	sdlRect_tmp.x = 0;
	sdlRect_tmp.y = 0;
	sdlRect_tmp.w = screen_w;
	sdlRect_tmp.h = screen_h;

	//four rect in one line
	// total 4*4 = 16 rect
	sdlRect.x = 0 + screen_w / 2 * ((*recv_para).id % 4);
	sdlRect.y = 0 + screen_h / 2 * ((*recv_para).id / 4);
	sdlRect.w = screen_w / 2;
	sdlRect.h = screen_h / 2;


	//SDL End----------------------
	while (thread_exit && av_read_frame(pFormatCtx, packet) >= 0){
		if (packet->stream_index == videoindex){
			ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
			if (ret < 0){
				printf("Decode Error.\n");
				return NULL;
			}
			if (got_picture){
				//printf("id:%d\n",(*recv_para).id); //打印线程id
				//printf("x_pos:%d   y_pos:%d\n",sdlRect.x,sdlRect.y); //print rect position
				sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
					pFrameYUV->data, pFrameYUV->linesize);

#if OUTPUT_YUV420P
				y_size = pCodecCtx->width*pCodecCtx->height;
				fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);    //Y 
				fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv);  //U
				fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv);  //V
#endif
				//SDL---------------------------
#if 0
				SDL_UpdateTexture(sdlTexture, NULL, pFrameYUV->data[0], pFrameYUV->linesize[0]);
#else
				pthread_mutex_lock(&mutex);  //mutex or SEGFAULT
				SDL_UpdateYUVTexture(sdlTexture, &sdlRect_tmp,//sdl tmp
					pFrameYUV->data[0], pFrameYUV->linesize[0],
					pFrameYUV->data[1], pFrameYUV->linesize[1],
					pFrameYUV->data[2], pFrameYUV->linesize[2]);
#endif	
				//SDL_RenderClear( sdlRenderer );  
				SDL_RenderCopy(sdlRenderer, sdlTexture, NULL, &sdlRect);
				//SDL_RenderCopy( sdlRenderer, sdlTexture,  NULL, &sdlRect1);  
				SDL_RenderPresent(sdlRenderer);
				pthread_mutex_unlock(&mutex);
				//SDL End-----------------------
				//Delay 40ms
				//SDL_Delay(40);
			}
		}
		av_free_packet(packet);
	}
	//flush decoder
	//FIX: Flush Frames remained in Codec
	while (1) {
		ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
		if (ret < 0)
			break;
		if (!got_picture)
			break;
		sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
			pFrameYUV->data, pFrameYUV->linesize);
#if OUTPUT_YUV420P
		int y_size = pCodecCtx->width*pCodecCtx->height;
		fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);    //Y 
		fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv);  //U
		fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv);  //V
#endif
		//SDL---------------------------

		SDL_UpdateTexture(sdlTexture, &sdlRect, pFrameYUV->data[0], pFrameYUV->linesize[0]);
		SDL_RenderClear(sdlRenderer);
		SDL_RenderCopy(sdlRenderer, sdlTexture, NULL, &sdlRect);
		SDL_RenderPresent(sdlRenderer);
		//SDL End-----------------------
		//Delay 40ms
		//SDL_Delay(40);
	}

	sws_freeContext(img_convert_ctx);

#if OUTPUT_YUV420P 
	fclose(fp_yuv);
#endif 

	SDL_RenderClear(sdlRenderer);
	SDL_Quit();

	av_frame_free(&pFrameYUV);
	av_frame_free(&pFrame);
	avcodec_close(pCodecCtx);
	avformat_close_input(&pFormatCtx);
}
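Example #15 relies on a struct mypara carrying the shared SDL window/renderer plus a tile id, and on the globals mutex and thread_exit. A sketch of how the decoder threads could be created around it follows; the field names come from the usage above, everything else (start_players, the 16-tile limit) is illustrative.

// Shared state assumed by thread_routine() above.
struct mypara {
	SDL_Window   *screen;
	SDL_Renderer *sdlRenderer;
	int           id;
};

pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
int thread_exit = 1;   // the read loop above runs while this stays non-zero

// Sketch: launch one decoder thread per tile, all sharing the same renderer.
int start_players(SDL_Window *screen, SDL_Renderer *renderer, int count)
{
	static struct mypara para[16];
	pthread_t tid[16];
	for (int i = 0; i < count && i < 16; i++) {
		para[i].screen = screen;
		para[i].sdlRenderer = renderer;
		para[i].id = i;
		if (pthread_create(&tid[i], NULL, thread_routine, &para[i]) != 0)
			return -1;
	}
	return 0;
}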
Example #16
0
bool AVIDump::CreateFile()
{
  AVCodec* codec = nullptr;

  s_format_context = avformat_alloc_context();
  std::stringstream s_file_index_str;
  s_file_index_str << s_file_index;
  snprintf(s_format_context->filename, sizeof(s_format_context->filename), "%s",
           (File::GetUserPath(D_DUMPFRAMES_IDX) + "framedump" + s_file_index_str.str() + ".avi")
               .c_str());
  File::CreateFullPath(s_format_context->filename);

  // Ask to delete file
  if (File::Exists(s_format_context->filename))
  {
    if (SConfig::GetInstance().m_DumpFramesSilent ||
        AskYesNoT("Delete the existing file '%s'?", s_format_context->filename))
    {
      File::Delete(s_format_context->filename);
    }
    else
    {
      // Stop and cancel dumping the video
      return false;
    }
  }

  if (!(s_format_context->oformat = av_guess_format("avi", nullptr, nullptr)) ||
      !(s_stream = avformat_new_stream(s_format_context, codec)))
  {
    return false;
  }

  s_stream->codec->codec_id =
      g_Config.bUseFFV1 ? AV_CODEC_ID_FFV1 : s_format_context->oformat->video_codec;
  if (!g_Config.bUseFFV1)
    s_stream->codec->codec_tag =
        MKTAG('X', 'V', 'I', 'D');  // Force XVID FourCC for better compatibility
  s_stream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
  s_stream->codec->bit_rate = 400000;
  s_stream->codec->width = s_width;
  s_stream->codec->height = s_height;
  s_stream->codec->time_base.num = 1;
  s_stream->codec->time_base.den = VideoInterface::GetTargetRefreshRate();
  s_stream->codec->gop_size = 12;
  s_stream->codec->pix_fmt = g_Config.bUseFFV1 ? AV_PIX_FMT_BGRA : AV_PIX_FMT_YUV420P;

  if (!(codec = avcodec_find_encoder(s_stream->codec->codec_id)) ||
      (avcodec_open2(s_stream->codec, codec, nullptr) < 0))
  {
    return false;
  }

  s_src_frame = av_frame_alloc();
  s_scaled_frame = av_frame_alloc();

  s_size = avpicture_get_size(s_stream->codec->pix_fmt, s_width, s_height);

  s_yuv_buffer = new uint8_t[s_size];
  avpicture_fill((AVPicture*)s_scaled_frame, s_yuv_buffer, s_stream->codec->pix_fmt, s_width,
                 s_height);

  NOTICE_LOG(VIDEO, "Opening file %s for dumping", s_format_context->filename);
  if (avio_open(&s_format_context->pb, s_format_context->filename, AVIO_FLAG_WRITE) < 0)
  {
    WARN_LOG(VIDEO, "Could not open %s", s_format_context->filename);
    return false;
  }

  avformat_write_header(s_format_context, nullptr);

  return true;
}
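CreateFile() only prepares the stream; the per-frame dump path is not shown. Below is a sketch of how a captured frame could be scaled into s_scaled_frame and encoded with the same legacy avcodec_encode_video2() API; the sws context variable and the exact signature are assumptions, and Dolphin's real AddFrame() differs in detail.

// Sketch: convert one captured BGRA frame and mux the encoded packet.
void AVIDump::AddFrame(uint8_t* data, int width, int height)
{
  // wrap the raw capture in s_src_frame
  avpicture_fill((AVPicture*)s_src_frame, data, AV_PIX_FMT_BGRA, width, height);

  // convert to the stream's pixel format
  static SwsContext* s_sws_context = nullptr;
  s_sws_context = sws_getCachedContext(s_sws_context, width, height, AV_PIX_FMT_BGRA,
                                       s_width, s_height, s_stream->codec->pix_fmt,
                                       SWS_BICUBIC, nullptr, nullptr, nullptr);
  sws_scale(s_sws_context, (const uint8_t* const*)s_src_frame->data,
            s_src_frame->linesize, 0, height,
            s_scaled_frame->data, s_scaled_frame->linesize);

  // encode and write one packet
  AVPacket pkt;
  av_init_packet(&pkt);
  pkt.data = nullptr;                 // let the encoder allocate the payload
  pkt.size = 0;
  int got_packet = 0;
  if (avcodec_encode_video2(s_stream->codec, &pkt, s_scaled_frame, &got_packet) >= 0 &&
      got_packet)
  {
    pkt.stream_index = s_stream->index;
    av_interleaved_write_frame(s_format_context, &pkt);
    av_free_packet(&pkt);
  }
}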
Example #17
0
AVWRAP_DECL int AVWrapper_Init(
         void (*pAddFileLogRaw)(const char*),
         const char* pFilename,
         const char* pDesc,
         const char* pSoundFile,
         const char* pFormatName,
         const char* pVCodecName,
         const char* pACodecName,
         int Width, int Height,
         int FramerateNum, int FramerateDen,
         int VQuality)
{
    int ret;
    AddFileLogRaw = pAddFileLogRaw;
    av_log_set_callback( &LogCallback );

    g_Width  = Width;
    g_Height = Height;
    g_Framerate.num = FramerateNum;
    g_Framerate.den = FramerateDen;
    g_VQuality = VQuality;

    // initialize libav and register all codecs and formats
    av_register_all();

    // find format
    g_pFormat = av_guess_format(pFormatName, NULL, NULL);
    if (!g_pFormat)
        return FatalError("Format \"%s\" was not found", pFormatName);

    // allocate the output media context
    g_pContainer = avformat_alloc_context();
    if (!g_pContainer)
        return FatalError("Could not allocate output context");

    g_pContainer->oformat = g_pFormat;

    // store description of file
    av_dict_set(&g_pContainer->metadata, "comment", pDesc, 0);

    // append extension to filename
    char ext[16];
    strncpy(ext, g_pFormat->extensions, 16);
    ext[15] = 0;
    ext[strcspn(ext,",")] = 0;
    snprintf(g_pContainer->filename, sizeof(g_pContainer->filename), "%s.%s", pFilename, ext);

    // find codecs
    g_pVCodec = avcodec_find_encoder_by_name(pVCodecName);
    g_pACodec = avcodec_find_encoder_by_name(pACodecName);

    // add audio and video stream to container
    g_pVStream = NULL;
    g_pAStream = NULL;

    if (g_pVCodec)
    {
        ret = AddVideoStream();
        if (ret < 0)
            return ret;
    }
    else
        Log("Video codec \"%s\" was not found; video will be ignored.\n", pVCodecName);

    if (g_pACodec)
    {
        g_pSoundFile = fopen(pSoundFile, "rb");
        if (g_pSoundFile)
        {
            fread(&g_Frequency, 4, 1, g_pSoundFile);
            fread(&g_Channels, 4, 1, g_pSoundFile);
            AddAudioStream();
        }
        else
            Log("Could not open %s\n", pSoundFile);
    }
    else
        Log("Audio codec \"%s\" was not found; audio will be ignored.\n", pACodecName);

    if (!g_pAStream && !g_pVStream)
        return FatalError("No video, no audio, aborting...");

    // write format info to log
    av_dump_format(g_pContainer, 0, g_pContainer->filename, 1);

    // open the output file, if needed
    if (!(g_pFormat->flags & AVFMT_NOFILE))
    {
        if (avio_open(&g_pContainer->pb, g_pContainer->filename, AVIO_FLAG_WRITE) < 0)
            return FatalError("Could not open output file (%s)", g_pContainer->filename);
    }

    // write the stream header, if any
    avformat_write_header(g_pContainer, NULL);

    g_pVFrame->pts = -1;
    return 0;
}
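AddVideoStream() and AddAudioStream() called above are not included in the excerpt. Below is a compact sketch of what a matching AddVideoStream() could look like, built only from the globals this wrapper already uses (g_pContainer, g_pVCodec, g_pVStream, g_Width, g_Height, g_Framerate, g_pVFrame); the real Hedgewars implementation configures more encoder options.

// Sketch: create the video stream, configure and open the encoder, allocate the frame.
static int AddVideoStream(void)
{
    g_pVStream = avformat_new_stream(g_pContainer, g_pVCodec);
    if (!g_pVStream)
        return FatalError("Could not allocate video stream");

    AVCodecContext* pVideo = g_pVStream->codec;
    pVideo->codec_id   = g_pVCodec->id;
    pVideo->codec_type = AVMEDIA_TYPE_VIDEO;
    pVideo->width      = g_Width;
    pVideo->height     = g_Height;
    pVideo->time_base.num = g_Framerate.den;   // time base is the inverse of the framerate
    pVideo->time_base.den = g_Framerate.num;
    pVideo->pix_fmt    = AV_PIX_FMT_YUV420P;
    if (g_pContainer->oformat->flags & AVFMT_GLOBALHEADER)
        pVideo->flags |= CODEC_FLAG_GLOBAL_HEADER;

    if (avcodec_open2(pVideo, g_pVCodec, NULL) < 0)
        return FatalError("Could not open video codec %s", g_pVCodec->long_name);

    g_pVFrame = av_frame_alloc();              // its pts is set to -1 at the end of Init
    if (!g_pVFrame)
        return FatalError("Could not allocate frame");

    return 0;
}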
Example #18
0
int main(int argc, char **argv)
{
    /*
     int i;
     char    b[40];
     char    c[21];
     HANDLE hConsole;
     int k;
     
     #ifdef _DEBUG
     _CrtSetDbgFlag ( _CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF );
     #endif
     
     for(i = 0; i < sizeof(b); ++i )
     {
     b[i] = ' ';
     }
     b[255] = '\0';
     
     system("color 5");
     for(i = 0; i < 20; ++i) {
     c[i] = '>';
     c[i+1] = '\0';
     printf("Progress |%s%*s|\r",c,19-i,&"");
     Sleep(100);
     //printf("%s\r", b);
     }
     printf("\n");
     printf("sizeof(structa_t) = %d\n", sizeof(structa_t));
     printf("sizeof(structb_t) = %d\n", sizeof(structb_t));
     printf("sizeof(structc_t) = %d\n", sizeof(structc_t));
     printf("sizeof(structd_t) = %d\n", AV_TIME_BASE);
     
     hConsole = GetStdHandle(STD_OUTPUT_HANDLE);
     
     // you can loop k higher to see more color choices
     for(k = 1; k < 255; k++)
     {
     // pick the colorattribute k you want
     SetConsoleTextAttribute(hConsole, k);
     printf("%d I want to be nice today!",k);
     }*/
    
    AVFormatContext             *in_ctx = NULL, *out_ctx = NULL;
    AVInputFormat               *file_iformat = NULL;
    AVOutputFormat              *out_fmt = NULL;
    AVFrame                     *frame = NULL, *frameRGB = NULL;
    AVStream                    *st = NULL;
    AVCodecContext              *codec_ctx = NULL, *pCodecCtx = NULL;
    AVCodec                     *codec = NULL, *pCodec = NULL;
    AVCodec                     dummy_codec = {0};
    AVPacket                    pkt, p;
    AVBitStreamFilterContext    *bsf = NULL;
    struct SwsContext           *sws_ctx = NULL;
    BOOL                        tom = TRUE;
    char                        b[1024];
    int                         err, i, ret, frameFinished, numBytes;
    const char                  *src_filename = "final.mp4";
    int64_t                     timestamp;
    uint8_t                     buf[128];
    uint8_t                     *buffer = NULL;
    int                         video_stream_idx = -1;
    int                         audio_stream_idx = -1;
    FILE*                       sdp_file;
    
#ifdef _DEBUG
    _CrtSetDbgFlag ( _CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF );
#endif
    
    /* register all formats and codecs */
    av_register_all();
    avformat_network_init();
    av_log_set_level(AV_LOG_DEBUG);

    /* open input file, and allocate format context */
    ret = avformat_open_input(&in_ctx, src_filename, NULL, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open source file %s\n", src_filename);
        PAUSE_EXIT(1);
    }
    in_ctx->flags |= AVFMT_FLAG_GENPTS;

    ret = avformat_find_stream_info(in_ctx, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_FATAL, "%s: could not find codec parameters\n", src_filename);
        avformat_close_input(&in_ctx);
        PAUSE_EXIT(1);
    }
    
    av_dump_format(in_ctx, 0, src_filename, 0);
    
    for (i = 0; i < in_ctx->nb_streams; i++) {
        AVStream        *st_ptr;
        AVCodecContext  *coctx_ptr;
        
        if (in_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
	  if (in_ctx->streams[i]->codec->codec_id == CODEC_ID_MPEG4) {
	      bsf = av_bitstream_filter_init("dump_extra");
	  } else if (in_ctx->streams[i]->codec->codec_id == CODEC_ID_H264) {
	      fprintf(stderr, "Found h264 Stream\n");
	      bsf = av_bitstream_filter_init("h264_mp4toannexb");
	  } else {
	      bsf = NULL;
	  }
	  pCodecCtx=in_ctx->streams[i]->codec;
	  pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	  if(pCodec==NULL) {
	      fprintf(stderr, "Unsupported codec!\n");
	      return -1; // Codec not found
	  }
	  // Open codec
	  if(avcodec_open2(pCodecCtx, pCodec, NULL)<0)
	      return -1; // Could not open codec
	  
	  out_ctx = avformat_alloc_context();
	  out_fmt = av_guess_format("rtp", NULL, NULL);
	  if (!out_fmt) {
	      fprintf(stderr, "Unable for find the RTP format for output\n");
	      avformat_close_input(&in_ctx);
	      PAUSE_EXIT(1);
	  }
	  out_ctx->oformat = out_fmt;
	  //out_ctx->flags |= AVFMT_FLAG_NONBLOCK;
	  
	  st = avformat_new_stream(out_ctx, 0);
	  if (!st) {
	      fprintf(stderr, "Cannot allocate stream\n");
	      avformat_close_input(&in_ctx);
	      PAUSE_EXIT(1);
	  }
	  
	  dummy_codec.type = in_ctx->streams[i]->codec->codec_type;
	  codec_ctx = st->codec;
	  avcodec_get_context_defaults3(codec_ctx, &dummy_codec);
	  avcodec_open2(codec_ctx, NULL, NULL);
	  codec_ctx->codec_type = in_ctx->streams[i]->codec->codec_type;
	  
	  /* FIXME: global headers stuff... */
	  
	  snprintf(out_ctx->filename, sizeof(out_ctx->filename), "rtp://%s:%d", "127.0.0.1", 55444);
	  
	  /* open the UDP sockets for RTP and RTCP */
	  if (!software_streaming) {
	      printf("Distant Connection\n");
	      ret = avio_open(&out_ctx->pb, out_ctx->filename, AVIO_FLAG_WRITE);
	      if (ret < 0) {
		fprintf(stderr, "Cannot open '%s'\n", out_ctx->filename);
		avformat_close_input(&in_ctx);
		PAUSE_EXIT(1);
	      }
	  } else {
	      ret = avio_open_dyn_buf(&out_ctx->pb);
	      out_ctx->pb->max_packet_size = 1460;
	      printf("MAX packet size = %d\n",out_ctx->pb->max_packet_size);
	  }
	  st_ptr = in_ctx->streams[i];
	  coctx_ptr = st_ptr->codec;
	  
	  codec_ctx->codec_id = coctx_ptr->codec_id;
	  codec_ctx->codec_type = coctx_ptr->codec_type;
	  
	  if(!codec_ctx->codec_tag) {
	      codec_ctx->codec_tag = coctx_ptr->codec_tag;
	  }
	  codec_ctx->bit_rate = coctx_ptr->bit_rate;
	  printf("\n\n\n\nFIRE!!!!! %d %d\n\n\n\n", codec_ctx->profile, codec_ctx->level);
	  if(coctx_ptr->extradata_size) {
	      codec_ctx->extradata = (uint8_t*)av_malloc(coctx_ptr->extradata_size);
	      memcpy(codec_ctx->extradata, coctx_ptr->extradata, coctx_ptr->extradata_size);
	  } else {
	      codec_ctx->extradata = NULL;
	  }
	  
	  codec_ctx->extradata_size = coctx_ptr->extradata_size;
	  /* FIXME: ExtraData ??? */
	  if (codec_ctx->codec_id == CODEC_ID_H264) {
	      printf("BINGO\n");
	      extradata_convert(codec_ctx);
	  }
	  
	  if(out_ctx->oformat->flags & AVFMT_GLOBALHEADER)
	      codec_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
	  
	  if(av_q2d(coctx_ptr->time_base) > av_q2d(st_ptr->time_base) && av_q2d(st_ptr->time_base) < 1.0/1000) {
	      codec_ctx->time_base = coctx_ptr->time_base;
	  } else {
	      codec_ctx->time_base = st_ptr->time_base;
	  }
	  
	  switch(codec_ctx->codec_type) {
	      case AVMEDIA_TYPE_AUDIO:
		codec_ctx->sample_rate = coctx_ptr->sample_rate;
		codec_ctx->time_base.den = 1;
		codec_ctx->time_base.num = coctx_ptr->sample_rate;
		codec_ctx->channels = coctx_ptr->channels;
		codec_ctx->frame_size = coctx_ptr->frame_size;
		codec_ctx->block_align= coctx_ptr->block_align;
		
		break;
	      case AVMEDIA_TYPE_VIDEO:
		//printf("Pixel Format %d\n", coctx_ptr->pix_fmt);
		codec_ctx->pix_fmt = coctx_ptr->pix_fmt;
		codec_ctx->width = coctx_ptr->width;
		codec_ctx->height = coctx_ptr->height;
		codec_ctx->has_b_frames = coctx_ptr->has_b_frames;
		
		break;
	      default:
		fprintf(stderr, "Strange Codec Type %d\n", codec_ctx->codec_type);
		PAUSE_EXIT(1);
	  }
	  
	  
	  ret = avformat_write_header(out_ctx, NULL);
	  if (ret < 0) {
	      fprintf(stderr, "Cannot Initialize output stream %d\n", i);
	      //close_output(rtp_c->out_s[i]);
	      
	      continue;
	  }
	  av_dump_format(out_ctx, i, out_ctx->filename, 1);
        }
    }
    
    frame = avcodec_alloc_frame();
    frameRGB = avcodec_alloc_frame();
    
    // Determine required buffer size and allocate buffer
    numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
			  pCodecCtx->height);
    buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
    printf("Allocated %d", numBytes);
    
    sws_ctx = sws_getContext (
     pCodecCtx->width,
     pCodecCtx->height,
     pCodecCtx->pix_fmt,
     pCodecCtx->width,
     pCodecCtx->height,
     PIX_FMT_RGB24,
     SWS_BILINEAR,
     NULL,
     NULL,
     NULL
     );
    
    // Assign appropriate parts of buffer to image planes in pFrameRGB
    // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
    // of AVPicture
    avpicture_fill((AVPicture *)frameRGB, buffer, PIX_FMT_RGB24,
		 pCodecCtx->width, pCodecCtx->height);
    
    av_sdp_create(&out_ctx,1,b,1024);
    printf("SDP : \n%s", b);
    sdp_file = fopen("rtp.sdp","w");
    fprintf(sdp_file, "%s",b);
    fclose(sdp_file);

    i = 0;
    av_init_packet(&pkt);
    av_init_packet(&p);
    printf("Payload Size %d\n", *(uint8_t *)out_ctx->streams[0]->codec->extradata != 1);
    while (av_read_frame(in_ctx, &pkt) >= 0) {
        if (pkt.stream_index == 0) {
	  int res;
	  uint8_t *ptr;
	  uint16_t ptr16;
	  
	  if (avcodec_decode_video2(pCodecCtx, frame, &frameFinished, &pkt) < 0 ) {
	      fprintf(stderr, "Error decoding packet\n");
	  }
	   
	  /* if(frameFinished) {
	  // Convert the image from its native format to RGB
	  sws_scale
	  (
	  sws_ctx,
	  (uint8_t const * const *)frame->data,
	  frame->linesize,
	  0,
	  pCodecCtx->height,
	  frameRGB->data,
	  frameRGB->linesize
	  );
	   
	  // Save the frame to disk
	  if(++i<=5)
	      SaveFrame(frameRGB, pCodecCtx->width, pCodecCtx->height, i);
	  }*/
	  printf("PTS %lld DTS%lld\n",pkt.pts,pkt.dts);
	  printf("Got frame %s %d %s\n",STRING_BOOL(frameFinished), pkt.size, STRING_BOOL(pkt.flags & AV_PKT_FLAG_KEY));
	  //break;
	  /*ret = av_bitstream_filter_filter(bsf,
	   in_ctx->streams[pkt.stream_index]->codec,
	   NULL, &p.data, &p.size,
	   pkt.data, pkt.size, pkt.flags & AV_PKT_FLAG_KEY);
	   if(ret > 0) {
	   av_free_packet(&pkt);
	   p.destruct = av_destruct_packet;
	   } else if (ret < 0) {
	   fprintf(stderr, "%s failed for stream %d, codec %s: ",
	   bsf->filter->name,
	   pkt.stream_index,
	   in_ctx->streams[pkt.stream_index]->codec->codec->name);
	   fprintf(stderr, "%d\n", ret);
	   }
	   pkt = p;*/
	  
	  stream_convert(&pkt);
	  printf("pkt size %d %d\n",pkt.size, pkt.flags);
	  av_usleep(4000000);
	  
	  if (av_write_frame(out_ctx, &pkt) < 0)
	      printf("MITSOS eisai!!!!\n");
	  
	  int written_size = avio_close_dyn_buf(out_ctx->pb,&ptr);
	  printf("Written Size %d\n", written_size);
	  ((uint8_t*)&ptr16)[0] = *(ptr+2);
	  ((uint8_t*)&ptr16)[1] = *(ptr+3);
	  printf("CC adsasd%d\n", ptr16 );
	  printByte(ptr);
	  printByte(ptr+1);
	  //printf("Second Byte %d\n", *(ptr+1));
	  
	  parseStream(ptr, written_size);
	  
	  printf("Version %d\n",(*(ptr) & 0xC0) >> 6);
	  printf("Padding %d\n",(*(ptr) & 0x20) >  0);
	  printf("Ext %d\n",(*(ptr) & 0x10) >  0);
	  printf("CC %d\n",(*(ptr) & 0xF));
	  printf("Marker %d\n",(*(ptr+1) & 0x80) > 0);
	  printf("Type %u\n",(*(ptr+1)));
	  printf("Seq %d\n",(*((uint16_t*)((uint8_t*)ptr+2))));
	  ret = avio_open_dyn_buf(&out_ctx->pb);
	  out_ctx->pb->max_packet_size = 1514;
        }
        
        av_free_packet(&pkt);
    }
Example #19
0
static int sap_read_header(AVFormatContext *s)
{
    struct SAPState *sap = s->priv_data;
    char host[1024], path[1024], url[1024];
    uint8_t recvbuf[RTP_MAX_PACKET_LENGTH];
    int port;
    int ret, i;
    AVInputFormat* infmt;

    if (!ff_network_init())
        return AVERROR(EIO);

    av_url_split(NULL, 0, NULL, 0, host, sizeof(host), &port,
                 path, sizeof(path), s->filename);
    if (port < 0)
        port = 9875;

    if (!host[0]) {
        /* Listen for announcements on sap.mcast.net if no host was specified */
        av_strlcpy(host, "224.2.127.254", sizeof(host));
    }

    ff_url_join(url, sizeof(url), "udp", NULL, host, port, "?localport=%d",
                port);
    ret = ffurl_open_whitelist(&sap->ann_fd, url, AVIO_FLAG_READ,
                               &s->interrupt_callback, NULL,
                               s->protocol_whitelist, s->protocol_blacklist);
    if (ret)
        goto fail;

    while (1) {
        int addr_type, auth_len;
        int pos;

        ret = ffurl_read(sap->ann_fd, recvbuf, sizeof(recvbuf) - 1);
        if (ret == AVERROR(EAGAIN))
            continue;
        if (ret < 0)
            goto fail;
        recvbuf[ret] = '\0'; /* Null terminate for easier parsing */
        if (ret < 8) {
            av_log(s, AV_LOG_WARNING, "Received too short packet\n");
            continue;
        }

        if ((recvbuf[0] & 0xe0) != 0x20) {
            av_log(s, AV_LOG_WARNING, "Unsupported SAP version packet "
                                      "received\n");
            continue;
        }

        if (recvbuf[0] & 0x04) {
            av_log(s, AV_LOG_WARNING, "Received stream deletion "
                                      "announcement\n");
            continue;
        }
        addr_type = recvbuf[0] & 0x10;
        auth_len  = recvbuf[1];
        sap->hash = AV_RB16(&recvbuf[2]);
        pos = 4;
        if (addr_type)
            pos += 16; /* IPv6 */
        else
            pos += 4; /* IPv4 */
        pos += auth_len * 4;
        if (pos + 4 >= ret) {
            av_log(s, AV_LOG_WARNING, "Received too short packet\n");
            continue;
        }
#define MIME "application/sdp"
        if (strcmp(&recvbuf[pos], MIME) == 0) {
            pos += strlen(MIME) + 1;
        } else if (strncmp(&recvbuf[pos], "v=0\r\n", 5) == 0) {
            // Direct SDP without a mime type
        } else {
            av_log(s, AV_LOG_WARNING, "Unsupported mime type %s\n",
                                      &recvbuf[pos]);
            continue;
        }

        sap->sdp = av_strdup(&recvbuf[pos]);
        break;
    }

    av_log(s, AV_LOG_VERBOSE, "SDP:\n%s\n", sap->sdp);
    ffio_init_context(&sap->sdp_pb, sap->sdp, strlen(sap->sdp), 0, NULL, NULL,
                  NULL, NULL);

    infmt = av_find_input_format("sdp");
    if (!infmt)
        goto fail;
    sap->sdp_ctx = avformat_alloc_context();
    if (!sap->sdp_ctx) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    sap->sdp_ctx->max_delay = s->max_delay;
    sap->sdp_ctx->pb        = &sap->sdp_pb;
    sap->sdp_ctx->interrupt_callback = s->interrupt_callback;

    if ((ret = ff_copy_whiteblacklists(sap->sdp_ctx, s)) < 0)
        goto fail;

    ret = avformat_open_input(&sap->sdp_ctx, "temp.sdp", infmt, NULL);
    if (ret < 0)
        goto fail;
    if (sap->sdp_ctx->ctx_flags & AVFMTCTX_NOHEADER)
        s->ctx_flags |= AVFMTCTX_NOHEADER;
    for (i = 0; i < sap->sdp_ctx->nb_streams; i++) {
        AVStream *st = avformat_new_stream(s, NULL);
        if (!st) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        st->id = i;
        avcodec_parameters_copy(st->codecpar, sap->sdp_ctx->streams[i]->codecpar);
        st->time_base = sap->sdp_ctx->streams[i]->time_base;
    }

    return 0;

fail:
    sap_read_close(s);
    return ret;
}
Example #20
0
static int hls_read_header(AVFormatContext *s)
{
    URLContext *u = (s->flags & AVFMT_FLAG_CUSTOM_IO) ? NULL : s->pb->opaque;
    HLSContext *c = s->priv_data;
    int ret = 0, i, j, stream_offset = 0;

    c->interrupt_callback = &s->interrupt_callback;

    // if the URL context is good, read important options we must broker later
    if (u && u->prot->priv_data_class) {
        // get the previous user agent & set back to null if string size is zero
        av_freep(&c->user_agent);
        av_opt_get(u->priv_data, "user-agent", 0, (uint8_t**)&(c->user_agent));
        if (c->user_agent && !strlen(c->user_agent))
            av_freep(&c->user_agent);

        // get the previous cookies & set back to null if string size is zero
        av_freep(&c->cookies);
        av_opt_get(u->priv_data, "cookies", 0, (uint8_t**)&(c->cookies));
        if (c->cookies && !strlen(c->cookies))
            av_freep(&c->cookies);
    }

    if ((ret = parse_playlist(c, s->filename, NULL, s->pb)) < 0)
        goto fail;

    if (c->n_variants == 0) {
        av_log(NULL, AV_LOG_WARNING, "Empty playlist\n");
        ret = AVERROR_EOF;
        goto fail;
    }
    /* If the playlist only contained variants, parse each individual
     * variant playlist. */
    if (c->n_variants > 1 || c->variants[0]->n_segments == 0) {
        for (i = 0; i < c->n_variants; i++) {
            struct variant *v = c->variants[i];
            if ((ret = parse_playlist(c, v->url, v, NULL)) < 0)
                goto fail;
        }
    }

    if (c->variants[0]->n_segments == 0) {
        av_log(NULL, AV_LOG_WARNING, "Empty playlist\n");
        ret = AVERROR_EOF;
        goto fail;
    }

    /* If this isn't a live stream, calculate the total duration of the
     * stream. */
    if (c->variants[0]->finished) {
        int64_t duration = 0;
        for (i = 0; i < c->variants[0]->n_segments; i++)
            duration += round(c->variants[0]->segments[i]->duration * AV_TIME_BASE);
        s->duration = duration;
    }

    /* Open the demuxer for each variant */
    for (i = 0; i < c->n_variants; i++) {
        struct variant *v = c->variants[i];
        AVInputFormat *in_fmt = NULL;
        char bitrate_str[20];
        AVProgram *program = NULL;
        if (v->n_segments == 0)
            continue;

        if (!(v->ctx = avformat_alloc_context())) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        v->index  = i;
        v->needed = 1;
        v->parent = s;

        /* If this is a live stream with more than 3 segments, start at the
         * third last segment. */
        v->cur_seq_no = v->start_seq_no;
        if (!v->finished && v->n_segments > 3)
            v->cur_seq_no = v->start_seq_no + v->n_segments - 3;

        v->read_buffer = av_malloc(INITIAL_BUFFER_SIZE);
        ffio_init_context(&v->pb, v->read_buffer, INITIAL_BUFFER_SIZE, 0, v,
                          read_data, NULL, NULL);
        v->pb.seekable = 0;
        ret = av_probe_input_buffer(&v->pb, &in_fmt, v->segments[0]->url,
                                    NULL, 0, 0);
        if (ret < 0) {
            /* Free the ctx - it isn't initialized properly at this point,
             * so avformat_close_input shouldn't be called. If
             * avformat_open_input fails below, it frees and zeros the
             * context, so it doesn't need any special treatment like this. */
            av_log(s, AV_LOG_ERROR, "Error when loading first segment '%s'\n", v->segments[0]->url);
            avformat_free_context(v->ctx);
            v->ctx = NULL;
            goto fail;
        }
        v->ctx->pb       = &v->pb;
        ret = avformat_open_input(&v->ctx, v->segments[0]->url, in_fmt, NULL);
        if (ret < 0)
            goto fail;

        v->stream_offset = stream_offset;
        v->ctx->ctx_flags &= ~AVFMTCTX_NOHEADER;
        ret = avformat_find_stream_info(v->ctx, NULL);
        if (ret < 0)
            goto fail;
        snprintf(bitrate_str, sizeof(bitrate_str), "%d", v->bandwidth);

        /* Create new AVprogram for variant i */
        program = av_new_program(s, i);
        if (!program)
            goto fail;
        av_dict_set(&program->metadata, "variant_bitrate", bitrate_str, 0);

        /* Create new AVStreams for each stream in this variant */
        for (j = 0; j < v->ctx->nb_streams; j++) {
            AVStream *st = avformat_new_stream(s, NULL);
            AVStream *ist = v->ctx->streams[j];
            if (!st) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            ff_program_add_stream_index(s, i, stream_offset + j);
            st->id = i;
            avpriv_set_pts_info(st, ist->pts_wrap_bits, ist->time_base.num, ist->time_base.den);
            avcodec_copy_context(st->codec, v->ctx->streams[j]->codec);
            if (v->bandwidth)
                av_dict_set(&st->metadata, "variant_bitrate", bitrate_str,
                                 0);
        }
        stream_offset += v->ctx->nb_streams;
    }

    c->first_packet = 1;
    c->first_timestamp = AV_NOPTS_VALUE;
    c->seek_timestamp  = AV_NOPTS_VALUE;

    return 0;
fail:
    free_variant_list(c);
    return ret;
}
Example #21
0
int main(int argc, char *argv[]) {
	// Decoder local variable declaration
	AVFormatContext *pFormatCtx = NULL;
	int i, videoStream;
	AVCodecContext *pCodecCtx = NULL;
	AVCodec *pCodec;
	AVFrame *pFrame;
	AVPacket packet;
	int frameFinished;

	// Encoder local variable declaration
	const char *filename;
	AVOutputFormat *fmt;
	AVFormatContext *oc;
	AVStream *video_st;
	AVCodec *video_codec;
	int ret, frame_count;
	StreamInfo sInfo;

	// Register all formats, codecs and network
	av_register_all();
	avcodec_register_all();
	avformat_network_init();

	// Open video file
	if (avformat_open_input(&pFormatCtx, "input_file.wmv", NULL, NULL) != 0)
		return -1; // Couldn't open file

	// Retrieve stream information
	if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
		return -1; // Couldn't find stream information

	// Dump information about file onto standard error
	av_dump_format(pFormatCtx, 0, "input_file.wmv", 0);

	// Find the first video stream
	videoStream = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++)
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			videoStream = i;
			break;
		}
	if (videoStream == -1)
		return -1; // Didn't find a video stream

	// Get a pointer to the codec context for the video stream
	pCodecCtx = pFormatCtx->streams[videoStream]->codec;

	// Find the decoder for the video stream
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL) {
		fprintf(stderr, "Unsupported codec!\n");
		return -1; // Codec not found
	}
	// Open codec (decoder)
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
		return -1; // Could not open codec

	// Allocate video frame
	pFrame = avcodec_alloc_frame();

	// Setup mux
	filename = "output_file.flv";
	
	// To stream to a media server (e.g. FMS)
	// filename = "rtmp://chineseforall.org/live/beta";
	
	fmt = av_guess_format("flv", filename, NULL);
	if (fmt == NULL) {
		printf("Could not guess format.\n");
		return -1;
	}
	// allocate the output media context
	oc = avformat_alloc_context();
	if (oc == NULL) {
		printf("could not allocate context.\n");
		return -1;
	}

	// Set output format context to the format ffmpeg guessed
	oc->oformat = fmt;

	// Add the video stream using the h.264
	// codec and initialize the codec.
	video_st = NULL;
	sInfo.width = pFormatCtx->streams[videoStream]->codec->width;
	sInfo.height = pFormatCtx->streams[videoStream]->codec->height;
	sInfo.pix_fmt = AV_PIX_FMT_YUV420P;
	sInfo.frame_rate = 30;
	sInfo.bitrate = 450*1000;
	video_st = add_stream(oc, &video_codec, AV_CODEC_ID_H264, &sInfo);

	// Now that all the parameters are set, we can open the audio and
	// video codecs and allocate the necessary encode buffers.
	if (video_st)
		open_video(oc, video_codec, video_st);

	/* open the output file, if needed */
	if (!(fmt->flags & AVFMT_NOFILE)) {
		ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
		if (ret < 0) {
			fprintf(stderr, "Could not open '%s': %s\n", filename, av_err2str(ret));
			return 1;
		}
	}

	// dump output format
	av_dump_format(oc, 0, filename, 1);

	// Write the stream header, if any.
	ret = avformat_write_header(oc, NULL);
	if (ret < 0) {
		fprintf(stderr, "Error occurred when opening output file: %s\n", av_err2str(ret));
		return 1;
	}

	// Read frames, decode, and re-encode
	frame_count = 1;
	while (av_read_frame(pFormatCtx, &packet) >= 0) {
		// Is this a packet from the video stream?
		if (packet.stream_index == videoStream) {
			// Decode video frame
			avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

			// Did we get a video frame?
			if (frameFinished) {

				// Initialize a new frame
				AVFrame* newFrame = avcodec_alloc_frame();

				int size = avpicture_get_size(video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);
				uint8_t* picture_buf = av_malloc(size);

				avpicture_fill((AVPicture *) newFrame, picture_buf, video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);

				// Copy only the frame content without additional fields
				av_picture_copy((AVPicture*) newFrame, (AVPicture*) pFrame, video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);

				// encode the image
				AVPacket pkt;
				int got_output;
				av_init_packet(&pkt);
				pkt.data = NULL; // packet data will be allocated by the encoder
				pkt.size = 0;

				// Set the frame's pts (this prevents the warning notice 'non-strictly-monotonic PTS'
				newFrame->pts = frame_count;

				ret = avcodec_encode_video2(video_st->codec, &pkt, newFrame, &got_output);
				if (ret < 0) {
					fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
					exit(1);
				}

				if (got_output) {
					if (video_st->codec->coded_frame->key_frame)
						pkt.flags |= AV_PKT_FLAG_KEY;
					pkt.stream_index = video_st->index;

					if (pkt.pts != AV_NOPTS_VALUE)
						pkt.pts = av_rescale_q(pkt.pts, video_st->codec->time_base, video_st->time_base);
					if (pkt.dts != AV_NOPTS_VALUE)
						pkt.dts = av_rescale_q(pkt.dts, video_st->codec->time_base, video_st->time_base);

					// Write the compressed frame to the media file.
					ret = av_interleaved_write_frame(oc, &pkt);
				} else {
					ret = 0;
				}
				if (ret != 0) {
					fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
					exit(1);
				}

				fprintf(stderr, "encoded frame #%d\n", frame_count);
				frame_count++;

				// Free the YUV picture frame we copied from the
				// decoder to eliminate the additional fields
				// and other packets/frames used
				av_free(picture_buf);
				av_free_packet(&pkt);
				av_free(newFrame);
			}
		}

		// Free the packet that was allocated by av_read_frame
		av_free_packet(&packet);
	}

	/* Write the trailer, if any. The trailer must be written before you
	 * close the CodecContexts open when you wrote the header; otherwise
	 * av_write_trailer() may try to use memory that was freed on
	 * av_codec_close(). */
	av_write_trailer(oc);

	/* Close the video codec (encoder) */
	if (video_st)
		close_video(oc, video_st);
	// Free the output streams.
	for (i = 0; i < oc->nb_streams; i++) {
		av_freep(&oc->streams[i]->codec);
		av_freep(&oc->streams[i]);
	}
	if (!(fmt->flags & AVFMT_NOFILE))
		/* Close the output file. */
		avio_close(oc->pb);
	/* free the output format context */
	av_free(oc);

	// Free the YUV frame populated by the decoder
	av_free(pFrame);

	// Close the video codec (decoder)
	avcodec_close(pCodecCtx);

	// Close the input video file
	avformat_close_input(&pFormatCtx);

	return 0;
}
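The helpers add_stream(), open_video() and close_video() used in this transcoding example are not shown. Minimal sketches of the two simpler ones follow, loosely modelled on the stock ffmpeg muxing example; these are assumptions rather than the author's exact code (add_stream() would additionally fill the codec context from the StreamInfo fields).

// Sketch: open the encoder that add_stream() selected and configured.
static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
	int ret = avcodec_open2(st->codec, codec, NULL);
	if (ret < 0) {
		fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
		exit(1);
	}
}

// Sketch: release the encoder; the stream itself is freed with the format context.
static void close_video(AVFormatContext *oc, AVStream *st)
{
	avcodec_close(st->codec);
}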
Example #22
0
int main( int argc, char *argv[] )
{
	AVFormatContext *pFormatCtx;
	int				i, audioStream;
	AVCodecContext	*pCodecCtx;
	AVCodec			*pCodec;
	AVPacket		*packet;
	uint8_t			*out_buffer;
	AVFrame			*pFrame;
	SDL_AudioSpec	wanted_spec;
	int ret;
	uint32_t len = 0;
	int got_picture;
	int index = 0;
	int64_t in_channel_layout;
	struct SwrContext* au_conert_ctx;

	FILE* pFile = NULL;
	char url[] = "F:/video/6s_kapian.flv";

	av_register_all();
	pFormatCtx = avformat_alloc_context();

	//Open
	if( avformat_open_input(&pFormatCtx, url, NULL, NULL) != 0 )
	{
		printf("Couldn't open input stream.\n");
		return -1;
	}

	//Find Stream information
	if( avformat_find_stream_info(pFormatCtx, NULL) < 0 )
	{
		printf("Couldn't find stream information.\n");
		return -1;
	}
	//Dump valid information into standard error
	av_dump_format(pFormatCtx, 0, url, false);

	//Find the first audio stream
	audioStream = -1;
	for( i = 0; i < pFormatCtx->nb_streams; ++i	)
		if( pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO )
		{
			audioStream = i;
			break;
		}

	if( audioStream == -1 )
	{
		printf("Didn't find a audio stream.\n");
		return -1;
	}

	//Get a pointer to the codec contex for the audio stream
	pCodecCtx = pFormatCtx->streams[audioStream]->codec;

	//Find the decoder for the audio stream
	pCodec = avcodec_find_decoder( pCodecCtx->codec_id );

	if( pCodec == NULL )
	{
		printf("Codec not found.\n");
		return -1;
	}

	//Open codec
	if( avcodec_open2(pCodecCtx, pCodec, NULL) < 0 )
	{
		printf("Could not open codec.\n");
		return -1;
	}

#if OUTPUT_PCM
	pFile=fopen("output.pcm", "wb");
#endif

	packet = (AVPacket *)av_malloc( sizeof(AVPacket) );
	av_init_packet(packet);

	//Out Audio Param
	uint64_t out_channel_layout = AV_CH_LAYOUT_STEREO;
	//nb_samples:
	int out_nb_samples = pCodecCtx->frame_size;
	AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;
	int out_sample_rate = 44100;
	int out_channels = av_get_channel_layout_nb_channels(out_channel_layout);

	//Out buffer size
	int out_buffer_size = av_samples_get_buffer_size( NULL, out_channels, out_nb_samples, out_sample_fmt, 1 );

	out_buffer = (uint8_t *)av_malloc( MAX_AUDIO_FRAME_SIZE );
	pFrame = av_frame_alloc();

	//SDL------------------
#if USE_SDL
	//Init
	if(SDL_Init( SDL_INIT_AUDIO | SDL_INIT_TIMER)) {  
		printf( "Could not initialize SDL - %s\n", SDL_GetError()); 
		return -1;
	}
	//SDL_AudioSpec
	wanted_spec.freq = out_sample_rate; 
	wanted_spec.format = AUDIO_S16SYS; 
	wanted_spec.channels = out_channels; 
	wanted_spec.silence = 0; 
	wanted_spec.samples = out_nb_samples; 
	wanted_spec.callback = fill_audio; 
	wanted_spec.userdata = pCodecCtx; 

	if (SDL_OpenAudio(&wanted_spec, NULL)<0){ 
		printf("can't open audio.\n"); 
		return -1; 
	} 
#endif

	in_channel_layout = av_get_default_channel_layout(pCodecCtx->channels);
	//swr

	au_conert_ctx = swr_alloc();
	au_conert_ctx = swr_alloc_set_opts( au_conert_ctx, out_channel_layout, out_sample_fmt, out_sample_rate,
		in_channel_layout, pCodecCtx->sample_fmt, pCodecCtx->sample_rate, 0, NULL);
	swr_init(au_conert_ctx);

	while( av_read_frame(pFormatCtx, packet) >= 0 ){
		if( packet->stream_index == audioStream ){
			ret = avcodec_decode_audio4(pCodecCtx, pFrame, &got_picture, packet);
			if( ret < 0 ){
				printf("Error in decoding audio frame.\n");
				return -1;
			}//end if
			if( got_picture > 0 ){
				swr_convert( au_conert_ctx, &out_buffer, MAX_AUDIO_FRAME_SIZE, (const uint8_t **)pFrame->data, pFrame->nb_samples );
#if 1
				printf("index:%5d\t pts:%lld\t packet size:%d\n",index,packet->pts,packet->size);
#endif
#if OUTPUT_PCM
				//Write PCM
				fwrite(out_buffer, 1, out_buffer_size, pFile);
#endif
				index++;
			}//end if
#if USE_SDL
			while(audio_len>0)//Wait until finish
				SDL_Delay(1); 

			//Set audio buffer (PCM data)
			audio_chunk = (Uint8 *) out_buffer; 
			//Audio buffer length
			audio_len =out_buffer_size;
			audio_pos = audio_chunk;

			//Play
			SDL_PauseAudio(0);
#endif
		}//end if
		av_free_packet(packet);
	}//end while

	swr_free(&au_conert_ctx);

#if USE_SDL
	SDL_CloseAudio();//Close SDL
	SDL_Quit();
#endif
	// Close file
#if OUTPUT_PCM
	fclose(pFile);
#endif
	av_free(out_buffer);
	// Close the codec
	avcodec_close(pCodecCtx);
	// Close the video file
	avformat_close_input(&pFormatCtx);

	return 0;
}
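The SDL callback fill_audio() registered in wanted_spec above, and the globals it shares with the main loop (audio_chunk, audio_pos, audio_len), are not shown. A typical implementation is sketched below: it copies the decoded PCM that the main loop exposes into SDL's stream buffer and advances the position; the declarations are assumptions based on how the variables are used.

static Uint8* audio_chunk;   // start of the decoded PCM block
static Uint32 audio_len;     // bytes still to be played from that block
static Uint8* audio_pos;     // current playback position inside the block

// Sketch: SDL pulls 'len' bytes; hand over as much decoded PCM as is available.
void fill_audio(void* udata, Uint8* stream, int len)
{
	memset(stream, 0, len);                        // start from silence before mixing
	if (audio_len == 0)
		return;
	len = ((Uint32)len > audio_len) ? (int)audio_len : len;
	SDL_MixAudio(stream, audio_pos, len, SDL_MIX_MAXVOLUME);
	audio_pos += len;
	audio_len -= len;
}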
Example #23
0
int main(int argc, char* argv[])
{
#ifdef _DEBUG
	_CrtSetDbgFlag ( _CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF );
	//_CrtSetBreakAlloc(1166);
#endif

	char in_url[500]={0};
	char out_url[500]={0};
	int limit_num=0;
	int width=0;
	int height=0;
	bool limit_is=false;
	bool graphically_ti=false;
	bool graphically_si=false;
	bool isinterval=true;
	int intervalcnt=5;
	// parse command-line arguments ------------------
	extern char *optarg;
	int opt;
	//--------------------------
	if(argc==1){
		usage();
		return 0;
	}

	while ((opt =getopt(argc, argv,"i:o:g:l:n:x:y:h")) != -1)
	{
		switch (opt)
		{
		case 'h':{
			usage();
			return 0;
				 }
		case 'i':{
			strcpy(in_url,optarg);
			break;
				 }
		case 'o':{
			strcpy(out_url,optarg);
			break;
				 }
		case 'l':{
			limit_num=atoi(optarg);
			limit_is=true;
			break;
				 }
		case 'n':{
			intervalcnt=atoi(optarg);
			break;
				 }
		case 'g':{
			if(strcmp(optarg,"ti")==0){
				graphically_ti=true;
			}else if(strcmp(optarg,"si")==0){
				graphically_si=true;
			}
			break;
				 }
		case 'x':{
			width=atoi(optarg);
			break;
				 }
		case 'y':{
			height=atoi(optarg);
			break;
				 }
		default:
			printf("Unknown: %c\n", opt);
			usage();
			return 0;
		}
	}

	if(strcmp(in_url,"")==0){
		printf("Input Video's URL is not set. Exit.\n");
		return 0;
	}

	if(strcmp(out_url,"")==0){
		printf("Output .csv file is not set. Default is {Input Name}.csv\n");
		char *suffix=strchr(in_url, '.');
		*suffix='\0';
		strcpy(out_url,in_url);
		*suffix='.';
		sprintf(out_url,"%s.csv",out_url);
	}
	
	AVFormatContext	*pFormatCtx;
	int				i, video_stream,audio_stream;
	AVCodecContext	*pCodecCtx,*pCodecCtx_au;
	AVCodec			*pCodec,*pCodec_au;

	av_register_all();
	pFormatCtx = avformat_alloc_context();
	if(avformat_open_input(&pFormatCtx,in_url,NULL,NULL)!=0){
		printf("Couldn't open file.\n");
		return FALSE;
	}
	if(strcmp(pFormatCtx->iformat->name,"rawvideo")!=0)
	{
		if(av_find_stream_info(pFormatCtx)<0)
		{
			printf("Couldn't find stream information.\n");
			return FALSE;
		}
	}
	
	video_stream=-1;
	audio_stream=-1;
	for(i=0; i<pFormatCtx->nb_streams; i++) {
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
			video_stream=i;
		}if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO){
			audio_stream=i;
		}
	}
	if(video_stream==-1)
	{
		printf("Didn't find a video stream.\n");
		return FALSE;
	}
	if(video_stream!=-1){

		pCodecCtx=pFormatCtx->streams[video_stream]->codec;
		pCodecCtx->width=width;
		pCodecCtx->height=height;
		pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
		if(pCodec==NULL)
		{
			printf("Codec not found.\n");
			return FALSE;
		}
		if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)
		{
			printf("Could not open codec.\n");
			return FALSE;
		}

		//------------SDL----------------
		SDLParam sdlparam={NULL,NULL,{0,0,0,0},graphically_ti,graphically_si,isinterval,NULL,NULL,0,0,0,0};
		if(graphically_ti==true||graphically_si==true){
			sdlparam.graphically_si=graphically_si;
			sdlparam.graphically_ti=graphically_ti;
			sdlparam.show_w=pCodecCtx->width-2*PADDING;
			sdlparam.show_h=pCodecCtx->height-2*PADDING;
			sdlparam.pixel_w=pCodecCtx->width-2*PADDING;
			sdlparam.pixel_h=pCodecCtx->height-2*PADDING;
			//FIX
			sdlparam.show_YBuffer=(char *)malloc(sdlparam.pixel_w*sdlparam.pixel_h);
			sdlparam.show_UVBuffer=(char *)malloc(sdlparam.pixel_w*sdlparam.pixel_h/2);
			memset(sdlparam.show_UVBuffer,0x80,sdlparam.pixel_w*sdlparam.pixel_h/2);

			SDL_Thread *video_tid = SDL_CreateThread(show_thread,&sdlparam);
		}

		//---------------
		float* silist=(float*)malloc(FRAMENUM*sizeof(float));
		float* tilist=(float*)malloc((FRAMENUM-1)*sizeof(float));
		float* old_silist;
		float* old_tilist;

		AVFrame	*pFrame,*pFrameYUV;
		pFrame=avcodec_alloc_frame();
		pFrameYUV=avcodec_alloc_frame();
		uint8_t *out_buffer;
		out_buffer=(uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
		avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);

		int ret, got_picture;
		static struct SwsContext *img_convert_ctx;
		int y_size = pCodecCtx->width * pCodecCtx->height;

		AVPacket *packet=(AVPacket *)av_malloc(sizeof(AVPacket));
		av_new_packet(packet, y_size);
		
		//Used when computing TI (holds the previous frame's luma)
		int prev_has=0;
		uint8_t *prev_ydata=(uint8_t *)av_malloc(pCodecCtx->width*pCodecCtx->height);

		img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL); 
		
		//Open the output file
		FILE *fp=fopen(out_url,"wb+");
		fprintf(fp,"TI,SI\n");
		
		//Frame counter
		int framecnt=0;
		int realloc_time=1;
		while(av_read_frame(pFormatCtx, packet)>=0&&(framecnt<limit_num||!limit_is))
		{
			if(packet->stream_index==video_stream)
			{
				ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
				if(ret < 0)
				{
					printf("Decode Error.\n");
					return -1;
				}
				if(got_picture)
				{
					sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
					//TI needs a previous frame before it can be computed
					if(prev_has==1){
						if(framecnt%intervalcnt==0){
							sdlparam.isinterval=false;
						}else{
							sdlparam.isinterval=true;
						}
						float ti=0,si=0;
						int retval=tisi((char *)pFrameYUV->data[0],(char *)prev_ydata,pCodecCtx->width,pCodecCtx->height,sdlparam,ti,si);
						if(retval==-1)
							break;

						if(framecnt>=FRAMENUM*realloc_time)
						{
							realloc_time++;
							old_tilist=tilist;
							old_silist=silist;
							if( (tilist = (float*)realloc( tilist, (FRAMENUM*realloc_time-1)*sizeof(float)))==NULL)								
							{
								free( old_tilist );  // free original block
								return -1;
							}
							if( (silist = (float*)realloc( silist, (FRAMENUM*realloc_time)*sizeof(float)))==NULL)								
							{
								free( old_silist );  // free original block
								return -1;
							}
						}
						tilist[framecnt]=ti;
						silist[framecnt]=si;
						printf("%f,%f\n",ti,si);
						fprintf(fp,"%f,%f\n",ti,si);
						framecnt++;
					}else{
						prev_has=1;
					}
					//Copy the luma data for the next TI calculation
					memcpy(prev_ydata,pFrameYUV->data[0],pCodecCtx->width*pCodecCtx->height);
				}
			}
			av_free_packet(packet);
		}
		sws_freeContext(img_convert_ctx);
		
		//Compute the averages and maxima
		float sum=0;
		for (int i=0;i<framecnt;i++)
			sum +=silist[i];
		float avg_si=sum/framecnt;
		qsort(silist,framecnt,sizeof(float),comp);//only framecnt entries are valid
		float max_si=silist[framecnt-1];

		sum=0;
		for (int i=0;i<framecnt-1;i++)
			sum +=tilist[i];
		float avg_ti=sum/(framecnt-1);
		qsort(tilist,framecnt,sizeof(float),comp);//only framecnt entries are valid
		float max_ti=tilist[framecnt-1];

		fprintf(fp,"TI_AVG,SI_AVG\n");
		fprintf(fp,"%f,%f\n",avg_ti,avg_si);
		fprintf(fp,"TI_MAX,SI_MAX\n");
		fprintf(fp,"%f,%f\n",max_ti,max_si);
		fclose(fp);

		av_free(out_buffer);
		av_free(pFrameYUV);
		avcodec_close(pCodecCtx);

		if(graphically_ti==true||graphically_si==true){
			free(sdlparam.show_YBuffer);
			free(sdlparam.show_UVBuffer);
			SDL_Event event;
			event.type=SDL_QUIT;
			SDL_PushEvent(&event);
		}
		free(silist);
		free(tilist);
	}
	avformat_close_input(&pFormatCtx);
	

#ifdef _DEBUG
	_CrtDumpMemoryLeaks();//When a debug run reaches this point, leak information is reported
#endif

	return 0;
}
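The per-frame values in Example #23 come from an external tisi() helper that is not part of this listing. For orientation only, here is a minimal sketch of the usual ITU-T P.910 style computation it presumably performs: SI is the spatial standard deviation of the Sobel-filtered luma, and TI is the spatial standard deviation of the luma difference against the previous frame. The function name, the assumption that the plane stride equals the width, and the border handling are illustrative, not taken from the example.

#include <math.h>
#include <stdint.h>

/* Sketch of a P.910-style per-frame TI/SI measurement (assumes w,h > 2 and
 * tightly packed 8-bit luma planes, i.e. stride == width). */
static void tisi_sketch(const uint8_t *cur, const uint8_t *prev,
                        int w, int h, float *ti, float *si)
{
    double sum = 0, sq = 0, var;
    long   n   = 0;

    /* SI: standard deviation of the Sobel gradient magnitude (interior pixels only) */
    for (int y = 1; y < h - 1; y++) {
        for (int x = 1; x < w - 1; x++) {
            const uint8_t *p = cur + y * w + x;
            int gx = -p[-w-1] - 2*p[-1] - p[w-1] + p[-w+1] + 2*p[1] + p[w+1];
            int gy = -p[-w-1] - 2*p[-w] - p[-w+1] + p[w-1] + 2*p[w] + p[w+1];
            double g = sqrt((double)gx*gx + (double)gy*gy);
            sum += g; sq += g*g; n++;
        }
    }
    var = sq/n - (sum/n)*(sum/n);
    *si = (float)sqrt(var > 0 ? var : 0);

    /* TI: standard deviation of the pixel-wise luma difference to the previous frame */
    sum = sq = 0; n = 0;
    for (long i = 0; i < (long)w*h; i++) {
        double d = (double)cur[i] - (double)prev[i];
        sum += d; sq += d*d; n++;
    }
    var = sq/n - (sum/n)*(sum/n);
    *ti = (float)sqrt(var > 0 ? var : 0);
}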
Example #24
0
static int init(struct dec_audio *da, const char *decoder)
{
    struct spdifContext *spdif_ctx = talloc_zero(NULL, struct spdifContext);
    da->priv = spdif_ctx;
    spdif_ctx->log = da->log;

    AVFormatContext *lavf_ctx  = avformat_alloc_context();
    if (!lavf_ctx)
        goto fail;

    lavf_ctx->oformat = av_guess_format("spdif", NULL, NULL);
    if (!lavf_ctx->oformat)
        goto fail;

    spdif_ctx->lavf_ctx = lavf_ctx;

    void *buffer = av_mallocz(OUTBUF_SIZE);
    if (!buffer)
        abort();
    lavf_ctx->pb = avio_alloc_context(buffer, OUTBUF_SIZE, 1, spdif_ctx, NULL,
                                      write_packet, NULL);
    if (!lavf_ctx->pb) {
        av_free(buffer);
        goto fail;
    }

    // Request minimal buffering (not available on Libav)
#if LIBAVFORMAT_VERSION_MICRO >= 100
    lavf_ctx->pb->direct = 1;
#endif

    AVStream *stream = avformat_new_stream(lavf_ctx, 0);
    if (!stream)
        goto fail;

    stream->codec->codec_id = mp_codec_to_av_codec_id(decoder);

    AVDictionary *format_opts = NULL;

    int num_channels = 0;
    int sample_format = 0;
    int samplerate = 0;
    switch (stream->codec->codec_id) {
    case AV_CODEC_ID_AAC:
        sample_format                   = AF_FORMAT_S_AAC;
        samplerate                      = 48000;
        num_channels                    = 2;
        break;
    case AV_CODEC_ID_AC3:
        sample_format                   = AF_FORMAT_S_AC3;
        samplerate                      = 48000;
        num_channels                    = 2;
        break;
    case AV_CODEC_ID_DTS:
        if (da->opts->dtshd) {
            av_dict_set(&format_opts, "dtshd_rate", "768000", 0); // 4*192000
            sample_format                   = AF_FORMAT_S_DTSHD;
            samplerate                      = 192000;
            num_channels                    = 2*4;
        } else {
            sample_format                   = AF_FORMAT_S_DTS;
            samplerate                      = 48000;
            num_channels                    = 2;
        }
        break;
    case AV_CODEC_ID_EAC3:
        sample_format                   = AF_FORMAT_S_EAC3;
        samplerate                      = 192000;
        num_channels                    = 2;
        break;
    case AV_CODEC_ID_MP3:
        sample_format                   = AF_FORMAT_S_MP3;
        samplerate                      = 48000;
        num_channels                    = 2;
        break;
    case AV_CODEC_ID_TRUEHD:
        sample_format                   = AF_FORMAT_S_TRUEHD;
        samplerate                      = 192000;
        num_channels                    = 8;
        break;
    default:
        abort();
    }
    mp_audio_set_num_channels(&spdif_ctx->fmt, num_channels);
    mp_audio_set_format(&spdif_ctx->fmt, sample_format);
    spdif_ctx->fmt.rate = samplerate;

    if (avformat_write_header(lavf_ctx, &format_opts) < 0) {
        MP_FATAL(da, "libavformat spdif initialization failed.\n");
        av_dict_free(&format_opts);
        goto fail;
    }
    av_dict_free(&format_opts);

    spdif_ctx->need_close = true;

    return 1;

fail:
    uninit(da);
    return 0;
}
Example #25
0
int main(int argc, char *argv[])
{
    IDeckLinkIterator *deckLinkIterator = CreateDeckLinkIteratorInstance();
    DeckLinkCaptureDelegate *delegate;
    BMDDisplayMode selectedDisplayMode = bmdModeNTSC;
    int displayModeCount               = 0;
    int exitStatus                     = 1;
    int aconnection                    = 0, vconnection = 0, camera = 0, i = 0;
    int ch;
    AVDictionary *opts = NULL;
    BMDPixelFormat pix = bmdFormat8BitYUV;
    HRESULT result;
    pthread_t th;

    pthread_mutex_init(&sleepMutex, NULL);
    pthread_cond_init(&sleepCond, NULL);
    av_register_all();

    if (!deckLinkIterator) {
        fprintf(stderr,
                "This application requires the DeckLink drivers installed.\n");
        goto bail;
    }

    // Parse command line options
    while ((ch = getopt(argc, argv, "?hvc:s:f:a:m:n:p:M:F:C:A:V:o:w:S:d:")) != -1) {
        switch (ch) {
        case 'v':
            g_verbose = true;
            break;
        case 'm':
            g_videoModeIndex = atoi(optarg);
            break;
        case 'c':
            g_audioChannels = atoi(optarg);
            if (g_audioChannels != 2 &&
                g_audioChannels != 8 &&
                g_audioChannels != 16) {
                fprintf(
                    stderr,
                    "Invalid argument: Audio Channels must be either 2, 8 or 16\n");
                goto bail;
            }
            break;
        case 's':
            g_audioSampleDepth = atoi(optarg);
            switch (g_audioSampleDepth) {
            case 16:
                sample_fmt = AV_SAMPLE_FMT_S16;
                break;
            case 32:
                sample_fmt = AV_SAMPLE_FMT_S32;
                break;
            default:
                fprintf(stderr,
                        "Invalid argument:"
                        " Audio Sample Depth must be either 16 bits"
                        " or 32 bits\n");
                goto bail;
            }
            break;
        case 'p':
            switch (atoi(optarg)) {
            case  8:
                pix     = bmdFormat8BitYUV;
                pix_fmt = AV_PIX_FMT_UYVY422;
                break;
            case 10:
                pix     = bmdFormat10BitYUV;
                pix_fmt = AV_PIX_FMT_YUV422P10;
                break;
            default:
                if (!strcmp("rgb10", optarg)) {
                    pix     = bmdFormat10BitRGB;
                    pix_fmt = AV_PIX_FMT_RGB48;
                    break;
                }
                if (!strcmp("yuv10", optarg)) {
                    pix     = bmdFormat10BitYUV;
                    pix_fmt = AV_PIX_FMT_YUV422P10;
                    break;
                }
                if (!strcmp("yuv8", optarg)) {
                    pix     = bmdFormat8BitYUV;
                    pix_fmt = AV_PIX_FMT_UYVY422;
                    break;
                }
                if (!strcmp("rgb8", optarg)) {
                    pix     = bmdFormat8BitARGB;
                    pix_fmt = AV_PIX_FMT_ARGB;
                    break;
                }

                fprintf(
                    stderr,
                    "Invalid argument: Pixel Format Depth must be either 8 bits or 10 bits\n");
                goto bail;
            }
            break;
        case 'f':
            g_videoOutputFile = optarg;
            break;
        case 'n':
            g_maxFrames = atoi(optarg);
            break;
        case 'M':
            g_memoryLimit = atoi(optarg) * 1024 * 1024 * 1024L;
            break;
        case 'F':
            fmt = av_guess_format(optarg, NULL, NULL);
            break;
        case 'A':
            aconnection = atoi(optarg);
            break;
        case 'V':
            vconnection = atoi(optarg);
            break;
        case 'C':
            camera = atoi(optarg);
            break;
        case 'S':
            serial_fd = open(optarg, O_RDWR | O_NONBLOCK);
            break;
        case 'o':
            if (av_dict_parse_string(&opts, optarg, "=", ":", 0) < 0) {
                fprintf(stderr, "Cannot parse option string %s\n",
                        optarg);
                goto bail;
            }
            break;
        case 'w':
            wallclock = true;
            break;
        case 'd':
            draw_bars = atoi(optarg);
            break;
        case '?':
        case 'h':
            usage(0);
        }
    }

    if (serial_fd > 0 && wallclock) {
        fprintf(stderr, "%s",
                "Wallclock and serial are not supported together\n"
                "Please disable either.\n");
        exit(1);
    }

    /* Connect to the first DeckLink instance */
    do
        result = deckLinkIterator->Next(&deckLink);
    while (i++ < camera);

    if (result != S_OK) {
        fprintf(stderr, "No DeckLink PCI cards found.\n");
        goto bail;
    }

    if (deckLink->QueryInterface(IID_IDeckLinkInput,
                                 (void **)&deckLinkInput) != S_OK) {
        goto bail;
    }

    result = deckLink->QueryInterface(IID_IDeckLinkConfiguration,
                                      (void **)&deckLinkConfiguration);
    if (result != S_OK) {
        fprintf(
            stderr,
            "Could not obtain the IDeckLinkConfiguration interface - result = %08x\n",
            result);
        goto bail;
    }

    result = S_OK;
    switch (aconnection) {
    case 1:
        result = DECKLINK_SET_AUDIO_CONNECTION(bmdAudioConnectionAnalog);
        break;
    case 2:
        result = DECKLINK_SET_AUDIO_CONNECTION(bmdAudioConnectionEmbedded);
        break;
    case 3:
        result = DECKLINK_SET_AUDIO_CONNECTION(bmdAudioConnectionAESEBU);
        break;
    default:
        // do not change it
        break;
    }
    if (result != S_OK) {
        fprintf(stderr, "Failed to set audio input - result = %08x\n", result);
        goto bail;
    }

    result = S_OK;
    switch (vconnection) {
    case 1:
        result = DECKLINK_SET_VIDEO_CONNECTION(bmdVideoConnectionComposite);
        break;
    case 2:
        result = DECKLINK_SET_VIDEO_CONNECTION(bmdVideoConnectionComponent);
        break;
    case 3:
        result = DECKLINK_SET_VIDEO_CONNECTION(bmdVideoConnectionHDMI);
        break;
    case 4:
        result = DECKLINK_SET_VIDEO_CONNECTION(bmdVideoConnectionSDI);
        break;
    case 5:
        result = DECKLINK_SET_VIDEO_CONNECTION(bmdVideoConnectionOpticalSDI);
        break;
    case 6:
        result = DECKLINK_SET_VIDEO_CONNECTION(bmdVideoConnectionSVideo);
        break;
    default:
        // do not change it
        break;
    }
    if (result != S_OK) {
        fprintf(stderr, "Failed to set video input - result %08x\n", result);
        goto bail;
    }

    delegate = new DeckLinkCaptureDelegate();
    deckLinkInput->SetCallback(delegate);

    // Obtain an IDeckLinkDisplayModeIterator to enumerate the display modes supported on output
    result = deckLinkInput->GetDisplayModeIterator(&displayModeIterator);
    if (result != S_OK) {
        fprintf(
            stderr,
            "Could not obtain the video output display mode iterator - result = %08x\n",
            result);
        goto bail;
    }

    if (!g_videoOutputFile) {
        fprintf(stderr,
                "Missing argument: Please specify output path using -f\n");
        goto bail;
    }

    if (!fmt) {
        fmt = av_guess_format(NULL, g_videoOutputFile, NULL);
        if (!fmt) {
            fprintf(
                stderr,
                "Unable to guess output format, please specify explicitly using -F\n");
            goto bail;
        }
    }

    if (g_videoModeIndex < 0) {
        fprintf(stderr, "No video mode specified\n");
        usage(0);
    }

    while (displayModeIterator->Next(&displayMode) == S_OK) {
        if (g_videoModeIndex == displayModeCount) {
            selectedDisplayMode = displayMode->GetDisplayMode();
            break;
        }
        displayModeCount++;
        displayMode->Release();
    }

    result = deckLinkInput->EnableVideoInput(selectedDisplayMode, pix, 0);
    if (result != S_OK) {
        fprintf(stderr,
                "Failed to enable video input. Is another application using "
                "the card?\n");
        goto bail;
    }

    result = deckLinkInput->EnableAudioInput(bmdAudioSampleRate48kHz,
                                             g_audioSampleDepth,
                                             g_audioChannels);
    if (result != S_OK) {
        fprintf(stderr,
                "Failed to enable audio input. Is another application using "
                "the card?\n");
        goto bail;
    }

    oc          = avformat_alloc_context();
    oc->oformat = fmt;

    snprintf(oc->filename, sizeof(oc->filename), "%s", g_videoOutputFile);


    switch (pix) {
    case bmdFormat8BitARGB:
    case bmdFormat8BitYUV:
        fmt->video_codec = AV_CODEC_ID_RAWVIDEO;
        break;
    case bmdFormat10BitYUV:
        fmt->video_codec = AV_CODEC_ID_V210;
        break;
    case bmdFormat10BitRGB:
        fmt->video_codec = AV_CODEC_ID_R210;
        break;
    }

    fmt->audio_codec = (sample_fmt == AV_SAMPLE_FMT_S16 ? AV_CODEC_ID_PCM_S16LE : AV_CODEC_ID_PCM_S32LE);

    video_st = add_video_stream(oc, fmt->video_codec);
    audio_st = add_audio_stream(oc, fmt->audio_codec);

    if (serial_fd > 0 || wallclock)
        data_st = add_data_stream(oc, AV_CODEC_ID_TEXT);

    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, oc->filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", oc->filename);
            exit(1);
        }
    }

    avformat_write_header(oc, &opts);
    avpacket_queue_init(&queue);

    result = deckLinkInput->StartStreams();
    if (result != S_OK) {
        goto bail;
    }
    // All Okay.
    exitStatus = 0;

    if (pthread_create(&th, NULL, push_packet, oc))
        goto bail;

    // Block main thread until signal occurs
    pthread_mutex_lock(&sleepMutex);
    set_signal();
    pthread_cond_wait(&sleepCond, &sleepMutex);
    pthread_mutex_unlock(&sleepMutex);
    deckLinkInput->StopStreams();
    fprintf(stderr, "Stopping Capture\n");
    avpacket_queue_end(&queue);

bail:
    if (displayModeIterator != NULL) {
        displayModeIterator->Release();
        displayModeIterator = NULL;
    }

    if (deckLinkInput != NULL) {
        deckLinkInput->Release();
        deckLinkInput = NULL;
    }

    if (deckLink != NULL) {
        deckLink->Release();
        deckLink = NULL;
    }

    if (deckLinkIterator != NULL) {
        deckLinkIterator->Release();
    }

    if (oc != NULL) {
        av_write_trailer(oc);
        if (!(fmt->flags & AVFMT_NOFILE)) {
            /* close the output file */
            avio_close(oc->pb);
        }
    }

    return exitStatus;
}
Example #26
0
static int init(sh_audio_t *sh)
{
    int i, x, in_size, srate, bps, *dtshd_rate;
    unsigned char *start;
    double pts;
    static const struct {
        const char *name;
        enum CodecID id;
    } fmt_id_type[] = {
        { "aac" , AV_CODEC_ID_AAC    },
        { "ac3" , AV_CODEC_ID_AC3    },
        { "dca" , AV_CODEC_ID_DTS    },
        { "eac3", AV_CODEC_ID_EAC3   },
        { "mpa" , AV_CODEC_ID_MP3    },
        { "thd" , AV_CODEC_ID_TRUEHD },
        { NULL  , 0 }
    };
    AVFormatContext     *lavf_ctx  = NULL;
    AVStream            *stream    = NULL;
    const AVOption      *opt       = NULL;
    struct spdifContext *spdif_ctx = NULL;

    spdif_ctx = av_mallocz(sizeof(*spdif_ctx));
    if (!spdif_ctx)
        goto fail;
    spdif_ctx->lavf_ctx = avformat_alloc_context();
    if (!spdif_ctx->lavf_ctx)
        goto fail;

    sh->context = spdif_ctx;
    lavf_ctx    = spdif_ctx->lavf_ctx;

    init_avformat();
    lavf_ctx->oformat = av_guess_format(FILENAME_SPDIFENC, NULL, NULL);
    if (!lavf_ctx->oformat)
        goto fail;
    lavf_ctx->priv_data = av_mallocz(lavf_ctx->oformat->priv_data_size);
    if (!lavf_ctx->priv_data)
        goto fail;
    lavf_ctx->pb = avio_alloc_context(spdif_ctx->pb_buffer, OUTBUF_SIZE, 1, spdif_ctx,
                                      read_packet, write_packet, seek);
    if (!lavf_ctx->pb)
        goto fail;
    stream = avformat_new_stream(lavf_ctx, 0);
    if (!stream)
        goto fail;
    lavf_ctx->duration   = AV_NOPTS_VALUE;
    lavf_ctx->start_time = AV_NOPTS_VALUE;
    for (i = 0; fmt_id_type[i].name; i++) {
        if (!strcmp(sh->codec->dll, fmt_id_type[i].name)) {
            lavf_ctx->streams[0]->codec->codec_id = fmt_id_type[i].id;
            break;
        }
    }
    lavf_ctx->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
    if (AVERROR_PATCHWELCOME == lavf_ctx->oformat->write_header(lavf_ctx)) {
        mp_msg(MSGT_DECAUDIO,MSGL_INFO,
               "This codec is not supported by spdifenc.\n");
        goto fail;
    }

    // get sample_rate & bitrate from parser
    x = ds_get_packet_pts(sh->ds, &start, &pts);
    in_size = x;
    if (x <= 0) {
        pts = MP_NOPTS_VALUE;
        x = 0;
    }
    ds_parse(sh->ds, &start, &x, pts, 0);
    srate = 48000;    //fake value
    bps   = 768000/8; //fake value
    if (x && sh->avctx) { // we have parser and large enough buffer
        if (sh->avctx->sample_rate < 44100) {
            mp_msg(MSGT_DECAUDIO,MSGL_INFO,
                   "This stream sample_rate[%d Hz] may be broken. "
                   "Force reset 48000Hz.\n",
                   sh->avctx->sample_rate);
            srate = 48000; //fake value
        } else
            srate = sh->avctx->sample_rate;
        bps = sh->avctx->bit_rate/8;
    }
    sh->ds->buffer_pos -= in_size;

    switch (lavf_ctx->streams[0]->codec->codec_id) {
    case AV_CODEC_ID_AAC:
        spdif_ctx->iec61937_packet_size = 16384;
        sh->sample_format               = AF_FORMAT_IEC61937_LE;
        sh->samplerate                  = srate;
        sh->channels                    = 2;
        sh->i_bps                       = bps;
        break;
    case AV_CODEC_ID_AC3:
        spdif_ctx->iec61937_packet_size = 6144;
        sh->sample_format               = AF_FORMAT_AC3_LE;
        sh->samplerate                  = srate;
        sh->channels                    = 2;
        sh->i_bps                       = bps;
        break;
    case AV_CODEC_ID_DTS: // FORCE USE DTS-HD
        opt = av_opt_find(&lavf_ctx->oformat->priv_class,
                          "dtshd_rate", NULL, 0, 0);
        if (!opt)
            goto fail;
        dtshd_rate                      = (int*)(((uint8_t*)lavf_ctx->priv_data) +
                                          opt->offset);
        *dtshd_rate                     = 192000*4;
        spdif_ctx->iec61937_packet_size = 32768;
        sh->sample_format               = AF_FORMAT_IEC61937_LE;
        sh->samplerate                  = 192000; // DTS core requires 48000
        sh->channels                    = 2*4;
        sh->i_bps                       = bps;
        break;
    case AV_CODEC_ID_EAC3:
        spdif_ctx->iec61937_packet_size = 24576;
        sh->sample_format               = AF_FORMAT_IEC61937_LE;
        sh->samplerate                  = 192000;
        sh->channels                    = 2;
        sh->i_bps                       = bps;
        break;
    case AV_CODEC_ID_MP3:
        spdif_ctx->iec61937_packet_size = 4608;
        sh->sample_format               = AF_FORMAT_MPEG2;
        sh->samplerate                  = srate;
        sh->channels                    = 2;
        sh->i_bps                       = bps;
        break;
    case AV_CODEC_ID_TRUEHD:
        spdif_ctx->iec61937_packet_size = 61440;
        sh->sample_format               = AF_FORMAT_IEC61937_LE;
        sh->samplerate                  = 192000;
        sh->channels                    = 8;
        sh->i_bps                       = bps;
        break;
    default:
        break;
    }

    return 1;

fail:
    uninit(sh);
    return 0;
}
Example #27
0
MediaRet MediaRecorder::setup_sound_stream(const char *fname, AVOutputFormat *fmt)
{
    oc = avformat_alloc_context();
    if(!oc)
	return MRET_ERR_NOMEM;
    oc->oformat = fmt;
    strncpy(oc->filename, fname, sizeof(oc->filename) - 1);
    oc->filename[sizeof(oc->filename) - 1] = 0;
    if(fmt->audio_codec == CODEC_ID_NONE)
	return MRET_OK;

    AVCodecContext *ctx;
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53,10,0)
    aud_st = av_new_stream(oc, 1);
#else
    aud_st = avformat_new_stream(oc, NULL);
#endif
    if(!aud_st) {
	avformat_free_context(oc);
	oc = NULL;
	return MRET_ERR_NOMEM;
    }

    AVCodec *codec = avcodec_find_encoder(fmt->audio_codec);

    ctx = aud_st->codec;
    ctx->codec_id = fmt->audio_codec;
    ctx->codec_type = AVMEDIA_TYPE_AUDIO;
    // Some encoders don't like int16_t (SAMPLE_FMT_S16)
    ctx->sample_fmt = codec->sample_fmts[0];
    // This was changed in the initial ffmpeg 3.0 update,
    // but shouldn't (as far as I'm aware) cause problems with older versions
    ctx->bit_rate = 128000; // arbitrary; in case we're generating mp3
    ctx->sample_rate = soundGetSampleRate();
    ctx->channels = 2;
    ctx->time_base.den = 60;
    ctx->time_base.num = 1;
    if(fmt->flags & AVFMT_GLOBALHEADER)
	ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(53,6,0)
    if(!codec || avcodec_open(ctx, codec)) {
#else
    if(!codec || avcodec_open2(ctx, codec, NULL)) {
#endif
	avformat_free_context(oc);
	oc = NULL;
	return MRET_ERR_NOCODEC;
    }

    return MRET_OK;
}

MediaRet MediaRecorder::setup_video_stream(const char *fname, int w, int h, int d)
{
    AVCodecContext *ctx;
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53,10,0)
    vid_st = av_new_stream(oc, 0);
#else
    vid_st = avformat_new_stream(oc, NULL);
#endif
    if(!vid_st) {
	avformat_free_context(oc);
	oc = NULL;
	return MRET_ERR_NOMEM;
    }
    ctx = vid_st->codec;
    ctx->codec_id = oc->oformat->video_codec;
    ctx->codec_type = AVMEDIA_TYPE_VIDEO;
    ctx->width = w;
    ctx->height = h;
    ctx->time_base.den = 60;
    ctx->time_base.num = 1;
    // dunno if any of these help; some output just looks plain crappy
    // will have to investigate further
    ctx->bit_rate = 400000;
    ctx->gop_size = 12;
    ctx->max_b_frames = 2;
    switch(d) {
    case 16:
	// FIXME: test & make endian-neutral
	pixfmt = PIX_FMT_RGB565LE;
	break;
    case 24:
	pixfmt = PIX_FMT_RGB24;
	break;
    case 32:
    default: // should never be anything else
	pixfmt = PIX_FMT_RGBA;
	break;
    }
    ctx->pix_fmt = pixfmt;
    pixsize = d >> 3;
    linesize = pixsize * w;
    ctx->max_b_frames = 2;
    if(oc->oformat->flags & AVFMT_GLOBALHEADER)
	ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;

    AVCodec *codec = avcodec_find_encoder(oc->oformat->video_codec);
    // make sure RGB is supported (mostly not)
    if(codec->pix_fmts) {
	const enum PixelFormat *p;
#if LIBAVCODEC_VERSION_MAJOR < 55
	int64_t mask = 0;
#endif
	for(p = codec->pix_fmts; *p != -1; p++) {
	    // may get complaints about 1LL; thus the cast
#if LIBAVCODEC_VERSION_MAJOR < 55
	    mask |= ((int64_t)1) << *p;
#endif
	    if(*p == pixfmt)
		break;
	}
	if(*p == -1) {
	    // if not supported, use a converter to the next best format
	    // this is swscale, the converter used by the output demo
#if LIBAVCODEC_VERSION_MAJOR < 55
	    enum PixelFormat dp = (PixelFormat)avcodec_find_best_pix_fmt(mask, pixfmt, 0, NULL);
#else
#if LIBAVCODEC_VERSION_MICRO >= 100
// FFmpeg
		enum AVPixelFormat dp = avcodec_find_best_pix_fmt_of_list(codec->pix_fmts, pixfmt, 0, NULL);
#else
// Libav
		enum AVPixelFormat dp = avcodec_find_best_pix_fmt2(codec->pix_fmts, pixfmt, 0, NULL);
#endif
#endif
	    if(dp == -1)
		dp = codec->pix_fmts[0];
	    if(!(convpic = avcodec_alloc_frame()) ||
	       avpicture_alloc((AVPicture *)convpic, dp, w, h) < 0) {
		avformat_free_context(oc);
		oc = NULL;
		return MRET_ERR_NOMEM;
	    }
#if LIBSWSCALE_VERSION_INT < AV_VERSION_INT(0, 12, 0)
	    converter = sws_getContext(w, h, pixfmt, w, h, dp, SWS_BICUBIC,
				       NULL, NULL, NULL);
#else
	    converter = sws_alloc_context();
	    // what a convoluted, inefficient way to set options
	    av_opt_set_int(converter, "sws_flags", SWS_BICUBIC, 0);
	    av_opt_set_int(converter, "srcw", w, 0);
	    av_opt_set_int(converter, "srch", h, 0);
	    av_opt_set_int(converter, "dstw", w, 0);
	    av_opt_set_int(converter, "dsth", h, 0);
	    av_opt_set_int(converter, "src_format", pixfmt, 0);
	    av_opt_set_int(converter, "dst_format", dp, 0);
	    sws_init_context(converter, NULL, NULL);
#endif
	    ctx->pix_fmt = dp;
	}
    }
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(53,6,0)
    if(!codec || avcodec_open(ctx, codec)) {
#else
    if(!codec || avcodec_open2(ctx, codec, NULL)) {
#endif
	avformat_free_context(oc);
	oc = NULL;
	return MRET_ERR_NOCODEC;
    }

    return MRET_OK;
}

MediaRet MediaRecorder::finish_setup(const char *fname)
{
    if(audio_buf)
	free(audio_buf);
    if(audio_buf2)
	free(audio_buf2);
    audio_buf2 = NULL;
    in_audio_buf2 = 0;
    if(aud_st) {
	frame_len = aud_st->codec->frame_size * 4;
	sample_len = soundGetSampleRate() * 4 / 60;
	switch(aud_st->codec->codec_id) {
	case CODEC_ID_PCM_S16LE:
	case CODEC_ID_PCM_S16BE:
	case CODEC_ID_PCM_U16LE:
	case CODEC_ID_PCM_U16BE:
	    frame_len = sample_len;
	}
	audio_buf = (uint8_t *)malloc(AUDIO_BUF_LEN);
	if(!audio_buf) {
	    avformat_free_context(oc);
	    oc = NULL;
	    return MRET_ERR_NOMEM;
	}
	if(frame_len != sample_len && (frame_len > sample_len || sample_len % frame_len)) {
	    audio_buf2 = (uint16_t *)malloc(frame_len);
	    if(!audio_buf2) {
		avformat_free_context(oc);
		oc = NULL;
		return MRET_ERR_NOMEM;
	    }
	}
    } else
	audio_buf = NULL;
    if(video_buf)
	free(video_buf);
    if(vid_st) {
	video_buf = (uint8_t *)malloc(VIDEO_BUF_LEN);
	if(!video_buf) {
	    avformat_free_context(oc);
	    oc = NULL;
	    return MRET_ERR_NOMEM;
	}
    } else {
	video_buf = NULL;
    }
    if(!(oc->oformat->flags & AVFMT_NOFILE)) {
	if(avio_open(&oc->pb, fname, AVIO_FLAG_WRITE) < 0) {
	    avformat_free_context(oc);
	    oc = NULL;
	    return MRET_ERR_FERR;
	}
    }
    avformat_write_header(oc, NULL);    
    return MRET_OK;
}

MediaRet MediaRecorder::Record(const char *fname, int width, int height, int depth)
{
    if(oc)
	return MRET_ERR_RECORDING;
    aud_st = vid_st = NULL;
    AVOutputFormat *fmt = av_guess_format(NULL, fname, NULL);
    if(!fmt)
	fmt = av_guess_format("avi", NULL, NULL);
    if(!fmt || fmt->video_codec == CODEC_ID_NONE)
	return MRET_ERR_FMTGUESS;
    MediaRet ret;
    if((ret = setup_sound_stream(fname, fmt)) == MRET_OK &&
       (ret = setup_video_stream(fname, width, height, depth)) == MRET_OK)
	ret = finish_setup(fname);
    return ret;
}

MediaRet MediaRecorder::Record(const char *fname)
{
    if(oc)
	return MRET_ERR_RECORDING;
    aud_st = vid_st = NULL;
    AVOutputFormat *fmt = av_guess_format(NULL, fname, NULL);
    if(!fmt)
	fmt = av_guess_format("wav", NULL, NULL);
    if(!fmt || fmt->audio_codec == CODEC_ID_NONE)
	return MRET_ERR_FMTGUESS;
    MediaRet ret;
    if((ret = setup_sound_stream(fname, fmt)) == MRET_OK)
	ret = finish_setup(fname);
    return ret;
}

void MediaRecorder::Stop()
{
    if(oc) {
	if(in_audio_buf2)
	    AddFrame((uint16_t *)0);
	av_write_trailer(oc);
	avformat_free_context(oc);
	oc = NULL;
    }
    if(audio_buf) {
	free(audio_buf);
	audio_buf = NULL;
    }
    if(video_buf) {
	free(video_buf);
	video_buf = NULL;
    }
    if(audio_buf2) {
	free(audio_buf2);
	audio_buf2 = NULL;
    }
    if(convpic) {
	avpicture_free((AVPicture *)convpic);
	av_free(convpic);
	convpic = NULL;
    }
    if(converter) {
	sws_freeContext(converter);
	converter = NULL;
    }
}

MediaRecorder::~MediaRecorder()
{
    Stop();
}

// Still needs updating for avcodec_encode_video2
MediaRet MediaRecorder::AddFrame(const uint8_t *vid)
{
    if(!oc || !vid_st)
	return MRET_OK;

    AVCodecContext *ctx = vid_st->codec;
    AVPacket pkt;
#if LIBAVCODEC_VERSION_MAJOR > 56
    int ret, got_packet = 0;
#endif

    // strip borders.  inconsistent between depths for some reason
    // but fortunately consistent between gb/gba.
    int tbord, rbord;
    switch(pixsize) {
    case 2:
	//    16-bit: 2 @ right, 1 @ top
	tbord = 1; rbord = 2; break;
    case 3:
	//    24-bit: no border
	tbord = rbord = 0; break;
    case 4:
	//    32-bit: 1 @ right, 1 @ top
	tbord = 1; rbord = 1; break;
    }
    avpicture_fill((AVPicture *)pic, (uint8_t *)vid + tbord * (linesize + pixsize * rbord),
		   (PixelFormat)pixfmt, ctx->width + rbord, ctx->height);
    // satisfy stupid sws_scale()'s integrity check
    pic->data[1] = pic->data[2] = pic->data[3] = pic->data[0];
    pic->linesize[1] = pic->linesize[2] = pic->linesize[3] = pic->linesize[0];

    AVFrame *f = pic;

    if(converter) {
	sws_scale(converter, pic->data, pic->linesize, 0, ctx->height,
		  convpic->data, convpic->linesize);
	f = convpic;
    }
    av_init_packet(&pkt);
    pkt.stream_index = vid_st->index;
    if(oc->oformat->flags & AVFMT_RAWPICTURE) {
	// this won't work due to border
	// not sure what formats set this, anyway
	pkt.flags |= AV_PKT_FLAG_KEY;
	pkt.data = f->data[0];
	pkt.size = linesize * ctx->height;
    } else {
#if LIBAVCODEC_VERSION_MAJOR > 56
        pkt.data = video_buf;
        pkt.size = VIDEO_BUF_LEN;
        f->format = ctx->pix_fmt;
        f->width = ctx->width;
        f->height = ctx->height;
        ret = avcodec_encode_video2(ctx, &pkt, f, &got_packet);
        if(!ret && got_packet && ctx->coded_frame) {
            ctx->coded_frame->pts = pkt.pts;
            ctx->coded_frame->key_frame = !!(pkt.flags & AV_PKT_FLAG_KEY);
        }
#else
	pkt.size = avcodec_encode_video(ctx, video_buf, VIDEO_BUF_LEN, f);
#endif
	if(!pkt.size)
	    return MRET_OK;
	if(ctx->coded_frame && ctx->coded_frame->pts != AV_NOPTS_VALUE)
	    pkt.pts = av_rescale_q(ctx->coded_frame->pts, ctx->time_base, vid_st->time_base);
	if(pkt.size > VIDEO_BUF_LEN) {
	    avformat_free_context(oc);
	    oc = NULL;
	    return MRET_ERR_BUFSIZE;
	}
	if(ctx->coded_frame->key_frame)
	    pkt.flags |= AV_PKT_FLAG_KEY;
	pkt.data = video_buf;
    }
    if(av_interleaved_write_frame(oc, &pkt) < 0) {
	avformat_free_context(oc);
	oc = NULL;
	// yeah, err might not be a file error, but if it isn't, it's a
	// coding error rather than a user-controllable error
	// and better resolved using debugging
	return MRET_ERR_FERR;
    }
    return MRET_OK;
}

#if LIBAVCODEC_VERSION_MAJOR > 56
/* FFmpeg deprecated avcodec_encode_audio.
 * It was removed completely in 3.0.
 * This will at least get audio recording *working*
 */
static inline int MediaRecorderEncodeAudio(AVCodecContext *ctx,
                                           AVPacket *pkt,
                                           uint8_t *buf, int buf_size,
                                           const short *samples)
{
    AVFrame *frame;
    av_init_packet(pkt);
    int ret, samples_size, got_packet = 0;

    pkt->data = buf;
    pkt->size = buf_size;
    if (samples) {
        frame = av_frame_alloc();
        if (ctx->frame_size) {
            frame->nb_samples = ctx->frame_size;
        } else {
            frame->nb_samples = (int64_t)buf_size * 8 /
                            (av_get_bits_per_sample(ctx->codec_id) *
                            ctx->channels);
        }
        frame->format = ctx->sample_fmt;
        frame->channel_layout = ctx->channel_layout;
        samples_size = av_samples_get_buffer_size(NULL, ctx->channels,
                        frame->nb_samples, ctx->sample_fmt, 1);
        avcodec_fill_audio_frame(frame, ctx->channels, ctx->sample_fmt,
                        (const uint8_t *)samples, samples_size, 1);
        //frame->pts = AV_NOPTS_VALUE;
    } else {
        frame = NULL;
    }
    ret = avcodec_encode_audio2(ctx, pkt, frame, &got_packet);
    if (!ret && got_packet && ctx->coded_frame) {
        ctx->coded_frame->pts = pkt->pts;
        ctx->coded_frame->key_frame = !!(pkt->flags & AV_PKT_FLAG_KEY);
    }
    if (frame && frame->extended_data != frame->data)
        av_freep(&frame->extended_data);
    av_frame_free(&frame); // release the temporary frame; its data points into the caller's sample buffer
    return ret;

}
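The AddFrame(const uint16_t *) overload that drives this audio shim is not included above. As a point of reference, here is a sketch of how a caller might push one encoder-sized block of interleaved S16 samples through it and into the muxer; write_audio_block is a hypothetical name, and the error mapping and pts rescaling are assumptions rather than the recorder's actual logic.

#if LIBAVCODEC_VERSION_MAJOR > 56
/* Hypothetical caller sketch: encode one block of samples via the shim and
 * hand the resulting packet to the muxer. */
static MediaRet write_audio_block(AVFormatContext *oc, AVStream *aud_st,
                                  uint8_t *audio_buf, const short *samples)
{
    AVPacket pkt;
    int ret = MediaRecorderEncodeAudio(aud_st->codec, &pkt, audio_buf,
                                       AUDIO_BUF_LEN, samples);
    if (ret < 0)
        return MRET_ERR_FERR;   // treat encoder failures as fatal in this sketch
    if (!pkt.size)
        return MRET_OK;         // the encoder is still buffering samples
    pkt.stream_index = aud_st->index;
    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts = av_rescale_q(pkt.pts, aud_st->codec->time_base, aud_st->time_base);
    if (av_interleaved_write_frame(oc, &pkt) < 0)
        return MRET_ERR_FERR;
    return MRET_OK;
}
#endif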
Example #28
0
static struct proxy_output_ctx * alloc_proxy_output_ffmpeg(
	struct anim * anim,
	AVStream * st, int proxy_size, int width, int height,
	int UNUSED(quality))
{
	struct proxy_output_ctx * rv = MEM_callocN(
		sizeof(struct proxy_output_ctx), "alloc_proxy_output");
	
	char fname[FILE_MAX];

	// JPEG requires this
	width = round_up(width, 8);
	height = round_up(height, 8);

	rv->proxy_size = proxy_size;
	rv->anim = anim;

	get_proxy_filename(rv->anim, rv->proxy_size, fname, TRUE);
	BLI_make_existing_file(fname);

	rv->of = avformat_alloc_context();
	rv->of->oformat = av_guess_format("avi", NULL, NULL);
	
	BLI_snprintf(rv->of->filename, sizeof(rv->of->filename), "%s", fname);

	fprintf(stderr, "Starting work on proxy: %s\n", rv->of->filename);

	rv->st = av_new_stream(rv->of, 0);
	rv->c = rv->st->codec;
	rv->c->codec_type = AVMEDIA_TYPE_VIDEO;
	rv->c->codec_id = CODEC_ID_MJPEG;
	rv->c->width = width;
	rv->c->height = height;

	rv->of->oformat->video_codec = rv->c->codec_id;
	rv->codec = avcodec_find_encoder(rv->c->codec_id);

	if (!rv->codec) {
		fprintf(stderr, "No ffmpeg MJPEG encoder available? "
			"Proxy not built!\n");
		av_free(rv->of);
		return NULL;
	}

	if (rv->codec->pix_fmts) {
		rv->c->pix_fmt = rv->codec->pix_fmts[0];
	} else {
		rv->c->pix_fmt = PIX_FMT_YUVJ420P;
	}

	rv->c->sample_aspect_ratio 
		= rv->st->sample_aspect_ratio 
		= st->codec->sample_aspect_ratio;

	rv->c->time_base.den = 25;
	rv->c->time_base.num = 1;
	rv->st->time_base = rv->c->time_base;

	if (rv->of->flags & AVFMT_GLOBALHEADER) {
		rv->c->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}

	if (av_set_parameters(rv->of, NULL) < 0) {
		fprintf(stderr, "Couldn't set output parameters? "
			"Proxy not built!\n");
		av_free(rv->of);
		return 0;
	}

	if (avio_open(&rv->of->pb, fname, AVIO_FLAG_WRITE) < 0) {
		fprintf(stderr, "Couldn't open outputfile! "
			"Proxy not built!\n");
		av_free(rv->of);
		return 0;
	}

	avcodec_open(rv->c, rv->codec);

	rv->video_buffersize = 2000000;
	rv->video_buffer = (uint8_t*)MEM_mallocN(
		rv->video_buffersize, "FFMPEG video buffer");

	rv->orig_height = st->codec->height;

	if (st->codec->width != width || st->codec->height != height ||
	        st->codec->pix_fmt != rv->c->pix_fmt)
	{
		rv->frame = avcodec_alloc_frame();
		avpicture_fill((AVPicture*) rv->frame,
		               MEM_mallocN(avpicture_get_size(
		                               rv->c->pix_fmt,
		                               round_up(width, 16), height),
		                           "alloc proxy output frame"),
		               rv->c->pix_fmt, round_up(width, 16), height);

		rv->sws_ctx = sws_getContext(
			st->codec->width,
			st->codec->height,
			st->codec->pix_fmt,
			width, height,
			rv->c->pix_fmt,
			SWS_FAST_BILINEAR | SWS_PRINT_INFO,
			NULL, NULL, NULL);
	}

	av_write_header(rv->of);

	return rv;
}
Example #29
0
/**
 * Open an output file and the required encoder.
 * Also set some basic encoder parameters.
 * Some of these parameters are based on the input file's parameters.
 */
static int open_output_file(const char *filename,
                            AVCodecContext *input_codec_context,
                            AVFormatContext **output_format_context,
                            AVCodecContext **output_codec_context)
{
    AVIOContext *output_io_context = NULL;
    AVStream *stream               = NULL;
    AVCodec *output_codec          = NULL;
    int error;

    /** Open the output file to write to it. */
    if ((error = avio_open(&output_io_context, filename,
                           AVIO_FLAG_WRITE)) < 0) {
        fprintf(stderr, "Could not open output file '%s' (error '%s')\n",
                filename, get_error_text(error));
        return error;
    }

    /** Create a new format context for the output container format. */
    if (!(*output_format_context = avformat_alloc_context())) {
        fprintf(stderr, "Could not allocate output format context\n");
        return AVERROR(ENOMEM);
    }

    /** Associate the output file (pointer) with the container format context. */
    (*output_format_context)->pb = output_io_context;

    /** Guess the desired container format based on the file extension. */
    if (!((*output_format_context)->oformat = av_guess_format(NULL, filename,
                                                              NULL))) {
        fprintf(stderr, "Could not find output file format\n");
        goto cleanup;
    }

    av_strlcpy((*output_format_context)->filename, filename,
               sizeof((*output_format_context)->filename));

    /** Find the encoder to be used by its name. */
    if (!(output_codec = avcodec_find_encoder(AV_CODEC_ID_AAC))) {
        fprintf(stderr, "Could not find an AAC encoder.\n");
        goto cleanup;
    }

    /** Create a new audio stream in the output file container. */
    if (!(stream = avformat_new_stream(*output_format_context, output_codec))) {
        fprintf(stderr, "Could not create new stream\n");
        error = AVERROR(ENOMEM);
        goto cleanup;
    }

    /** Save the encoder context for easier access later. */
    *output_codec_context = stream->codec;

    /**
     * Set the basic encoder parameters.
     * The input file's sample rate is used to avoid a sample rate conversion.
     */
    (*output_codec_context)->channels       = OUTPUT_CHANNELS;
    (*output_codec_context)->channel_layout = av_get_default_channel_layout(OUTPUT_CHANNELS);
    (*output_codec_context)->sample_rate    = input_codec_context->sample_rate;
    (*output_codec_context)->sample_fmt     = output_codec->sample_fmts[0];
    (*output_codec_context)->bit_rate       = OUTPUT_BIT_RATE;

    /** Allow the use of the experimental AAC encoder */
    (*output_codec_context)->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;

    /** Set the sample rate for the container. */
    stream->time_base.den = input_codec_context->sample_rate;
    stream->time_base.num = 1;

    /**
     * Some container formats (like MP4) require global headers to be present
     * Mark the encoder so that it behaves accordingly.
     */
    if ((*output_format_context)->oformat->flags & AVFMT_GLOBALHEADER)
        (*output_codec_context)->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

    /** Open the encoder for the audio stream to use it later. */
    if ((error = avcodec_open2(*output_codec_context, output_codec, NULL)) < 0) {
        fprintf(stderr, "Could not open output codec (error '%s')\n",
                get_error_text(error));
        goto cleanup;
    }

    return 0;

cleanup:
    avio_closep(&(*output_format_context)->pb);
    avformat_free_context(*output_format_context);
    *output_format_context = NULL;
    return error < 0 ? error : AVERROR_EXIT;
}
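For completeness, a sketch of how open_output_file() might be driven follows; the demo_open_output wrapper, the output file name, and the immediate teardown are illustrative assumptions, not part of the original transcoding example.

/* Hypothetical caller sketch for open_output_file(): set up the output for a
 * given input decoder context, then tear it down again. */
static int demo_open_output(AVCodecContext *input_codec_context)
{
    AVFormatContext *output_format_context = NULL;
    AVCodecContext  *output_codec_context  = NULL;

    if (open_output_file("output.m4a", input_codec_context,
                         &output_format_context, &output_codec_context) < 0)
        return -1;

    /* ... read, decode, resample and encode frames here ... */

    avcodec_close(output_codec_context);
    avio_closep(&output_format_context->pb);
    avformat_free_context(output_format_context);
    return 0;
}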
Example #30
0
uint8_t lavMuxer::open(const char *filename,uint32_t inbitrate, ADM_MUXER_TYPE type, aviInfo *info,
              uint32_t videoExtraDataSize, uint8_t *videoExtraData, WAVHeader *audioheader,
              uint32_t audioextraSize,uint8_t *audioextraData)
{
 AVCodecContext *c;
 	_type=type;
	_fps1000=info->fps1000;
	switch(_type)
	{
	case MUXER_TS:
		fmt = av_guess_format("mpegts", NULL, NULL);
		break;
	case MUXER_DVD:
		fmt = av_guess_format("dvd", NULL, NULL);
		break;
	case MUXER_VCD:
		fmt = av_guess_format("vcd", NULL, NULL);
		break;
	case MUXER_SVCD:
		fmt = av_guess_format("svcd", NULL, NULL);
		break;
	case MUXER_MP4:
		fmt = av_guess_format("mp4", NULL, NULL);
		break;
	case MUXER_PSP:
		fmt = av_guess_format("psp", NULL, NULL);
		break;
	case MUXER_FLV:
		fmt = av_guess_format("flv", NULL, NULL);
		break;
	case MUXER_MATROSKA:
		fmt = av_guess_format("matroska", NULL, NULL);
		break;

	default:
		fmt=NULL;
	}
	if (!fmt)
	{
        	printf("Lav:Cannot guess format\n");
                ADM_assert(0);
		return 0;
	}
	oc = avformat_alloc_context();
	if (!oc)
	{
       		printf("Lav:Cannot allocate context\n");
		return 0;
	}
	oc->oformat = fmt;
	snprintf(oc->filename,1000,"file://%s",filename);
	// Video
	//________

	video_st = av_new_stream(oc, 0);
	if (!video_st)
	{
		printf("Lav: new stream failed\n");
		return 0;
	}

	c = video_st->codec;
	c->gop_size=15;
	c->max_b_frames=2;
	c->has_b_frames=1;

	switch(_type)
	{
				case MUXER_FLV:
					 c->codec=new AVCodec;
					 memset(c->codec,0,sizeof(AVCodec));
					 if(fourCC::check(info->fcc,(uint8_t *)"FLV1"))
					 {
						 c->codec_id=CODEC_ID_FLV1;
					 	 c->codec->name=ADM_strdup("FLV1");
					 }else
					 {
						 if(isVP6Compatible(info->fcc))
						 			{
							 		 c->codec_id=CODEC_ID_VP6F;
					 				 c->codec->name=ADM_strdup("VP6F");
						 			}
						 else
							 ADM_assert(0);

					 }

					 break;
                case MUXER_MATROSKA:
                        strcpy(oc->title,"Avidemux");
                        strcpy(oc->author,"Avidemux");
                        if(isMpeg4Compatible(info->fcc))
                        {
                                c->codec_id = CODEC_ID_MPEG4;
                        }else
                        {
                                if(isH264Compatible(info->fcc))
                                {
                                        c->has_b_frames=2; // let muxer know we may have bpyramid
                                        c->codec_id = CODEC_ID_H264;
                                        c->codec=new AVCodec;
                                        memset(c->codec,0,sizeof(AVCodec));
                                        c->codec->name=ADM_strdup("H264");
                                }
                                else
                                {
                                   if(!ADM_4cc_to_lavcodec((const char *)&(info->fcc),&(c->codec_id)))
                                   {
                                      printf("[lavFormat] Cannot map  this\n");
                                      return 0;
                                   }

                                }
                        }
                        if(videoExtraDataSize)
                        {
                                c->extradata=videoExtraData;
                                c->extradata_size= videoExtraDataSize;
                        }
                        break;
                case MUXER_MP4:
                case MUXER_PSP:
                {
                        // probably a memory leak here
                        char *foo=ADM_strdup(filename);

                        strcpy(oc->title,ADM_GetFileName(foo));
                        strcpy(oc->author,"Avidemux");
                        if(isMpeg4Compatible(info->fcc))
                        {
                                c->codec_id = CODEC_ID_MPEG4;
                        }else
                        {
                                if(isH264Compatible(info->fcc))
                                {
                                        c->has_b_frames=2; // let muxer know we may have bpyramid
                                        c->codec_id = CODEC_ID_H264;
                                        c->codec=new AVCodec;
                                        memset(c->codec,0,sizeof(AVCodec));
                                        c->codec->name=ADM_strdup("H264");
                                }
                                else
                                {
                                        if(isDVCompatible(info->fcc))
                                        {
                                          c->codec_id = CODEC_ID_DVVIDEO;
                                        }else
                                        {
                                          if(fourCC::check(info->fcc,(uint8_t *)"H263"))
                                          {
                                                    c->codec_id=CODEC_ID_H263;
                                            }else{
                                                    c->codec_id = CODEC_ID_MPEG4; // Default value
                                                    printf("Ooops, cant mux that...\n");
                                                    printf("Ooops, cant mux that...\n");
                                                    printf("Ooops, cant mux that...\n");
                                                }
                                        }
                                }
                        }
                        if(videoExtraDataSize)
                        {
                                c->extradata=videoExtraData;
                                c->extradata_size= videoExtraDataSize;
                        }
                        if(MUXER_PSP==_type)
                        {
                            c->rc_buffer_size=0; //8*1024*224;
                            c->rc_max_rate=0; //768*1000;
                            c->rc_min_rate=0;
                            c->bit_rate=768*1000;
                        }
                        else
                        {
                            c->rc_buffer_size=8*1024*224;
                            c->rc_max_rate=9500*1000;
                            c->rc_min_rate=0;
                            if(!inbitrate)
                                    c->bit_rate=9000*1000;
                            else
                                    c->bit_rate=inbitrate;
                        }
                }
                        break;
                case MUXER_TS:
                        c->codec_id = CODEC_ID_MPEG2VIDEO;
                        c->rc_buffer_size=8*1024*224;
                        c->rc_max_rate=9500*1000;
                        c->rc_min_rate=0;
                        if(!inbitrate)
                                c->bit_rate=9000*1000;
                        else
                                c->bit_rate=inbitrate;

                        break;
		case MUXER_DVD:
			c->codec_id = CODEC_ID_MPEG2VIDEO;
			c->rc_buffer_size=8*1024*224;
			c->rc_max_rate=9500*1000;
			c->rc_min_rate=0;
			if(!inbitrate)
				c->bit_rate=9000*1000;
			else
				c->bit_rate=inbitrate;

			break;
		case MUXER_VCD:
			c->codec_id = CODEC_ID_MPEG1VIDEO;

			c->rc_buffer_size=8*1024*40;
			c->rc_max_rate=1152*1000;
			c->rc_min_rate=1152*1000;

			c->bit_rate=1152*1000;


			break;
		case MUXER_SVCD:
			c->codec_id = CODEC_ID_MPEG2VIDEO;

			c->rc_buffer_size=8*1024*112;
			c->rc_max_rate=2500*1000;
			c->rc_min_rate=0*1000;
			if(!inbitrate)
				c->bit_rate=2040*1000;
			else
				c->bit_rate=inbitrate;

			break;
		default:
			ADM_assert(0);
	}

	c->codec_type = CODEC_TYPE_VIDEO;
	c->flags=CODEC_FLAG_QSCALE;
	c->width = info->width;
	c->height = info->height;

       AVRational fps25=(AVRational){1001,25025};
       AVRational fps24=(AVRational){1001,24000};
       AVRational fps30= (AVRational){1001,30000};
       AVRational fpsfree= (AVRational){1000,_fps1000};


    	switch(_fps1000)
	{
		case 25000:
                {
			 c->time_base= fps25;
			 break;
                }
		case 23976:
                        if(_type==MUXER_MP4 || _type==MUXER_PSP || _type==MUXER_FLV || _type==MUXER_MATROSKA)
                        {
                                 c->time_base= fps24; //(AVRational){1001,24000};
                                break;
                        }
		case  29970:
			 c->time_base=fps30;
			break;
		default:
                      {
                            if(_type==MUXER_MP4 || _type==MUXER_PSP || _type==MUXER_FLV || _type==MUXER_MATROSKA)
                            {
                                    c->time_base=fpsfree;// (AVRational){1000,_fps1000};
                                    break;
                            }
                            else
                            {
                                GUI_Error_HIG(QT_TR_NOOP("Incompatible frame rate"), NULL);
                                return 0;
                            }
                            }
                        break;
	}


	// Audio
	//________
        if(audioheader)
        {
          audio_st = av_new_stream(oc, 1);
          if (!audio_st)
          {
                  printf("Lav: new stream failed\n");
                  return 0;
          }


          c = audio_st->codec;
          c->frame_size=1024; //For AAC mainly, samples per frame
          printf("[LavFormat] Bitrate %u\n",(audioheader->byterate*8)/1000);
          _audioFq=c->sample_rate = audioheader->frequency;
#if 0
           if(_type== MUXER_PSP && audioheader->encoding==WAV_AAC)
            {
                    _audioFq=c->sample_rate = audioheader->frequency/2;                 //_audioFq*=2; // SBR
             }
#endif

          switch(audioheader->encoding)
          {
                  case WAV_AC3: c->codec_id = CODEC_ID_AC3;c->frame_size=6*256;break;
                  case WAV_MP2: c->codec_id = CODEC_ID_MP2;break;
                  case WAV_MP3:
  #warning FIXME : Probe deeper
                              c->frame_size=1152;
                              c->codec_id = CODEC_ID_MP3;
                              break;
                  case WAV_PCM:
                                  // One chunk is 10 ms (1/100 of fq)
                                  c->frame_size=4;
                                  c->codec_id = CODEC_ID_PCM_S16LE;break;
                  case WAV_AAC:
                                  c->extradata=audioextraData;
                                  c->extradata_size= audioextraSize;
                                  c->codec_id = CODEC_ID_AAC;
                                  break;
                  default:
                          if(_type==MUXER_MATROSKA)
                          {
                           if(ADM_WaveTag_to_lavcodec(audioheader->encoding, &(c->codec_id)))
                           {
                             if(audioextraData)
                             {
                                  c->extradata=audioextraData;
                                  c->extradata_size= audioextraSize;
                             }
                             // Put a dummy time increment
                              c->time_base= fps25;
                             break;
                           }
                          }

                          printf("Cant mux that ! audio\n");
                          printf("Cant mux that ! audio\n");
                          c->codec_id = CODEC_ID_MP2;
                          return 0;
                          break;
          }
          c->codec_type = CODEC_TYPE_AUDIO;

          c->bit_rate = audioheader->byterate*8;
          c->rc_buffer_size=(c->bit_rate/(2*8)); // 500 ms worth

          c->channels = audioheader->channels;
          _audioByterate=audioheader->byterate;

        }
        // /audio


//----------------------
	switch(_type)
	{
				case MUXER_FLV:
                case MUXER_PSP:
                case MUXER_MP4:
                case MUXER_MATROSKA:
                        oc->mux_rate=10080*1000; // Needed ?
                        break;

                case MUXER_TS:
                        oc->mux_rate=10080*1000;
                        break;
		case MUXER_DVD:
			oc->packet_size=2048;
			oc->mux_rate=10080*1000;
			break;
		case MUXER_VCD:
			oc->packet_size=2324;
			oc->mux_rate=2352 * 75 * 8;

			break;
		case MUXER_SVCD:

			oc->packet_size=2324;
			oc->mux_rate=2*2352 * 75 * 8; // ?

			break;
		default:
			ADM_assert(0);
	}
	oc->preload=AV_TIME_BASE/10; // 100 ms preloading
	oc->max_delay=200*1000; // 200 ms

	if (av_set_parameters(oc, NULL) < 0)
	{
		printf("Lav: set param failed \n");
		return 0;
	}
	 if (url_fopen(&(oc->pb), filename, URL_WRONLY) < 0)
	 {
	 	printf("Lav: Failed to open file :%s\n",filename);
		return 0;
        }

	ADM_assert(av_write_header(oc)>=0);
	dump_format(oc, 0, filename, 1);


	printf("lavformat mpeg muxer initialized\n");

	_running=1;

	one=(1000*1000*1000)/_fps1000;
	_curDTS=one;

	return 1;
}