Code Example #1
#if FF_API_OLD_GRAPH_PARSE && !HAVE_INCOMPATIBLE_LIBAV_ABI /* assumed guard: the opening #if of the #else/#endif pair below is missing from this excerpt */
int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
                         AVFilterInOut *open_inputs,
                         AVFilterInOut *open_outputs, void *log_ctx)
{
    int ret;
    AVFilterInOut *cur, *match, *inputs = NULL, *outputs = NULL;

    if ((ret = avfilter_graph_parse2(graph, filters, &inputs, &outputs)) < 0)
        goto fail;

    /* First input can be omitted if it is "[in]" */
    if (inputs && !inputs->name)
        inputs->name = av_strdup("in");
    for (cur = inputs; cur; cur = cur->next) {
        if (!cur->name) {
            av_log(log_ctx, AV_LOG_ERROR,
                   "Not enough inputs specified for the \"%s\" filter.\n",
                   cur->filter_ctx->filter->name);
            ret = AVERROR(EINVAL);
            goto fail;
        }
        if (!(match = extract_inout(cur->name, &open_outputs)))
            continue;
        ret = avfilter_link(match->filter_ctx, match->pad_idx,
                            cur->filter_ctx,   cur->pad_idx);
        avfilter_inout_free(&match);
        if (ret < 0)
            goto fail;
    }

    /* Last output can be omitted if it is "[out]" */
    if (outputs && !outputs->name)
        outputs->name = av_strdup("out");
    for (cur = outputs; cur; cur = cur->next) {
        if (!cur->name) {
            av_log(log_ctx, AV_LOG_ERROR,
                   "Invalid filterchain containing an unlabelled output pad: \"%s\"\n",
                   filters);
            ret = AVERROR(EINVAL);
            goto fail;
        }
        if (!(match = extract_inout(cur->name, &open_inputs)))
            continue;
        ret = avfilter_link(cur->filter_ctx,   cur->pad_idx,
                            match->filter_ctx, match->pad_idx);
        avfilter_inout_free(&match);
        if (ret < 0)
            goto fail;
    }

 fail:
    if (ret < 0) {
        while (graph->nb_filters)
            avfilter_free(graph->filters[0]);
        av_freep(&graph->filters);
    }
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    avfilter_inout_free(&open_inputs);
    avfilter_inout_free(&open_outputs);
    return ret;
#else
int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
                         AVFilterInOut **inputs, AVFilterInOut **outputs,
                         void *log_ctx)
{
    return avfilter_graph_parse_ptr(graph, filters, inputs, outputs, log_ctx);
#endif
}

int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters,
                         AVFilterInOut **open_inputs_ptr, AVFilterInOut **open_outputs_ptr,
                         void *log_ctx)
{
    int index = 0, ret = 0;
    char chr = 0;

    AVFilterInOut *curr_inputs = NULL;
    AVFilterInOut *open_inputs  = open_inputs_ptr  ? *open_inputs_ptr  : NULL;
    AVFilterInOut *open_outputs = open_outputs_ptr ? *open_outputs_ptr : NULL;

    if ((ret = parse_sws_flags(&filters, graph)) < 0)
        goto end;

    do {
        AVFilterContext *filter;
        const char *filterchain = filters;
        filters += strspn(filters, WHITESPACES);

        if ((ret = parse_inputs(&filters, &curr_inputs, &open_outputs, log_ctx)) < 0)
            goto end;

        if ((ret = parse_filter(&filter, &filters, graph, index, log_ctx)) < 0)
            goto end;

        if (filter->nb_inputs == 1 && !curr_inputs && !index) {
            /* First input pad, assume it is "[in]" if not specified */
            const char *tmp = "[in]";
            if ((ret = parse_inputs(&tmp, &curr_inputs, &open_outputs, log_ctx)) < 0)
                goto end;
        }

        if ((ret = link_filter_inouts(filter, &curr_inputs, &open_inputs, log_ctx)) < 0)
            goto end;

        if ((ret = parse_outputs(&filters, &curr_inputs, &open_inputs, &open_outputs,
                                 log_ctx)) < 0)
            goto end;

        filters += strspn(filters, WHITESPACES);
        chr = *filters++;

        if (chr == ';' && curr_inputs) {
            av_log(log_ctx, AV_LOG_ERROR,
                   "Invalid filterchain containing an unlabelled output pad: \"%s\"\n",
                   filterchain);
            ret = AVERROR(EINVAL);
            goto end;
        }
        index++;
    } while (chr == ',' || chr == ';');

    if (chr) {
        av_log(log_ctx, AV_LOG_ERROR,
               "Unable to parse graph description substring: \"%s\"\n",
               filters - 1);
        ret = AVERROR(EINVAL);
        goto end;
    }

    if (curr_inputs) {
        /* Last output pad, assume it is "[out]" if not specified */
        const char *tmp = "[out]";
        if ((ret = parse_outputs(&tmp, &curr_inputs, &open_inputs, &open_outputs,
                                 log_ctx)) < 0)
            goto end;
    }

end:
    /* clear open_in/outputs only if not passed as parameters */
    if (open_inputs_ptr) *open_inputs_ptr = open_inputs;
    else avfilter_inout_free(&open_inputs);
    if (open_outputs_ptr) *open_outputs_ptr = open_outputs;
    else avfilter_inout_free(&open_outputs);
    avfilter_inout_free(&curr_inputs);

    if (ret < 0) {
        while (graph->nb_filters)
            avfilter_free(graph->filters[0]);
        av_freep(&graph->filters);
    }
    return ret;
}
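For context, a typical call site for avfilter_graph_parse_ptr() looks like the sketch below. It assumes buffersrc_ctx and buffersink_ctx were created earlier with avfilter_graph_create_filter() (illustrative names, not part of the snippet above):

    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    int ret;

    /* "outputs" describes the open output pad of the source filter,
     * "inputs" the open input pad of the sink filter. */
    outputs->name       = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;   /* assumed to exist */
    outputs->pad_idx    = 0;
    outputs->next       = NULL;

    inputs->name        = av_strdup("out");
    inputs->filter_ctx  = buffersink_ctx;  /* assumed to exist */
    inputs->pad_idx     = 0;
    inputs->next        = NULL;

    ret = avfilter_graph_parse_ptr(graph, "scale=640:360",
                                   &inputs, &outputs, NULL);
    if (ret >= 0)
        ret = avfilter_graph_config(graph, NULL);

    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);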
Code Example #2
void lwlibav_update_configuration
(
    lwlibav_decode_handler_t *dhp,
    uint32_t                  frame_number,
    int                       extradata_index,
    int64_t                   rap_pos
)
{
    lwlibav_extradata_handler_t *exhp = &dhp->exh;
    if( exhp->entry_count == 0 || extradata_index < 0 )
    {
        /* No need to update the extradata. */
        exhp->current_index = extradata_index;
        lwlibav_flush_buffers( dhp );
        return;
    }
    AVCodecContext *ctx = dhp->format->streams[ dhp->stream_index ]->codec;
    void *app_specific = ctx->opaque;
    avcodec_close( ctx );
    if( ctx->extradata )
    {
        av_freep( &ctx->extradata );
        ctx->extradata_size = 0;
    }
    /* Find an appropriate decoder. */
    char error_string[96] = { 0 };
    lwlibav_extradata_t *entry = &exhp->entries[extradata_index];
    const AVCodec *codec = find_decoder( entry->codec_id, dhp->preferred_decoder_names );
    if( !codec )
    {
        strcpy( error_string, "Failed to find the decoder.\n" );
        goto fail;
    }
    /* Get decoder default settings. */
    int thread_count = ctx->thread_count;
    if( avcodec_get_context_defaults3( ctx, codec ) < 0 )
    {
        strcpy( error_string, "Failed to get CODEC default.\n" );
        goto fail;
    }
    /* Set up decoder basic settings. */
    if( ctx->codec_type == AVMEDIA_TYPE_VIDEO )
        set_video_basic_settings( dhp, frame_number );
    else
        set_audio_basic_settings( dhp, frame_number );
    /* Update extradata. */
    if( entry->extradata_size > 0 )
    {
        ctx->extradata = (uint8_t *)av_malloc( entry->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE );
        if( !ctx->extradata )
        {
            strcpy( error_string, "Failed to allocate extradata.\n" );
            goto fail;
        }
        ctx->extradata_size = entry->extradata_size;
        memcpy( ctx->extradata, entry->extradata, ctx->extradata_size );
        memset( ctx->extradata + ctx->extradata_size, 0, FF_INPUT_BUFFER_PADDING_SIZE );
    }
    /* AVCodecContext.codec_id is supposed to be set properly in avcodec_open2().
     * See lwlibav_flush_buffers() for why this is needed. */
    ctx->codec_id  = AV_CODEC_ID_NONE;
    /* This is needed by some CODECs such as UtVideo and raw video. */
    ctx->codec_tag = entry->codec_tag;
    /* Open an appropriate decoder.
     * Here, we force single-threaded decoding since some decoders don't perform their initialization properly with multi-threaded decoding. */
    ctx->thread_count = 1;
    if( open_decoder( ctx, codec ) < 0 )
    {
        strcpy( error_string, "Failed to open decoder.\n" );
        goto fail;
    }
    exhp->current_index = extradata_index;
    exhp->delay_count   = 0;
    /* Set up decoder basic settings by actual decoding. */
    if( ctx->codec_type == AVMEDIA_TYPE_VIDEO
      ? try_decode_video_frame( dhp, frame_number, rap_pos, error_string ) < 0
      : try_decode_audio_frame( dhp, frame_number, error_string ) < 0 )
        goto fail;
    /* Reopen/flush with the requested number of threads. */
    ctx->thread_count = thread_count;
    int width  = ctx->width;
    int height = ctx->height;
    lwlibav_flush_buffers( dhp );
    ctx->get_buffer2 = exhp->get_buffer ? exhp->get_buffer : avcodec_default_get_buffer2;
    ctx->opaque      = app_specific;
    /* avcodec_open2() may have changed resolution unexpectedly. */
    ctx->width       = width;
    ctx->height      = height;
    return;
fail:
    exhp->delay_count = 0;
    dhp->error = 1;
    lw_log_show( &dhp->lh, LW_LOG_FATAL,
                 "%sIt is recommended you reopen the file.", error_string );
}
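The extradata update above follows a recurring libavcodec idiom: allocate extradata_size + FF_INPUT_BUFFER_PADDING_SIZE bytes, copy the payload, and zero the padding. As a sketch, the idiom could be factored into a helper like this (hypothetical function, not part of any library):

    static int copy_extradata(AVCodecContext *ctx, const uint8_t *src, int size)
    {
        ctx->extradata = (uint8_t *)av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!ctx->extradata)
            return AVERROR(ENOMEM);
        memcpy(ctx->extradata, src, size);
        /* The zeroed padding lets optimized bitstream readers safely read past the end. */
        memset(ctx->extradata + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
        ctx->extradata_size = size;
        return 0;
    }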
Code Example #3
File: avcodec.cpp Project: ken444/LAVFilters
STDMETHODIMP CDecAvcodec::InitDecoder(AVCodecID codec, const CMediaType *pmt)
{
    DestroyDecoder();
    DbgLog((LOG_TRACE, 10, L"Initializing ffmpeg for codec %S", avcodec_get_name(codec)));

    BITMAPINFOHEADER *pBMI = nullptr;
    videoFormatTypeHandler((const BYTE *)pmt->Format(), pmt->FormatType(), &pBMI);

    m_pAVCodec = avcodec_find_decoder(codec);
    CheckPointer(m_pAVCodec, VFW_E_UNSUPPORTED_VIDEO);

    m_pAVCtx = avcodec_alloc_context3(m_pAVCodec);
    CheckPointer(m_pAVCtx, E_POINTER);

    if(    codec == AV_CODEC_ID_MPEG1VIDEO
            || codec == AV_CODEC_ID_MPEG2VIDEO
            || pmt->subtype == MEDIASUBTYPE_H264
            || pmt->subtype == MEDIASUBTYPE_h264
            || pmt->subtype == MEDIASUBTYPE_X264
            || pmt->subtype == MEDIASUBTYPE_x264
            || pmt->subtype == MEDIASUBTYPE_H264_bis
            || pmt->subtype == MEDIASUBTYPE_HEVC) {
        m_pParser = av_parser_init(codec);
    }

    DWORD dwDecFlags = m_pCallback->GetDecodeFlags();

    LONG biRealWidth = pBMI->biWidth, biRealHeight = pBMI->biHeight;
    if (pmt->formattype == FORMAT_VideoInfo || pmt->formattype == FORMAT_MPEGVideo) {
        VIDEOINFOHEADER *vih = (VIDEOINFOHEADER *)pmt->Format();
        if (vih->rcTarget.right != 0 && vih->rcTarget.bottom != 0) {
            biRealWidth  = vih->rcTarget.right;
            biRealHeight = vih->rcTarget.bottom;
        }
    } else if (pmt->formattype == FORMAT_VideoInfo2 || pmt->formattype == FORMAT_MPEG2Video) {
        VIDEOINFOHEADER2 *vih2 = (VIDEOINFOHEADER2 *)pmt->Format();
        if (vih2->rcTarget.right != 0 && vih2->rcTarget.bottom != 0) {
            biRealWidth  = vih2->rcTarget.right;
            biRealHeight = vih2->rcTarget.bottom;
        }
    }

    m_pAVCtx->codec_id              = codec;
    m_pAVCtx->codec_tag             = pBMI->biCompression;
    m_pAVCtx->coded_width           = pBMI->biWidth;
    m_pAVCtx->coded_height          = abs(pBMI->biHeight);
    m_pAVCtx->bits_per_coded_sample = pBMI->biBitCount;
    m_pAVCtx->error_concealment     = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
    m_pAVCtx->err_recognition       = 0;
    m_pAVCtx->workaround_bugs       = FF_BUG_AUTODETECT | FF_BUG_X264_LOSSLESS;
    m_pAVCtx->refcounted_frames     = 1;

    // Setup threading
    // Thread Count. 0 = auto detect
    int thread_count = m_pSettings->GetNumThreads();
    if (thread_count == 0) {
        thread_count = av_cpu_count() * 3 / 2;
    }
    m_pAVCtx->thread_count = max(1, min(thread_count, AVCODEC_MAX_THREADS));

    if (dwDecFlags & LAV_VIDEO_DEC_FLAG_NO_MT || codec == AV_CODEC_ID_MPEG4) {
        m_pAVCtx->thread_count = 1;
    }

    m_pFrame = av_frame_alloc();
    CheckPointer(m_pFrame, E_POINTER);

    // Process Extradata
    BYTE *extra = nullptr;
    size_t extralen = 0;
    getExtraData(*pmt, nullptr, &extralen);

    BOOL bH264avc = FALSE;
    if (pmt->formattype == FORMAT_MPEG2Video && (m_pAVCtx->codec_tag == MAKEFOURCC('a','v','c','1') || m_pAVCtx->codec_tag == MAKEFOURCC('A','V','C','1') || m_pAVCtx->codec_tag == MAKEFOURCC('C','C','V','1'))) {
        // Reconstruct AVC1 extradata format
        DbgLog((LOG_TRACE, 10, L"-> Processing AVC1 extradata of %d bytes", extralen));
        MPEG2VIDEOINFO *mp2vi = (MPEG2VIDEOINFO *)pmt->Format();
        extralen += 7;
        extra = (uint8_t *)av_mallocz(extralen + FF_INPUT_BUFFER_PADDING_SIZE);
        extra[0] = 1;
        extra[1] = (BYTE)mp2vi->dwProfile;
        extra[2] = 0;
        extra[3] = (BYTE)mp2vi->dwLevel;
        extra[4] = (BYTE)(mp2vi->dwFlags ? mp2vi->dwFlags : 4) - 1;

        // only process extradata if available
        uint8_t ps_count = 0;
        if (extralen > 7) {
            // Actually copy the metadata into our new buffer
            size_t actual_len;
            getExtraData(*pmt, extra + 6, &actual_len);

            // Count the number of SPS/PPS in them and set the length
            // We'll put them all into one block and add a second block with 0 elements afterwards
            // The parsing logic does not care what type they are, it just expects 2 blocks.
            BYTE *p = extra + 6, *end = extra + 6 + actual_len;
            BOOL bSPS = FALSE, bPPS = FALSE;
            while (p + 1 < end) {
                unsigned len = (((unsigned)p[0] << 8) | p[1]) + 2;
                if (p + len > end) {
                    break;
                }
                if ((p[2] & 0x1F) == 7)
                    bSPS = TRUE;
                if ((p[2] & 0x1F) == 8)
                    bPPS = TRUE;
                ps_count++;
                p += len;
            }
        }
        extra[5] = ps_count;
        extra[extralen - 1] = 0;

        bH264avc = TRUE;
        m_pAVCtx->extradata = extra;
        m_pAVCtx->extradata_size = (int)extralen;
    } else if (extralen > 0) {
        DbgLog((LOG_TRACE, 10, L"-> Processing extradata of %d bytes", extralen));
        if (pmt->subtype == MEDIASUBTYPE_LAV_RAWVIDEO) {
            if (extralen < sizeof(m_pAVCtx->pix_fmt)) {
                DbgLog((LOG_TRACE, 10, L"-> LAV RAW Video extradata is missing.."));
            } else {
                extra = (uint8_t *)av_mallocz(extralen + FF_INPUT_BUFFER_PADDING_SIZE);
                getExtraData(*pmt, extra, nullptr);
                m_pAVCtx->pix_fmt = *(AVPixelFormat *)extra;
                extralen -= sizeof(AVPixelFormat);
                memmove(extra, extra+sizeof(AVPixelFormat), extralen);
            }
        } else {
            // Just copy extradata for other formats
            extra = (uint8_t *)av_mallocz(extralen + FF_INPUT_BUFFER_PADDING_SIZE);
            getExtraData(*pmt, extra, nullptr);
        }
        // Hack to discard invalid MP4 metadata with AnnexB style video
        if (codec == AV_CODEC_ID_H264 && !bH264avc && extra && extra[0] == 1) {
            av_freep(&extra);
            extralen = 0;
        }
        m_pAVCtx->extradata = extra;
        m_pAVCtx->extradata_size = (int)extralen;
    } else {
        if (codec == AV_CODEC_ID_VP6 || codec == AV_CODEC_ID_VP6A || codec == AV_CODEC_ID_VP6F) {
            int cropH = pBMI->biWidth - biRealWidth;
            int cropV = pBMI->biHeight - biRealHeight;
            if (cropH >= 0 && cropH <= 0x0f && cropV >= 0 && cropV <= 0x0f) {
                m_pAVCtx->extradata = (uint8_t *)av_mallocz(1 + FF_INPUT_BUFFER_PADDING_SIZE);
                m_pAVCtx->extradata_size = 1;
                m_pAVCtx->extradata[0] = (cropH << 4) | cropV;
            }
        }
    }

    m_CurrentThread = 0;
    m_rtStartCache = AV_NOPTS_VALUE;

    LAVPinInfo lavPinInfo = {0};
    BOOL bLAVInfoValid = SUCCEEDED(m_pCallback->GetLAVPinInfo(lavPinInfo));

    m_bInputPadded = dwDecFlags & LAV_VIDEO_DEC_FLAG_LAVSPLITTER;

    // Setup codec-specific timing logic

    // MPEG-4 with VideoInfo/2 comes from AVI and carries only DTS
    if (codec == AV_CODEC_ID_MPEG4 && pmt->formattype != FORMAT_MPEG2Video)
        dwDecFlags |= LAV_VIDEO_DEC_FLAG_ONLY_DTS;

    // RealVideo is only DTS
    if (codec == AV_CODEC_ID_RV10 || codec == AV_CODEC_ID_RV20 || codec == AV_CODEC_ID_RV30 || codec == AV_CODEC_ID_RV40)
        dwDecFlags |= LAV_VIDEO_DEC_FLAG_ONLY_DTS;

    // Use ffmpeg's logic to reorder timestamps
    // This is required for H264 content (except AVI), and generally for all codecs that use frame threading
    m_bFFReordering        = !(dwDecFlags & LAV_VIDEO_DEC_FLAG_ONLY_DTS) && (
                                 (m_pAVCodec->capabilities & CODEC_CAP_FRAME_THREADS) // Covers most modern codecs, others listed above
                                 || codec == AV_CODEC_ID_MPEG2VIDEO
                                 || codec == AV_CODEC_ID_MPEG1VIDEO
                                 || codec == AV_CODEC_ID_DIRAC
                                 || codec == AV_CODEC_ID_VC1
                             );

    // Stop time is unreliable, drop it and calculate it
    m_bCalculateStopTime   = (codec == AV_CODEC_ID_H264 || codec == AV_CODEC_ID_DIRAC || (codec == AV_CODEC_ID_MPEG4 && pmt->formattype == FORMAT_MPEG2Video) || (codec == AV_CODEC_ID_VC1 && !(dwDecFlags & LAV_VIDEO_DEC_FLAG_ONLY_DTS)));

    // RealVideo content has some odd timestamps
    // LAV Splitter gets them right for RV30/RV40; everything else mangles them
    m_bRVDropBFrameTimings = (codec == AV_CODEC_ID_RV10 || codec == AV_CODEC_ID_RV20 || ((codec == AV_CODEC_ID_RV30 || codec == AV_CODEC_ID_RV40) && (!(dwDecFlags & LAV_VIDEO_DEC_FLAG_LAVSPLITTER) || (bLAVInfoValid && (lavPinInfo.flags & LAV_STREAM_FLAG_RV34_MKV)))));

    // Enable B-Frame delay handling
    m_bBFrameDelay = !m_bFFReordering && !m_bRVDropBFrameTimings;

    m_bWaitingForKeyFrame = TRUE;
    m_bResumeAtKeyFrame =    codec == AV_CODEC_ID_MPEG2VIDEO
                             || codec == AV_CODEC_ID_VC1
                             || codec == AV_CODEC_ID_VC1IMAGE
                             || codec == AV_CODEC_ID_WMV3
                             || codec == AV_CODEC_ID_WMV3IMAGE
                             || codec == AV_CODEC_ID_RV30
                             || codec == AV_CODEC_ID_RV40
                             || codec == AV_CODEC_ID_VP3
                             || codec == AV_CODEC_ID_THEORA
                             || codec == AV_CODEC_ID_MPEG4;

    m_bHasPalette = m_pAVCtx->bits_per_coded_sample <= 8 && m_pAVCtx->extradata_size && !(dwDecFlags & LAV_VIDEO_DEC_FLAG_LAVSPLITTER)
                    &&  (codec == AV_CODEC_ID_MSVIDEO1
                         || codec == AV_CODEC_ID_MSRLE
                         || codec == AV_CODEC_ID_CINEPAK
                         || codec == AV_CODEC_ID_8BPS
                         || codec == AV_CODEC_ID_QPEG
                         || codec == AV_CODEC_ID_QTRLE
                         || codec == AV_CODEC_ID_TSCC);

    if (FAILED(AdditionaDecoderInit())) {
        return E_FAIL;
    }

    if (bLAVInfoValid) {
        // Setting has_b_frames to a proper value will ensure smoother decoding of H264
        if (lavPinInfo.has_b_frames >= 0) {
            DbgLog((LOG_TRACE, 10, L"-> Setting has_b_frames to %d", lavPinInfo.has_b_frames));
            m_pAVCtx->has_b_frames = lavPinInfo.has_b_frames;
        }
    }

    // Open the decoder
    m_bInInit = TRUE;
    int ret = avcodec_open2(m_pAVCtx, m_pAVCodec, nullptr);
    m_bInInit = FALSE;
    if (ret >= 0) {
        DbgLog((LOG_TRACE, 10, L"-> ffmpeg codec opened successfully (ret: %d)", ret));
        m_nCodecId = codec;
    } else {
        DbgLog((LOG_TRACE, 10, L"-> ffmpeg codec failed to open (ret: %d)", ret));
        DestroyDecoder();
        return VFW_E_UNSUPPORTED_VIDEO;
    }

    m_iInterlaced = 0;
    for (int i = 0; i < countof(ff_interlace_capable); i++) {
        if (codec == ff_interlace_capable[i]) {
            m_iInterlaced = -1;
            break;
        }
    }

    // Detect chroma and interlaced
    if (m_pAVCtx->extradata && m_pAVCtx->extradata_size) {
        if (codec == AV_CODEC_ID_MPEG2VIDEO) {
            CMPEG2HeaderParser mpeg2Parser(extra, extralen);
            if (mpeg2Parser.hdr.valid) {
                if (mpeg2Parser.hdr.chroma < 2) {
                    m_pAVCtx->pix_fmt = AV_PIX_FMT_YUV420P;
                } else if (mpeg2Parser.hdr.chroma == 2) {
                    m_pAVCtx->pix_fmt = AV_PIX_FMT_YUV422P;
                }
                m_iInterlaced = mpeg2Parser.hdr.interlaced;
            }
        } else if (codec == AV_CODEC_ID_H264) {
            CH264SequenceParser h264parser;
            if (bH264avc)
                h264parser.ParseNALs(extra+6, extralen-6, 2);
            else
                h264parser.ParseNALs(extra, extralen, 0);
            if (h264parser.sps.valid)
                m_iInterlaced = h264parser.sps.interlaced;
        } else if (codec == AV_CODEC_ID_VC1) {
            CVC1HeaderParser vc1parser(extra, extralen);
            if (vc1parser.hdr.valid)
                m_iInterlaced = (vc1parser.hdr.interlaced ? -1 : 0);
        }
    }

    if (codec == AV_CODEC_ID_DNXHD)
        m_pAVCtx->pix_fmt = AV_PIX_FMT_YUV422P10;
    else if (codec == AV_CODEC_ID_FRAPS)
        m_pAVCtx->pix_fmt = AV_PIX_FMT_BGR24;

    if (bLAVInfoValid && codec != AV_CODEC_ID_FRAPS && m_pAVCtx->pix_fmt != AV_PIX_FMT_DXVA2_VLD)
        m_pAVCtx->pix_fmt = lavPinInfo.pix_fmt;

    DbgLog((LOG_TRACE, 10, L"AVCodec init successfull. interlaced: %d", m_iInterlaced));

    return S_OK;
}
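For reference, the six-byte prefix this function builds for AVC1 content mirrors the start of an ISO/IEC 14496-15 AVCDecoderConfigurationRecord. A sketch of the layout (note the code omits the reserved bits that the spec sets in bytes 4 and 5, which libavcodec's parser tolerates):

    /* AVCDecoderConfigurationRecord prefix as reconstructed above:
     *   extra[0] = 1                          configurationVersion
     *   extra[1] = dwProfile                  AVCProfileIndication
     *   extra[2] = 0                          profile_compatibility
     *   extra[3] = dwLevel                    AVCLevelIndication
     *   extra[4] = lengthSizeMinusOne         (spec ORs in 0xFC reserved bits)
     *   extra[5] = numOfSequenceParameterSets (spec ORs in 0xE0 reserved bits)
     * followed by length-prefixed SPS/PPS NAL units. */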
Code Example #4
File: rtpdec_mpeg4.c Project: jpcottin/FFmpeg
static void close_context(PayloadContext *data)
{
    av_freep(&data->au_headers);
    av_freep(&data->mode);
}
Code Example #5
int main(int argc, char *argv[]) {
    AVFormatContext *pFormatCtx = NULL;
    int             i, videoStream;
    AVCodecContext  *pCodecCtx = NULL;
    AVCodecParameters       *pCodecParam = NULL;
    AVCodec         *pCodec = NULL;
    AVFrame         *pFrame = NULL;
    AVPacket        packet;
    int             send_packet, receive_frame;
    //float           aspect_ratio;
    AVFrame        *pict;
    /*
    std::unique_ptr<AVFrame, std::function<void(AVFrame*)>> frame_converted{
        av_frame_alloc(),
        [](AVFrame* f){ av_free(f->data[0]); } };
    if (av_frame_copy_props(frame_converted.get(),
        frame_decoded.get()) < 0) {
        throw std::runtime_error("Copying frame properties");
    }
    if (av_image_alloc(
        frame_converted->data, frame_converted->linesize,
        video_decoder_->width(), video_decoder_->height(),
        video_decoder_->pixel_format(), 1) < 0) {
        throw std::runtime_error("Allocating picture");
    }
    */
    AVDictionary    *optionsDict = NULL;
    struct SwsContext *sws_ctx = NULL;

    SDL_Texture*    pTexture = nullptr;
    SDL_Window*     pWindows = nullptr;
    SDL_Renderer*   pRenderer = nullptr;

    SDL_Event       event;

    if (argc < 2) {
        fprintf(stderr, "Usage: test <file>\n");
        exit(1);
    }
    // Register all formats and codecs
    av_register_all();

    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    // Open video file
    if (avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if (avformat_find_stream_info(pFormatCtx, NULL)<0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, argv[1], 0);

    // Find the first video stream
    videoStream = -1;
    for (i = 0; i<pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStream = i;
            break;
        }
    if (videoStream == -1)
        return -1; // Didn't find a video stream

    // Get the codec parameters for the video stream.
    // AVStream.codec is deprecated, so use the codecpar struct (AVCodecParameters) instead.
    pCodecParam = pFormatCtx->streams[videoStream]->codecpar;
    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecParam->codec_id);
    if (pCodec == NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }
    // avcodec_open2() needs an AVCodecContext, so copy pCodecParam into a freshly allocated context.
    pCodecCtx = avcodec_alloc_context3(pCodec);
    avcodec_parameters_to_context(pCodecCtx, pCodecParam);

    // Open codec
    if (avcodec_open2(pCodecCtx, pCodec, &optionsDict)<0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame = av_frame_alloc();

    // Make a screen to put our video
#ifndef __DARWIN__
    pWindows = SDL_CreateWindow(argv[1],SDL_WINDOWPOS_CENTERED,SDL_WINDOWPOS_CENTERED,pCodecParam->width, pCodecParam->height,SDL_WINDOW_BORDERLESS|SDL_WINDOW_RESIZABLE);
#else
    // SDL1.2 leftover from the original tutorial; 'screen' and SDL_SetVideoMode
    // do not exist under SDL2, so this branch will not compile as-is.
    screen = SDL_SetVideoMode(pCodecParam->width, pCodecParam->height, 24, 0);
#endif
    if (!pWindows) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        exit(1);
    }
    
    // Allocate a place to put our YUV image on that screen
    pRenderer = SDL_CreateRenderer(pWindows, -1, 0);
    if (!pRenderer) {
        fprintf(stderr, "SDL: could not create renderer - exiting\n");
        exit(1);
    }
    pTexture = SDL_CreateTexture(pRenderer, SDL_PIXELFORMAT_YV12, SDL_TEXTUREACCESS_STREAMING, pCodecParam->width, pCodecParam->height);
    sws_ctx =
        sws_getContext
        (
        pCodecParam->width,
        pCodecParam->height,
        (AVPixelFormat)pCodecParam->format,
        pCodecParam->width,
        pCodecParam->height,
        AV_PIX_FMT_YUV420P,
        SWS_BILINEAR,
        NULL,
        NULL,
        NULL
        );
    pict = av_frame_alloc();
    if (pict == nullptr){
        exit(1);
    }
    if (av_image_alloc(pict->data, pict->linesize,
        pCodecParam->width, pCodecParam->height,
        AV_PIX_FMT_YUV420P, 1) < 0){ // must match the sws_scale destination format
        exit(1);
    }


    // Read frames and save first five frames to disk
    i = 0;
    while (av_read_frame(pFormatCtx, &packet) >= 0) {
        // Is this a packet from the video stream?
        if (packet.stream_index == videoStream) {
            // Decode video frame
            // avcodec_decode_video2() is deprecated; use avcodec_send_packet() and avcodec_receive_frame().
            send_packet = avcodec_send_packet(pCodecCtx, &packet);
            receive_frame = avcodec_receive_frame(pCodecCtx, pFrame);

            // Did we get a video frame? (avcodec_send_packet() and
            // avcodec_receive_frame() both return 0 on success)
            if (send_packet == 0 && receive_frame == 0) {
                //SDL_LockYUVOverlay(bmp);
                //SDL_LockTexture(pTexture,NULL,);
                // Convert the image into YUV format that SDL uses
                // Copy frame metadata from the decoded frame into the
                // converted frame (dst = pict, src = pFrame).
                if (av_frame_copy_props(pict, pFrame) < 0) {
                    exit(1);
                }

                sws_scale
                    (
                    sws_ctx,
                    pFrame->data,
                    pFrame->linesize,
                    0,
                    pCodecParam->height,
                    pict->data,
                    pict->linesize
                    );
                
                //SDL_UnlockYUVOverlay(bmp);
                SDL_UpdateYUVTexture(pTexture, NULL, pict->data[0], pict->linesize[0], pict->data[1], pict->linesize[1], pict->data[2], pict->linesize[2]);
                SDL_RenderCopy(pRenderer, pTexture, NULL, NULL);
                SDL_RenderPresent(pRenderer);

            }
        }

        // Free the packet that was allocated by av_read_frame
        av_packet_unref(&packet);
        SDL_PollEvent(&event);
        switch (event.type) {
        case SDL_QUIT:
            SDL_DestroyRenderer(pRenderer);
            SDL_DestroyTexture(pTexture);
            SDL_DestroyWindow(pWindows);
            SDL_Quit();
            exit(0);
            break;
        default:
            break;
        }

    }

    // Free the YUV frame
    av_frame_free(&pFrame);
    //free pict
    av_freep(&pict->data[0]);
    av_frame_free(&pict);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    avformat_close_input(&pFormatCtx);

    return 0;
}
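One caveat about the decode loop above: avcodec_send_packet()/avcodec_receive_frame() are not strictly one-in/one-out, since a single packet can yield zero or several frames. A more robust inner loop, sketched with the same variable names, drains the decoder until it reports AVERROR(EAGAIN):

    send_packet = avcodec_send_packet(pCodecCtx, &packet);
    if (send_packet < 0 && send_packet != AVERROR(EAGAIN))
        exit(1); /* real code should report the error */
    /* Drain every frame the decoder has ready for this packet. */
    while ((receive_frame = avcodec_receive_frame(pCodecCtx, pFrame)) == 0) {
        sws_scale(sws_ctx, pFrame->data, pFrame->linesize, 0,
                  pCodecParam->height, pict->data, pict->linesize);
        SDL_UpdateYUVTexture(pTexture, NULL,
                             pict->data[0], pict->linesize[0],
                             pict->data[1], pict->linesize[1],
                             pict->data[2], pict->linesize[2]);
        SDL_RenderCopy(pRenderer, pTexture, NULL, NULL);
        SDL_RenderPresent(pRenderer);
    }
    if (receive_frame != AVERROR(EAGAIN) && receive_frame != AVERROR_EOF)
        exit(1);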
Code Example #6
File: ac3enc_float.c Project: upsilon/libav
/**
 * Finalize MDCT and free allocated memory.
 */
static av_cold void mdct_end(AC3MDCTContext *mdct)
{
    ff_mdct_end(&mdct->fft);
    av_freep(&mdct->window);
}
Code Example #7
File: resample2.c Project: c0ntrol/veejay
void vj_av_resample_close(void *ptr){
	AVResampleContext *c = (AVResampleContext*) ptr;
    av_freep(&c->filter_bank);
    av_freep(&c);
}
Code Example #8
AUD_FFMPEGWriter::AUD_FFMPEGWriter(std::string filename, AUD_DeviceSpecs specs, AUD_Container format, AUD_Codec codec, unsigned int bitrate) :
	m_position(0),
	m_specs(specs),
	m_input_samples(0)
{
	static const char* formats[] = { NULL, "ac3", "flac", "matroska", "mp2", "mp3", "ogg", "wav" };

	m_formatCtx = avformat_alloc_context();
	if (!m_formatCtx) AUD_THROW(AUD_ERROR_FFMPEG, context_error);

	strcpy(m_formatCtx->filename, filename.c_str());
	m_outputFmt = m_formatCtx->oformat = av_guess_format(formats[format], filename.c_str(), NULL);
	if (!m_outputFmt) {
		avformat_free_context(m_formatCtx);
		AUD_THROW(AUD_ERROR_FFMPEG, context_error);
	}

	switch(codec)
	{
	case AUD_CODEC_AAC:
		m_outputFmt->audio_codec = AV_CODEC_ID_AAC;
		break;
	case AUD_CODEC_AC3:
		m_outputFmt->audio_codec = AV_CODEC_ID_AC3;
		break;
	case AUD_CODEC_FLAC:
		m_outputFmt->audio_codec = AV_CODEC_ID_FLAC;
		break;
	case AUD_CODEC_MP2:
		m_outputFmt->audio_codec = AV_CODEC_ID_MP2;
		break;
	case AUD_CODEC_MP3:
		m_outputFmt->audio_codec = AV_CODEC_ID_MP3;
		break;
	case AUD_CODEC_PCM:
		switch(specs.format)
		{
		case AUD_FORMAT_U8:
			m_outputFmt->audio_codec = AV_CODEC_ID_PCM_U8;
			break;
		case AUD_FORMAT_S16:
			m_outputFmt->audio_codec = AV_CODEC_ID_PCM_S16LE;
			break;
		case AUD_FORMAT_S24:
			m_outputFmt->audio_codec = AV_CODEC_ID_PCM_S24LE;
			break;
		case AUD_FORMAT_S32:
			m_outputFmt->audio_codec = AV_CODEC_ID_PCM_S32LE;
			break;
		case AUD_FORMAT_FLOAT32:
			m_outputFmt->audio_codec = AV_CODEC_ID_PCM_F32LE;
			break;
		case AUD_FORMAT_FLOAT64:
			m_outputFmt->audio_codec = AV_CODEC_ID_PCM_F64LE;
			break;
		default:
			m_outputFmt->audio_codec = AV_CODEC_ID_NONE;
			break;
		}
		break;
	case AUD_CODEC_VORBIS:
		m_outputFmt->audio_codec = AV_CODEC_ID_VORBIS;
		break;
	default:
		m_outputFmt->audio_codec = AV_CODEC_ID_NONE;
		break;
	}

	try
	{
		if(m_outputFmt->audio_codec == AV_CODEC_ID_NONE)
			AUD_THROW(AUD_ERROR_SPECS, codec_error);

		m_stream = avformat_new_stream(m_formatCtx, NULL);
		if(!m_stream)
			AUD_THROW(AUD_ERROR_FFMPEG, stream_error);

		m_codecCtx = m_stream->codec;
		m_codecCtx->codec_id = m_outputFmt->audio_codec;
		m_codecCtx->codec_type = AVMEDIA_TYPE_AUDIO;
		m_codecCtx->bit_rate = bitrate;
		m_codecCtx->sample_rate = int(m_specs.rate);
		m_codecCtx->channels = m_specs.channels;
		m_codecCtx->time_base.num = 1;
		m_codecCtx->time_base.den = m_codecCtx->sample_rate;

		switch(m_specs.format)
		{
		case AUD_FORMAT_U8:
			m_convert = AUD_convert_float_u8;
			m_codecCtx->sample_fmt = AV_SAMPLE_FMT_U8;
			break;
		case AUD_FORMAT_S16:
			m_convert = AUD_convert_float_s16;
			m_codecCtx->sample_fmt = AV_SAMPLE_FMT_S16;
			break;
		case AUD_FORMAT_S32:
			m_convert = AUD_convert_float_s32;
			m_codecCtx->sample_fmt = AV_SAMPLE_FMT_S32;
			break;
		case AUD_FORMAT_FLOAT32:
			m_convert = AUD_convert_copy<float>;
			m_codecCtx->sample_fmt = AV_SAMPLE_FMT_FLT;
			break;
		case AUD_FORMAT_FLOAT64:
			m_convert = AUD_convert_float_double;
			m_codecCtx->sample_fmt = AV_SAMPLE_FMT_DBL;
			break;
		default:
			AUD_THROW(AUD_ERROR_FFMPEG, format_error);
		}

		try
		{
			if(m_formatCtx->oformat->flags & AVFMT_GLOBALHEADER)
				m_codecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;

			AVCodec* codec = avcodec_find_encoder(m_codecCtx->codec_id);
			if(!codec)
				AUD_THROW(AUD_ERROR_FFMPEG, codec_error);

			if(codec->sample_fmts) {
				// Check if the preferred sample format for this codec is supported.
				const enum AVSampleFormat *p = codec->sample_fmts;
				for(; *p != -1; p++) {
					if(*p == m_stream->codec->sample_fmt)
						break;
				}
				if(*p == -1) {
					// Sample format incompatible with codec. Defaulting to a format known to work.
					m_stream->codec->sample_fmt = codec->sample_fmts[0];
				}
			}

			if(avcodec_open2(m_codecCtx, codec, NULL))
				AUD_THROW(AUD_ERROR_FFMPEG, codec_error);

			m_output_buffer.resize(FF_MIN_BUFFER_SIZE);
			int samplesize = AUD_MAX(AUD_SAMPLE_SIZE(m_specs), AUD_DEVICE_SAMPLE_SIZE(m_specs));

			if(m_codecCtx->frame_size <= 1) {
				m_input_size = FF_MIN_BUFFER_SIZE * 8 / m_codecCtx->bits_per_coded_sample / m_codecCtx->channels;
				m_input_buffer.resize(m_input_size * samplesize);
			}
			else
			{
				m_input_buffer.resize(m_codecCtx->frame_size * samplesize);
				m_input_size = m_codecCtx->frame_size;
			}

#ifdef FFMPEG_HAVE_ENCODE_AUDIO2
			m_frame = av_frame_alloc();
			if (!m_frame)
				AUD_THROW(AUD_ERROR_FFMPEG, codec_error);
			avcodec_get_frame_defaults(m_frame);
			m_frame->linesize[0]    = m_input_size * samplesize;
			m_frame->format         = m_codecCtx->sample_fmt;
			m_frame->nb_samples     = m_input_size;
#  ifdef FFMPEG_HAVE_AVFRAME_SAMPLE_RATE
			m_frame->sample_rate    = m_codecCtx->sample_rate;
#  endif
#  ifdef FFMPEG_HAVE_FRAME_CHANNEL_LAYOUT
			m_frame->channel_layout = m_codecCtx->channel_layout;
#  endif
			m_sample_size = av_get_bytes_per_sample(m_codecCtx->sample_fmt);
			m_frame_pts = 0;
			m_deinterleave = av_sample_fmt_is_planar(m_codecCtx->sample_fmt);
			if(m_deinterleave)
				m_deinterleave_buffer.resize(m_input_size * m_codecCtx->channels * m_sample_size);
#endif

			try
			{
				if(avio_open(&m_formatCtx->pb, filename.c_str(), AVIO_FLAG_WRITE))
					AUD_THROW(AUD_ERROR_FILE, file_error);

				avformat_write_header(m_formatCtx, NULL);
			}
			catch(AUD_Exception&)
			{
				avcodec_close(m_codecCtx);
				av_freep(&m_formatCtx->streams[0]->codec);
				throw;
			}
		}
		catch(AUD_Exception&)
		{
			av_freep(&m_formatCtx->streams[0]);
			throw;
		}
	}
	catch(AUD_Exception&)
	{
		av_free(m_formatCtx);
		throw;
	}
}
Code Example #9
int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src)
{
    const AVCodec *orig_codec = dest->codec;
    uint8_t *orig_priv_data = dest->priv_data;

    if (avcodec_is_open(dest)) { // check that the dest context is uninitialized
        av_log(dest, AV_LOG_ERROR,
               "Tried to copy AVCodecContext %p into already-initialized %p\n",
               src, dest);
        return AVERROR(EINVAL);
    }

    av_opt_free(dest);

    memcpy(dest, src, sizeof(*dest));

    dest->priv_data       = orig_priv_data;

    if (orig_priv_data)
        av_opt_copy(orig_priv_data, src->priv_data);

    dest->codec           = orig_codec;

    /* set values specific to opened codecs back to their default state */
    dest->slice_offset    = NULL;
    dest->hwaccel         = NULL;
    dest->internal        = NULL;

    /* reallocate values that should be allocated separately */
    dest->rc_eq           = NULL;
    dest->extradata       = NULL;
    dest->intra_matrix    = NULL;
    dest->inter_matrix    = NULL;
    dest->rc_override     = NULL;
    dest->subtitle_header = NULL;
    if (src->rc_eq) {
        dest->rc_eq = av_strdup(src->rc_eq);
        if (!dest->rc_eq)
            return AVERROR(ENOMEM);
    }

#define alloc_and_copy_or_fail(obj, size, pad) \
    if (src->obj && size > 0) { \
        dest->obj = av_malloc(size + pad); \
        if (!dest->obj) \
            goto fail; \
        memcpy(dest->obj, src->obj, size); \
        if (pad) \
            memset(((uint8_t *) dest->obj) + size, 0, pad); \
    }
    alloc_and_copy_or_fail(extradata,    src->extradata_size,
                           FF_INPUT_BUFFER_PADDING_SIZE);
    alloc_and_copy_or_fail(intra_matrix, 64 * sizeof(int16_t), 0);
    alloc_and_copy_or_fail(inter_matrix, 64 * sizeof(int16_t), 0);
    alloc_and_copy_or_fail(rc_override,  src->rc_override_count * sizeof(*src->rc_override), 0);
    alloc_and_copy_or_fail(subtitle_header, src->subtitle_header_size, 1);
    av_assert0(dest->subtitle_header_size == src->subtitle_header_size);
#undef alloc_and_copy_or_fail

    return 0;

fail:
    av_freep(&dest->rc_override);
    av_freep(&dest->intra_matrix);
    av_freep(&dest->inter_matrix);
    av_freep(&dest->extradata);
    av_freep(&dest->rc_eq);
    return AVERROR(ENOMEM);
}
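Before AVCodecParameters replaced it, the usual consumer of avcodec_copy_context() was remuxing code that cloned an input stream's context onto a new output stream, roughly like this sketch (ifmt_ctx/ofmt_ctx are illustrative names):

    AVStream *ist = ifmt_ctx->streams[i];
    AVStream *ost = avformat_new_stream(ofmt_ctx, NULL);
    if (!ost)
        return AVERROR(ENOMEM);
    int ret = avcodec_copy_context(ost->codec, ist->codec);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Failed to copy codec context\n");
        return ret;
    }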
Code Example #10
/**
 * Finalize MDCT and free allocated memory.
 *
 * @param s  AC-3 encoder private context
 */
av_cold void ff_ac3_float_mdct_end(AC3EncodeContext *s)
{
    ff_mdct_end(&s->mdct);
    av_freep(&s->mdct_window);
}
Code Example #11
static int handle_connection(RTSPContext *c)
{
    if (c->poll_entry == NULL) {
      //printf("c -> poll entry is null!! \n");
      return 0;
    }
    int len, ret;

    switch(c->state) {
    case HTTPSTATE_READY:
      //printf("http state ready \n");
      break;
    case RTSPSTATE_WAIT_REQUEST:
        /* timeout ? */
        if ((c->timeout - cur_time) < 0)
            return -1;
        if (c->poll_entry->revents & (POLLERR | POLLHUP))
            return -1;

        /* no need to read if no events */
        if (!(c->poll_entry->revents & POLLIN))
            return 0;
        /* read the data */
    read_loop:
        len = recv(c->fd, c->buffer_ptr, 1, 0);
        if (len < 0) {
          printf("recv error\n");
          return -1;
        } else if (len == 0) {
            return -1;
        } else {
          /* search for end of request. */
          uint8_t *ptr;
          c->buffer_ptr += len;
          ptr = c->buffer_ptr;
          if ((ptr >= c->buffer + 2 && !memcmp(ptr-2, "\n\n", 2)) ||
            (ptr >= c->buffer + 4 && !memcmp(ptr-4, "\r\n\r\n", 4))) {
            /* request found : parse it and reply */
            ret = rtsp_parse_request(c);
            if (ret < 0)
              return -1;
          } else if (ptr >= c->buffer_end) {
              /* request too long: cannot do anything */
            printf("request too long!! \n");
              return -1;
          } else goto read_loop;
        }
        break;

    case RTPSTATE_SEND_DATA:
        /* for packetized output, we consider we can always write (the
           input streams set the speed). It may be better to verify
           that we do not rely too much on the kernel queues */
        if (c->poll_entry->revents & (POLLERR | POLLHUP))
            return -1;

        /* no need to read if no events */
        if (!(c->poll_entry->revents & POLLOUT))
            return 0;

        if (rtp_send_data(c) < 0)
            return -1;
        break;

    case RTSPSTATE_SEND_REPLY:
        //printf("rtspstate send reply \n");
        if (c->poll_entry->revents & (POLLERR | POLLHUP))
            goto close_connection;
        /* no need to write if no events */
        if (!(c->poll_entry->revents & POLLOUT))
            return 0;
        printf("send! reply \n");
        len = send(c->fd, c->buffer_ptr, c->buffer_end - c->buffer_ptr, 0);
        if (len < 0) {
            printf("send error TT! \n");
            goto close_connection;
        } else {
            printf("send reply success!! len = %d \n", len);
            c->buffer_ptr += len;
            if (c->buffer_ptr >= c->buffer_end) {
                /* all the buffer was sent : wait for a new request */
                av_freep(&c->pb_buffer);
                start_wait_request(c);
            }
        }
        break;
    case RTSPSTATE_SEND_PACKET:
        printf("rtsp send packet! \n");
        if (c->poll_entry->revents & (POLLERR | POLLHUP)) {
            av_freep(&c->pb_buffer);
            return -1;
        }
        /* no need to write if no events */
        if (!(c->poll_entry->revents & POLLOUT))
            return 0;
        len = send(c->fd, c->buffer_ptr,
                    c->buffer_end - c->buffer_ptr, 0);
        if (len < 0) {
          /* error : close connection */
          av_freep(&c->pb_buffer);
          return -1;
        } else {
            c->buffer_ptr += len;
            if (c->buffer_ptr >= c->buffer_end) {
                // all the buffer was sent : wait for a new request
                av_freep(&c->buffer);
                c->state = RTSPSTATE_WAIT_REQUEST;
            }
        }
        break;
    default:
        return -1;
    }
    return 0;

close_connection:
    printf("close connection!\n");
    av_freep(&c->pb_buffer);
    return -1;
}
Code Example #12
File: rematrix.c Project: 0day-ci/FFmpeg
av_cold void swri_rematrix_free(SwrContext *s){
    av_freep(&s->native_matrix);
    av_freep(&s->native_one);
    av_freep(&s->native_simd_matrix);
    av_freep(&s->native_simd_one);
}
Code Example #13
File: libopenjpegenc.c Project: BlackOmega/FFmpeg
static av_cold int libopenjpeg_encode_init(AVCodecContext *avctx)
{
    LibOpenJPEGContext *ctx = avctx->priv_data;
    int err = AVERROR(ENOMEM);

    opj_set_default_encoder_parameters(&ctx->enc_params);

    ctx->enc_params.cp_rsiz = ctx->profile;
    ctx->enc_params.mode = !!avctx->global_quality;
    ctx->enc_params.cp_cinema = ctx->cinema_mode;
    ctx->enc_params.prog_order = ctx->prog_order;
    ctx->enc_params.numresolution = ctx->numresolution;
    ctx->enc_params.cp_disto_alloc = ctx->disto_alloc;
    ctx->enc_params.cp_fixed_alloc = ctx->fixed_alloc;
    ctx->enc_params.cp_fixed_quality = ctx->fixed_quality;
    ctx->enc_params.tcp_numlayers = ctx->numlayers;
    ctx->enc_params.tcp_rates[0] = FFMAX(avctx->compression_level, 0) * 2;

    if (ctx->cinema_mode > 0) {
        ctx->enc_params.irreversible = 1;
        ctx->enc_params.tcp_mct = 1;
        ctx->enc_params.tile_size_on = 0;
        /* no subsampling */
        ctx->enc_params.cp_tdx=1;
        ctx->enc_params.cp_tdy=1;
        ctx->enc_params.subsampling_dx = 1;
        ctx->enc_params.subsampling_dy = 1;
        /* Tile and Image shall be at (0,0) */
        ctx->enc_params.cp_tx0 = 0;
        ctx->enc_params.cp_ty0 = 0;
        ctx->enc_params.image_offset_x0 = 0;
        ctx->enc_params.image_offset_y0 = 0;
        /* Codeblock size= 32*32 */
        ctx->enc_params.cblockw_init = 32;
        ctx->enc_params.cblockh_init = 32;
        ctx->enc_params.csty |= 0x01;
        /* No ROI */
        ctx->enc_params.roi_compno = -1;

        if (ctx->enc_params.prog_order != CPRL) {
            av_log(avctx, AV_LOG_ERROR, "prog_order forced to CPRL\n");
            ctx->enc_params.prog_order = CPRL;
        }
        ctx->enc_params.tp_flag = 'C';
        ctx->enc_params.tp_on = 1;
    }

    ctx->compress = opj_create_compress(ctx->format);
    if (!ctx->compress) {
        av_log(avctx, AV_LOG_ERROR, "Error creating the compressor\n");
        return AVERROR(ENOMEM);
    }

    ctx->image = mj2_create_image(avctx, &ctx->enc_params);
    if (!ctx->image) {
        av_log(avctx, AV_LOG_ERROR, "Error creating the mj2 image\n");
        err = AVERROR(EINVAL);
        goto fail;
    }
    opj_setup_encoder(ctx->compress, &ctx->enc_params, ctx->image);

    ctx->stream = opj_cio_open((opj_common_ptr) ctx->compress, NULL, 0);
    if (!ctx->stream) {
        av_log(avctx, AV_LOG_ERROR, "Error creating the cio stream\n");
        err = AVERROR(ENOMEM);
        goto fail;
    }

    avctx->coded_frame = av_frame_alloc();
    if (!avctx->coded_frame) {
        av_log(avctx, AV_LOG_ERROR, "Error allocating coded frame\n");
        goto fail;
    }

    memset(&ctx->event_mgr, 0, sizeof(opj_event_mgr_t));
    ctx->event_mgr.info_handler    = info_callback;
    ctx->event_mgr.error_handler = error_callback;
    ctx->event_mgr.warning_handler = warning_callback;
    opj_set_event_mgr((opj_common_ptr) ctx->compress, &ctx->event_mgr, avctx);

    return 0;

fail:
    opj_cio_close(ctx->stream);
    ctx->stream = NULL;
    opj_destroy_compress(ctx->compress);
    ctx->compress = NULL;
    opj_image_destroy(ctx->image);
    ctx->image = NULL;
    av_freep(&avctx->coded_frame);
    return err;
}
Code Example #14
File: opt.c Project: Anistor-info/project1
int av_set_string3(void *obj, const char *name, const char *val, int alloc, const AVOption **o_out){
    int ret;
    const AVOption *o= av_find_opt(obj, name, NULL, 0, 0);
    if (o_out)
        *o_out = o;
    if(!o)
        return AVERROR(ENOENT);
    if(!val || o->offset<=0)
        return AVERROR(EINVAL);

    if(o->type == FF_OPT_TYPE_BINARY){
        uint8_t **dst = (uint8_t **)(((uint8_t*)obj) + o->offset);
        int *lendst = (int *)(dst + 1);
        uint8_t *bin, *ptr;
        int len = strlen(val);
        av_freep(dst);
        *lendst = 0;
        if (len & 1) return AVERROR(EINVAL);
        len /= 2;
        ptr = bin = av_malloc(len);
        if (!bin)
            return AVERROR(ENOMEM); /* guard against allocation failure */
        while (*val) {
            int a = hexchar2int(*val++);
            int b = hexchar2int(*val++);
            if (a < 0 || b < 0) {
                av_free(bin);
                return AVERROR(EINVAL);
            }
            *ptr++ = (a << 4) | b;
        }
        *dst = bin;
        *lendst = len;
        return 0;
    }
    if(o->type != FF_OPT_TYPE_STRING){
        int notfirst=0;
        for(;;){
            int i;
            char buf[256];
            int cmd=0;
            double d;
            const char *error = NULL;

            if(*val == '+' || *val == '-')
                cmd= *(val++);

            for(i=0; i<sizeof(buf)-1 && val[i] && val[i]!='+' && val[i]!='-'; i++)
                buf[i]= val[i];
            buf[i]=0;

            d = ff_eval2(buf, const_values, const_names, NULL, NULL, NULL, NULL, NULL, &error);
            if(isnan(d)) {
                const AVOption *o_named= av_find_opt(obj, buf, o->unit, 0, 0);
                if(o_named && o_named->type == FF_OPT_TYPE_CONST)
                    d= o_named->default_val;
                else if(!strcmp(buf, "default")) d= o->default_val;
                else if(!strcmp(buf, "max"    )) d= o->max;
                else if(!strcmp(buf, "min"    )) d= o->min;
                else if(!strcmp(buf, "none"   )) d= 0;
                else if(!strcmp(buf, "all"    )) d= ~0;
                else {
                    if (error)
                        av_log(NULL, AV_LOG_ERROR, "Unable to parse option value \"%s\": %s\n", val, error);
                    return AVERROR(EINVAL);
                }
            }
            if(o->type == FF_OPT_TYPE_FLAGS){
                if     (cmd=='+') d= av_get_int(obj, name, NULL) | (int64_t)d;
                else if(cmd=='-') d= av_get_int(obj, name, NULL) &~(int64_t)d;
            }else{
                if     (cmd=='+') d= notfirst*av_get_double(obj, name, NULL) + d;
                else if(cmd=='-') d= notfirst*av_get_double(obj, name, NULL) - d;
            }

            if ((ret = av_set_number2(obj, name, d, 1, 1, o_out)) < 0)
                return ret;
            val+= i;
            if(!*val)
                return 0;
            notfirst=1;
        }
        return AVERROR(EINVAL);
    }

    if(alloc){
        av_free(*(void**)(((uint8_t*)obj) + o->offset));
        val= av_strdup(val);
    }

    memcpy(((uint8_t*)obj) + o->offset, &val, sizeof(val));
    return 0;
}
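av_set_string3() is the legacy option-setting entry point, long since superseded by av_opt_set(). Under that old API, a call looked roughly like this sketch (avctx stands in for any AVOptions-enabled struct):

    const AVOption *opt = NULL;
    /* Parse and set an integer-typed option from a string; with alloc=0 the
     * value is only evaluated, not duplicated (duplication matters only for
     * string-typed options). */
    int ret = av_set_string3(avctx, "qmin", "2", 0, &opt);
    if (ret < 0)
        av_log(avctx, AV_LOG_ERROR, "Failed to set option qmin\n");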
Code Example #15
File: resampling_audio.c Project: CoerSuer/FFmpeg
int main(int argc, char **argv)
{
    int64_t src_ch_layout = AV_CH_LAYOUT_STEREO, dst_ch_layout = AV_CH_LAYOUT_SURROUND;
    int src_rate = 48000, dst_rate = 44100;
    uint8_t **src_data = NULL, **dst_data = NULL;
    int src_nb_channels = 0, dst_nb_channels = 0;
    int src_linesize, dst_linesize;
    int src_nb_samples = 1024, dst_nb_samples, max_dst_nb_samples;
    enum AVSampleFormat src_sample_fmt = AV_SAMPLE_FMT_DBL, dst_sample_fmt = AV_SAMPLE_FMT_S16;
    const char *dst_filename = NULL;
    FILE *dst_file;
    int dst_bufsize;
    const char *fmt;
    struct SwrContext *swr_ctx;
    double t;
    int ret;

    if (argc != 2) {
        fprintf(stderr, "Usage: %s output_file\n"
                "API example program to show how to resample an audio stream with libswresample.\n"
                "This program generates a series of audio frames, resamples them to a specified "
                "output format and rate and saves them to an output file named output_file.\n",
            argv[0]);
        exit(1);
    }
    dst_filename = argv[1];

    dst_file = fopen(dst_filename, "wb");
    if (!dst_file) {
        fprintf(stderr, "Could not open destination file %s\n", dst_filename);
        exit(1);
    }

    /* create resampler context */
    swr_ctx = swr_alloc();
    if (!swr_ctx) {
        fprintf(stderr, "Could not allocate resampler context\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* set options */
    av_opt_set_int(swr_ctx, "in_channel_layout",    src_ch_layout, 0);
    av_opt_set_int(swr_ctx, "in_sample_rate",       src_rate, 0);
    av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0);

    av_opt_set_int(swr_ctx, "out_channel_layout",    dst_ch_layout, 0);
    av_opt_set_int(swr_ctx, "out_sample_rate",       dst_rate, 0);
    av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0);

    /* initialize the resampling context */
    if ((ret = swr_init(swr_ctx)) < 0) {
        fprintf(stderr, "Failed to initialize the resampling context\n");
        goto end;
    }

    /* allocate source and destination samples buffers */

    src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout);
    ret = av_samples_alloc_array_and_samples(&src_data, &src_linesize, src_nb_channels,
                                             src_nb_samples, src_sample_fmt, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate source samples\n");
        goto end;
    }

    /* compute the number of converted samples: buffering is avoided
     * ensuring that the output buffer will contain at least all the
     * converted input samples */
    max_dst_nb_samples = dst_nb_samples =
        av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);

    /* buffer is going to be directly written to a rawaudio file, no alignment */
    dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout);
    ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, dst_nb_channels,
                                             dst_nb_samples, dst_sample_fmt, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate destination samples\n");
        goto end;
    }

    t = 0;
    do {
        /* generate synthetic audio */
        fill_samples((double *)src_data[0], src_nb_samples, src_nb_channels, src_rate, &t);

        /* compute destination number of samples */
        dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, src_rate) +
                                        src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
        if (dst_nb_samples > max_dst_nb_samples) {
            av_free(dst_data[0]);
            ret = av_samples_alloc(dst_data, &dst_linesize, dst_nb_channels,
                                   dst_nb_samples, dst_sample_fmt, 1);
            if (ret < 0)
                break;
            max_dst_nb_samples = dst_nb_samples;
        }

        /* convert to destination format */
        ret = swr_convert(swr_ctx, dst_data, dst_nb_samples, (const uint8_t **)src_data, src_nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error while converting\n");
            goto end;
        }
        dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels,
                                                 ret, dst_sample_fmt, 1);
        if (dst_bufsize < 0) {
            fprintf(stderr, "Could not get sample buffer size\n");
            goto end;
        }
        printf("t:%f in:%d out:%d\n", t, src_nb_samples, ret);
        fwrite(dst_data[0], 1, dst_bufsize, dst_file);
    } while (t < 10);

    if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt)) < 0)
        goto end;
    fprintf(stderr, "Resampling succeeded. Play the output file with the command:\n"
            "ffplay -f %s -channel_layout %"PRId64" -channels %d -ar %d %s\n",
            fmt, dst_ch_layout, dst_nb_channels, dst_rate, dst_filename);

end:
    if (dst_file)
        fclose(dst_file);

    if (src_data)
        av_freep(&src_data[0]);
    av_freep(&src_data);

    if (dst_data)
        av_freep(&dst_data[0]);
    av_freep(&dst_data);

    swr_free(&swr_ctx);
    return ret < 0;
}
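The two helpers this example calls but does not show, fill_samples() and get_format_from_sample_fmt(), are defined elsewhere in the same FFmpeg example file; the sketches below stay close to the originals (fill_samples() needs <math.h>):

    static int get_format_from_sample_fmt(const char **fmt,
                                          enum AVSampleFormat sample_fmt)
    {
        int i;
        struct sample_fmt_entry {
            enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
        } sample_fmt_entries[] = {
            { AV_SAMPLE_FMT_U8,  "u8",    "u8"    },
            { AV_SAMPLE_FMT_S16, "s16be", "s16le" },
            { AV_SAMPLE_FMT_S32, "s32be", "s32le" },
            { AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
            { AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
        };
        *fmt = NULL;
        for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
            struct sample_fmt_entry *entry = &sample_fmt_entries[i];
            if (sample_fmt == entry->sample_fmt) {
                *fmt = AV_NE(entry->fmt_be, entry->fmt_le);
                return 0;
            }
        }
        fprintf(stderr, "Sample format %s not supported as output format\n",
                av_get_sample_fmt_name(sample_fmt));
        return AVERROR(EINVAL);
    }

    /* Generate a 440 Hz sine tone, duplicated across all channels. */
    static void fill_samples(double *dst, int nb_samples, int nb_channels,
                             int sample_rate, double *t)
    {
        int i, j;
        double tincr = 1.0 / sample_rate, *dstp = dst;
        const double c = 2 * M_PI * 440.0;

        for (i = 0; i < nb_samples; i++) {
            *dstp = sin(c * *t);
            for (j = 1; j < nb_channels; j++)
                dstp[j] = dstp[0];
            dstp += nb_channels;
            *t += tincr;
        }
    }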
Code Example #16
static int cmf_parser_next_slice(AVFormatContext *s, int *index, int first)
{
    int i, j, ret=-1;
    struct cmf *bs = s->priv_data;
    struct cmfvpb *oldvpb = bs->cmfvpb;
    struct cmfvpb *newvpb = NULL;
    AVInputFormat *in_fmt = NULL;
    av_log(s, AV_LOG_INFO, "cmf_parser_next_slice:%d\n", index);
    ret = cmfvpb_dup_pb(s->pb, &newvpb, index);
    if (ret < 0) {
        av_log(s, AV_LOG_INFO, "cmfvpb_dup_pb failed %s---%d [%d]\n",__FUNCTION__,__LINE__,ret);
        return ret;
    }
    if (bs->sctx) {
        avformat_free_context(bs->sctx);
    }
    bs->sctx = NULL;
    if (!(bs->sctx = avformat_alloc_context())) {
        av_log(s, AV_LOG_INFO, "cmf_parser_next_slice avformat_alloc_context failed!\n");
        return AVERROR(ENOMEM);
    }
    ret = av_probe_input_buffer(newvpb->pb, &in_fmt, "", NULL, 0, 0);
    if (ret < 0) {
        av_log(s, AV_LOG_INFO, "av_probe_input_buffer failed %s---%d [%d]\n",__FUNCTION__,__LINE__,ret);
        if (bs->sctx) {
            avformat_free_context(bs->sctx);
            bs->sctx = NULL;
        }
        return ret;
    }
    bs->sctx->pb = newvpb->pb;
    ret = avformat_open_input(&bs->sctx, "", NULL, NULL);
    if (ret < 0) {
        av_log(s, AV_LOG_INFO, "cmf_parser_next_slice:avformat_open_input failed \n");
        if (bs->sctx) {
            avformat_free_context(bs->sctx);
            bs->sctx = NULL;
        }
        goto fail;
    }
    if (!memcmp(bs->sctx->iformat->name,"flv",3)) {
        ret=av_find_stream_info(bs->sctx);
        av_log(s, AV_LOG_INFO, "xxx-------av_find_stream_info=%d",ret);
    }
    if (first) {
        /* Create new AVStreams for each stream in this slice
         * and add them to the output format context. */
        if (!memcmp(bs->sctx->iformat->name,"mov",3))
            bs->next_mp4_flag = 1;
        for (j = 0; j < (int)bs->sctx->nb_streams ; j++)   {
            AVStream *st = av_new_stream(s, 0);
            if (!st) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            avcodec_copy_context(st->codec, bs->sctx->streams[j]->codec);
        }
        for (i = 0; i < (int)s->nb_streams ; i++)   {
            AVStream *st = s->streams[i];
            AVStream *sst = bs->sctx->streams[i];
            st->id=sst->id;
            if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
                st->time_base.den =  sst->time_base.den;
                st->time_base.num = sst->time_base.num;
                bs->first_slice_audio_index = i;
            }
            if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
                st->time_base.den = sst->time_base.den;
                st->time_base.num = sst->time_base.num;
                bs->first_mp4_video_base_time_num = sst->time_base.num;
                bs->first_mp4_video_base_time_den = sst->time_base.den;
                bs->first_slice_video_index = i;
            }
        }
        s->duration = newvpb->total_duration*1000;
        av_log(s, AV_LOG_INFO, "get duration  [%lld]us [%lld]ms [%lld]s\n", s->duration,newvpb->total_duration,(newvpb->total_duration/1000));
    } else {
        bs->stream_index_changed = 0;
        if (!memcmp(bs->sctx->iformat->name,"mpegts",6)) {
            if(bs->sctx->streams[bs->first_slice_audio_index]->codec->codec_type != s->streams[bs->first_slice_audio_index]->codec->codec_type) {
                bs->stream_index_changed = 1;
                AVStream *temp = NULL;
                AVStream *audio = bs->sctx->streams[bs->first_slice_audio_index];
                temp = bs->sctx->streams[bs->first_slice_video_index];
                bs->sctx->streams[bs->first_slice_video_index] = audio;
                bs->sctx->streams[bs->first_slice_audio_index] = temp;
            }
        }
        if (!memcmp(bs->sctx->iformat->name,"mov",3)) {
            #if 1
            /*
            int audio_index = -1;
            int video_index = -1;
            AVStream *cmf_audio = NULL;
            AVStream *cmf_video = NULL;
            for (i = 0; i < (int)bs->sctx->nb_streams ; i++) {
                if(bs->sctx->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO)
                    cmf_audio = bs->sctx->streams[i];
                if(bs->sctx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO)
                    cmf_video = bs->sctx->streams[i];
            }
            if(cmf_audio && cmf_video) {*/
                for (i = 0; i < (int)s->nb_streams ; i++) {
                    AVStream *st = s->streams[i];
                    AVStream *sst = bs->sctx->streams[i];
                    if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
                        st->time_base.den =  sst->time_base.den;
                        st->time_base.num = sst->time_base.num;
                        //audio_index = i;
                    }
                    if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
                        st->time_base.den = sst->time_base.den;
                        st->time_base.num = sst->time_base.num;
                        //video_index = i;
                    }
                    if(st->codec->extradata)
                        av_freep(&(st->codec->extradata));
                    if(sst->codec->extradata && sst->codec->extradata_size > 0) {
                        st->codec->extradata = av_malloc(sst->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
                        if(st->codec->extradata) {
                             memcpy(st->codec->extradata, sst->codec->extradata, sst->codec->extradata_size);
                             memset(((uint8_t *) st->codec->extradata) + sst->codec->extradata_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
                        }
                    }
                }
                /*
                if(audio_index >= 0 && bs->sctx->streams[bs->first_mp4_audio_index]->codec->codec_type != s->streams[audio_index]->codec->codec_type) {
                    bs->stream_index_changed = 1;
                    AVStream *temp = NULL;
                    AVStream *audio = s->streams[audio_index];
                    temp = s->streams[video_index];
                    s->streams[video_index] = audio;
                    s->streams[audio_index] = temp;
                    av_log(NULL,AV_LOG_INFO,"--------->tao_cmf_index_change first_mp4_audio_index=%d, audio_index=%d, video_index=%d",bs->first_mp4_audio_index,audio_index,video_index);
                }*/
                bs->next_mp4_flag = 1;
                #endif
            }
        }
    if (memcmp(bs->sctx->iformat->name, "flv", 3)) {
        /* non-FLV slice: rewind to the start of the media data */
        ret = avio_seek(bs->sctx->pb, bs->sctx->media_dataoffset, SEEK_SET);
        if (ret < 0) {
            av_log(s, AV_LOG_INFO, "avio_seek failed %s---%d \n", __FUNCTION__, __LINE__);
            goto fail; /* was "return ret;", which skipped freeing oldvpb */
        }
    }
    bs->cmfvpb = newvpb;
fail:
    if (oldvpb) {
        cmfvpb_pb_free(oldvpb);
    }
    return ret;
}
Code example #17
File: demuxer.c  Project: pder/mplayer2
static void clear_parser(sh_common_t *sh)
{
    av_parser_close(sh->parser);
    sh->parser = NULL;
    av_freep(&sh->avctx);
}
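
For context, here is a minimal sketch of the setup that clear_parser() undoes. This is hypothetical, not mplayer2 source; it assumes only the sh->parser and sh->avctx fields visible above and the standard libavcodec allocators:

static void init_parser(sh_common_t *sh, int codec_id)
{
    /* allocate the libavcodec context and packet parser that
     * clear_parser() later closes and frees */
    sh->avctx  = avcodec_alloc_context3(NULL);
    sh->parser = av_parser_init(codec_id);
}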
Code example #18
File: ffmpeg_consumer.cpp  Project: TELE-TWIN/Server
	std::shared_ptr<AVStream> add_video_stream(std::vector<option>& options)
	{ 
		if(output_format_.vcodec == CODEC_ID_NONE)
			return nullptr;

		auto st = av_new_stream(oc_.get(), 0);
		if (!st) 		
			BOOST_THROW_EXCEPTION(caspar_exception() << msg_info("Could not allocate video-stream.") << boost::errinfo_api_function("av_new_stream"));		

		auto encoder = avcodec_find_encoder(output_format_.vcodec);
		if (!encoder)
			BOOST_THROW_EXCEPTION(caspar_exception() << msg_info("Codec not found."));

		auto c = st->codec;

		avcodec_get_context_defaults3(c, encoder);
				
		c->codec_id			= output_format_.vcodec;
		c->codec_type		= AVMEDIA_TYPE_VIDEO;
		c->width			= output_format_.width;
		c->height			= output_format_.height;
		c->time_base.den	= format_desc_.time_scale;
		c->time_base.num	= format_desc_.duration;
		c->gop_size			= 25;
		c->flags		   |= format_desc_.field_mode == core::field_mode::progressive ? 0 : (CODEC_FLAG_INTERLACED_ME | CODEC_FLAG_INTERLACED_DCT);
		if(c->pix_fmt == PIX_FMT_NONE)
			c->pix_fmt = PIX_FMT_YUV420P;

		if(c->codec_id == CODEC_ID_PRORES)
		{			
			c->bit_rate	= c->width < 1280 ? 63*1000000 : 220*1000000;
			c->pix_fmt	= PIX_FMT_YUV422P10;
		}
		else if(c->codec_id == CODEC_ID_DNXHD)
		{
			if(c->width < 1280 || c->height < 720)
				BOOST_THROW_EXCEPTION(caspar_exception() << msg_info("Unsupported video dimensions."));

			c->bit_rate	= 220*1000000;
			c->pix_fmt	= PIX_FMT_YUV422P;
		}
		else if(c->codec_id == CODEC_ID_DVVIDEO)
		{
			c->width = c->height == 1280 ? 960  : c->width;
			
			if(format_desc_.format == core::video_format::ntsc)
				c->pix_fmt = PIX_FMT_YUV411P;
			else if(format_desc_.format == core::video_format::pal)
				c->pix_fmt = PIX_FMT_YUV420P;
			else // dv50
				c->pix_fmt = PIX_FMT_YUV422P;
			
			if(format_desc_.duration == 1001)			
				c->width = c->height == 1080 ? 1280 : c->width;			
			else
				c->width = c->height == 1080 ? 1440 : c->width;			
		}
		else if(c->codec_id == CODEC_ID_H264)
		{			   
			c->pix_fmt = PIX_FMT_YUV420P;    
			if(options.empty())
			{
				av_opt_set(c->priv_data, "preset", "ultrafast", 0);
				av_opt_set(c->priv_data, "tune",   "fastdecode",   0);
				av_opt_set(c->priv_data, "crf",    "5",     0);
			}
		}
		else if(c->codec_id == CODEC_ID_QTRLE)
		{
			c->pix_fmt = PIX_FMT_ARGB;
		}
				
		c->max_b_frames = 0; // b-frames not supported.
				
		boost::range::remove_erase_if(options, [&](const option& o)
		{
			return ffmpeg::av_opt_set(c, o.name.c_str(), o.value.c_str(), AV_OPT_SEARCH_CHILDREN) > -1 ||
				   ffmpeg::av_opt_set(c->priv_data, o.name.c_str(), o.value.c_str(), AV_OPT_SEARCH_CHILDREN) > -1;
		});
				
		if(output_format_.format->flags & AVFMT_GLOBALHEADER)
			c->flags |= CODEC_FLAG_GLOBAL_HEADER;
		
		c->thread_count = boost::thread::hardware_concurrency();
		if(avcodec_open(c, encoder) < 0)
		{
			c->thread_count = 1;
			THROW_ON_ERROR2(avcodec_open(c, encoder), "[ffmpeg_consumer]");
		}

		return std::shared_ptr<AVStream>(st, [](AVStream* st)
		{
			LOG_ON_ERROR2(avcodec_close(st->codec), "[ffmpeg_consumer]");
			av_freep(&st->codec);
			av_freep(&st);
		});
	}
Code example #19
static int vdpau_alloc(AVCodecContext *s)
{
    int loglevel = AV_LOG_ERROR;
    VDPAUContext *ctx;
    const char *display, *vendor;
    VdpStatus err;
    int ret;

    VdpDevice                device;
    VdpGetProcAddress       *get_proc_address;
    VdpGetInformationString *get_information_string;

    VDPAUHWDevicePriv    *device_priv = NULL;
    AVBufferRef          *device_ref  = NULL; /* missing from the original snippet but used below */
    AVHWDeviceContext    *device_ctx;
    AVVDPAUDeviceContext *device_hwctx;
    AVHWFramesContext    *frames_ctx;

    ctx = av_mallocz(sizeof(*ctx));
    if (!ctx)
        return AVERROR(ENOMEM);

    device_priv = av_mallocz(sizeof(*device_priv));
    if (!device_priv) {
        av_freep(&ctx);
        goto fail;
    }

    device_priv->dpy = XOpenDisplay(":0");
    if (!device_priv->dpy) {
        av_log(NULL, loglevel, "Cannot open the X11 display %s.\n",
               XDisplayName(":0"));
        goto fail;
    }
    display = XDisplayString(device_priv->dpy);

    err = vdp_device_create_x11(device_priv->dpy, XDefaultScreen(device_priv->dpy),
                                &device, &get_proc_address);
    if (err != VDP_STATUS_OK) {
        av_log(NULL, loglevel, "VDPAU device creation on X11 display %s failed.\n",
               display);
        goto fail;
    }

#define GET_CALLBACK(id, result)                                                \
do {                                                                            \
    void *tmp;                                                                  \
    err = get_proc_address(device, id, &tmp);                                   \
    if (err != VDP_STATUS_OK) {                                                 \
        av_log(NULL, loglevel, "Error getting the " #id " callback.\n");        \
        goto fail;                                                              \
    }                                                                           \
    result = tmp;                                                               \
} while (0)

    GET_CALLBACK(VDP_FUNC_ID_GET_INFORMATION_STRING, get_information_string);
    GET_CALLBACK(VDP_FUNC_ID_DEVICE_DESTROY,         device_priv->device_destroy);

    device_ref = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_VDPAU);
    if (!device_ref)
        goto fail;
    device_ctx                     = (AVHWDeviceContext*)device_ref->data;
    device_hwctx                   = device_ctx->hwctx;
    device_ctx->user_opaque        = device_priv;
    device_ctx->free               = device_free;
    device_hwctx->device           = device;
    device_hwctx->get_proc_address = get_proc_address;

    device_priv = NULL;

    ret = av_hwdevice_ctx_init(device_ref);
    if (ret < 0)
        goto fail;

    ctx->hw_frames_ctx = av_hwframe_ctx_alloc(device_ref);
    if (!ctx->hw_frames_ctx)
        goto fail;
    //av_buffer_unref(&device_ref);

    frames_ctx            = (AVHWFramesContext*)ctx->hw_frames_ctx->data;
    frames_ctx->format    = AV_PIX_FMT_VDPAU;
    frames_ctx->sw_format = s->sw_pix_fmt;
    frames_ctx->width     = 1920;   /* pool size is hard-coded here; a general
                                       decoder would size it from the stream */
    frames_ctx->height    = 1080;

    ret = av_hwframe_ctx_init(ctx->hw_frames_ctx);
    if (ret < 0)
        goto fail;

    if (av_vdpau_bind_context(s, device, get_proc_address, 0))
        goto fail;

    s->opaque = ctx;

    return 0;

fail:
    if (device_priv) {
        if (device_priv->device_destroy)
            device_priv->device_destroy(device);
        if (device_priv->dpy)
            XCloseDisplay(device_priv->dpy);
    }
    av_freep(&device_priv);
    av_buffer_unref(&device_ref);
    vdpau_uninit(s);
    return AVERROR(EINVAL);
}
Code example #20
File: dict.c  Project: Crawping/chromium_extract
int main(void)
{
    AVDictionary *dict = NULL;
    AVDictionaryEntry *e;
    char *buffer = NULL;

    printf("Testing av_dict_get_string() and av_dict_parse_string()\n");
    av_dict_get_string(dict, &buffer, '=', ',');
    printf("%s\n", buffer);
    av_freep(&buffer);
    av_dict_set(&dict, "aaa", "aaa", 0);
    av_dict_set(&dict, "b,b", "bbb", 0);
    av_dict_set(&dict, "c=c", "ccc", 0);
    av_dict_set(&dict, "ddd", "d,d", 0);
    av_dict_set(&dict, "eee", "e=e", 0);
    av_dict_set(&dict, "f,f", "f=f", 0);
    av_dict_set(&dict, "g=g", "g,g", 0);
    test_separators(dict, ',', '=');
    av_dict_free(&dict);
    av_dict_set(&dict, "aaa", "aaa", 0);
    av_dict_set(&dict, "bbb", "bbb", 0);
    av_dict_set(&dict, "ccc", "ccc", 0);
    av_dict_set(&dict, "\\,=\'\"", "\\,=\'\"", 0);
    test_separators(dict, '"',  '=');
    test_separators(dict, '\'', '=');
    test_separators(dict, ',', '"');
    test_separators(dict, ',', '\'');
    test_separators(dict, '\'', '"');
    test_separators(dict, '"', '\'');
    av_dict_free(&dict);

    printf("\nTesting av_dict_set()\n");
    av_dict_set(&dict, "a", "a", 0);
    av_dict_set(&dict, "b", av_strdup("b"), AV_DICT_DONT_STRDUP_VAL);
    av_dict_set(&dict, av_strdup("c"), "c", AV_DICT_DONT_STRDUP_KEY);
    av_dict_set(&dict, av_strdup("d"), av_strdup("d"), AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
    av_dict_set(&dict, "e", "e", AV_DICT_DONT_OVERWRITE);
    av_dict_set(&dict, "e", "f", AV_DICT_DONT_OVERWRITE);
    av_dict_set(&dict, "f", "f", 0);
    av_dict_set(&dict, "f", NULL, 0);
    av_dict_set(&dict, "ff", "f", 0);
    av_dict_set(&dict, "ff", "f", AV_DICT_APPEND);
    e = NULL;
    while ((e = av_dict_get(dict, "", e, AV_DICT_IGNORE_SUFFIX)))
        printf("%s %s\n", e->key, e->value);
    av_dict_free(&dict);

    av_dict_set(&dict, NULL, "a", 0);
    av_dict_set(&dict, NULL, "b", 0);
    av_dict_get(dict, NULL, NULL, 0);
    e = NULL;
    while ((e = av_dict_get(dict, "", e, AV_DICT_IGNORE_SUFFIX)))
        printf("'%s' '%s'\n", e->key, e->value);
    av_dict_free(&dict);


    //valgrind sensible test
    printf("\nTesting av_dict_set_int()\n");
    av_dict_set_int(&dict, "1", 1, AV_DICT_DONT_STRDUP_VAL);
    av_dict_set_int(&dict, av_strdup("2"), 2, AV_DICT_DONT_STRDUP_KEY);
    av_dict_set_int(&dict, av_strdup("3"), 3, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
    av_dict_set_int(&dict, "4", 4, 0);
    av_dict_set_int(&dict, "5", 5, AV_DICT_DONT_OVERWRITE);
    av_dict_set_int(&dict, "5", 6, AV_DICT_DONT_OVERWRITE);
    av_dict_set_int(&dict, "12", 1, 0);
    av_dict_set_int(&dict, "12", 2, AV_DICT_APPEND);
    e = NULL;
    while ((e = av_dict_get(dict, "", e, AV_DICT_IGNORE_SUFFIX)))
        printf("%s %s\n", e->key, e->value);
    av_dict_free(&dict);

    //valgrind sensible test
    printf("\nTesting av_dict_set() with existing AVDictionaryEntry.key as key\n");
    av_dict_set(&dict, "key", "old", 0);
    e = av_dict_get(dict, "key", NULL, 0);
    av_dict_set(&dict, e->key, "new val OK", 0);
    e = av_dict_get(dict, "key", NULL, 0);
    printf("%s\n", e->value);
    av_dict_set(&dict, e->key, e->value, 0);
    e = av_dict_get(dict, "key", NULL, 0);
    printf("%s\n", e->value);
    av_dict_free(&dict);

    return 0;
}
Code example #21
File: asfenc.c  Project: Brhett/FFmpeg
/* write the header (written twice when not streamed) */
static int asf_write_header1(AVFormatContext *s, int64_t file_size, int64_t data_chunk_size)
{
    ASFContext *asf = s->priv_data;
    AVIOContext *pb = s->pb;
    AVDictionaryEntry *tags[5];
    int header_size, n, extra_size, extra_size2, wav_extra_size, file_time;
    int has_title;
    int metadata_count;
    AVCodecContext *enc;
    int64_t header_offset, cur_pos, hpos;
    int bit_rate;
    int64_t duration;

    ff_metadata_conv(&s->metadata, ff_asf_metadata_conv, NULL);

    tags[0] = av_dict_get(s->metadata, "title"    , NULL, 0);
    tags[1] = av_dict_get(s->metadata, "author"   , NULL, 0);
    tags[2] = av_dict_get(s->metadata, "copyright", NULL, 0);
    tags[3] = av_dict_get(s->metadata, "comment"  , NULL, 0);
    tags[4] = av_dict_get(s->metadata, "rating"   , NULL, 0);

    duration = asf->duration + PREROLL_TIME * 10000;
    has_title = tags[0] || tags[1] || tags[2] || tags[3] || tags[4];
    metadata_count = av_dict_count(s->metadata);

    bit_rate = 0;
    for(n=0;n<s->nb_streams;n++) {
        enc = s->streams[n]->codec;

        avpriv_set_pts_info(s->streams[n], 32, 1, 1000); /* 32 bit pts in ms */

        bit_rate += enc->bit_rate;
    }

    if (asf->is_streamed) {
        put_chunk(s, 0x4824, 0, 0xc00); /* start of stream (length will be patched later) */
    }

    ff_put_guid(pb, &ff_asf_header);
    avio_wl64(pb, -1); /* header length, will be patched after */
    avio_wl32(pb, 3 + has_title + !!metadata_count + s->nb_streams); /* number of chunks in header */
    avio_w8(pb, 1); /* ??? */
    avio_w8(pb, 2); /* ??? */

    /* file header */
    header_offset = avio_tell(pb);
    hpos = put_header(pb, &ff_asf_file_header);
    ff_put_guid(pb, &ff_asf_my_guid);
    avio_wl64(pb, file_size);
    file_time = 0;
    avio_wl64(pb, unix_to_file_time(file_time));
    avio_wl64(pb, asf->nb_packets); /* number of packets */
    avio_wl64(pb, duration); /* end time stamp (in 100ns units) */
    avio_wl64(pb, asf->duration); /* duration (in 100ns units) */
    avio_wl64(pb, PREROLL_TIME); /* start time stamp */
    avio_wl32(pb, (asf->is_streamed || !pb->seekable ) ? 3 : 2); /* ??? */
    avio_wl32(pb, s->packet_size); /* packet size */
    avio_wl32(pb, s->packet_size); /* packet size */
    avio_wl32(pb, bit_rate); /* Nominal data rate in bps */
    end_header(pb, hpos);

    /* unknown headers */
    hpos = put_header(pb, &ff_asf_head1_guid);
    ff_put_guid(pb, &ff_asf_head2_guid);
    avio_wl32(pb, 6);
    avio_wl16(pb, 0);
    end_header(pb, hpos);

    /* title and other infos */
    if (has_title) {
        int len;
        uint8_t *buf;
        AVIOContext *dyn_buf;

        if (avio_open_dyn_buf(&dyn_buf) < 0)
            return AVERROR(ENOMEM);

        hpos = put_header(pb, &ff_asf_comment_header);

        for (n = 0; n < FF_ARRAY_ELEMS(tags); n++) {
            len = tags[n] ? avio_put_str16le(dyn_buf, tags[n]->value) : 0;
            avio_wl16(pb, len);
        }
        len = avio_close_dyn_buf(dyn_buf, &buf);
        avio_write(pb, buf, len);
        av_freep(&buf);
        end_header(pb, hpos);
    }
    if (metadata_count) {
        AVDictionaryEntry *tag = NULL;
        hpos = put_header(pb, &ff_asf_extended_content_header);
        avio_wl16(pb, metadata_count);
        while ((tag = av_dict_get(s->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
            put_str16(pb, tag->key);
            avio_wl16(pb, 0);
            put_str16(pb, tag->value);
        }
        end_header(pb, hpos);
    }

    /* stream headers */
    for(n=0;n<s->nb_streams;n++) {
        int64_t es_pos;
        //        ASFStream *stream = &asf->streams[n];

        enc = s->streams[n]->codec;
        asf->streams[n].num = n + 1;
        asf->streams[n].seq = 1;


        switch(enc->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            wav_extra_size = 0;
            extra_size = 18 + wav_extra_size;
            extra_size2 = 8;
            break;
        default:
        case AVMEDIA_TYPE_VIDEO:
            wav_extra_size = enc->extradata_size;
            extra_size = 0x33 + wav_extra_size;
            extra_size2 = 0;
            break;
        }

        hpos = put_header(pb, &ff_asf_stream_header);
        if (enc->codec_type == AVMEDIA_TYPE_AUDIO) {
            ff_put_guid(pb, &ff_asf_audio_stream);
            ff_put_guid(pb, &ff_asf_audio_conceal_spread);
        } else {
            ff_put_guid(pb, &ff_asf_video_stream);
            ff_put_guid(pb, &ff_asf_video_conceal_none);
        }
        avio_wl64(pb, 0); /* ??? */
        es_pos = avio_tell(pb);
        avio_wl32(pb, extra_size); /* wav header len */
        avio_wl32(pb, extra_size2); /* additional data len */
        avio_wl16(pb, n + 1); /* stream number */
        avio_wl32(pb, 0); /* ??? */

        if (enc->codec_type == AVMEDIA_TYPE_AUDIO) {
            /* WAVEFORMATEX header */
            int wavsize = ff_put_wav_header(pb, enc);

            if (wavsize < 0)
                return -1;
            if (wavsize != extra_size) {
                cur_pos = avio_tell(pb);
                avio_seek(pb, es_pos, SEEK_SET);
                avio_wl32(pb, wavsize); /* wav header len */
                avio_seek(pb, cur_pos, SEEK_SET);
            }
            /* ERROR Correction */
            avio_w8(pb, 0x01);
            if(enc->codec_id == AV_CODEC_ID_ADPCM_G726 || !enc->block_align){
                avio_wl16(pb, 0x0190);
                avio_wl16(pb, 0x0190);
            }else{
                avio_wl16(pb, enc->block_align);
                avio_wl16(pb, enc->block_align);
            }
            avio_wl16(pb, 0x01);
            avio_w8(pb, 0x00);
        } else {
            avio_wl32(pb, enc->width);
            avio_wl32(pb, enc->height);
            avio_w8(pb, 2); /* ??? */
            avio_wl16(pb, 40 + enc->extradata_size); /* size */

            /* BITMAPINFOHEADER header */
            ff_put_bmp_header(pb, enc, ff_codec_bmp_tags, 1);
        }
        end_header(pb, hpos);
    }

    /* media comments */

    hpos = put_header(pb, &ff_asf_codec_comment_header);
    ff_put_guid(pb, &ff_asf_codec_comment1_header);
    avio_wl32(pb, s->nb_streams);
    for(n=0;n<s->nb_streams;n++) {
        AVCodec *p;
        const char *desc;
        int len;
        uint8_t *buf;
        AVIOContext *dyn_buf;

        enc = s->streams[n]->codec;
        p = avcodec_find_encoder(enc->codec_id);

        if(enc->codec_type == AVMEDIA_TYPE_AUDIO)
            avio_wl16(pb, 2);
        else if(enc->codec_type == AVMEDIA_TYPE_VIDEO)
            avio_wl16(pb, 1);
        else
            avio_wl16(pb, -1);

        if(enc->codec_id == AV_CODEC_ID_WMAV2)
            desc = "Windows Media Audio V8";
        else
            desc = p ? p->name : enc->codec_name;

        if ( avio_open_dyn_buf(&dyn_buf) < 0)
            return AVERROR(ENOMEM);

        avio_put_str16le(dyn_buf, desc);
        len = avio_close_dyn_buf(dyn_buf, &buf);
        avio_wl16(pb, len / 2); // "number of characters" = length in bytes / 2

        avio_write(pb, buf, len);
        av_freep(&buf);

        avio_wl16(pb, 0); /* no parameters */


        /* id */
        if (enc->codec_type == AVMEDIA_TYPE_AUDIO) {
            avio_wl16(pb, 2);
            avio_wl16(pb, enc->codec_tag);
        } else {
            avio_wl16(pb, 4);
            avio_wl32(pb, enc->codec_tag);
        }
        if(!enc->codec_tag)
            return -1;
    }
    end_header(pb, hpos);

    /* patch the header size fields */

    cur_pos = avio_tell(pb);
    header_size = cur_pos - header_offset;
    if (asf->is_streamed) {
        header_size += 8 + 30 + 50;

        avio_seek(pb, header_offset - 10 - 30, SEEK_SET);
        avio_wl16(pb, header_size);
        avio_seek(pb, header_offset - 2 - 30, SEEK_SET);
        avio_wl16(pb, header_size);

        header_size -= 8 + 30 + 50;
    }
    header_size += 24 + 6;
    avio_seek(pb, header_offset - 14, SEEK_SET);
    avio_wl64(pb, header_size);
    avio_seek(pb, cur_pos, SEEK_SET);

    /* movie chunk, followed by packets of packet_size */
    asf->data_offset = cur_pos;
    ff_put_guid(pb, &ff_asf_data_header);
    avio_wl64(pb, data_chunk_size);
    ff_put_guid(pb, &ff_asf_my_guid);
    avio_wl64(pb, asf->nb_packets); /* nb packets */
    avio_w8(pb, 1); /* ??? */
    avio_w8(pb, 1); /* ??? */
    return 0;
}
Code example #22
File: dict.c  Project: Crawping/chromium_extract
int av_dict_set(AVDictionary **pm, const char *key, const char *value,
                int flags)
{
    AVDictionary *m = *pm;
    AVDictionaryEntry *tag = av_dict_get(m, key, NULL, flags);
    char *oldval = NULL, *copy_key = NULL, *copy_value = NULL;

    if (flags & AV_DICT_DONT_STRDUP_KEY)
        copy_key = (void *)key;
    else
        copy_key = av_strdup(key);
    if (flags & AV_DICT_DONT_STRDUP_VAL)
        copy_value = (void *)value;
    else if (copy_key)
        copy_value = av_strdup(value);
    if (!m)
        m = *pm = av_mallocz(sizeof(*m));
    if (!m || (key && !copy_key) || (value && !copy_value))
        goto err_out;

    if (tag) {
        if (flags & AV_DICT_DONT_OVERWRITE) {
            av_free(copy_key);
            av_free(copy_value);
            return 0;
        }
        if (flags & AV_DICT_APPEND)
            oldval = tag->value;
        else
            av_free(tag->value);
        av_free(tag->key);
        *tag = m->elems[--m->count];
    } else {
        AVDictionaryEntry *tmp = av_realloc(m->elems,
                                            (m->count + 1) * sizeof(*m->elems));
        if (!tmp)
            goto err_out;
        m->elems = tmp;
    }
    if (copy_value) {
        m->elems[m->count].key = copy_key;
        m->elems[m->count].value = copy_value;
        if (oldval && flags & AV_DICT_APPEND) {
            size_t len = strlen(oldval) + strlen(copy_value) + 1;
            char *newval = av_mallocz(len);
            if (!newval)
                goto err_out;
            av_strlcat(newval, oldval, len);
            av_freep(&oldval);
            av_strlcat(newval, copy_value, len);
            m->elems[m->count].value = newval;
            av_freep(&copy_value);
        }
        m->count++;
    } else {
        av_freep(&copy_key);
    }
    if (!m->count) {
        av_freep(&m->elems);
        av_freep(pm);
    }

    return 0;

err_out:
    if (m && !m->count) {
        av_freep(&m->elems);
        av_freep(pm);
    }
    av_free(copy_key);
    av_free(copy_value);
    return AVERROR(ENOMEM);
}
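
A minimal usage sketch for the function above, exercising the same flag semantics the implementation handles (the key/value strings are illustrative):

#include <stdio.h>
#include <libavutil/dict.h>

int main(void)
{
    AVDictionary *d = NULL;
    AVDictionaryEntry *e = NULL;

    av_dict_set(&d, "codec", "h264", 0);                      /* key and value are copied */
    av_dict_set(&d, "codec", "hevc", AV_DICT_DONT_OVERWRITE); /* no-op: "h264" is kept */
    av_dict_set(&d, "codec", "+10bit", AV_DICT_APPEND);       /* value becomes "h264+10bit" */

    while ((e = av_dict_get(d, "", e, AV_DICT_IGNORE_SUFFIX)))
        printf("%s=%s\n", e->key, e->value);

    av_dict_free(&d); /* frees every copied key and value */
    return 0;
}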
Code example #23
File: cafdec.c  Project: Acidburn0zzz/libav
/** Read magic cookie chunk */
static int read_kuki_chunk(AVFormatContext *s, int64_t size)
{
    AVIOContext *pb = s->pb;
    AVStream *st      = s->streams[0];

    if (size < 0 || size > INT_MAX - FF_INPUT_BUFFER_PADDING_SIZE)
        return -1;

    if (st->codec->codec_id == AV_CODEC_ID_AAC) {
        /* The magic cookie format for AAC is an mp4 esds atom.
           The lavc AAC decoder requires the data from the codec specific
           description as extradata input. */
        int strt, skip;
        MOVAtom atom;

        strt = avio_tell(pb);
        ff_mov_read_esds(s, pb, atom);
        skip = size - (avio_tell(pb) - strt);
        if (skip < 0 || !st->codec->extradata ||
            st->codec->codec_id != AV_CODEC_ID_AAC) {
            av_log(s, AV_LOG_ERROR, "invalid AAC magic cookie\n");
            return AVERROR_INVALIDDATA;
        }
        avio_skip(pb, skip);
    } else if (st->codec->codec_id == AV_CODEC_ID_ALAC) {
#define ALAC_PREAMBLE 12
#define ALAC_HEADER   36
#define ALAC_NEW_KUKI 24
        uint8_t preamble[12];
        if (size < ALAC_NEW_KUKI) {
            av_log(s, AV_LOG_ERROR, "invalid ALAC magic cookie\n");
            avio_skip(pb, size);
            return AVERROR_INVALIDDATA;
        }
        avio_read(pb, preamble, ALAC_PREAMBLE);

        st->codec->extradata = av_mallocz(ALAC_HEADER + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!st->codec->extradata)
            return AVERROR(ENOMEM);

        /* For the old style cookie, we skip 12 bytes, then read 36 bytes.
         * The new style cookie only contains the last 24 bytes of what was
         * 36 bytes in the old style cookie, so we fabricate the first 12 bytes
         * in that case to maintain compatibility. */
        if (!memcmp(&preamble[4], "frmaalac", 8)) {
            if (size < ALAC_PREAMBLE + ALAC_HEADER) {
                av_log(s, AV_LOG_ERROR, "invalid ALAC magic cookie\n");
                av_freep(&st->codec->extradata);
                return AVERROR_INVALIDDATA;
            }
            avio_read(pb, st->codec->extradata, ALAC_HEADER);
            avio_skip(pb, size - ALAC_PREAMBLE - ALAC_HEADER);
        } else {
            AV_WB32(st->codec->extradata, 36);
            memcpy(&st->codec->extradata[4], "alac", 4);
            AV_WB32(&st->codec->extradata[8], 0);
            memcpy(&st->codec->extradata[12], preamble, 12);
            avio_read(pb, &st->codec->extradata[24], ALAC_NEW_KUKI - 12);
            avio_skip(pb, size - ALAC_NEW_KUKI);
        }
        st->codec->extradata_size = ALAC_HEADER;
    } else {
        st->codec->extradata = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!st->codec->extradata)
            return AVERROR(ENOMEM);
        avio_read(pb, st->codec->extradata, size);
        st->codec->extradata_size = size;
    }

    return 0;
}
Code example #24
File: v308enc.c  Project: 0x0B501E7E/ffmpeg
static av_cold int v308_encode_close(AVCodecContext *avctx)
{
    av_freep(&avctx->coded_frame);

    return 0;
}
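
For symmetry, a sketch of the matching init half of this old-style encoder pattern — hypothetical, not the actual v308enc.c init, but it shows what the close above is freeing:

static av_cold int v308_encode_init(AVCodecContext *avctx)
{
    /* old lavc convention: the encoder allocates coded_frame and
     * releases it in its close() callback, as done above */
    avctx->coded_frame = avcodec_alloc_frame();
    if (!avctx->coded_frame)
        return AVERROR(ENOMEM);
    return 0;
}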
Code example #25
void swri_audio_convert_free(AudioConvert **ctx)
{
    av_freep(ctx);
}
Code example #26
static int url_alloc_for_protocol (URLContext **puc, struct URLProtocol *up,
                                   const char *filename, int flags,
                                   const AVIOInterruptCB *int_cb)
{
    URLContext *uc;
    int err;

#if CONFIG_NETWORK
    if (up->flags & URL_PROTOCOL_FLAG_NETWORK && !ff_network_init())
        return AVERROR(EIO);
#endif
    uc = av_mallocz(sizeof(URLContext) + strlen(filename) + 1);
    if (!uc) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    uc->av_class = &ffurl_context_class;
    uc->filename = (char *) &uc[1];
    strcpy(uc->filename, filename);
    uc->prot = up;
    uc->flags = flags;
    uc->is_streamed = 0; /* default = not streamed */
    uc->max_packet_size = 0; /* default: stream file */
    if (up->priv_data_size) {
        uc->priv_data = av_mallocz(up->priv_data_size);
        if (up->priv_data_class) {
            int proto_len= strlen(up->name);
            char *start = strchr(uc->filename, ',');
            *(const AVClass**)uc->priv_data = up->priv_data_class;
            av_opt_set_defaults(uc->priv_data);
            if(!strncmp(up->name, uc->filename, proto_len) && uc->filename + proto_len == start){
                int ret= 0;
                char *p= start;
                char sep= *++p;
                char *key, *val;
                p++;
                while(ret >= 0 && (key= strchr(p, sep)) && p<key && (val = strchr(key+1, sep))){
                    *val= *key= 0;
                    ret= av_opt_set(uc->priv_data, p, key+1, 0);
                    if (ret == AVERROR_OPTION_NOT_FOUND)
                        av_log(uc, AV_LOG_ERROR, "Key '%s' not found.\n", p);
                    *val= *key= sep;
                    p= val+1;
                }
                if(ret<0 || p!=key){
                    av_log(uc, AV_LOG_ERROR, "Error parsing options string %s\n", start);
                    av_freep(&uc->priv_data);
                    av_freep(&uc);
                    goto fail;
                }
                memmove(start, key+1, strlen(key));
            }
        }
    }
    if (int_cb)
        uc->interrupt_callback = *int_cb;

    *puc = uc;
    return 0;
 fail:
    *puc = NULL;
#if CONFIG_NETWORK
    if (up->flags & URL_PROTOCOL_FLAG_NETWORK)
        ff_network_close();
#endif
    return err;
}
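
The option-parsing loop in the middle of this function implements FFmpeg's inline protocol-option syntax: the protocol name, a comma, a separator character, separator-delimited key/value pairs, then ":" and the resource. A sketch of driving it through the public API (assumes the subfile protocol is compiled in; the offsets are illustrative):

#include <libavformat/avio.h>

int open_slice(AVIOContext **pb)
{
    /* "subfile,,start,32815239,end,0,,:video.ts" picks ',' as the
     * separator and sets start/end on the protocol's priv_data via
     * exactly the av_opt_set() loop shown above */
    return avio_open(pb, "subfile,,start,32815239,end,0,,:video.ts",
                     AVIO_FLAG_READ);
}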
Code example #27
int av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc,
                               AVCodecContext *avctx, const char *args,
                               uint8_t **poutbuf, int *poutbuf_size,
                               const uint8_t *buf, int buf_size, int keyframe)
{
    BSFCompatContext *priv = bsfc->priv_data;
    AVPacket pkt = { 0 };
    int ret;

    if (!priv->ctx) {
        ret = av_bsf_alloc(bsfc->filter, &priv->ctx);
        if (ret < 0)
            return ret;

        ret = avcodec_parameters_from_context(priv->ctx->par_in, avctx);
        if (ret < 0)
            return ret;

        priv->ctx->time_base_in = avctx->time_base;

        if (bsfc->args && bsfc->filter->priv_class) {
            const AVOption *opt = av_opt_next(priv->ctx->priv_data, NULL);
            const char * shorthand[2] = {NULL};

            if (opt)
                shorthand[0] = opt->name;

            ret = av_opt_set_from_string(priv->ctx->priv_data, bsfc->args, shorthand, "=", ":");
        }

        ret = av_bsf_init(priv->ctx);
        if (ret < 0)
            return ret;
    }

    pkt.data = buf;
    pkt.size = buf_size;

    ret = av_bsf_send_packet(priv->ctx, &pkt);
    if (ret < 0)
        return ret;

    *poutbuf      = NULL;
    *poutbuf_size = 0;

    ret = av_bsf_receive_packet(priv->ctx, &pkt);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        return 0;
    else if (ret < 0)
        return ret;

    *poutbuf = av_malloc(pkt.size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!*poutbuf) {
        av_packet_unref(&pkt);
        return AVERROR(ENOMEM);
    }

    *poutbuf_size = pkt.size;
    memcpy(*poutbuf, pkt.data, pkt.size);

    av_packet_unref(&pkt);

    /* drain all the remaining packets we cannot return */
    while (ret >= 0) {
        ret = av_bsf_receive_packet(priv->ctx, &pkt);
        av_packet_unref(&pkt);
    }

    if (!priv->extradata_updated) {
        /* update extradata in avctx from the output codec parameters */
        if (priv->ctx->par_out->extradata_size && (!args || !strstr(args, "private_spspps_buf"))) {
            av_freep(&avctx->extradata);
            avctx->extradata_size = 0;
            avctx->extradata = av_mallocz(priv->ctx->par_out->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
            if (!avctx->extradata)
                return AVERROR(ENOMEM);
            memcpy(avctx->extradata, priv->ctx->par_out->extradata, priv->ctx->par_out->extradata_size);
            avctx->extradata_size = priv->ctx->par_out->extradata_size;
        }

        priv->extradata_updated = 1;
    }

    return 1;
}
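
A sketch of how this legacy wrapper is typically driven; h264_mp4toannexb is a real filter name, the helper itself is illustrative:

#include <libavcodec/avcodec.h>

/* run one packet through a bitstream filter using the legacy API
 * implemented by the compatibility shim above */
int filter_packet(AVCodecContext *avctx, AVPacket *pkt)
{
    AVBitStreamFilterContext *bsfc = av_bitstream_filter_init("h264_mp4toannexb");
    uint8_t *out = NULL;
    int out_size = 0, ret;

    if (!bsfc)
        return AVERROR(ENOMEM);

    ret = av_bitstream_filter_filter(bsfc, avctx, NULL, &out, &out_size,
                                     pkt->data, pkt->size,
                                     pkt->flags & AV_PKT_FLAG_KEY);
    if (ret > 0)
        av_free(out); /* ret > 0 means the filter allocated a new buffer */

    av_bitstream_filter_close(bsfc);
    return ret;
}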
Code example #28
File: takdec.c  Project: NullEmpty/rk_ffmpeg_android
static int tak_read_header(AVFormatContext *s)
{
    TAKDemuxContext *tc = s->priv_data;
    AVIOContext *pb = s->pb;
    GetBitContext gb;
    AVStream *st;
    uint8_t *buffer = NULL;
    int ret;

    st = avformat_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id   = AV_CODEC_ID_TAK;
    st->need_parsing      = AVSTREAM_PARSE_FULL_RAW;

    tc->mlast_frame = 0;
    if (avio_rl32(pb) != MKTAG('t', 'B', 'a', 'K')) {
        avio_seek(pb, -4, SEEK_CUR);
        return 0;
    }

    while (!url_feof(pb)) {
        enum TAKMetaDataType type;
        int size;

        type = avio_r8(pb) & 0x7f;
        size = avio_rl24(pb);

        switch (type) {
        case TAK_METADATA_STREAMINFO:
        case TAK_METADATA_LAST_FRAME:
        case TAK_METADATA_ENCODER:
            buffer = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!buffer)
                return AVERROR(ENOMEM);

            if (avio_read(pb, buffer, size) != size) {
                av_freep(&buffer);
                return AVERROR(EIO);
            }

            init_get_bits(&gb, buffer, size * 8);
            break;
        case TAK_METADATA_MD5: {
            uint8_t md5[16];
            int i;

            if (size != 19)
                return AVERROR_INVALIDDATA;
            avio_read(pb, md5, 16);
            avio_skip(pb, 3);
            av_log(s, AV_LOG_VERBOSE, "MD5=");
            for (i = 0; i < 16; i++)
                av_log(s, AV_LOG_VERBOSE, "%02x", md5[i]);
            av_log(s, AV_LOG_VERBOSE, "\n");
            break;
            }
        case TAK_METADATA_END: {
            int64_t curpos = avio_tell(pb);

            if (pb->seekable) {
                ff_ape_parse_tag(s);
                avio_seek(pb, curpos, SEEK_SET);
            }

            tc->data_end += curpos;
            return 0;
            break;
            }
        default:
            ret = avio_skip(pb, size);
            if (ret < 0)
                return ret;
        }

        if (type == TAK_METADATA_STREAMINFO) {
            TAKStreamInfo ti;

            avpriv_tak_parse_streaminfo(&gb, &ti);
            if (ti.samples > 0)
                st->duration = ti.samples;
            st->codec->bits_per_coded_sample = ti.bps;
            if (ti.ch_layout)
                st->codec->channel_layout = ti.ch_layout;
            st->codec->sample_rate = ti.sample_rate;
            st->codec->channels    = ti.channels;
            st->start_time         = 0;
            avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
            st->codec->extradata      = buffer;
            st->codec->extradata_size = size;
            buffer = NULL;
        } else if (type == TAK_METADATA_LAST_FRAME) {
            if (size != 11)
                return AVERROR_INVALIDDATA;
            tc->mlast_frame = 1;
            tc->data_end = get_bits_longlong(&gb, TAK_LAST_FRAME_POS_BITS) +
                           get_bits(&gb, TAK_LAST_FRAME_SIZE_BITS);
            av_freep(&buffer);
        } else if (type == TAK_METADATA_ENCODER) {
            av_log(s, AV_LOG_VERBOSE, "encoder version: %0X\n",
                   get_bits_long(&gb, TAK_ENCODER_VERSION_BITS));
            av_freep(&buffer);
        }
    }

    return AVERROR_EOF;
}
Code example #29
static int run_test(AVCodec *enc, AVCodec *dec, AVCodecContext *enc_ctx,
                    AVCodecContext *dec_ctx)
{
    AVPacket enc_pkt;
    AVFrame *in_frame, *out_frame;
    uint8_t *raw_in = NULL, *raw_out = NULL;
    int in_offset = 0, out_offset = 0;
    int frame_data_size = 0;
    int result = 0;
    int got_output = 0;
    int i = 0;

    in_frame = av_frame_alloc();
    if (!in_frame)
    {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate input frame\n");
        return AVERROR(ENOMEM);
    }

    in_frame->nb_samples = enc_ctx->frame_size;
    in_frame->format = enc_ctx->sample_fmt;
    in_frame->channel_layout = enc_ctx->channel_layout;
    if (av_frame_get_buffer(in_frame, 32) != 0)
    {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate a buffer for input frame\n");
        return AVERROR(ENOMEM);
    }
    /* bytes per frame for the final comparison; the original snippet left
       frame_data_size at 0, which made the closing memcmp compare 0 bytes */
    frame_data_size = in_frame->linesize[0];

    out_frame = av_frame_alloc();
    if (!out_frame)
    {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate output frame\n");
        return AVERROR(ENOMEM);
    }

    raw_in = av_malloc(in_frame->linesize[0] * NUMBER_OF_FRAMES);
    if (!raw_in)
    {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate memory for raw_in\n");
        return AVERROR(ENOMEM);
    }

    raw_out = av_malloc(in_frame->linesize[0] * NUMBER_OF_FRAMES);
    if (!raw_out)
    {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate memory for raw_out\n");
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < NUMBER_OF_FRAMES; i++)
    {
        av_init_packet(&enc_pkt);
        enc_pkt.data = NULL;
        enc_pkt.size = 0;

        generate_raw_frame((uint16_t*)(in_frame->data[0]), i, enc_ctx->sample_rate,
                           enc_ctx->channels, enc_ctx->frame_size);
        memcpy(raw_in + in_offset, in_frame->data[0], in_frame->linesize[0]);
        in_offset += in_frame->linesize[0];
        result = avcodec_encode_audio2(enc_ctx, &enc_pkt, in_frame, &got_output);
        if (result < 0)
        {
            av_log(NULL, AV_LOG_ERROR, "Error encoding audio frame\n");
            return result;
        }

        /* if we get an encoded packet, feed it straight to the decoder */
        if (got_output)
        {
            result = avcodec_decode_audio4(dec_ctx, out_frame, &got_output, &enc_pkt);
            if (result < 0)
            {
                av_log(NULL, AV_LOG_ERROR, "Error decoding audio packet\n");
                return result;
            }

            if (got_output)
            {
                if (result != enc_pkt.size)
                {
                    av_log(NULL, AV_LOG_INFO, "Decoder consumed only part of a packet, it is allowed to do so -- need to update this test\n");
                    return AVERROR_UNKNOWN;
                }

                if (in_frame->nb_samples != out_frame->nb_samples)
                {
                    av_log(NULL, AV_LOG_ERROR, "Error frames before and after decoding has different number of samples\n");
                    return AVERROR_UNKNOWN;
                }

                if (in_frame->channel_layout != out_frame->channel_layout)
                {
                    av_log(NULL, AV_LOG_ERROR, "Error frames before and after decoding has different channel layout\n");
                    return AVERROR_UNKNOWN;
                }

                if (in_frame->format != out_frame->format)
                {
                    av_log(NULL, AV_LOG_ERROR, "Error frames before and after decoding has different sample format\n");
                    return AVERROR_UNKNOWN;
                }
                memcpy(raw_out + out_offset, out_frame->data[0], out_frame->linesize[0]);
                out_offset += out_frame->linesize[0];
            }
        }
        av_free_packet(&enc_pkt);
    }

    if (memcmp(raw_in, raw_out, frame_data_size * NUMBER_OF_FRAMES) != 0)
    {
        av_log(NULL, AV_LOG_ERROR, "Output differs\n");
        return 1;
    }

    av_log(NULL, AV_LOG_INFO, "OK\n");

    av_freep(&raw_in);
    av_freep(&raw_out);
    av_frame_free(&in_frame);
    av_frame_free(&out_frame);
    return 0;
}
Code example #30
int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters,
                          AVFilterInOut **inputs,
                          AVFilterInOut **outputs)
{
    int index = 0, ret = 0;
    char chr = 0;

    AVFilterInOut *curr_inputs = NULL, *open_inputs = NULL, *open_outputs = NULL;

    filters += strspn(filters, WHITESPACES);

    if ((ret = parse_sws_flags(&filters, graph)) < 0)
        goto fail;

    do {
        AVFilterContext *filter;
        filters += strspn(filters, WHITESPACES);

        if ((ret = parse_inputs(&filters, &curr_inputs, &open_outputs, graph)) < 0)
            goto end;
        if ((ret = parse_filter(&filter, &filters, graph, index, graph)) < 0)
            goto end;


        if ((ret = link_filter_inouts(filter, &curr_inputs, &open_inputs, graph)) < 0)
            goto end;

        if ((ret = parse_outputs(&filters, &curr_inputs, &open_inputs, &open_outputs,
                                 graph)) < 0)
            goto end;

        filters += strspn(filters, WHITESPACES);
        chr = *filters++;

        if (chr == ';' && curr_inputs)
            append_inout(&open_outputs, &curr_inputs);
        index++;
    } while (chr == ',' || chr == ';');

    if (chr) {
        av_log(graph, AV_LOG_ERROR,
               "Unable to parse graph description substring: \"%s\"\n",
               filters - 1);
        ret = AVERROR(EINVAL);
        goto end;
    }

    append_inout(&open_outputs, &curr_inputs);


    *inputs  = open_inputs;
    *outputs = open_outputs;
    return 0;

 fail:
 end:
    while (graph->nb_filters)
        avfilter_free(graph->filters[0]);
    av_freep(&graph->filters);
    avfilter_inout_free(&open_inputs);
    avfilter_inout_free(&open_outputs);
    avfilter_inout_free(&curr_inputs);

    *inputs  = NULL;
    *outputs = NULL;

    return ret;
}