Example #1
bool BufferSinkFilterContext::getSamples(AVFrame *frame, int nbSamples, OptionalErrorCode ec)
{
    clear_if(ec);
    if (!m_sink) {
        throws_if(ec, Errors::Unallocated);
        return false;
    }

    if (m_req == ReqGetFrame) {
        throws_if(ec, Errors::MixBufferSinkAccess);
        return false;
    }

    m_req = ReqGetSamples;

    int sts = av_buffersink_get_samples(m_sink.raw(), frame, nbSamples);
    if (sts < 0) {
        // EOF and EAGAIN simply mean "no samples available right now";
        // report them through ec instead of throwing
        if (sts == AVERROR_EOF || sts == AVERROR(EAGAIN)) {
            if (ec) {
                *ec = make_ffmpeg_error(sts);
            }
        } else {
            throws_if(ec, sts, ffmpeg_category());
        }
        return false;
    }
    return true;
}
Example #2
static int compat_read(AVFilterContext *ctx,
                       AVFilterBufferRef **pbuf, int nb_samples)
{
    AVFilterBufferRef *buf;
    AVFrame *frame;
    int ret;

    if (!pbuf)
        return ff_poll_frame(ctx->inputs[0]);

    frame = av_frame_alloc();
    if (!frame)
        return AVERROR(ENOMEM);

    /* nb_samples == 0 means "take whatever the sink has ready";
     * otherwise request exactly nb_samples samples */
    if (!nb_samples)
        ret = av_buffersink_get_frame(ctx, frame);
    else
        ret = av_buffersink_get_samples(ctx, frame, nb_samples);

    if (ret < 0)
        goto fail;

    /* wrap the frame data in a (deprecated) AVFilterBufferRef for the old API */
    if (ctx->inputs[0]->type == AVMEDIA_TYPE_VIDEO) {
        buf = avfilter_get_video_buffer_ref_from_arrays(frame->data, frame->linesize,
                                                        AV_PERM_READ,
                                                        frame->width, frame->height,
                                                        frame->format);
    } else {
        buf = avfilter_get_audio_buffer_ref_from_arrays(frame->extended_data,
                                                        frame->linesize[0], AV_PERM_READ,
                                                        frame->nb_samples,
                                                        frame->format,
                                                        frame->channel_layout);
    }
    if (!buf) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    avfilter_copy_frame_props(buf, frame);

    /* keep the decoded frame alive until the buffer ref is released */
    buf->buf->priv = frame;
    buf->buf->free = compat_free_buffer;

    *pbuf = buf;

    return 0;
fail:
    av_frame_free(&frame);
    return ret;
}
Example #3
// decode one audio packet and return its uncompressed size
static int audio_decode_frame(struct GroovePlaylist *playlist, struct GrooveFile *file) {
    struct GroovePlaylistPrivate *p = (struct GroovePlaylistPrivate *) playlist;
    struct GrooveFilePrivate *f = (struct GrooveFilePrivate *) file;

    AVPacket *pkt = &f->audio_pkt;
    AVCodecContext *dec = f->audio_st->codec;

    AVPacket *pkt_temp = &p->audio_pkt_temp;
    *pkt_temp = *pkt;

    // update the audio clock with the pts if we can
    if (pkt->pts != AV_NOPTS_VALUE)
        f->audio_clock = av_q2d(f->audio_st->time_base) * pkt->pts;

    int max_data_size = 0;
    int len1, got_frame;
    int new_packet = 1;
    AVFrame *in_frame = p->in_frame;

    // NOTE: the audio packet can contain several frames
    while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
        new_packet = 0;

        len1 = avcodec_decode_audio4(dec, in_frame, &got_frame, pkt_temp);
        if (len1 < 0) {
            // if error, we skip the frame
            pkt_temp->size = 0;
            return -1;
        }

        pkt_temp->data += len1;
        pkt_temp->size -= len1;

        if (!got_frame) {
            // stop sending empty packets if the decoder is finished
            if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
                return 0;
            continue;
        }

        // push the audio data from decoded frame into the filtergraph
        int err = av_buffersrc_write_frame(p->abuffer_ctx, in_frame);
        if (err < 0) {
            av_strerror(err, p->strbuf, sizeof(p->strbuf));
            av_log(NULL, AV_LOG_ERROR, "error writing frame to buffersrc: %s\n",
                    p->strbuf);
            return -1;
        }

        // for each data format in the sink map, pull filtered audio from its
        // buffersink, turn it into a GrooveBuffer and then increment the ref
        // count for each sink in that stack.
        struct SinkMap *map_item = p->sink_map;
        double clock_adjustment = 0;
        while (map_item) {
            struct GrooveSink *example_sink = map_item->stack_head->sink;
            int data_size = 0;
            for (;;) {
                AVFrame *oframe = av_frame_alloc();
                if (!oframe) {
                    av_log(NULL, AV_LOG_ERROR, "unable to allocate frame\n");
                    return -1;
                }
                int err = example_sink->buffer_sample_count == 0 ?
                    av_buffersink_get_frame(map_item->abuffersink_ctx, oframe) :
                    av_buffersink_get_samples(map_item->abuffersink_ctx, oframe, example_sink->buffer_sample_count);
                if (err == AVERROR_EOF || err == AVERROR(EAGAIN)) {
                    av_frame_free(&oframe);
                    break;
                }
                if (err < 0) {
                    av_frame_free(&oframe);
                    av_log(NULL, AV_LOG_ERROR, "error reading buffer from buffersink\n");
                    return -1;
                }
                struct GrooveBuffer *buffer = frame_to_groove_buffer(playlist, example_sink, oframe);
                if (!buffer) {
                    av_frame_free(&oframe);
                    return -1;
                }
                data_size += buffer->size;
                struct SinkStack *stack_item = map_item->stack_head;
                // hold a reference so the buffer survives at least until this
                // loop finishes; we unref it right after the loop.
                groove_buffer_ref(buffer);
                while (stack_item) {
                    struct GrooveSink *sink = stack_item->sink;
                    struct GrooveSinkPrivate *s = (struct GrooveSinkPrivate *) sink;
                    // as soon as we call groove_queue_put, this buffer could be unref'd.
                    // so we ref before putting it in the queue, and unref if it failed.
                    groove_buffer_ref(buffer);
                    if (groove_queue_put(s->audioq, buffer) < 0) {
                        av_log(NULL, AV_LOG_ERROR, "unable to put buffer in queue\n");
                        groove_buffer_unref(buffer);
                    }
                    stack_item = stack_item->next;
                }
                groove_buffer_unref(buffer);
            }
            if (data_size > max_data_size) {
                max_data_size = data_size;
                clock_adjustment = data_size / (double)example_sink->bytes_per_sec;
            }
            map_item = map_item->next;
        }

        // if no pts, then estimate it
        if (pkt->pts == AV_NOPTS_VALUE)
            f->audio_clock += clock_adjustment;
        return max_data_size;
    }
    return max_data_size;
}
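
All three examples share the same drain pattern: allocate an AVFrame, repeatedly pull fixed-size chunks from the abuffersink filter with av_buffersink_get_samples(), treat AVERROR(EAGAIN) and AVERROR_EOF as "no more data for now" rather than as failures, and release the frame on every path. Below is a minimal sketch of that loop, not taken from any of the projects above; it assumes an already-configured abuffersink context sink_ctx, a positive nb_samples (the examples fall back to av_buffersink_get_frame() when it is zero), and a hypothetical consume callback supplied by the caller.

#include <libavfilter/buffersink.h>
#include <libavutil/frame.h>
#include <libavutil/error.h>

static int drain_sink(AVFilterContext *sink_ctx, int nb_samples,
                      int (*consume)(AVFrame *frame, void *opaque), void *opaque)
{
    int ret = 0;
    AVFrame *frame = av_frame_alloc();
    if (!frame)
        return AVERROR(ENOMEM);

    for (;;) {
        /* each returned frame holds exactly nb_samples samples, except
         * possibly the last one at end of stream */
        ret = av_buffersink_get_samples(sink_ctx, frame, nb_samples);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            ret = 0; /* not an error: the sink has nothing more right now */
            break;
        }
        if (ret < 0)
            break; /* real error from the filtergraph */

        ret = consume(frame, opaque);
        av_frame_unref(frame); /* drop the data before reusing the frame */
        if (ret < 0)
            break;
    }

    av_frame_free(&frame);
    return ret;
}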