Example #1
int avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src)
{
    int planes, nb_channels;

    if (!dst)
        return AVERROR(EINVAL);
    /* abort in case the src is NULL and dst is not, avoid inconsistent state in dst */
    av_assert0(src);

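    /* Copy the plane pointers and line sizes verbatim; dst borrows the
     * buffer ref's data rather than taking its own reference. */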
    memcpy(dst->data, src->data, sizeof(dst->data));
    memcpy(dst->linesize, src->linesize, sizeof(dst->linesize));

    dst->pts     = src->pts;
    dst->format  = src->format;
    av_frame_set_pkt_pos(dst, src->pos);

    switch (src->type) {
    case AVMEDIA_TYPE_VIDEO:
        av_assert0(src->video);
        dst->width               = src->video->w;
        dst->height              = src->video->h;
        dst->sample_aspect_ratio = src->video->sample_aspect_ratio;
        dst->interlaced_frame    = src->video->interlaced;
        dst->top_field_first     = src->video->top_field_first;
        dst->key_frame           = src->video->key_frame;
        dst->pict_type           = src->video->pict_type;
        break;
    case AVMEDIA_TYPE_AUDIO:
        av_assert0(src->audio);
        nb_channels = av_get_channel_layout_nb_channels(src->audio->channel_layout);
        planes      = av_sample_fmt_is_planar(src->format) ? nb_channels : 1;

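        /* For planar audio with more channels than data[] can hold, a
         * separate array of plane pointers (extended_data) is needed. */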
        if (planes > FF_ARRAY_ELEMS(dst->data)) {
            dst->extended_data = av_mallocz_array(planes, sizeof(*dst->extended_data));
            if (!dst->extended_data)
                return AVERROR(ENOMEM);
            memcpy(dst->extended_data, src->extended_data,
                   planes * sizeof(*dst->extended_data));
        } else
            dst->extended_data = dst->data;
        dst->nb_samples          = src->audio->nb_samples;
        av_frame_set_sample_rate   (dst, src->audio->sample_rate);
        av_frame_set_channel_layout(dst, src->audio->channel_layout);
        av_frame_set_channels      (dst, src->audio->channels);
        break;
    default:
        return AVERROR(EINVAL);
    }

    return 0;
}
Example #2
int AudioSamples::init(SampleFormat sampleFormat, int samplesCount, uint64_t channelLayout, int sampleRate, int align)
{
    if (!m_raw) {
        m_raw = av_frame_alloc();
        if (!m_raw)
            return AVERROR(ENOMEM);
        m_raw->opaque = this;
    }

    // Release any buffers from a previous init() but keep the frame object;
    // av_frame_free() would leave m_raw null and crash below. Note that
    // av_frame_unref() also resets opaque, so restore it afterwards.
    if (m_raw->data[0]) {
        av_frame_unref(m_raw);
        m_raw->opaque = this;
    }

    m_raw->format      = sampleFormat;
    m_raw->nb_samples  = samplesCount;

    av_frame_set_sample_rate(m_raw, sampleRate);
    av_frame_set_channel_layout(m_raw, channelLayout);

    // Propagate allocation failures instead of unconditionally returning 0.
    return av_frame_get_buffer(m_raw, align);
}
Example #3
static int filter(struct af_instance *af, struct mp_audio *data, int flags)
{
    struct priv *p = af->priv;
    struct mp_audio *r = af->data;
    bool eof = data->samples == 0 && (flags & AF_FILTER_FLAG_EOF);
    AVFilterLink *l_in = p->in->outputs[0];

    AVFrame *frame = av_frame_alloc();
    if (!frame)
        return -1;
    frame->nb_samples = data->samples;
    frame->format = l_in->format;

    // Timebase is 1/sample_rate
    frame->pts = p->samples_in;

    av_frame_set_channels(frame, l_in->channels);
    av_frame_set_channel_layout(frame, l_in->channel_layout);
    av_frame_set_sample_rate(frame, l_in->sample_rate);

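    // The frame wraps the caller's plane pointers directly; it does not own
    // this memory, so it must not outlive data->planes.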
    frame->extended_data = frame->data;
    for (int n = 0; n < data->num_planes; n++)
        frame->data[n] = data->planes[n];
    frame->linesize[0] = frame->nb_samples * data->sstride;

    if (av_buffersrc_add_frame(p->in, eof ? NULL : frame) < 0) {
        av_frame_free(&frame);
        return -1;
    }
    av_frame_free(&frame);

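    // Drain every frame currently queued in the buffersink and append its
    // samples to the output buffer.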
    int64_t out_pts = AV_NOPTS_VALUE;
    r->samples = 0;
    for (;;) {
        frame = av_frame_alloc();
        if (!frame)
            break;
        if (av_buffersink_get_frame(p->out, frame) < 0) {
            // Not an error situation - no more output buffers in queue.
            av_frame_free(&frame);
            break;
        }

        mp_audio_realloc_min(r, r->samples + frame->nb_samples);
        for (int n = 0; n < r->num_planes; n++) {
            memcpy((char *)r->planes[n] + r->samples * r->sstride,
                   frame->extended_data[n], frame->nb_samples * r->sstride);
        }
        r->samples += frame->nb_samples;

        if (out_pts == AV_NOPTS_VALUE)
            out_pts = frame->pts;

        av_frame_free(&frame);
    }

    p->samples_in += data->samples;

    if (out_pts != AV_NOPTS_VALUE) {
        double in_time = p->samples_in / (double)data->rate;
        double out_time = out_pts * av_q2d(p->timebase_out);
        // Need pts past the last output sample.
        out_time += r->samples / (double)r->rate;

        af->delay = in_time - out_time;
    }

    *data = *r;
    return 0;
}