Example #1
static int acompressor_filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    const double *src = (const double *)in->data[0];
    AVFilterContext *ctx = inlink->dst;
    SidechainCompressContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    double *dst;

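    /* Process in place when the input is writable; otherwise allocate a
     * new output frame and copy the metadata over. */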
    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(inlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }
    dst = (double *)out->data[0];

    compressor(s, src, dst, src, in->nb_samples,
               s->level_in, s->level_in,
               inlink, inlink);

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
Example #2
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    SoxContext *sox = inlink->dst->priv;
    AVFrame *outsamples;
    size_t nb_in_samples, nb_out_samples;

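    /* Reuse the input frame if it is writable; otherwise allocate an
     * output frame of equal size. */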
    if (av_frame_is_writable(insamples)) {
        outsamples = insamples;
    } else {
        outsamples = ff_get_audio_buffer(inlink, insamples->nb_samples);
        if (!outsamples) {
            av_frame_free(&insamples);
            return AVERROR(ENOMEM);
        }
        outsamples->pts = insamples->pts;
    }

    nb_out_samples = nb_in_samples =
        insamples->nb_samples * sox->effect->in_signal.channels;

    // FIXME not handling cases where not all the input is consumed
    sox->effect->handler.flow(sox->effect, (int32_t *)insamples->data[0],
        (int32_t *)outsamples->data[0], &nb_in_samples, &nb_out_samples);

    outsamples->nb_samples = nb_out_samples / sox->effect->out_signal.channels;

    if (insamples != outsamples)
        av_frame_free(&insamples);

    return ff_filter_frame(inlink->dst->outputs[0], outsamples);
}
Example #3
File: af_bs2b.c  Project: 0Soul/FFmpeg
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    int ret;
    AVFrame *out_frame;

    Bs2bContext     *bs2b = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];

    if (av_frame_is_writable(frame)) {
        out_frame = frame;
    } else {
        out_frame = ff_get_audio_buffer(inlink, frame->nb_samples);
        if (!out_frame) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }
        av_frame_copy(out_frame, frame);
        ret = av_frame_copy_props(out_frame, frame);
        if (ret < 0) {
            av_frame_free(&out_frame);
            av_frame_free(&frame);
            return ret;
        }
    }

    bs2b->filter(bs2b->bs2bp, out_frame->extended_data[0], out_frame->nb_samples);

    if (frame != out_frame)
        av_frame_free(&frame);

    return ff_filter_frame(outlink, out_frame);
}
Example #4
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
{
    AVFrame *tmp;
    int ret;

    av_assert0(avctx->codec_type == AVMEDIA_TYPE_VIDEO);

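    /* No data allocated yet: hand out a fresh reference-counted buffer. */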
    if (!frame->data[0])
        return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);

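    /* Already writable: only the frame properties need refreshing. */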
    if (av_frame_is_writable(frame))
        return ff_decode_frame_props(avctx, frame);

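    /* Not writable: move the old frame aside, allocate a new buffer,
     * and copy the contents back. */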
    tmp = av_frame_alloc();
    if (!tmp)
        return AVERROR(ENOMEM);

    av_frame_move_ref(tmp, frame);

    ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
    if (ret < 0) {
        av_frame_free(&tmp);
        return ret;
    }

    av_frame_copy(frame, tmp);
    av_frame_free(&tmp);

    return 0;
}
Example #5
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx  = inlink->dst;
    VagueDenoiserContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    int direct = av_frame_is_writable(in);

    if (direct) {
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(out, in);
    }

    filter(s, in, out);

    if (!direct)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
Example #6
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    AudioEchoContext *s = ctx->priv;
    AVFrame *out_frame;
#ifdef IDE_COMPILE
    AVRational tmp;
#endif

    if (av_frame_is_writable(frame)) {
        out_frame = frame;
    } else {
        out_frame = ff_get_audio_buffer(inlink, frame->nb_samples);
        if (!out_frame)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out_frame, frame);
    }

    s->echo_samples(s, s->delayptrs, frame->extended_data, out_frame->extended_data,
                    frame->nb_samples, inlink->channels);

#ifdef IDE_COMPILE
    tmp.num = 1;
    tmp.den = inlink->sample_rate;
    s->next_pts = frame->pts + av_rescale_q(frame->nb_samples, tmp, inlink->time_base);
#else
    s->next_pts = frame->pts + av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base);
#endif

    if (frame != out_frame)
        av_frame_free(&frame);

    return ff_filter_frame(ctx->outputs[0], out_frame);
}
Example #7
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    CrystalizerContext *s = ctx->priv;
    AVFrame *out;

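    /* Lazily allocate a one-sample buffer that keeps the previous sample
     * of each channel across frames. */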
    if (!s->prev) {
        s->prev = ff_get_audio_buffer(inlink, 1);
        if (!s->prev) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
    }

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(inlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    s->filter((void **)out->extended_data, (void **)s->prev->extended_data, (const void **)in->extended_data,
              in->nb_samples, in->channels, s->mult, s->clip);

    if (out != in)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
Example #8
File: af_agate.c  Project: WXB506/FFmpeg
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    AudioGateContext *s = ctx->priv;
    const double *src = (const double *)in->data[0];
    const double makeup = s->makeup;
    const double attack_coeff = s->attack_coeff;
    const double release_coeff = s->release_coeff;
    const double level_in = s->level_in;
    AVFrame *out;
    double *dst;
    int n, c;

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(inlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }
    dst = (double *)out->data[0];

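    /* Per sample: apply the input level, build the detection value (peak
     * or average across channels), update the envelope with the attack or
     * release coefficient, then apply gate gain and makeup. */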
    for (n = 0; n < in->nb_samples; n++, src += inlink->channels, dst += inlink->channels) {
        double abs_sample = fabs(src[0]), gain = 1.0;

        for (c = 0; c < inlink->channels; c++)
            dst[c] = src[c] * level_in;

        if (s->link == 1) {
            for (c = 1; c < inlink->channels; c++)
                abs_sample = FFMAX(fabs(src[c]), abs_sample);
        } else {
            for (c = 1; c < inlink->channels; c++)
                abs_sample += fabs(src[c]);

            abs_sample /= inlink->channels;
        }

        if (s->detection)
            abs_sample *= abs_sample;

        s->lin_slope += (abs_sample - s->lin_slope) * (abs_sample > s->lin_slope ? attack_coeff : release_coeff);
        if (s->lin_slope > 0.0)
            gain = output_gain(s->lin_slope, s->ratio, s->thres,
                               s->knee, s->knee_start, s->knee_stop,
                               s->lin_knee_stop, s->range);

        for (c = 0; c < inlink->channels; c++)
            dst[c] *= gain * makeup;
    }

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
Example #9
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    CrystalizerContext *s = ctx->priv;
    const float *src = (const float *)in->data[0];
    const float mult = s->mult;
    AVFrame *out;
    float *dst, *prv;
    int n, c;

    if (!s->prev) {
        s->prev = ff_get_audio_buffer(inlink, 1);
        if (!s->prev) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
    }

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(inlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    dst = (float *)out->data[0];
    prv = (float *)s->prev->data[0];

    for (n = 0; n < in->nb_samples; n++) {
        for (c = 0; c < in->channels; c++) {
            float current = src[c];

            dst[c] = current + (current - prv[c]) * mult;
            prv[c] = current;
            if (s->clip) {
                dst[c] = av_clipf(dst[c], -1, 1);
            }
        }
        dst += c;
        src += c;
    }

    if (out != in)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
Example #10
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    DelogoContext *s = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    AVFrame *out;
    int hsub0 = desc->log2_chroma_w;
    int vsub0 = desc->log2_chroma_h;
    int direct = 0;
    int plane;
    AVRational sar;

    if (av_frame_is_writable(in)) {
        direct = 1;
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(out, in);
    }

    sar = in->sample_aspect_ratio;
    /* Assume square pixels if SAR is unknown */
    if (!sar.num)
        sar.num = sar.den = 1;

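    /* Run the delogo interpolation on every plane, adjusting coordinates
     * for chroma subsampling. */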
    for (plane = 0; plane < desc->nb_components; plane++) {
        int hsub = plane == 1 || plane == 2 ? hsub0 : 0;
        int vsub = plane == 1 || plane == 2 ? vsub0 : 0;

        apply_delogo(out->data[plane], out->linesize[plane],
                     in ->data[plane], in ->linesize[plane],
                     AV_CEIL_RSHIFT(inlink->w, hsub),
                     AV_CEIL_RSHIFT(inlink->h, vsub),
                     sar, s->x>>hsub, s->y>>vsub,
                     /* Up and left borders were rounded down, inject lost bits
                      * into width and height to avoid error accumulation */
                     AV_CEIL_RSHIFT(s->w + (s->x & ((1<<hsub)-1)), hsub),
                     AV_CEIL_RSHIFT(s->h + (s->y & ((1<<vsub)-1)), vsub),
                     s->band>>FFMIN(hsub, vsub),
                     s->show, direct);
    }

    if (!direct)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
Example #11
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
{
    int (*filter_frame)(AVFilterLink *, AVFrame *);
    AVFilterPad *dst = link->dstpad;
    AVFrame *out;

    FF_DPRINTF_START(NULL, filter_frame);
    ff_dlog_link(NULL, link, 1);

    if (!(filter_frame = dst->filter_frame))
        filter_frame = default_filter_frame;

    /* copy the frame if needed */
    if (dst->needs_writable && !av_frame_is_writable(frame)) {
        av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");

        switch (link->type) {
        case AVMEDIA_TYPE_VIDEO:
            out = ff_get_video_buffer(link, link->w, link->h);
            break;
        case AVMEDIA_TYPE_AUDIO:
            out = ff_get_audio_buffer(link, frame->nb_samples);
            break;
        default: av_frame_free(&frame); return AVERROR(EINVAL);
        }
        if (!out) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, frame);

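        /* Copy the actual image or sample data into the writable
         * replacement frame. */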
        switch (link->type) {
        case AVMEDIA_TYPE_VIDEO:
            av_image_copy(out->data, out->linesize, frame->data, frame->linesize,
                          frame->format, frame->width, frame->height);
            break;
        case AVMEDIA_TYPE_AUDIO:
            av_samples_copy(out->extended_data, frame->extended_data,
                            0, 0, frame->nb_samples,
                            av_get_channel_layout_nb_channels(frame->channel_layout),
                            frame->format);
            break;
        default:
            av_frame_free(&out);
            av_frame_free(&frame);
            return AVERROR(EINVAL);
        }

        av_frame_free(&frame);
    } else
        out = frame;

    return filter_frame(link, out);
}
Example #12
static inline int copy_field
(
    lw_log_handler_t *lhp,
    AVFrame          *dst,
    AVFrame          *src,
    int               line_offset
)
{
    /* Check if the destination is writable. */
    if( av_frame_is_writable( dst ) == 0 )
    {
        /* The destination is NOT writable, so allocate new buffers and copy the data. */
        av_frame_unref( dst );
        if( av_frame_ref( dst, src ) < 0 )
        {
            if( lhp->show_log )
                lhp->show_log( lhp, LW_LOG_ERROR, "Failed to reference a video frame.\n" );
            return -1;
        }
        if( av_frame_make_writable( dst ) < 0 )
        {
            if( lhp->show_log )
                lhp->show_log( lhp, LW_LOG_ERROR, "Failed to make a video frame writable.\n" );
            return -1;
        }
        /* For direct rendering, the destination can not know
         * whether the value at the address held by the opaque pointer is valid or not.
         * Anyway, the opaque pointer for direct rendering shall be set to NULL. */
        dst->opaque = NULL;
    }
    else
    {
        /* The destination is writable. Copy field data from the source. */
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get( (enum AVPixelFormat)dst->format );
        int number_of_planes = av_pix_fmt_count_planes( (enum AVPixelFormat)dst->format );
        int height           = MIN( dst->height, src->height );
        for( int i = 0; i < number_of_planes; i++ )
        {
            int r_shift = 1 + ((i == 1 || i == 2) ? desc->log2_chroma_h : 0);
            int field_height = (height >> r_shift) + (line_offset == 0 && (height & 1) ? 1 : 0);
            av_image_copy_plane( dst->data[i] + dst->linesize[i] * line_offset, 2 * dst->linesize[i],
                                 src->data[i] + src->linesize[i] * line_offset, 2 * src->linesize[i],
                                 MIN( dst->linesize[i], src->linesize[i] ),
                                 field_height );
        }
    }
    /* Treat this frame as interlaced. */
    dst->interlaced_frame = 1;
    return 0;
}
Example #13
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    StereoWidenContext *s = ctx->priv;
    const float *src = (const float *)in->data[0];
    const float drymix = s->drymix;
    const float crossfeed = s->crossfeed;
    const float feedback = s->feedback;
    AVFrame *out = NULL;
    float *dst;
    int n;

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(inlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }
    dst = (float *)out->data[0];

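    /* Combine the dry signal, the cross-fed opposite channel, and a
     * delayed feedback tap read from the ring buffer. */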
    for (n = 0; n < in->nb_samples; n++, src += 2, dst += 2) {
        const float left = src[0], right = src[1];
        float *read = s->write + 2;

        if (read > s->buffer + s->length)
            read = s->buffer;

        dst[0] = drymix * left - crossfeed * right - feedback * read[1];
        dst[1] = drymix * right - crossfeed * left - feedback * read[0];

        s->write[0] = left;
        s->write[1] = right;

        if (s->write == s->buffer + s->length)
            s->write = s->buffer;
        else
            s->write += 2;
    }

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
Example #14
File: frame.c  Project: ptahmose/FFmpeg
int av_frame_make_writable(AVFrame *frame)
{
    AVFrame tmp;
    int ret;

    if (!frame->buf[0])
        return AVERROR(EINVAL);

    if (av_frame_is_writable(frame))
        return 0;

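    /* Build a temporary frame with identical parameters, copy data and
     * properties into its new buffers, then move it over the original. */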
    memset(&tmp, 0, sizeof(tmp));
    tmp.format         = frame->format;
    tmp.width          = frame->width;
    tmp.height         = frame->height;
    tmp.channels       = frame->channels;
    tmp.channel_layout = frame->channel_layout;
    tmp.nb_samples     = frame->nb_samples;
    ret = av_frame_get_buffer(&tmp, 32);
    if (ret < 0)
        return ret;

    if (tmp.nb_samples) {
        int ch = tmp.channels;
        CHECK_CHANNELS_CONSISTENCY(&tmp);
        av_samples_copy(tmp.extended_data, frame->extended_data, 0, 0,
                        frame->nb_samples, ch, frame->format);
    } else {
        av_image_copy(tmp.data, tmp.linesize, frame->data, frame->linesize,
                      frame->format, frame->width, frame->height);
    }

    ret = av_frame_copy_props(&tmp, frame);
    if (ret < 0) {
        av_frame_unref(&tmp);
        return ret;
    }

    av_frame_unref(frame);

    *frame = tmp;
    if (tmp.data == tmp.extended_data)
        frame->extended_data = frame->data;

    return 0;
}
Example #15
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ExtraStereoContext *s = ctx->priv;
    const float *src = (const float *)in->data[0];
    const float mult = s->mult;
    AVFrame *out;
    float *dst;
    int n;

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(inlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }
    dst = (float *)out->data[0];

    for (n = 0; n < in->nb_samples; n++) {
        float average, left, right;

        left    = src[n * 2    ];
        right   = src[n * 2 + 1];
        average = (left + right) / 2.;
        left    = average + mult * (left  - average);
        right   = average + mult * (right - average);

        if (s->clip) {
            left  = av_clipf(left,  -1, 1);
            right = av_clipf(right, -1, 1);
        }

        dst[n * 2    ] = left;
        dst[n * 2 + 1] = right;
    }

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
Example #16
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx  = inlink->dst;
    HQDN3DContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];

    AVFrame *out;
    int direct, c;

    if (av_frame_is_writable(in) && !ctx->is_disabled) {
        direct = 1;
        out = in;
    } else {
        direct = 0;
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(out, in);
    }

    for (c = 0; c < 3; c++) {
        denoise(s, in->data[c], out->data[c],
                s->line, &s->frame_prev[c],
                FF_CEIL_RSHIFT(in->width,  (!!c * s->hsub)),
                FF_CEIL_RSHIFT(in->height, (!!c * s->vsub)),
                in->linesize[c], out->linesize[c],
                s->coefs[c ? CHROMA_SPATIAL : LUMA_SPATIAL],
                s->coefs[c ? CHROMA_TMP     : LUMA_TMP]);
    }

    if (ctx->is_disabled) {
        av_frame_free(&out);
        return ff_filter_frame(outlink, in);
    }

    if (!direct)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
Example #17
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    LADSPAContext *s = ctx->priv;
    AVFrame *out;
    int i, h;

    if (!s->nb_outputs ||
        (av_frame_is_writable(in) && s->nb_inputs == s->nb_outputs &&
        !(s->desc->Properties & LADSPA_PROPERTY_INPLACE_BROKEN))) {
        out = in;
    } else {
        out = ff_get_audio_buffer(ctx->outputs[0], in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

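    /* Connect the input and output ports of each plugin instance, then
     * run it over the frame. */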
    for (h = 0; h < s->nb_handles; h++) {
        for (i = 0; i < s->nb_inputs; i++) {
            s->desc->connect_port(s->handles[h], s->ipmap[i],
                                  (LADSPA_Data*)in->extended_data[i]);
        }

        for (i = 0; i < s->nb_outputs; i++) {
            s->desc->connect_port(s->handles[h], s->opmap[i],
                                  (LADSPA_Data*)out->extended_data[i]);
        }

        s->desc->run(s->handles[h], in->nb_samples);
    }

    for (i = 0; i < s->nb_outputcontrols; i++)
        print_ctl_info(ctx, AV_LOG_VERBOSE, s, i, s->ocmap, s->octlv, 1);

    if (out != in)
        av_frame_free(&in);

    return ff_filter_frame(ctx->outputs[0], out);
}
Example #18
int av_frame_make_writable(AVFrame *frame)
{
    AVFrame tmp;
    int ret;

    if (!frame->buf[0])
        return AVERROR(EINVAL);

    if (av_frame_is_writable(frame))
        return 0;

    memset(&tmp, 0, sizeof(tmp));
    tmp.format         = frame->format;
    tmp.width          = frame->width;
    tmp.height         = frame->height;
    tmp.channels       = frame->channels;
    tmp.channel_layout = frame->channel_layout;
    tmp.nb_samples     = frame->nb_samples;
    ret = av_frame_get_buffer(&tmp, 32);
    if (ret < 0)
        return ret;

    ret = av_frame_copy(&tmp, frame);
    if (ret < 0) {
        av_frame_unref(&tmp);
        return ret;
    }

    ret = av_frame_copy_props(&tmp, frame);
    if (ret < 0) {
        av_frame_unref(&tmp);
        return ret;
    }

    av_frame_unref(frame);

    *frame = tmp;
    if (tmp.data == tmp.extended_data)
        frame->extended_data = frame->data;

    return 0;
}
Example #19
File: af_monoparts.c  Project: svpv/qadrc
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    MonoPartsContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];

    int part = s->current_part++;
    if (part < s->part1) // stereo passthru
        return ff_filter_frame(outlink, frame);

    if (!av_frame_is_writable(frame)) {
        AVFrame *copy = ff_get_audio_buffer(inlink, frame->nb_samples);
        if (!copy) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(copy, frame);
        av_frame_copy(copy, frame);
        av_frame_free(&frame);
        frame = copy;
    }

    if (part == s->part1) {
        if (part == 0)
            s->full_mono(frame);
        else
            s->stereo2mono(frame);
    } else if (part < s->part2) {
        s->full_mono(frame);
    } else if (part == s->part2) {
        bool smallframe = frame->nb_samples < inlink->min_samples;
        bool lastpart = *s->parts == '\0';
        if (smallframe && lastpart)
            s->full_mono(frame);
        else
            s->mono2stereo(frame);
        if (lastpart)
            s->part1 = s->part2 = INT_MAX;
        else if (!scan_part(ctx)) {
            av_frame_free(&frame);
            return AVERROR(EINVAL);
        }
    }

    return ff_filter_frame(outlink, frame);
}
Example #20
static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
    RemovelogoContext *s = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *outpicref;
    int direct = 0;

    if (av_frame_is_writable(inpicref)) {
        direct = 1;
        outpicref = inpicref;
    } else {
        outpicref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!outpicref) {
            av_frame_free(&inpicref);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(outpicref, inpicref);
    }

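    /* Blur the logo region of each plane: the full-resolution mask for
     * luma, the half-resolution mask for both chroma planes. */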
    blur_image(s->mask,
               inpicref ->data[0], inpicref ->linesize[0],
               outpicref->data[0], outpicref->linesize[0],
               s->full_mask_data, inlink->w,
               inlink->w, inlink->h, direct, &s->full_mask_bbox);
    blur_image(s->mask,
               inpicref ->data[1], inpicref ->linesize[1],
               outpicref->data[1], outpicref->linesize[1],
               s->half_mask_data, inlink->w/2,
               inlink->w/2, inlink->h/2, direct, &s->half_mask_bbox);
    blur_image(s->mask,
               inpicref ->data[2], inpicref ->linesize[2],
               outpicref->data[2], outpicref->linesize[2],
               s->half_mask_data, inlink->w/2,
               inlink->w/2, inlink->h/2, direct, &s->half_mask_bbox);

    if (!direct)
        av_frame_free(&inpicref);

    return ff_filter_frame(outlink, outpicref);
}
Example #21
static int filter_frame(AVFilterLink *inlink, AVFrame *inbuf)
{
    AudioPhaserContext *p = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *outbuf;

    if (av_frame_is_writable(inbuf)) {
        outbuf = inbuf;
    } else {
        outbuf = ff_get_audio_buffer(inlink, inbuf->nb_samples);
        if (!outbuf) {
            av_frame_free(&inbuf);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(outbuf, inbuf);
    }

    p->phaser(p, inbuf->extended_data, outbuf->extended_data,
              outbuf->nb_samples, av_frame_get_channels(outbuf));

    if (inbuf != outbuf)
        av_frame_free(&inbuf);

    return ff_filter_frame(outlink, outbuf);
}
Example #22
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    HQDN3DContext *hqdn3d = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *out;
    int direct, c;

    if (av_frame_is_writable(in)) {
        direct = 1;
        out = in;
    } else {
        direct = 0;
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(out, in);
        out->width  = outlink->w;
        out->height = outlink->h;
    }

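    /* Denoise luma and both chroma planes; the temporal state persists in
     * frame_prev between frames. */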
    for (c = 0; c < 3; c++) {
        denoise(hqdn3d, in->data[c], out->data[c],
                hqdn3d->line, &hqdn3d->frame_prev[c],
                in->width  >> (!!c * hqdn3d->hsub),
                in->height >> (!!c * hqdn3d->vsub),
                in->linesize[c], out->linesize[c],
                hqdn3d->coefs[c?2:0], hqdn3d->coefs[c?3:1]);
    }

    if (!direct)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
Example #23
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    FlangerContext *s = ctx->priv;
    AVFrame *out_frame;
    int chan, i;

    if (av_frame_is_writable(frame)) {
        out_frame = frame;
    } else {
        out_frame = ff_get_audio_buffer(inlink, frame->nb_samples);
        if (!out_frame) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out_frame, frame);
    }

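    /* For each sample, feed the delay line and read LFO-modulated taps,
     * interpolating between them linearly or quadratically. */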
    for (i = 0; i < frame->nb_samples; i++) {

        s->delay_buf_pos = (s->delay_buf_pos + s->max_samples - 1) % s->max_samples;

        for (chan = 0; chan < inlink->channels; chan++) {
            double *src = (double *)frame->extended_data[chan];
            double *dst = (double *)out_frame->extended_data[chan];
            double delayed_0, delayed_1;
            double delayed;
            double in, out;
            int channel_phase = chan * s->lfo_length * s->channel_phase + .5;
            double delay = s->lfo[(s->lfo_pos + channel_phase) % s->lfo_length];
            int int_delay = (int)delay;
            double frac_delay = modf(delay, &delay);
            double *delay_buffer = (double *)s->delay_buffer[chan];

            in = src[i];
            delay_buffer[s->delay_buf_pos] = in + s->delay_last[chan] *
                                                           s->feedback_gain;
            delayed_0 = delay_buffer[(s->delay_buf_pos + int_delay++) % s->max_samples];
            delayed_1 = delay_buffer[(s->delay_buf_pos + int_delay++) % s->max_samples];

            if (s->interpolation == INTERPOLATION_LINEAR) {
                delayed = delayed_0 + (delayed_1 - delayed_0) * frac_delay;
            } else {
                double a, b;
                double delayed_2 = delay_buffer[(s->delay_buf_pos + int_delay++) % s->max_samples];
                delayed_2 -= delayed_0;
                delayed_1 -= delayed_0;
                a = delayed_2 * .5 - delayed_1;
                b = delayed_1 *  2 - delayed_2 *.5;
                delayed = delayed_0 + (a * frac_delay + b) * frac_delay;
            }

            s->delay_last[chan] = delayed;
            out = in * s->in_gain + delayed * s->delay_gain;
            dst[i] = out;
        }
        s->lfo_pos = (s->lfo_pos + 1) % s->lfo_length;
    }

    if (frame != out_frame)
        av_frame_free(&frame);

    return ff_filter_frame(ctx->outputs[0], out_frame);
}
Example #24
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    DCTdnoizContext *s = ctx->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    int direct, plane;
    AVFrame *out;

    if (av_frame_is_writable(in)) {
        direct = 1;
        out = in;
    } else {
        direct = 0;
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    s->color_decorrelation(s->cbuf[0], s->p_linesize,
                           in->data[0], in->linesize[0],
                           s->pr_width, s->pr_height);
    for (plane = 0; plane < 3; plane++) {
#ifdef IDE_COMPILE
        ThreadData td = {
            s->cbuf[0][plane],
            s->cbuf[1][plane],
        };
#else
        ThreadData td = {
            .src = s->cbuf[0][plane],
            .dst = s->cbuf[1][plane],
        };
#endif
        ctx->internal->execute(ctx, filter_slice, &td, NULL, s->nb_threads);
    }
    s->color_correlation(out->data[0], out->linesize[0],
                         s->cbuf[1], s->p_linesize,
                         s->pr_width, s->pr_height);

    if (!direct) {
        int y;
        uint8_t *dst = out->data[0];
        const uint8_t *src = in->data[0];
        const int dst_linesize = out->linesize[0];
        const int src_linesize = in->linesize[0];
        const int hpad = (inlink->w - s->pr_width) * 3;
        const int vpad = (inlink->h - s->pr_height);

        if (hpad) {
            uint8_t       *dstp = dst + s->pr_width * 3;
            const uint8_t *srcp = src + s->pr_width * 3;

            for (y = 0; y < s->pr_height; y++) {
                memcpy(dstp, srcp, hpad);
                dstp += dst_linesize;
                srcp += src_linesize;
            }
        }
        if (vpad) {
            uint8_t       *dstp = dst + s->pr_height * dst_linesize;
            const uint8_t *srcp = src + s->pr_height * src_linesize;

            for (y = 0; y < vpad; y++) {
                memcpy(dstp, srcp, inlink->w * 3);
                dstp += dst_linesize;
                srcp += src_linesize;
            }
        }

        av_frame_free(&in);
    }

    return ff_filter_frame(outlink, out);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    int i;
    DCTdnoizContext *s = ctx->priv;

    av_free(s->weights);
    for (i = 0; i < 2; i++) {
        av_free(s->cbuf[i][0]);
        av_free(s->cbuf[i][1]);
        av_free(s->cbuf[i][2]);
    }
    for (i = 0; i < s->nb_threads; i++) {
        av_free(s->slices[i]);
        av_expr_free(s->expr[i]);
    }
}
Example #25
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext   *ctx     = inlink->dst;
    FieldOrderContext *s       = ctx->priv;
    AVFilterLink      *outlink = ctx->outputs[0];
    int h, plane, src_line_step, dst_line_step, line_size, line;
    uint8_t *dst, *src;
    AVFrame *out;

    if (!frame->interlaced_frame ||
        frame->top_field_first == s->dst_tff) {
        av_log(ctx, AV_LOG_VERBOSE,
               "Skipping %s.\n",
               frame->interlaced_frame ?
               "frame with same field order" : "progressive frame");
        return ff_filter_frame(outlink, frame);
    }

    if (av_frame_is_writable(frame)) {
        out = frame;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, frame);
    }

    av_log(ctx, AV_LOG_TRACE,
            "picture will move %s one line\n",
            s->dst_tff ? "up" : "down");
    h = frame->height;
    for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) {
        dst_line_step = out->linesize[plane];
        src_line_step = frame->linesize[plane];
        line_size = s->line_size[plane];
        dst = out->data[plane];
        src = frame->data[plane];
        if (s->dst_tff) {
            /** Move every line up one line, working from
             *  the top to the bottom of the frame.
             *  The original top line is lost.
             *  The new last line is created as a copy of the
             *  penultimate line from that field. */
            for (line = 0; line < h; line++) {
                if (1 + line < frame->height) {
                    memcpy(dst, src + src_line_step, line_size);
                } else {
                    memcpy(dst, src - 2 * src_line_step, line_size);
                }
                dst += dst_line_step;
                src += src_line_step;
            }
        } else {
            /** Move every line down one line, working from
             *  the bottom to the top of the frame.
             *  The original bottom line is lost.
             *  The new first line is created as a copy of the
             *  second line from that field. */
            dst += (h - 1) * dst_line_step;
            src += (h - 1) * src_line_step;
            for (line = h - 1; line >= 0 ; line--) {
                if (line > 0) {
                    memcpy(dst, src - src_line_step, line_size);
                } else {
                    memcpy(dst, src + 2 * src_line_step, line_size);
                }
                dst -= dst_line_step;
                src -= src_line_step;
            }
        }
    }
    out->top_field_first = s->dst_tff;

    if (frame != out)
        av_frame_free(&frame);
    return ff_filter_frame(outlink, out);
}
Example #26
static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
{
    int (*filter_frame)(AVFilterLink *, AVFrame *);
    AVFilterContext *dstctx = link->dst;
    AVFilterPad *dst = link->dstpad;
    AVFrame *out = NULL;
    int ret;
    AVFilterCommand *cmd= link->dst->command_queue;
    int64_t pts;

    if (link->closed) {
        av_frame_free(&frame);
        return AVERROR_EOF;
    }

    if (!(filter_frame = dst->filter_frame))
        filter_frame = default_filter_frame;

    /* copy the frame if needed */
    if (dst->needs_writable && !av_frame_is_writable(frame)) {
        av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");

        switch (link->type) {
        case AVMEDIA_TYPE_VIDEO:
            out = ff_get_video_buffer(link, link->w, link->h);
            break;
        case AVMEDIA_TYPE_AUDIO:
            out = ff_get_audio_buffer(link, frame->nb_samples);
            break;
        default:
            ret = AVERROR(EINVAL);
            goto fail;
        }
        if (!out) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ret = av_frame_copy_props(out, frame);
        if (ret < 0)
            goto fail;

        switch (link->type) {
        case AVMEDIA_TYPE_VIDEO:
            av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize,
                          frame->format, frame->width, frame->height);
            break;
        case AVMEDIA_TYPE_AUDIO:
            av_samples_copy(out->extended_data, frame->extended_data,
                            0, 0, frame->nb_samples,
                            av_get_channel_layout_nb_channels(frame->channel_layout),
                            frame->format);
            break;
        default:
            ret = AVERROR(EINVAL);
            goto fail;
        }

        av_frame_free(&frame);
    } else
        out = frame;

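    /* Execute any queued filter commands whose trigger time has been
     * reached. */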
    while(cmd && cmd->time <= out->pts * av_q2d(link->time_base)){
        av_log(link->dst, AV_LOG_DEBUG,
               "Processing command time:%f command:%s arg:%s\n",
               cmd->time, cmd->command, cmd->arg);
        avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
        ff_command_queue_pop(link->dst);
        cmd= link->dst->command_queue;
    }

    pts = out->pts;
    if (dstctx->enable_str) {
        int64_t pos = av_frame_get_pkt_pos(out);
        dstctx->var_values[VAR_N] = link->frame_count;
        dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
        dstctx->var_values[VAR_W] = link->w;
        dstctx->var_values[VAR_H] = link->h;
        dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;

        dstctx->is_disabled = fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) < 0.5;
        if (dstctx->is_disabled &&
            (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
            filter_frame = default_filter_frame;
    }
    ret = filter_frame(link, out);
    link->frame_count++;
    link->frame_requested = 0;
    ff_update_link_current_pts(link, pts);
    return ret;

fail:
    av_frame_free(&out);
    av_frame_free(&frame);
    return ret;
}
Example #27
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    unsigned x, y, direct = 0;
    AVFilterContext *ctx = inlink->dst;
    VignetteContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;

    if (av_frame_is_writable(in)) {
        direct = 1;
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    if (s->eval_mode == EVAL_MODE_FRAME)
        update_context(s, inlink, in);

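    /* Packed RGB: scale each component by the vignette factor; planar
     * formats: scale luma directly and chroma around its 127 midpoint,
     * adding dither in both cases. */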
    if (s->desc->flags & AV_PIX_FMT_FLAG_RGB) {
        uint8_t       *dst = out->data[0];
        const uint8_t *src = in ->data[0];
        const float *fmap = s->fmap;
        const int dst_linesize = out->linesize[0];
        const int src_linesize = in ->linesize[0];
        const int fmap_linesize = s->fmap_linesize;

        for (y = 0; y < inlink->h; y++) {
            uint8_t       *dstp = dst;
            const uint8_t *srcp = src;

            for (x = 0; x < inlink->w; x++, dstp += 3, srcp += 3) {
                const float f = fmap[x];

                dstp[0] = av_clip_uint8(srcp[0] * f + get_dither_value(s));
                dstp[1] = av_clip_uint8(srcp[1] * f + get_dither_value(s));
                dstp[2] = av_clip_uint8(srcp[2] * f + get_dither_value(s));
            }
            dst += dst_linesize;
            src += src_linesize;
            fmap += fmap_linesize;
        }
    } else {
        int plane;

        for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
            uint8_t       *dst = out->data[plane];
            const uint8_t *src = in ->data[plane];
            const float *fmap = s->fmap;
            const int dst_linesize = out->linesize[plane];
            const int src_linesize = in ->linesize[plane];
            const int fmap_linesize = s->fmap_linesize;
            const int chroma = plane == 1 || plane == 2;
            const int hsub = chroma ? s->desc->log2_chroma_w : 0;
            const int vsub = chroma ? s->desc->log2_chroma_h : 0;
            const int w = AV_CEIL_RSHIFT(inlink->w, hsub);
            const int h = AV_CEIL_RSHIFT(inlink->h, vsub);

            for (y = 0; y < h; y++) {
                uint8_t *dstp = dst;
                const uint8_t *srcp = src;

                for (x = 0; x < w; x++) {
                    const double dv = get_dither_value(s);
                    if (chroma) *dstp++ = av_clip_uint8(fmap[x << hsub] * (*srcp++ - 127) + 127 + dv);
                    else        *dstp++ = av_clip_uint8(fmap[x        ] *  *srcp++              + dv);
                }
                dst += dst_linesize;
                src += src_linesize;
                fmap += fmap_linesize << vsub;
            }
        }
    }

    if (!direct)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
Example #28
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    StereoToolsContext *s = ctx->priv;
    const double *src = (const double *)in->data[0];
    const double sb = s->base < 0 ? s->base * 0.5 : s->base;
    const double sbal = 1 + s->sbal;
    const double mpan = 1 + s->mpan;
    const double slev = s->slev;
    const double mlev = s->mlev;
    const double balance_in = s->balance_in;
    const double balance_out = s->balance_out;
    const double level_in = s->level_in;
    const double level_out = s->level_out;
    const double sc_level = s->sc_level;
    const double delay = s->delay;
    const int length = s->length;
    const int mute_l = floor(s->mute_l + 0.5);
    const int mute_r = floor(s->mute_r + 0.5);
    const int phase_l = floor(s->phase_l + 0.5);
    const int phase_r = floor(s->phase_r + 0.5);
    double *buffer = s->buffer;
    AVFrame *out;
    double *dst;
    int nbuf = inlink->sample_rate * (fabs(delay) / 1000.);
    int n;

    nbuf -= nbuf % 2;
    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(inlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }
    dst = (double *)out->data[0];

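    /* Per-sample chain: input gain and balance, optional soft clip,
     * mode-dependent mid/side matrixing, mute/phase switches, delay line,
     * stereo base and phase rotation, then output balance and gain. */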
    for (n = 0; n < in->nb_samples; n++, src += 2, dst += 2) {
        double L = src[0], R = src[1], l, r, m, S;

        L *= level_in;
        R *= level_in;

        L *= 1. - FFMAX(0., balance_in);
        R *= 1. + FFMIN(0., balance_in);

        if (s->softclip) {
            R = s->inv_atan_shape * atan(R * sc_level);
            L = s->inv_atan_shape * atan(L * sc_level);
        }

        switch (s->mode) {
        case 0:
            m = (L + R) * 0.5;
            S = (L - R) * 0.5;
            l = m * mlev * FFMIN(1., 2. - mpan) + S * slev * FFMIN(1., 2. - sbal);
            r = m * mlev * FFMIN(1., mpan)      - S * slev * FFMIN(1., sbal);
            L = l;
            R = r;
            break;
        case 1:
            l = L * FFMIN(1., 2. - sbal);
            r = R * FFMIN(1., sbal);
            L = 0.5 * (l + r) * mlev;
            R = 0.5 * (l - r) * slev;
            break;
        case 2:
            l = L * mlev * FFMIN(1., 2. - mpan) + R * slev * FFMIN(1., 2. - sbal);
            r = L * mlev * FFMIN(1., mpan)      - R * slev * FFMIN(1., sbal);
            L = l;
            R = r;
            break;
        case 3:
            R = L;
            break;
        case 4:
            L = R;
            break;
        case 5:
            L = (L + R) / 2;
            R = L;
            break;
        case 6:
            l = L;
            L = R;
            R = l;
            m = (L + R) * 0.5;
            S = (L - R) * 0.5;
            l = m * mlev * FFMIN(1., 2. - mpan) + S * slev * FFMIN(1., 2. - sbal);
            r = m * mlev * FFMIN(1., mpan)      - S * slev * FFMIN(1., sbal);
            L = l;
            R = r;
            break;
        }

        L *= 1. - mute_l;
        R *= 1. - mute_r;

        L *= (2. * (1. - phase_l)) - 1.;
        R *= (2. * (1. - phase_r)) - 1.;

        buffer[s->pos  ] = L;
        buffer[s->pos+1] = R;

        if (delay > 0.) {
            R = buffer[(s->pos - (int)nbuf + 1 + length) % length];
        } else if (delay < 0.) {
            L = buffer[(s->pos - (int)nbuf + length)     % length];
        }

        l = L + sb * L - sb * R;
        r = R + sb * R - sb * L;

        L = l;
        R = r;

        l = L * s->phase_cos_coef - R * s->phase_sin_coef;
        r = L * s->phase_sin_coef + R * s->phase_cos_coef;

        L = l;
        R = r;

        s->pos = (s->pos + 2) % s->length;

        L *= 1. - FFMAX(0., balance_out);
        R *= 1. + FFMIN(0., balance_out);

        L *= level_out;
        R *= level_out;

        dst[0] = L;
        dst[1] = R;
    }

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
Example #29
File: af_volume.c  Project: AVLeo/libav
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    VolumeContext *vol    = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    int nb_samples        = buf->nb_samples;
    AVFrame *out_buf;
    AVFrameSideData *sd = av_frame_get_side_data(buf, AV_FRAME_DATA_REPLAYGAIN);
    int ret;

    if (sd && vol->replaygain != REPLAYGAIN_IGNORE) {
        if (vol->replaygain != REPLAYGAIN_DROP) {
            AVReplayGain *replaygain = (AVReplayGain*)sd->data;
            int32_t gain  = 100000;
            uint32_t peak = 100000;
            float g, p;

            if (vol->replaygain == REPLAYGAIN_TRACK &&
                replaygain->track_gain != INT32_MIN) {
                gain = replaygain->track_gain;

                if (replaygain->track_peak != 0)
                    peak = replaygain->track_peak;
            } else if (replaygain->album_gain != INT32_MIN) {
                gain = replaygain->album_gain;

                if (replaygain->album_peak != 0)
                    peak = replaygain->album_peak;
            } else {
                av_log(inlink->dst, AV_LOG_WARNING, "Both ReplayGain gain "
                       "values are unknown.\n");
            }
            g = gain / 100000.0f;
            p = peak / 100000.0f;

            av_log(inlink->dst, AV_LOG_VERBOSE,
                   "Using gain %f dB from replaygain side data.\n", g);

            vol->volume   = pow(10, (g + vol->replaygain_preamp) / 20);
            if (vol->replaygain_noclip)
                vol->volume = FFMIN(vol->volume, 1.0 / p);
            vol->volume_i = (int)(vol->volume * 256 + 0.5);

            volume_init(vol);
        }
        av_frame_remove_side_data(buf, AV_FRAME_DATA_REPLAYGAIN);
    }

    if (vol->volume == 1.0 || vol->volume_i == 256)
        return ff_filter_frame(outlink, buf);

    /* do volume scaling in-place if input buffer is writable */
    if (av_frame_is_writable(buf)) {
        out_buf = buf;
    } else {
        out_buf = ff_get_audio_buffer(inlink, nb_samples);
        if (!out_buf) {
            av_frame_free(&buf);
            return AVERROR(ENOMEM);
        }
        ret = av_frame_copy_props(out_buf, buf);
        if (ret < 0) {
            av_frame_free(&out_buf);
            av_frame_free(&buf);
            return ret;
        }
    }

    if (vol->precision != PRECISION_FIXED || vol->volume_i > 0) {
        int p, plane_samples;

        if (av_sample_fmt_is_planar(buf->format))
            plane_samples = FFALIGN(nb_samples, vol->samples_align);
        else
            plane_samples = FFALIGN(nb_samples * vol->channels, vol->samples_align);

        if (vol->precision == PRECISION_FIXED) {
            for (p = 0; p < vol->planes; p++) {
                vol->scale_samples(out_buf->extended_data[p],
                                   buf->extended_data[p], plane_samples,
                                   vol->volume_i);
            }
        } else if (av_get_packed_sample_fmt(vol->sample_fmt) == AV_SAMPLE_FMT_FLT) {
            for (p = 0; p < vol->planes; p++) {
                vol->fdsp.vector_fmul_scalar((float *)out_buf->extended_data[p],
                                             (const float *)buf->extended_data[p],
                                             vol->volume, plane_samples);
            }
        } else {
            for (p = 0; p < vol->planes; p++) {
                vol->fdsp.vector_dmul_scalar((double *)out_buf->extended_data[p],
                                             (const double *)buf->extended_data[p],
                                             vol->volume, plane_samples);
            }
        }
    }

    emms_c();

    if (buf != out_buf)
        av_frame_free(&buf);

    return ff_filter_frame(outlink, out_buf);
}
Example #30
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    GBlurContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    int plane;

    set_params(s->sigma,  s->steps, &s->postscale,  &s->boundaryscale,  &s->nu);
    set_params(s->sigmaV, s->steps, &s->postscaleV, &s->boundaryscaleV, &s->nuV);

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

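    /* For each selected plane: load samples into the float work buffer,
     * run the 2-D IIR Gaussian, and store the result back at the frame's
     * bit depth. */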
    for (plane = 0; plane < s->nb_planes; plane++) {
        const int height = s->planeheight[plane];
        const int width = s->planewidth[plane];
        float *bptr = s->buffer;
        const uint8_t *src = in->data[plane];
        const uint16_t *src16 = (const uint16_t *)in->data[plane];
        uint8_t *dst = out->data[plane];
        uint16_t *dst16 = (uint16_t *)out->data[plane];
        int y, x;

        if (!s->sigma || !(s->planes & (1 << plane))) {
            if (out != in)
                av_image_copy_plane(out->data[plane], out->linesize[plane],
                                    in->data[plane], in->linesize[plane],
                                    width * ((s->depth + 7) / 8), height);
            continue;
        }

        if (s->depth == 8) {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++) {
                    bptr[x] = src[x];
                }
                bptr += width;
                src += in->linesize[plane];
            }
        } else {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++) {
                    bptr[x] = src16[x];
                }
                bptr += width;
                src16 += in->linesize[plane] / 2;
            }
        }

        gaussianiir2d(ctx, plane);

        bptr = s->buffer;
        if (s->depth == 8) {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++) {
                    dst[x] = bptr[x];
                }
                bptr += width;
                dst += out->linesize[plane];
            }
        } else {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++) {
                    dst16[x] = bptr[x];
                }
                bptr += width;
                dst16 += out->linesize[plane] / 2;
            }
        }
    }

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}