/* Apply the crystalizer effect to one audio frame and pass it downstream. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx  = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    CrystalizerContext *s = ctx->priv;
    AVFrame *frame;

    /* Lazily allocate the one-sample-per-channel history buffer. */
    if (!s->prev) {
        s->prev = ff_get_audio_buffer(inlink, 1);
        if (!s->prev) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
    }

    /* Process in place when the input is writable, otherwise copy. */
    if (!av_frame_is_writable(in)) {
        frame = ff_get_audio_buffer(inlink, in->nb_samples);
        if (!frame) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(frame, in);
    } else {
        frame = in;
    }

    s->filter((void **)frame->extended_data, (void **)s->prev->extended_data,
              (const void **)in->extended_data, in->nb_samples,
              in->channels, s->mult, s->clip);

    if (frame != in)
        av_frame_free(&in);

    return ff_filter_frame(outlink, frame);
}
/* Configure the output link: allocate per-channel IRDFT contexts, the
 * per-channel output gain table and the overlap-add work buffers. */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioSurroundContext *s = ctx->priv;
    int n;

    /* One inverse RDFT context per output channel. */
    s->irdft = av_calloc(outlink->channels, sizeof(*s->irdft));
    if (!s->irdft)
        return AVERROR(ENOMEM);
    for (n = 0; n < outlink->channels; n++) {
        s->irdft[n] = av_rdft_init(ff_log2(s->buf_size), IDFT_C2R);
        if (!s->irdft[n])
            return AVERROR(ENOMEM);
    }
    s->nb_out_channels = outlink->channels;

    /* Base output level for each channel, with extra factors for the
     * front-center and LFE channels when present in the layout. */
    s->output_levels = av_malloc_array(s->nb_out_channels,
                                       sizeof(*s->output_levels));
    if (!s->output_levels)
        return AVERROR(ENOMEM);
    for (n = 0; n < s->nb_out_channels; n++)
        s->output_levels[n] = s->level_out;
    n = av_get_channel_layout_channel_index(outlink->channel_layout,
                                            AV_CH_FRONT_CENTER);
    if (n >= 0)
        s->output_levels[n] *= s->fc_out;
    n = av_get_channel_layout_channel_index(outlink->channel_layout,
                                            AV_CH_LOW_FREQUENCY);
    if (n >= 0)
        s->output_levels[n] *= s->lfe_out;

    /* Double-sized buffers for the overlap-add resynthesis. */
    s->output         = ff_get_audio_buffer(outlink, s->buf_size * 2);
    s->overlap_buffer = ff_get_audio_buffer(outlink, s->buf_size * 2);
    if (!s->overlap_buffer || !s->output)
        return AVERROR(ENOMEM);

    return 0;
}
/* Float-only crystalizer: out = cur + (cur - prev) * mult per channel,
 * optionally clipped to [-1, 1], keeping the last sample as history. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx  = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    CrystalizerContext *s = ctx->priv;
    const float *src  = (const float *)in->data[0];
    const float mult  = s->mult;
    const int clip    = s->clip;
    const int channels = in->channels;
    AVFrame *out;
    float *dst, *prv;
    int i, ch;

    /* First call: allocate the one-sample history buffer. */
    if (!s->prev) {
        s->prev = ff_get_audio_buffer(inlink, 1);
        if (!s->prev) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
    }

    if (!av_frame_is_writable(in)) {
        out = ff_get_audio_buffer(inlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    } else {
        out = in;
    }

    dst = (float *)out->data[0];
    prv = (float *)s->prev->data[0];

    for (i = 0; i < in->nb_samples; i++) {
        for (ch = 0; ch < channels; ch++) {
            const float cur = src[ch];
            const float v   = cur + (cur - prv[ch]) * mult;

            prv[ch] = cur;
            dst[ch] = clip ? av_clipf(v, -1, 1) : v;
        }
        dst += channels;
        src += channels;
    }

    if (out != in)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
/* Sidechain compressor input handler: buffer the incoming frame into the
 * FIFO of whichever input it arrived on, then, once both FIFOs hold data,
 * read matching-length chunks from each and run the compressor.
 * Input 0 is the main signal, input 1 the sidechain key. */
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
    AVFilterContext *ctx = link->dst;
    SidechainCompressContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out = NULL, *in[2] = { NULL };
    double *dst;
    int nb_samples;
    int i;

    /* Identify which of the two inputs this frame came from. */
    for (i = 0; i < 2; i++)
        if (link == ctx->inputs[i])
            break;
    av_assert0(i < 2);

    av_audio_fifo_write(s->fifo[i], (void **)frame->extended_data,
                        frame->nb_samples);
    av_frame_free(&frame);

    /* Only as many samples as BOTH inputs can currently provide. */
    nb_samples = FFMIN(av_audio_fifo_size(s->fifo[0]),
                       av_audio_fifo_size(s->fifo[1]));
    if (!nb_samples)
        return 0;

    out = ff_get_audio_buffer(outlink, nb_samples);
    if (!out)
        return AVERROR(ENOMEM);
    for (i = 0; i < 2; i++) {
        in[i] = ff_get_audio_buffer(ctx->inputs[i], nb_samples);
        if (!in[i]) {
            av_frame_free(&in[0]);
            av_frame_free(&in[1]);
            av_frame_free(&out);
            return AVERROR(ENOMEM);
        }
        av_audio_fifo_read(s->fifo[i], (void **)in[i]->data, nb_samples);
    }

    dst = (double *)out->data[0];
    /* Output timestamps are a simple running sample counter. */
    out->pts = s->pts;
    s->pts += nb_samples;

    compressor(s, (double *)in[0]->data[0], dst,
               (double *)in[1]->data[0], nb_samples,
               s->level_in, s->level_sc,
               ctx->inputs[0], ctx->inputs[1]);

    av_frame_free(&in[0]);
    av_frame_free(&in[1]);

    return ff_filter_frame(outlink, out);
}
/* Pull nb_samples from the buffersink FIFO into `frame`, stamping it with
 * the running next_pts and advancing that pts by the samples consumed. */
static int read_from_fifo(AVFilterContext *ctx, AVFrame *frame,
                          int nb_samples)
{
    BufferSinkContext *s = ctx->priv;
    AVFilterLink *link = ctx->inputs[0];
    AVFrame *tmp;
#ifdef IDE_COMPILE
    AVRational tb;
#endif

    tmp = ff_get_audio_buffer(link, nb_samples);
    if (!tmp)
        return AVERROR(ENOMEM);

    av_audio_fifo_read(s->audio_fifo, (void**)tmp->extended_data, nb_samples);

    tmp->pts = s->next_pts;
    if (s->next_pts != AV_NOPTS_VALUE) {
        /* Advance next_pts by nb_samples expressed in the link time base. */
#ifdef IDE_COMPILE
        tb.num = 1;
        tb.den = link->sample_rate;
        s->next_pts += av_rescale_q(nb_samples, tb, link->time_base);
#else
        s->next_pts += av_rescale_q(nb_samples,
                                    (AVRational){1, link->sample_rate},
                                    link->time_base);
#endif
    }

    av_frame_move_ref(frame, tmp);
    av_frame_free(&tmp);

    return 0;
}
/**
 * Request a frame from upstream; on EOF, drain the samples still buffered
 * inside the sox effect and push them downstream before reporting EOF.
 *
 * Fixes: the drain loop was written as "while (0)", making the flush path
 * unreachable, and the output allocation was never checked for failure.
 *
 * @return 0 on success, AVERROR_EOF once fully drained, or a negative
 *         AVERROR code on error.
 */
static int request_frame(AVFilterLink *outlink)
{
    SoxContext *sox = outlink->dst->priv;
    sox_effect_t *effect = sox->effect;
    size_t out_nb_samples = 1024;
    AVFrame *outsamples;
    int ret;

    ret = ff_request_frame(outlink->src->inputs[0]);

    if (ret == AVERROR_EOF) {
        /* drain cached samples */
        while (1) {
            outsamples = ff_get_audio_buffer(outlink, out_nb_samples);
            if (!outsamples)
                return AVERROR(ENOMEM);

            /* drain() rewrites out_nb_samples with the count produced. */
            ret = effect->handler.drain(sox->effect,
                                        (int32_t *)outsamples->data[0],
                                        &out_nb_samples);
            outsamples->nb_samples = out_nb_samples /
                                     effect->out_signal.channels;
            ff_filter_frame(outlink, outsamples);
            if (ret == SOX_EOF)
                break;
        }
        /* Keep reporting EOF to the caller once the effect is drained. */
        return AVERROR_EOF;
    }

    return ret;
}
/* Run the configured sox effect over one frame.  Works in place when the
 * input frame is writable; otherwise a fresh output frame is allocated.
 * Sample counts passed to sox are in per-channel samples * channels. */
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    SoxContext *sox = inlink->dst->priv;
    AVFrame *outsamples;
    size_t nb_in_samples, nb_out_samples;

    if (av_frame_is_writable(insamples)) {
        outsamples = insamples;
    } else {
        outsamples = ff_get_audio_buffer(inlink, insamples->nb_samples);
        if (!outsamples)
            return AVERROR(ENOMEM);
        outsamples->pts = insamples->pts;
    }

    /* sox counts interleaved samples, i.e. frames * channels. */
    nb_out_samples = nb_in_samples =
        insamples->nb_samples * sox->effect->in_signal.channels;

    // FIXME not handling cases where not all the input is consumed
    sox->effect->handler.flow(sox->effect,
                              (int32_t *)insamples->data[0],
                              (int32_t *)outsamples->data[0],
                              &nb_in_samples, &nb_out_samples);

    /* flow() may emit fewer samples than it consumed. */
    outsamples->nb_samples = nb_out_samples / sox->effect->out_signal.channels;

    if (insamples != outsamples)
        av_frame_free(&insamples);
    return ff_filter_frame(inlink->dst->outputs[0], outsamples);
}
/**
 * On upstream EOF, keep emitting the echo tail: feed silence through the
 * echo state in chunks of up to 2048 samples until fade_out is exhausted.
 *
 * Fixes: echo_samples() was called with frame->data, which only covers the
 * first 8 channel pointers; frame->extended_data is required for layouts
 * with more channels and is what av_samples_set_silence() above already
 * uses.
 *
 * @return 0 on success, a negative AVERROR code otherwise.
 */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioEchoContext *s = ctx->priv;
    int ret;

    ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && !ctx->is_disabled && s->fade_out) {
        int nb_samples = FFMIN(s->fade_out, 2048);
        AVFrame *frame;

        frame = ff_get_audio_buffer(outlink, nb_samples);
        if (!frame)
            return AVERROR(ENOMEM);
        s->fade_out -= nb_samples;

        /* Silence in, echo tail out: flushes the delay lines. */
        av_samples_set_silence(frame->extended_data, 0,
                               frame->nb_samples,
                               outlink->channels,
                               frame->format);

        s->echo_samples(s, s->delayptrs, frame->extended_data,
                        frame->extended_data, frame->nb_samples,
                        outlink->channels);

        frame->pts = s->next_pts;
        if (s->next_pts != AV_NOPTS_VALUE)
            s->next_pts += av_rescale_q(nb_samples,
                                        (AVRational){1, outlink->sample_rate},
                                        outlink->time_base);

        return ff_filter_frame(outlink, frame);
    }

    return ret;
}
/* Source-mode request handler: when the plugin has no inputs, synthesize
 * s->nb_samples of output by running the LADSPA plugin directly; in filter
 * mode simply forward the request upstream. */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    LADSPAContext *s = ctx->priv;
    AVFrame *out;
    int64_t elapsed;
    int n;

    /* Filter mode: just pull from the input. */
    if (ctx->nb_inputs)
        return ff_request_frame(ctx->inputs[0]);

    /* Stop once the configured duration has been produced. */
    elapsed = av_rescale(s->pts, AV_TIME_BASE, s->sample_rate);
    if (s->duration >= 0 && elapsed >= s->duration)
        return AVERROR_EOF;

    out = ff_get_audio_buffer(outlink, s->nb_samples);
    if (!out)
        return AVERROR(ENOMEM);

    /* Point each plugin output port at the matching frame plane. */
    for (n = 0; n < s->nb_outputs; n++)
        s->desc->connect_port(s->handles[0], s->opmap[n],
                              (LADSPA_Data*)out->extended_data[n]);

    s->desc->run(s->handles[0], s->nb_samples);

    for (n = 0; n < s->nb_outputcontrols; n++)
        print_ctl_info(ctx, AV_LOG_INFO, s, n, s->ocmap, s->octlv, 1);

    out->sample_rate = s->sample_rate;
    out->pts         = s->pts;
    s->pts          += s->nb_samples;

    return ff_filter_frame(outlink, out);
}
/* Legacy (AVFilterBufferRef API) resample request handler: forward the
 * request upstream and, on EOF, flush whatever lavr still buffers as one
 * final output buffer. */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ResampleContext *s = ctx->priv;
    int ret = avfilter_request_frame(ctx->inputs[0]);

    /* flush the lavr delay buffer */
    if (ret == AVERROR_EOF && s->avr) {
        AVFilterBufferRef *buf;
        /* Worst-case output count for the buffered delay, rounded up. */
        int nb_samples = av_rescale_rnd(avresample_get_delay(s->avr),
                                        outlink->sample_rate,
                                        ctx->inputs[0]->sample_rate,
                                        AV_ROUND_UP);

        if (!nb_samples)
            return ret;

        buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
        if (!buf)
            return AVERROR(ENOMEM);

        /* NULL input drains the resampler's internal FIFO. */
        ret = avresample_convert(s->avr, (void**)buf->extended_data,
                                 buf->linesize[0], nb_samples,
                                 NULL, 0, 0);
        if (ret <= 0) {
            avfilter_unref_buffer(buf);
            return (ret == 0) ? AVERROR_EOF : ret;
        }

        buf->pts = s->next_pts;
        ff_filter_samples(outlink, buf);
        return 0;
    }
    return ret;
}
/**
 * Earwax FIR filter (legacy AVFilterBufferRef API): convolve the stereo
 * int16 input with the earwax taps, carrying NUMTAPS samples of history in
 * the context between calls.
 *
 * Fixes: the ff_get_audio_buffer() result was never checked, so an
 * allocation failure crashed in avfilter_copy_buffer_ref_props().
 *
 * @return 0 on success, a negative AVERROR code otherwise.
 */
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
{
    AVFilterLink *outlink = inlink->dst->outputs[0];
    int16_t *taps, *endin, *in, *out;
    AVFilterBufferRef *outsamples = ff_get_audio_buffer(inlink, AV_PERM_WRITE,
                                                        insamples->audio->nb_samples);
    int ret;

    if (!outsamples) {
        avfilter_unref_buffer(insamples);
        return AVERROR(ENOMEM);
    }
    avfilter_copy_buffer_ref_props(outsamples, insamples);

    taps = ((EarwaxContext *)inlink->dst->priv)->taps;
    out  = (int16_t *)outsamples->data[0];
    in   = (int16_t *)insamples->data[0];

    // copy part of new input and process with saved input
    memcpy(taps+NUMTAPS, in, NUMTAPS * sizeof(*taps));
    out = scalarproduct(taps, taps + NUMTAPS, out);

    // process current input
    /* FIXME: frames shorter than NUMTAPS/2 stereo samples make endin point
     * before `in` and overread; the later AVFrame variant clamps with FFMIN. */
    endin = in + insamples->audio->nb_samples * 2 - NUMTAPS;
    out = scalarproduct(in, endin, out);

    // save part of input for next round
    memcpy(taps, endin, NUMTAPS * sizeof(*taps));

    ret = ff_filter_samples(outlink, outsamples);
    avfilter_unref_buffer(insamples);
    return ret;
}
/* Buffer incoming audio and, for every full analysis window in the FIFO,
 * peek a window, plot its spectrum, and advance by skip_samples. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    ShowFreqsContext *s = ctx->priv;
    AVFrame *win = NULL;
    int ret = 0;

    av_audio_fifo_write(s->fifo, (void **)in->extended_data, in->nb_samples);

    while (av_audio_fifo_size(s->fifo) >= s->win_size) {
        win = ff_get_audio_buffer(inlink, s->win_size);
        if (!win) {
            ret = AVERROR(ENOMEM);
            break;
        }

        win->pts = s->pts;
        s->pts += s->skip_samples;

        /* Peek (not read): overlapping windows share samples. */
        ret = av_audio_fifo_peek(s->fifo, (void **)win->extended_data,
                                 s->win_size);
        if (ret < 0)
            break;

        ret = plot_freqs(inlink, win);
        av_frame_free(&win);
        av_audio_fifo_drain(s->fifo, s->skip_samples);
        if (ret < 0)
            break;
    }

    av_frame_free(&win);
    av_frame_free(&in);
    return ret;
}
/* ReplayGain analysis pass: run the Yule/Butterworth loudness filters over
 * a scratch copy of the frame, accumulate peak and RMS statistics in the
 * context, and forward the ORIGINAL frame untouched. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ReplayGainContext *s = ctx->priv;
    AVFrame *scratch;
    uint32_t slot;

    scratch = ff_get_audio_buffer(inlink, in->nb_samples);
    if (!scratch) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }

    calc_stereo_peak((float *)in->data[0], in->nb_samples, &s->peak);
    yule_filter_stereo_samples(s, (const float *)in->data[0],
                               (float *)scratch->data[0],
                               scratch->nb_samples);
    butter_filter_stereo_samples(s, (float *)scratch->data[0],
                                 scratch->nb_samples);

    /* Bucket the loudness (in 0.01 dB-ish steps) into the histogram. */
    slot = (uint32_t)floor(100 * calc_stereo_rms((float *)scratch->data[0],
                                                 scratch->nb_samples));
    slot = av_clip(slot, 0, HISTOGRAM_SLOTS - 1);
    s->histogram[slot]++;

    av_frame_free(&scratch);
    return ff_filter_frame(outlink, in);
}
/* Convolve one input frame through the FIR across worker threads and emit
 * the result with a running, rescaled timestamp. */
static int fir_frame(AudioFIRContext *s, AVFrame *in, AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    const int nb_jobs = FFMIN(outlink->channels, ff_filter_get_nb_threads(ctx));
    AVFrame *out;

    out = ff_get_audio_buffer(outlink, in->nb_samples);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }

    if (s->pts == AV_NOPTS_VALUE)
        s->pts = in->pts;

    /* Make the input visible to the per-channel worker jobs. */
    s->in[0] = in;
    ctx->internal->execute(ctx, fir_channels, out, NULL, nb_jobs);

    out->pts = s->pts;
    if (s->pts != AV_NOPTS_VALUE)
        s->pts += av_rescale_q(out->nb_samples,
                               (AVRational){1, outlink->sample_rate},
                               outlink->time_base);

    av_frame_free(&in);
    s->in[0] = NULL;

    return ff_filter_frame(outlink, out);
}
/**
 * Emit up to nb_samples of looped audio, reading (peeking) from the FIFO
 * at the current loop position and wrapping/decrementing the loop counter
 * when the end of the stored segment is reached.
 *
 * Fixes: `ret` was returned uninitialized whenever the loop body never ran
 * (s->loop == 0 or nb_samples <= 0); the output frame also leaked when
 * av_audio_fifo_peek_at() failed.
 *
 * @return >= 0 on success, a negative AVERROR code otherwise.
 */
static int push_samples(AVFilterContext *ctx, int nb_samples)
{
    AVFilterLink *outlink = ctx->outputs[0];
    LoopContext *s = ctx->priv;
    AVFrame *out;
    int ret = 0, i = 0;

    while (s->loop != 0 && i < nb_samples) {
        out = ff_get_audio_buffer(outlink,
                                  FFMIN(nb_samples,
                                        s->nb_samples - s->current_sample));
        if (!out)
            return AVERROR(ENOMEM);
        /* Peek so repeated loop passes can re-read the same samples. */
        ret = av_audio_fifo_peek_at(s->fifo, (void **)out->extended_data,
                                    out->nb_samples, s->current_sample);
        if (ret < 0) {
            av_frame_free(&out);
            return ret;
        }
        out->pts = s->pts;
        out->nb_samples = ret;
        s->pts += out->nb_samples;
        i += out->nb_samples;
        s->current_sample += out->nb_samples;

        ret = ff_filter_frame(outlink, out);
        if (ret < 0)
            return ret;

        /* Wrap around at the end of the stored segment; a negative loop
         * count means "repeat forever". */
        if (s->current_sample >= s->nb_samples) {
            s->current_sample = 0;
            if (s->loop > 0)
                s->loop--;
        }
    }

    return ret;
}
/* Single-input compressor: the signal acts as its own sidechain key. */
static int acompressor_filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    SidechainCompressContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    const double *src = (const double *)in->data[0];
    AVFrame *out;

    /* Reuse the input frame when writable, otherwise allocate a copy. */
    if (!av_frame_is_writable(in)) {
        out = ff_get_audio_buffer(inlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    } else {
        out = in;
    }

    compressor(s, src, (double *)out->data[0], src, in->nb_samples,
               s->level_in, s->level_in, inlink, inlink);

    if (out != in)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
/* aloop output-driven handler: while still collecting samples (or once
 * looping is over), pass leftover samples through and pull from upstream;
 * otherwise emit looped samples.  On EOF with loops remaining, keep
 * pushing a second's worth of looped audio per request. */
static int arequest_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    LoopContext *s = ctx->priv;
    int ret = 0;

    if ((!s->size) ||
        (s->nb_samples < s->size) ||
        (s->nb_samples >= s->size && s->loop == 0)) {
        /* Not looping (yet / anymore): drain samples queued to the left of
         * the loop segment, then request more input. */
        int nb_samples = av_audio_fifo_size(s->left);

        if (s->loop == 0 && nb_samples > 0) {
            AVFrame *out;

            out = ff_get_audio_buffer(outlink, nb_samples);
            if (!out)
                return AVERROR(ENOMEM);
            av_audio_fifo_read(s->left, (void **)out->extended_data, nb_samples);
            out->pts = s->pts;
            s->pts += nb_samples;
            ret = ff_filter_frame(outlink, out);
            if (ret < 0)
                return ret;
        }
        ret = ff_request_frame(ctx->inputs[0]);
    } else {
        ret = push_samples(ctx, 1024);
    }

    /* Upstream ended but loops remain: keep generating looped output. */
    if (ret == AVERROR_EOF && s->nb_samples > 0 && s->loop != 0) {
        ret = push_samples(ctx, outlink->sample_rate);
    }

    return ret;
}
/* Earwax FIR filter (AVFrame API).  Convolves the stereo int16 input with
 * the earwax taps while carrying NUMTAPS samples of history across calls.
 * Unlike the older variant, frames shorter than NUMTAPS/2 stereo samples
 * are handled by clamping the copy length and shifting the tap history. */
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterLink *outlink = inlink->dst->outputs[0];
    int16_t *taps, *endin, *in, *out;
    AVFrame *outsamples = ff_get_audio_buffer(inlink, insamples->nb_samples);
    int len;

    if (!outsamples) {
        av_frame_free(&insamples);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(outsamples, insamples);

    taps = ((EarwaxContext *)inlink->dst->priv)->taps;
    out  = (int16_t *)outsamples->data[0];
    in   = (int16_t *)insamples ->data[0];

    /* Interleaved-sample count available, capped at the tap length. */
    len = FFMIN(NUMTAPS, 2*insamples->nb_samples);
    // copy part of new input and process with saved input
    memcpy(taps+NUMTAPS, in, len * sizeof(*taps));
    out = scalarproduct(taps, taps + len, out);

    // process current input
    if (2*insamples->nb_samples >= NUMTAPS ){
        endin = in + insamples->nb_samples * 2 - NUMTAPS;
        scalarproduct(in, endin, out);

        // save part of input for next round
        memcpy(taps, endin, NUMTAPS * sizeof(*taps));
    } else {
        /* Frame shorter than the tap window: slide the history left by the
         * samples just consumed instead of reading past the input. */
        memmove(taps, taps + 2*insamples->nb_samples,
                NUMTAPS * sizeof(*taps));
    }

    av_frame_free(&insamples);
    return ff_filter_frame(outlink, outsamples);
}
/* Resample request handler (AVFrame API): keep requesting upstream frames
 * until one is produced; on EOF, flush the lavr delay buffer as one final
 * output frame. */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ResampleContext *s = ctx->priv;
    AVFrame *frame;
    int nb_samples;
    int ret = 0;

    s->got_output = 0;
    while (ret >= 0 && !s->got_output)
        ret = ff_request_frame(ctx->inputs[0]);

    /* Anything but EOF (or no resampler to drain) is passed through. */
    if (ret != AVERROR_EOF || !s->avr)
        return ret;

    /* flush the lavr delay buffer */
    nb_samples = avresample_get_out_samples(s->avr, 0);
    if (!nb_samples)
        return ret;

    frame = ff_get_audio_buffer(outlink, nb_samples);
    if (!frame)
        return AVERROR(ENOMEM);

    /* NULL input drains the resampler's internal FIFO. */
    ret = avresample_convert(s->avr, frame->extended_data,
                             frame->linesize[0], nb_samples,
                             NULL, 0, 0);
    if (ret <= 0) {
        av_frame_free(&frame);
        return ret ? ret : AVERROR_EOF;
    }

    frame->pts = s->next_pts;
    return ff_filter_frame(outlink, frame);
}
/**
 * silenceremove request handler: forward the request upstream; on EOF
 * while in a copy mode, flush the buffered stop-holdoff samples as one
 * last frame and switch to SILENCE_STOP.
 *
 * Fixes: the flush frame was pushed back into ctx->inputs[0]; filtered
 * frames must go to the output link.
 *
 * @return 0 on success, a negative AVERROR code otherwise.
 */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SilenceRemoveContext *s = ctx->priv;
    int ret;

    ret = ff_request_frame(ctx->inputs[0]);
    if (ret == AVERROR_EOF && (s->mode == SILENCE_COPY_FLUSH ||
                               s->mode == SILENCE_COPY)) {
        /* Remaining interleaved samples held back while watching for the
         * stop-silence threshold. */
        int nbs = s->stop_holdoff_end - s->stop_holdoff_offset;
        if (nbs) {
            AVFrame *frame;

            frame = ff_get_audio_buffer(outlink, nbs / outlink->channels);
            if (!frame)
                return AVERROR(ENOMEM);

            memcpy(frame->data[0], &s->stop_holdoff[s->stop_holdoff_offset],
                   nbs * sizeof(double));
            ret = ff_filter_frame(outlink, frame);
        }
        s->mode = SILENCE_STOP;
    }
    return ret;
}
/* Apply the echo effect to one frame (in place when writable) and record
 * the pts immediately following this frame, used later to time-stamp the
 * fade-out tail emitted by request_frame(). */
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    AudioEchoContext *s = ctx->priv;
    AVFrame *out_frame;
#ifdef IDE_COMPILE
    AVRational tmp;
#endif

    if (av_frame_is_writable(frame)) {
        out_frame = frame;
    } else {
        out_frame = ff_get_audio_buffer(inlink, frame->nb_samples);
        if (!out_frame)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out_frame, frame);
    }

    s->echo_samples(s, s->delayptrs, frame->extended_data,
                    out_frame->extended_data,
                    frame->nb_samples, inlink->channels);

    /* next_pts = end of this frame, in the input link's time base. */
#ifdef IDE_COMPILE
    tmp.num = 1;
    tmp.den = inlink->sample_rate;
    s->next_pts = frame->pts + av_rescale_q(frame->nb_samples, tmp,
                                            inlink->time_base);
#else
    s->next_pts = frame->pts + av_rescale_q(frame->nb_samples,
                                            (AVRational){1, inlink->sample_rate},
                                            inlink->time_base);
#endif

    if (frame != out_frame)
        av_frame_free(&frame);

    return ff_filter_frame(ctx->outputs[0], out_frame);
}
/* asyncts request handler: pull upstream until output is produced; on EOF
 * apply any pending start trimming, then drain the resampler's remaining
 * delay as one final frame. */
static int request_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->src;
    ASyncContext *s = ctx->priv;
    int ret = 0;
    int nb_samples;

    s->got_output = 0;
    while (ret >= 0 && !s->got_output)
        ret = ff_request_frame(ctx->inputs[0]);

    /* flush the fifo */
    if (ret == AVERROR_EOF) {
        if (s->first_pts != AV_NOPTS_VALUE)
            handle_trimming(ctx);

        /* Intentional assignment-in-condition: drain only if delayed
         * samples remain. */
        if (nb_samples = get_delay(s)) {
            AVFrame *buf = ff_get_audio_buffer(link, nb_samples);
            if (!buf)
                return AVERROR(ENOMEM);
            /* NULL input drains the resampler's internal buffer. */
            ret = avresample_convert(s->avr, buf->extended_data,
                                     buf->linesize[0], nb_samples,
                                     NULL, 0, 0);
            if (ret <= 0) {
                av_frame_free(&buf);
                return (ret < 0) ? ret : AVERROR_EOF;
            }

            buf->pts = s->pts;
            return ff_filter_frame(link, buf);
        }
    }

    return ret;
}
/**
 * Run the bs2b crossfeed filter over one frame, in place when the input
 * is writable, otherwise on a freshly allocated copy.
 *
 * Fixes: on ff_get_audio_buffer() failure the input frame was leaked;
 * it is now freed like on every other error path.
 *
 * @return 0 on success, a negative AVERROR code otherwise.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    int ret;
    AVFrame *out_frame;
    Bs2bContext *bs2b = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];

    if (av_frame_is_writable(frame)) {
        out_frame = frame;
    } else {
        out_frame = ff_get_audio_buffer(inlink, frame->nb_samples);
        if (!out_frame) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }
        av_frame_copy(out_frame, frame);
        ret = av_frame_copy_props(out_frame, frame);
        if (ret < 0) {
            av_frame_free(&out_frame);
            av_frame_free(&frame);
            return ret;
        }
    }

    /* bs2b processes the interleaved buffer in place. */
    bs2b->filter(bs2b->bs2bp, out_frame->extended_data[0],
                 out_frame->nb_samples);

    if (frame != out_frame)
        av_frame_free(&frame);

    return ff_filter_frame(outlink, out_frame);
}
/* Audio gate: per sample, derive a detection level across all channels
 * (max or average, optionally squared for RMS-style detection), smooth it
 * with attack/release coefficients into lin_slope, and apply the resulting
 * gain (plus makeup) to every channel. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    AudioGateContext *s = ctx->priv;
    const double *src = (const double *)in->data[0];
    const double makeup = s->makeup;
    const double attack_coeff = s->attack_coeff;
    const double release_coeff = s->release_coeff;
    const double level_in = s->level_in;
    AVFrame *out;
    double *dst;
    int n, c;

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(inlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }
    dst = (double *)out->data[0];

    for (n = 0; n < in->nb_samples; n++,
             src += inlink->channels, dst += inlink->channels) {
        double abs_sample = fabs(src[0]), gain = 1.0;

        /* Pre-gain applied before the gate decision. */
        for (c = 0; c < inlink->channels; c++)
            dst[c] = src[c] * level_in;

        /* Channel linking: peak across channels, or their average. */
        if (s->link == 1) {
            for (c = 1; c < inlink->channels; c++)
                abs_sample = FFMAX(fabs(src[c]), abs_sample);
        } else {
            for (c = 1; c < inlink->channels; c++)
                abs_sample += fabs(src[c]);

            abs_sample /= inlink->channels;
        }

        /* Square for power-style (RMS) detection. */
        if (s->detection)
            abs_sample *= abs_sample;

        /* One-pole envelope follower: attack when rising, release when
         * falling. */
        s->lin_slope += (abs_sample - s->lin_slope) *
                        (abs_sample > s->lin_slope ? attack_coeff : release_coeff);
        if (s->lin_slope > 0.0)
            gain = output_gain(s->lin_slope, s->ratio, s->thres,
                               s->knee, s->knee_start, s->knee_stop,
                               s->lin_knee_stop, s->range);

        for (c = 0; c < inlink->channels; c++)
            dst[c] *= gain * makeup;
    }

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
/**
 * Mix one output chunk: read nb_samples from each active input FIFO into a
 * scratch frame, scale-and-accumulate into a zeroed output frame, then
 * push the mix downstream with a running timestamp.
 */
static int output_frame(AVFilterLink *outlink, int nb_samples)
{
    AVFilterContext *ctx = outlink->src;
    MixContext *s = ctx->priv;
    AVFrame *out_buf, *in_buf;
    int i;

    calculate_scales(s, nb_samples);

    out_buf = ff_get_audio_buffer(outlink, nb_samples);
    if (!out_buf)
        return AVERROR(ENOMEM);

    in_buf = ff_get_audio_buffer(outlink, nb_samples);
    if (!in_buf) {
        av_frame_free(&out_buf);
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < s->nb_inputs; i++) {
        int planes, plane_size, p;

        if (s->input_state[i] != INPUT_ON)
            continue;

        av_audio_fifo_read(s->fifos[i], (void **)in_buf->extended_data,
                           nb_samples);

        /* Planar: one plane per channel; packed: a single interleaved
         * plane covering all channels. */
        planes     = s->planar ? s->nb_channels : 1;
        plane_size = FFALIGN(nb_samples * (s->planar ? 1 : s->nb_channels), 16);

        for (p = 0; p < planes; p++)
            s->fdsp->vector_fmac_scalar((float *)out_buf->extended_data[p],
                                        (float *) in_buf->extended_data[p],
                                        s->input_scale[i], plane_size);
    }
    av_frame_free(&in_buf);

    out_buf->pts = s->next_pts;
    if (s->next_pts != AV_NOPTS_VALUE)
        s->next_pts += nb_samples;

    return ff_filter_frame(outlink, out_buf);
}
/**
 * Legacy (AVFilterBufferRef API) resample input handler: convert the
 * incoming buffer through lavr, recompute output timestamps, and forward
 * the result; pass through unchanged when no resampler is configured.
 *
 * Fixes: ff_get_audio_buffer() was never checked (NULL deref on OOM), and
 * buf_out leaked when avresample_convert() produced no output.
 */
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
    AVFilterContext *ctx = inlink->dst;
    ResampleContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];

    if (s->avr) {
        AVFilterBufferRef *buf_out;
        int delay, nb_samples, ret;

        /* maximum possible samples lavr can output */
        delay      = avresample_get_delay(s->avr);
        nb_samples = av_rescale_rnd(buf->audio->nb_samples + delay,
                                    outlink->sample_rate, inlink->sample_rate,
                                    AV_ROUND_UP);

        buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
        if (!buf_out) {
            avfilter_unref_buffer(buf);
            return;
        }
        ret = avresample_convert(s->avr, (void**)buf_out->extended_data,
                                 buf_out->linesize[0], nb_samples,
                                 (void**)buf->extended_data, buf->linesize[0],
                                 buf->audio->nb_samples);

        av_assert0(!avresample_available(s->avr));

        if (s->next_pts == AV_NOPTS_VALUE) {
            if (buf->pts == AV_NOPTS_VALUE) {
                av_log(ctx, AV_LOG_WARNING, "First timestamp is missing, "
                       "assuming 0.\n");
                s->next_pts = 0;
            } else
                s->next_pts = av_rescale_q(buf->pts, inlink->time_base,
                                           outlink->time_base);
        }

        if (ret > 0) {
            buf_out->audio->nb_samples = ret;
            if (buf->pts != AV_NOPTS_VALUE) {
                /* Shift the output pts back by the resampler delay. */
                buf_out->pts = av_rescale_q(buf->pts, inlink->time_base,
                                            outlink->time_base) -
                               av_rescale(delay, outlink->sample_rate,
                                          inlink->sample_rate);
            } else
                buf_out->pts = s->next_pts;

            s->next_pts = buf_out->pts + buf_out->audio->nb_samples;

            ff_filter_samples(outlink, buf_out);
        } else {
            avfilter_unref_buffer(buf_out);
        }
        avfilter_unref_buffer(buf);
    } else
        ff_filter_samples(outlink, buf);
}
/* Core frame-passing entry point: deliver `frame` to the link's
 * destination pad, copying the frame first when the pad requires a
 * writable frame and the incoming one is shared.  Ownership of `frame`
 * always passes to this function. */
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
{
    int (*filter_frame)(AVFilterLink *, AVFrame *);
    AVFilterPad *dst = link->dstpad;
    AVFrame *out;

    FF_DPRINTF_START(NULL, filter_frame);
    ff_dlog_link(NULL, link, 1);

    if (!(filter_frame = dst->filter_frame))
        filter_frame = default_filter_frame;

    /* copy the frame if needed */
    if (dst->needs_writable && !av_frame_is_writable(frame)) {
        av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");

        switch (link->type) {
        case AVMEDIA_TYPE_VIDEO:
            out = ff_get_video_buffer(link, link->w, link->h);
            break;
        case AVMEDIA_TYPE_AUDIO:
            out = ff_get_audio_buffer(link, frame->nb_samples);
            break;
        default:
            return AVERROR(EINVAL);
        }
        if (!out) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, frame);

        switch (link->type) {
        case AVMEDIA_TYPE_VIDEO:
            av_image_copy(out->data, out->linesize,
                          frame->data, frame->linesize,
                          frame->format, frame->width, frame->height);
            break;
        case AVMEDIA_TYPE_AUDIO:
            av_samples_copy(out->extended_data, frame->extended_data,
                            0, 0, frame->nb_samples,
                            av_get_channel_layout_nb_channels(frame->channel_layout),
                            frame->format);
            break;
        default:
            return AVERROR(EINVAL);
        }

        av_frame_free(&frame);
    } else
        out = frame;

    return filter_frame(link, out);
}
/* Legacy sample-passing entry point with framing support: when the link
 * requests min/max frame sizes, accumulate input into a partial buffer
 * (link->partial_buf) and emit it in framed chunks; otherwise forward the
 * buffer directly.  Ownership of samplesref passes to this function. */
int ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
{
    int insamples = samplesref->audio->nb_samples, inpos = 0, nb_samples;
    AVFilterBufferRef *pbuf = link->partial_buf;
    int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);
    int ret = 0;

    av_assert1(samplesref->format                == link->format);
    av_assert1(samplesref->audio->channel_layout == link->channel_layout);
    av_assert1(samplesref->audio->sample_rate    == link->sample_rate);

    /* Fast path: no framing requested and the buffer already fits. */
    if (!link->min_samples ||
        (!pbuf &&
         insamples >= link->min_samples && insamples <= link->max_samples)) {
        return ff_filter_samples_framed(link, samplesref);
    }

    /* Handle framing (min_samples, max_samples) */
    while (insamples) {
        if (!pbuf) {
            AVRational samples_tb = { 1, link->sample_rate };
            int perms = link->dstpad->min_perms | AV_PERM_WRITE;

            pbuf = ff_get_audio_buffer(link, perms, link->partial_buf_size);
            if (!pbuf) {
                /* Best effort: dropping is preferred over failing here. */
                av_log(link->dst, AV_LOG_WARNING,
                       "Samples dropped due to memory allocation failure.\n");
                return 0;
            }
            avfilter_copy_buffer_ref_props(pbuf, samplesref);
            /* pts of the partial buffer = input pts advanced by the
             * samples already consumed from this input buffer. */
            pbuf->pts = samplesref->pts +
                        av_rescale_q(inpos, samples_tb, link->time_base);
            pbuf->audio->nb_samples = 0;
        }
        nb_samples = FFMIN(insamples,
                           link->partial_buf_size - pbuf->audio->nb_samples);
        av_samples_copy(pbuf->extended_data, samplesref->extended_data,
                        pbuf->audio->nb_samples, inpos,
                        nb_samples, nb_channels, link->format);
        inpos                   += nb_samples;
        insamples               -= nb_samples;
        pbuf->audio->nb_samples += nb_samples;
        /* Emit the partial buffer once it reaches the minimum size. */
        if (pbuf->audio->nb_samples >= link->min_samples) {
            ret = ff_filter_samples_framed(link, pbuf);
            pbuf = NULL;
        }
    }
    avfilter_unref_buffer(samplesref);
    link->partial_buf = pbuf;
    return ret;
}
/**
 * Stereo widening: mix each channel with an inverted, attenuated copy of
 * the other channel plus delayed feedback from a circular buffer.
 *
 * Fixes: in the non-writable branch, `AVFrame *out = ...` declared a NEW
 * local that shadowed the outer `out`, leaving the outer pointer NULL and
 * crashing at the `dst = (float *)out->data[0]` dereference below.
 *
 * @return 0 on success, a negative AVERROR code otherwise.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    StereoWidenContext *s = ctx->priv;
    const float *src = (const float *)in->data[0];
    const float drymix = s->drymix;
    const float crossfeed = s->crossfeed;
    const float feedback = s->feedback;
    AVFrame *out = NULL;
    float *dst;
    int n;

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(inlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }
    dst = (float *)out->data[0];

    for (n = 0; n < in->nb_samples; n++, src += 2, dst += 2) {
        const float left = src[0], right = src[1];
        /* Read point trails the write point by the delay-line length. */
        float *read = s->write + 2;

        if (read > s->buffer + s->length)
            read = s->buffer;

        dst[0] = drymix * left  - crossfeed * right - feedback * read[1];
        dst[1] = drymix * right - crossfeed * left  - feedback * read[0];

        s->write[0] = left;
        s->write[1] = right;

        /* Advance the circular write pointer by one stereo pair. */
        if (s->write == s->buffer + s->length)
            s->write = s->buffer;
        else
            s->write += 2;
    }

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
/* aconvert input handler (legacy void API): convert the incoming buffer
 * with libswresample to the output link's format/layout and forward it.
 * NOTE(review): the ff_get_audio_buffer() result is not checked — a NULL
 * return here would crash in swr_convert(); the void legacy API offers no
 * error path, so confirm whether this is acceptable upstream. */
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamplesref)
{
    AConvertContext *aconvert = inlink->dst->priv;
    const int n = insamplesref->audio->nb_samples;
    AVFilterLink *const outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *outsamplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n);

    swr_convert(aconvert->swr, outsamplesref->data, n,
                (void *)insamplesref->data, n);

    avfilter_copy_buffer_ref_props(outsamplesref, insamplesref);
    /* Props were copied from the input; the layout is the converted one. */
    outsamplesref->audio->channel_layout = outlink->channel_layout;

    ff_filter_samples(outlink, outsamplesref);
    avfilter_unref_buffer(insamplesref);
}