/**
 * Push one input buffer through the lavr resampling context (if present)
 * and forward the converted samples, with rescaled timestamps, downstream.
 * When no resampling context exists the buffer is passed through untouched.
 */
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
    AVFilterContext *ctx = inlink->dst;
    ResampleContext   *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];

    if (s->avr) {
        AVFilterBufferRef *buf_out;
        int delay, nb_samples, ret;

        /* maximum possible samples lavr can output */
        delay      = avresample_get_delay(s->avr);
        nb_samples = av_rescale_rnd(buf->audio->nb_samples + delay,
                                    outlink->sample_rate, inlink->sample_rate,
                                    AV_ROUND_UP);

        buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
        if (!buf_out) {
            /* FIX: allocation was previously unchecked; drop the input
             * instead of dereferencing a NULL buffer. */
            avfilter_unref_buffer(buf);
            return;
        }

        ret = avresample_convert(s->avr, (void**)buf_out->extended_data,
                                 buf_out->linesize[0], nb_samples,
                                 (void**)buf->extended_data, buf->linesize[0],
                                 buf->audio->nb_samples);

        /* the resampler is expected to consume everything it can emit here */
        av_assert0(!avresample_available(s->avr));

        if (s->next_pts == AV_NOPTS_VALUE) {
            if (buf->pts == AV_NOPTS_VALUE) {
                av_log(ctx, AV_LOG_WARNING, "First timestamp is missing, "
                       "assuming 0.\n");
                s->next_pts = 0;
            } else
                s->next_pts = av_rescale_q(buf->pts, inlink->time_base,
                                           outlink->time_base);
        }

        if (ret > 0) {
            buf_out->audio->nb_samples = ret;
            if (buf->pts != AV_NOPTS_VALUE) {
                /* compensate for the resampler delay when stamping output */
                buf_out->pts = av_rescale_q(buf->pts, inlink->time_base,
                                            outlink->time_base) -
                               av_rescale(delay, outlink->sample_rate,
                                          inlink->sample_rate);
            } else
                buf_out->pts = s->next_pts;

            s->next_pts = buf_out->pts + buf_out->audio->nb_samples;

            ff_filter_samples(outlink, buf_out);
        } else {
            /* FIX: no samples produced (or conversion error) — release the
             * output buffer, which previously leaked on this path. */
            avfilter_unref_buffer(buf_out);
        }
        avfilter_unref_buffer(buf);
    } else
        ff_filter_samples(outlink, buf);
}
/**
 * Request a frame from upstream; on EOF, flush any samples still buffered
 * inside the lavr resampling context and send them downstream.
 */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ResampleContext *s   = ctx->priv;
    AVFilterBufferRef *flush_buf;
    int nb_out, ret;

    ret = avfilter_request_frame(ctx->inputs[0]);

    /* only the EOF + active-resampler case needs the flush path */
    if (ret != AVERROR_EOF || !s->avr)
        return ret;

    /* flush the lavr delay buffer */
    nb_out = av_rescale_rnd(avresample_get_delay(s->avr),
                            outlink->sample_rate,
                            ctx->inputs[0]->sample_rate,
                            AV_ROUND_UP);
    if (!nb_out)
        return AVERROR_EOF;

    flush_buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_out);
    if (!flush_buf)
        return AVERROR(ENOMEM);

    ret = avresample_convert(s->avr, (void**)flush_buf->extended_data,
                             flush_buf->linesize[0], nb_out, NULL, 0, 0);
    if (ret <= 0) {
        avfilter_unref_buffer(flush_buf);
        return ret ? ret : AVERROR_EOF;
    }

    flush_buf->pts = s->next_pts;
    ff_filter_samples(outlink, flush_buf);
    return 0;
}
/**
 * Run the earwax FIR taps over the incoming stereo int16 samples, carrying
 * NUMTAPS samples of state across calls, and forward the filtered buffer.
 *
 * Returns the result of ff_filter_samples(), or AVERROR(ENOMEM) on
 * allocation failure.
 */
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
{
    AVFilterLink *outlink = inlink->dst->outputs[0];
    int16_t *taps, *endin, *in, *out;
    AVFilterBufferRef *outsamples = ff_get_audio_buffer(inlink, AV_PERM_WRITE,
                                                        insamples->audio->nb_samples);
    int ret;

    if (!outsamples) {
        /* FIX: the allocation was previously unchecked and would have been
         * dereferenced below on OOM. */
        avfilter_unref_buffer(insamples);
        return AVERROR(ENOMEM);
    }

    avfilter_copy_buffer_ref_props(outsamples, insamples);

    taps = ((EarwaxContext *)inlink->dst->priv)->taps;
    out  = (int16_t *)outsamples->data[0];
    in   = (int16_t *)insamples ->data[0];

    // copy part of new input and process with saved input
    memcpy(taps+NUMTAPS, in, NUMTAPS * sizeof(*taps));
    out = scalarproduct(taps, taps + NUMTAPS, out);

    // process current input
    endin = in + insamples->audio->nb_samples * 2 - NUMTAPS;
    out = scalarproduct(in, endin, out);

    // save part of input for next round
    memcpy(taps, endin, NUMTAPS * sizeof(*taps));

    ret = ff_filter_samples(outlink, outsamples);
    avfilter_unref_buffer(insamples);
    return ret;
}
/**
 * Fan the input buffer out to every output pad as a read-only reference
 * (write permission is stripped so downstream filters cannot clobber the
 * shared data).
 */
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref)
{
    AVFilterContext *ctx = inlink->dst;
    int i;

    for (i = 0; i < ctx->nb_outputs; i++) {
        AVFilterBufferRef *buf_out = avfilter_ref_buffer(samplesref,
                                                         ~AV_PERM_WRITE);
        /* FIX: avfilter_ref_buffer() can fail; previously a NULL reference
         * was forwarded downstream unchecked. */
        if (!buf_out)
            break;
        ff_filter_samples(inlink->dst->outputs[i], buf_out);
    }
}
/**
 * Evaluate the user-supplied PTS expression for one incoming frame (video)
 * or sample buffer (audio), stamp the result on a new reference and forward
 * it downstream.  Updates all expression variables as a side effect.
 *
 * Returns the downstream filter's return value, or AVERROR(ENOMEM) if the
 * buffer reference could not be created.
 */
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
{
    SetPTSContext *setpts = inlink->dst->priv;
    double d;
    AVFilterBufferRef *outpicref = avfilter_ref_buffer(inpicref, ~0);

    if (!outpicref)
        return AVERROR(ENOMEM);

    /* latch STARTPTS/STARTT from the first frame seen */
    if (isnan(setpts->var_values[VAR_STARTPTS])) {
        setpts->var_values[VAR_STARTPTS] = TS2D(inpicref->pts);
        setpts->var_values[VAR_STARTT ] = TS2T(inpicref->pts, inlink->time_base);
    }
    /* expose the current frame's timestamp and byte position */
    setpts->var_values[VAR_PTS       ] = TS2D(inpicref->pts);
    setpts->var_values[VAR_T         ] = TS2T(inpicref->pts, inlink->time_base);
    setpts->var_values[VAR_POS       ] = inpicref->pos == -1 ? NAN : inpicref->pos;

    /* media-type specific variables */
    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        setpts->var_values[VAR_INTERLACED] = inpicref->video->interlaced;
        break;

    case AVMEDIA_TYPE_AUDIO:
        setpts->var_values[VAR_NB_SAMPLES] = inpicref->audio->nb_samples;
        break;
    }

    /* evaluate the expression and stamp the output */
    d = av_expr_eval(setpts->expr, setpts->var_values, NULL);
    outpicref->pts = D2TS(d);

    /* record previous in/out timestamps for the next evaluation
     * (must happen before the debug log below, which prints them) */
    setpts->var_values[VAR_PREV_INPTS ] = TS2D(inpicref ->pts);
    setpts->var_values[VAR_PREV_INT   ] = TS2T(inpicref ->pts, inlink->time_base);
    setpts->var_values[VAR_PREV_OUTPTS] = TS2D(outpicref->pts);
    setpts->var_values[VAR_PREV_OUTT] = TS2T(outpicref->pts, inlink->time_base);

    av_dlog(inlink->dst,
            "n:%"PRId64" interlaced:%d nb_samples:%d nb_consumed_samples:%d "
            "pos:%"PRId64" pts:%"PRId64" t:%f -> pts:%"PRId64" t:%f\n",
            (int64_t)setpts->var_values[VAR_N],
            (int)setpts->var_values[VAR_INTERLACED],
            (int)setpts->var_values[VAR_NB_SAMPLES],
            (int)setpts->var_values[VAR_NB_CONSUMED_SAMPLES],
            (int64_t)setpts->var_values[VAR_POS],
            (int64_t)setpts->var_values[VAR_PREV_INPTS],
            setpts->var_values[VAR_PREV_INT],
            (int64_t)setpts->var_values[VAR_PREV_OUTPTS],
            setpts->var_values[VAR_PREV_OUTT]);

    setpts->var_values[VAR_N] += 1.0;
    if (setpts->type == AVMEDIA_TYPE_AUDIO) {
        setpts->var_values[VAR_NB_CONSUMED_SAMPLES] += inpicref->audio->nb_samples;
        return ff_filter_samples(inlink->dst->outputs[0], outpicref);
    } else
        return ff_start_frame (inlink->dst->outputs[0], outpicref);
}
/**
 * Rescale the buffer's timestamp from the input link's time base to the
 * output link's time base (when they differ) and forward it downstream.
 */
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref)
{
    AVFilterContext *ctx     = inlink->dst;
    AVFilterLink    *outlink = ctx->outputs[0];

    /* nothing to do when both links share the same time base */
    if (av_cmp_q(inlink->time_base, outlink->time_base)) {
        int64_t pts_in = samplesref->pts;

        samplesref->pts = av_rescale_q(pts_in, inlink->time_base,
                                       outlink->time_base);
        av_log(ctx, AV_LOG_DEBUG,
               "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
               inlink ->time_base.num, inlink ->time_base.den, pts_in,
               outlink->time_base.num, outlink->time_base.den,
               samplesref->pts);
    }

    return ff_filter_samples(outlink, samplesref);
}
/**
 * Convert the incoming audio with the filter's libswresample context into a
 * freshly allocated buffer matching the output link's channel layout, then
 * forward it downstream.
 */
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamplesref)
{
    AConvertContext *aconvert = inlink->dst->priv;
    const int n = insamplesref->audio->nb_samples;
    AVFilterLink *const outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *outsamplesref = ff_get_audio_buffer(outlink,
                                                           AV_PERM_WRITE, n);

    if (!outsamplesref) {
        /* FIX: the allocation was previously unchecked; drop the input
         * instead of dereferencing a NULL buffer. */
        avfilter_unref_buffer(insamplesref);
        return;
    }

    swr_convert(aconvert->swr, outsamplesref->data, n,
                (void *)insamplesref->data, n);

    avfilter_copy_buffer_ref_props(outsamplesref, insamplesref);
    outsamplesref->audio->channel_layout = outlink->channel_layout;

    ff_filter_samples(outlink, outsamplesref);
    avfilter_unref_buffer(insamplesref);
}
/**
 * Rescale the buffer's timestamp to the output link's time base when the
 * two links disagree, working on a fresh reference so the original buffer
 * is left untouched, and forward the result downstream.
 *
 * Returns the downstream return value, or AVERROR(ENOMEM) on reference
 * allocation failure.
 */
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterBufferRef *outsamples = insamples;

    if (av_cmp_q(inlink->time_base, outlink->time_base)) {
        outsamples = avfilter_ref_buffer(insamples, ~0);
        if (!outsamples) {
            /* FIX: avfilter_ref_buffer() can fail; previously the NULL
             * result was dereferenced immediately below. */
            avfilter_unref_buffer(insamples);
            return AVERROR(ENOMEM);
        }
        outsamples->pts = av_rescale_q(insamples->pts, inlink->time_base,
                                       outlink->time_base);
        av_log(ctx, AV_LOG_DEBUG,
               "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
               inlink ->time_base.num, inlink ->time_base.den, insamples ->pts,
               outlink->time_base.num, outlink->time_base.den, outsamples->pts);
        avfilter_unref_buffer(insamples);
    }

    return ff_filter_samples(outlink, outsamples);
}
/**
 * Log per-buffer information (timestamps, position, format, channel layout,
 * sample count and per-plane Adler-32 checksums) and pass the buffer
 * through unchanged.
 */
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref)
{
    AVFilterContext *ctx = inlink->dst;
    ShowInfoContext *showinfo = ctx->priv;
    uint32_t plane_checksum[8] = {0}, checksum = 0;
    char chlayout_str[128];
    int plane;
    int linesize = samplesref->audio->nb_samples *
                   av_get_bytes_per_sample(samplesref->format);

    /* interleaved formats pack all channels into plane 0 */
    if (!av_sample_fmt_is_planar(samplesref->format))
        linesize *= av_get_channel_layout_nb_channels(samplesref->audio->channel_layout);

    /* FIX: test the bound BEFORE indexing; the original order
     * (data[plane] && plane < 8) read data[8] out of bounds whenever all
     * eight plane pointers were non-NULL. */
    for (plane = 0; plane < 8 && samplesref->data[plane]; plane++) {
        uint8_t *data = samplesref->data[plane];

        plane_checksum[plane] = av_adler32_update(plane_checksum[plane],
                                                  data, linesize);
        checksum = av_adler32_update(checksum, data, linesize);
    }

    av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str), -1,
                                 samplesref->audio->channel_layout);

    av_log(ctx, AV_LOG_INFO,
           "n:%d pts:%s pts_time:%s pos:%"PRId64" "
           "fmt:%s chlayout:%s nb_samples:%d rate:%d "
           "checksum:%08X plane_checksum[%08X",
           showinfo->frame,
           av_ts2str(samplesref->pts),
           av_ts2timestr(samplesref->pts, &inlink->time_base),
           samplesref->pos,
           av_get_sample_fmt_name(samplesref->format),
           chlayout_str,
           samplesref->audio->nb_samples,
           samplesref->audio->sample_rate,
           checksum, plane_checksum[0]);

    /* same bound-before-index fix as above */
    for (plane = 1; plane < 8 && samplesref->data[plane]; plane++)
        av_log(ctx, AV_LOG_INFO, " %08X", plane_checksum[plane]);
    av_log(ctx, AV_LOG_INFO, "]\n");
    showinfo->frame++;

    ff_filter_samples(inlink->dst->outputs[0], samplesref);
}
/* FIXME: samplesref is same as link->cur_buf. Need to consider removing the redundant parameter. */
/**
 * Default filter_samples callback: copy the input's pts and sample rate onto
 * a freshly allocated output buffer and forward a reference to it.  The
 * sample data itself is whatever the allocator returned (downstream filters
 * using this default are expected to fill it via out_buf).
 */
void ff_default_filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref)
{
    AVFilterLink *outlink = NULL;

    if (inlink->dst->output_count)
        outlink = inlink->dst->outputs[0];

    if (outlink) {
        outlink->out_buf = ff_default_get_audio_buffer(inlink, AV_PERM_WRITE,
                                                       samplesref->audio->nb_samples);
        /* FIX: the allocation was previously unchecked before being
         * dereferenced; on OOM just drop the input below. */
        if (outlink->out_buf) {
            outlink->out_buf->pts                = samplesref->pts;
            outlink->out_buf->audio->sample_rate = samplesref->audio->sample_rate;
            ff_filter_samples(outlink, avfilter_ref_buffer(outlink->out_buf, ~0));
            avfilter_unref_buffer(outlink->out_buf);
            outlink->out_buf = NULL;
        }
    }
    avfilter_unref_buffer(samplesref);
    inlink->cur_buf = NULL;
}
/**
 * Generic frame dispatch: audio buffers go straight to the samples path,
 * video frames are decomposed into the legacy start/slice/end sequence.
 * Any other media type is rejected with AVERROR(EINVAL).
 */
int ff_filter_frame(AVFilterLink *link, AVFilterBufferRef *frame)
{
    int err;

    FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1);
    ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);

    if (link->type == AVMEDIA_TYPE_AUDIO)
        return ff_filter_samples(link, frame);

    if (link->type != AVMEDIA_TYPE_VIDEO)
        return AVERROR(EINVAL);

    /* video: run the three-stage legacy protocol, stopping at the
     * first failure */
    err = ff_start_frame(link, frame);
    if (err >= 0)
        err = ff_draw_slice(link, 0, frame->video->h, 1);
    if (err >= 0)
        err = ff_end_frame(link);
    return err;
}
/**
 * Distribute the input buffer to every output pad as a read-only reference,
 * then release the original.  Stops at the first reference-allocation or
 * downstream failure and returns that error.
 */
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref)
{
    AVFilterContext *ctx = inlink->dst;
    int ret = 0, i;

    for (i = 0; i < ctx->nb_outputs && ret >= 0; i++) {
        /* strip write permission so outputs cannot modify shared data */
        AVFilterBufferRef *ref = avfilter_ref_buffer(samplesref, ~AV_PERM_WRITE);

        ret = ref ? ff_filter_samples(ctx->outputs[i], ref)
                  : AVERROR(ENOMEM);
    }

    avfilter_unref_buffer(samplesref);
    return ret;
}
/**
 * Log per-buffer information (frame counter, timestamps, format, channel
 * layout, rate, sample count and per-plane Adler-32 checksums) and pass the
 * buffer through to the first output unchanged.
 */
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
    AVFilterContext *ctx = inlink->dst;
    AShowInfoContext *s = ctx->priv;
    char chlayout_str[128];
    uint32_t checksum = 0;
    int channels    = av_get_channel_layout_nb_channels(buf->audio->channel_layout);
    int planar      = av_sample_fmt_is_planar(buf->format);
    /* interleaved data packs all channels into one plane */
    int block_align = av_get_bytes_per_sample(buf->format) * (planar ? 1 : channels);
    int data_size   = buf->audio->nb_samples * block_align;
    int planes      = planar ? channels : 1;
    int i;

    /* per-plane checksums plus one combined checksum over all planes;
     * for i == 0 the combined checksum is seeded from plane 0's value */
    for (i = 0; i < planes; i++) {
        uint8_t *data = buf->extended_data[i];

        s->plane_checksums[i] = av_adler32_update(0, data, data_size);
        checksum = i ? av_adler32_update(checksum, data, data_size) :
                       s->plane_checksums[0];
    }

    av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str), -1,
                                 buf->audio->channel_layout);

    av_log(ctx, AV_LOG_INFO,
           "n:%"PRIu64" pts:%"PRId64" pts_time:%f "
           "fmt:%s chlayout:%s rate:%d nb_samples:%d "
           "checksum:%08X ",
           s->frame, buf->pts, buf->pts * av_q2d(inlink->time_base),
           av_get_sample_fmt_name(buf->format), chlayout_str,
           buf->audio->sample_rate, buf->audio->nb_samples, checksum);

    av_log(ctx, AV_LOG_INFO, "plane_checksums: [ ");
    for (i = 0; i < planes; i++)
        av_log(ctx, AV_LOG_INFO, "%08X ", s->plane_checksums[i]);
    av_log(ctx, AV_LOG_INFO, "]\n");

    s->frame++;
    return ff_filter_samples(inlink->dst->outputs[0], buf);
}
/**
 * Pop the oldest queued buffer for output pad @out_id, update the per-stream
 * expression variables (buffer count, sample count, stream time) and forward
 * the buffer downstream.  Also decrements the pending-request counter for
 * that pad.
 *
 * Returns the downstream ff_filter_samples() result.
 */
static int send_out(AVFilterContext *ctx, int out_id)
{
    AStreamSyncContext *as = ctx->priv;
    struct buf_queue *queue = &as->queue[out_id];
    AVFilterBufferRef *buf = queue->buf[queue->tail];
    int ret;

    queue->buf[queue->tail] = NULL;
    as->var_values[VAR_B1 + out_id]++;                          /* buffers sent */
    as->var_values[VAR_S1 + out_id] += buf->audio->nb_samples;  /* samples sent */
    /* resynchronize the stream clock on a real timestamp when available */
    if (buf->pts != AV_NOPTS_VALUE)
        as->var_values[VAR_T1 + out_id] =
            av_q2d(ctx->outputs[out_id]->time_base) * buf->pts;
    /* then always advance it by this buffer's duration */
    as->var_values[VAR_T1 + out_id] += buf->audio->nb_samples /
                                   (double)ctx->inputs[out_id]->sample_rate;
    ret = ff_filter_samples(ctx->outputs[out_id], buf);
    queue->nb--;
    queue->tail = (queue->tail + 1) % QUEUE_SIZE;  /* circular queue advance */
    if (as->req[out_id])
        as->req[out_id]--;
    return ret;
}
/**
 * FIFO request_frame: ensure at least one buffer is queued (pulling from the
 * input if needed), then hand the head of the queue to the output using the
 * protocol appropriate for the link's media type.
 */
static int request_frame(AVFilterLink *outlink)
{
    FifoContext *fifo = outlink->src->priv;
    int ret = 0;

    /* refill the queue from upstream if it is empty */
    if (!fifo->root.next) {
        if ((ret = ff_request_frame(outlink->src->inputs[0])) < 0)
            return ret;
        av_assert0(fifo->root.next);
    }

    /* by doing this, we give ownership of the reference to the next filter,
     * so we don't have to worry about dereferencing it ourselves. */
    switch (outlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        if ((ret = ff_start_frame(outlink, fifo->root.next->buf)) < 0 ||
            (ret = ff_draw_slice(outlink, 0, outlink->h, 1)) < 0 ||
            (ret = ff_end_frame(outlink)) < 0)
            return ret;

        queue_pop(fifo);
        break;
    case AVMEDIA_TYPE_AUDIO:
        if (outlink->request_samples) {
            /* downstream wants a fixed sample count — assemble it */
            return return_audio_frame(outlink->src);
        } else {
            ret = ff_filter_samples(outlink, fifo->root.next->buf);
            queue_pop(fifo);
        }
        break;
    default:
        return AVERROR(EINVAL);
    }

    return ret;
}
/**
 * Emit exactly link->request_samples samples from the FIFO.
 *
 * Fast path: if no partial buffer is pending, the head buffer is large
 * enough and sufficiently aligned, send it (or a truncated read-only
 * reference of it) directly.  Slow path: accumulate samples from successive
 * queued buffers into s->buf_out, pulling more data from upstream as needed,
 * and pad the tail with silence on EOF.
 *
 * Returns the downstream ff_filter_samples() result, AVERROR(ENOMEM) on
 * allocation failure, or AVERROR(EINVAL) if request_samples changed while a
 * partial buffer was pending.
 */
static int return_audio_frame(AVFilterContext *ctx)
{
    AVFilterLink *link = ctx->outputs[0];
    FifoContext *s = ctx->priv;
    AVFilterBufferRef *head = s->root.next->buf;
    AVFilterBufferRef *buf_out;
    int ret;

    /* fast path: head buffer can satisfy the request on its own */
    if (!s->buf_out &&
        head->audio->nb_samples >= link->request_samples &&
        calc_ptr_alignment(head) >= 32) {
        if (head->audio->nb_samples == link->request_samples) {
            /* exact fit: hand over the head buffer itself */
            buf_out = head;
            queue_pop(s);
        } else {
            /* larger than requested: send a truncated reference and keep
             * the remainder queued via buffer_offset() */
            buf_out = avfilter_ref_buffer(head, AV_PERM_READ);
            if (!buf_out)
                return AVERROR(ENOMEM);

            buf_out->audio->nb_samples = link->request_samples;
            buffer_offset(link, head, link->request_samples);
        }
    } else {
        /* slow path: gather samples into an accumulation buffer */
        int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);

        if (!s->buf_out) {
            s->buf_out = ff_get_audio_buffer(link, AV_PERM_WRITE,
                                             link->request_samples);
            if (!s->buf_out)
                return AVERROR(ENOMEM);

            s->buf_out->audio->nb_samples = 0;
            s->buf_out->pts               = head->pts;
            s->allocated_samples          = link->request_samples;
        } else if (link->request_samples != s->allocated_samples) {
            /* the pending partial buffer was sized for a different request */
            av_log(ctx, AV_LOG_ERROR, "request_samples changed before the "
                   "buffer was returned.\n");
            return AVERROR(EINVAL);
        }

        while (s->buf_out->audio->nb_samples < s->allocated_samples) {
            int len = FFMIN(s->allocated_samples - s->buf_out->audio->nb_samples,
                            head->audio->nb_samples);

            av_samples_copy(s->buf_out->extended_data, head->extended_data,
                            s->buf_out->audio->nb_samples, 0, len, nb_channels,
                            link->format);
            s->buf_out->audio->nb_samples += len;

            if (len == head->audio->nb_samples) {
                /* head fully consumed: drop it and pull the next buffer */
                avfilter_unref_buffer(head);
                queue_pop(s);

                if (!s->root.next &&
                    (ret = ff_request_frame(ctx->inputs[0])) < 0) {
                    if (ret == AVERROR_EOF) {
                        /* stream ended: pad the remainder with silence */
                        av_samples_set_silence(s->buf_out->extended_data,
                                               s->buf_out->audio->nb_samples,
                                               s->allocated_samples -
                                               s->buf_out->audio->nb_samples,
                                               nb_channels, link->format);
                        s->buf_out->audio->nb_samples = s->allocated_samples;
                        break;
                    }
                    return ret;
                }
                head = s->root.next->buf;
            } else {
                /* partial consume: advance the head buffer in place */
                buffer_offset(link, head, len);
            }
        }
        buf_out = s->buf_out;
        s->buf_out = NULL;
    }
    return ff_filter_samples(link, buf_out);
}
/**
 * Pass-through callback: forward the buffer unchanged to the destination
 * filter's first output.
 */
void ff_null_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
{
    AVFilterLink *outlink = link->dst->outputs[0];

    ff_filter_samples(outlink, samplesref);
}
/**
 * Default callback: hand the buffer straight to the destination filter's
 * first output without touching it.
 */
static void default_filter_samples(AVFilterLink *link,
                                   AVFilterBufferRef *samplesref)
{
    AVFilterLink *next = link->dst->outputs[0];

    ff_filter_samples(next, samplesref);
}