Example #1
static inline double getpix(void *priv, double x, double y, int plane)
{
    int xi, yi;
    GEQContext *geq = priv;
    AVFrame *picref = geq->picref;
    const uint8_t *src = picref->data[plane];
    int linesize = picref->linesize[plane];
    const int w = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(picref->width,  geq->hsub) : picref->width;
    const int h = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(picref->height, geq->vsub) : picref->height;

    if (!src)
        return 0;

    xi = x = av_clipf(x, 0, w - 2);
    yi = y = av_clipf(y, 0, h - 2);

    x -= xi;
    y -= yi;

    if (geq->bps > 8) {
        const uint16_t *src16 = (const uint16_t*)src;
        linesize /= 2;

        return (1-y)*((1-x)*src16[xi +  yi    * linesize] + x*src16[xi + 1 +  yi    * linesize])
              +   y *((1-x)*src16[xi + (yi+1) * linesize] + x*src16[xi + 1 + (yi+1) * linesize]);
    } else {
        return (1-y)*((1-x)*src[xi +  yi    * linesize] + x*src[xi + 1 +  yi    * linesize])
              +   y *((1-x)*src[xi + (yi+1) * linesize] + x*src[xi + 1 + (yi+1) * linesize]);
    }
}
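Example #1 clamps the sampling point to [0, w-2] x [0, h-2] before blending, so the 2x2 neighborhood (xi, yi)..(xi+1, yi+1) never leaves the plane. A minimal self-contained sketch of the same pattern, with a local clipf() standing in for FFmpeg's av_clipf() and a bare byte array instead of the AVFrame plumbing:

#include <stdio.h>

/* Local stand-in for FFmpeg's av_clipf(): clamp v into [lo, hi]. */
static float clipf(float v, float lo, float hi)
{
    return v < lo ? lo : v > hi ? hi : v;
}

/* Bilinear sample of an 8-bit plane, coordinates clamped as in getpix(). */
static double sample_bilinear(const unsigned char *src, int linesize,
                              int w, int h, double x, double y)
{
    int xi, yi;

    xi = x = clipf(x, 0, w - 2);
    yi = y = clipf(y, 0, h - 2);
    x -= xi;   /* the fractional parts become the blend weights */
    y -= yi;

    return (1 - y) * ((1 - x) * src[xi +  yi      * linesize] + x * src[xi + 1 +  yi      * linesize])
         +      y  * ((1 - x) * src[xi + (yi + 1) * linesize] + x * src[xi + 1 + (yi + 1) * linesize]);
}

int main(void)
{
    const unsigned char img[9] = { 0, 100, 200,
                                   0, 100, 200,
                                   0, 100, 200 };   /* 3x3 horizontal ramp */
    printf("%.1f\n", sample_bilinear(img, 3, 3, 3, 0.5, 0.5)); /* 50.0 */
    return 0;
}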
Example #2
File: vo_xv.c Project: hroncok/mpv
static int control(struct vo *vo, uint32_t request, void *data)
{
    struct xvctx *ctx = vo->priv;
    switch (request) {
    case VOCTRL_GET_PANSCAN:
        return VO_TRUE;
    case VOCTRL_SET_PANSCAN:
        resize(vo);
        return VO_TRUE;
    case VOCTRL_SET_EQUALIZER: {
        vo->want_redraw = true;
        struct voctrl_set_equalizer_args *args = data;
        return xv_set_eq(vo, ctx->xv_port, args->name, args->value);
    }
    case VOCTRL_GET_EQUALIZER: {
        struct voctrl_get_equalizer_args *args = data;
        return xv_get_eq(vo, ctx->xv_port, args->name, args->valueptr);
    }
    case VOCTRL_SET_YUV_COLORSPACE:;
        struct mp_csp_details* given_cspc = data;
        int is_709 = given_cspc->format == MP_CSP_BT_709;
        xv_set_eq(vo, ctx->xv_port, "bt_709", is_709 * 200 - 100);
        read_xv_csp(vo);
        vo->want_redraw = true;
        return true;
    case VOCTRL_GET_YUV_COLORSPACE:;
        struct mp_csp_details* cspc = data;
        read_xv_csp(vo);
        *cspc = ctx->cached_csp;
        return true;
    case VOCTRL_REDRAW_FRAME:
        redraw_frame(vo);
        return true;
    case VOCTRL_SCREENSHOT: {
        struct voctrl_screenshot_args *args = data;
        args->out_image = get_screenshot(vo);
        return true;
    }
    case VOCTRL_WINDOW_TO_OSD_COORDS: {
        float *c = data;
        struct mp_rect *src = &ctx->src_rect;
        struct mp_rect *dst = &ctx->dst_rect;
        c[0] = av_clipf(c[0], dst->x0, dst->x1) - dst->x0;
        c[1] = av_clipf(c[1], dst->y0, dst->y1) - dst->y0;
        c[0] = c[0] / (dst->x1 - dst->x0) * (src->x1 - src->x0) + src->x0;
        c[1] = c[1] / (dst->y1 - dst->y0) * (src->y1 - src->y0) + src->y0;
        return VO_TRUE;
    }
    }
    int events = 0;
    int r = vo_x11_control(vo, &events, request, data);
    if (events & (VO_EVENT_EXPOSE | VO_EVENT_RESIZE))
        resize(vo);
    return r;
}
Example #3
int avfilter_transform(const uint8_t *src, uint8_t *dst,
                        int src_stride, int dst_stride,
                        int width, int height, const float *matrix,
                        enum InterpolateMethod interpolate,
                        enum FillMethod fill)
{
    int x, y;
    float x_s, y_s;
    uint8_t def = 0;
    uint8_t (*func)(float, float, const uint8_t *, int, int, int, uint8_t) = NULL;

    switch(interpolate) {
        case INTERPOLATE_NEAREST:
            func = interpolate_nearest;
            break;
        case INTERPOLATE_BILINEAR:
            func = interpolate_bilinear;
            break;
        case INTERPOLATE_BIQUADRATIC:
            func = interpolate_biquadratic;
            break;
        default:
            return AVERROR(EINVAL);
    }

    for (y = 0; y < height; y++) {
        for(x = 0; x < width; x++) {
            x_s = x * matrix[0] + y * matrix[1] + matrix[2];
            y_s = x * matrix[3] + y * matrix[4] + matrix[5];

            switch(fill) {
                case FILL_ORIGINAL:
                    def = src[y * src_stride + x];
                    break;
                case FILL_CLAMP:
                    y_s = av_clipf(y_s, 0, height - 1);
                    x_s = av_clipf(x_s, 0, width - 1);
                    def = src[(int)y_s * src_stride + (int)x_s];
                    break;
                case FILL_MIRROR:
                    x_s = mirror(x_s,  width-1);
                    y_s = mirror(y_s, height-1);

                    av_assert2(x_s >= 0 && y_s >= 0);
                    av_assert2(x_s < width && y_s < height);
                    def = src[(int)y_s * src_stride + (int)x_s];
            }

            dst[y * dst_stride + x] = func(x_s, y_s, src, width, height, src_stride, def);
        }
    }
    return 0;
}
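The FILL_MIRROR branch calls a mirror() helper that is not part of the excerpt. A plausible stand-in (an assumption, not the actual libavfilter implementation) folds a coordinate back into [0, max] by reflecting with period 2*max, which satisfies the asserts above:

#include <math.h>
#include <stdio.h>

/* Hypothetical mirror(): reflect v into [0, max], e.g. mirror(-1, 4) == 1,
 * mirror(5, 4) == 3, mirror(9, 4) == 1. */
static float mirror(float v, float max)
{
    float p = fmodf(fabsf(v), 2 * max);   /* fold into one period */
    return p <= max ? p : 2 * max - p;    /* reflect the second half */
}

int main(void)
{
    printf("%g %g %g\n", mirror(-1, 4), mirror(5, 4), mirror(9, 4));
    /* prints: 1 3 1 */
    return 0;
}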
Example #4
static double get_scene_score(AVFilterContext *ctx, AVFilterBufferRef *picref)
{
    double ret = 0;
    SelectContext *select = ctx->priv;
    AVFilterBufferRef *prev_picref = select->prev_picref;

    if (prev_picref &&
        picref->video->h    == prev_picref->video->h &&
        picref->video->w    == prev_picref->video->w &&
        picref->linesize[0] == prev_picref->linesize[0]) {
        int x, y;
        int64_t sad;
        double mafd, diff;
        uint8_t *p1 =      picref->data[0];
        uint8_t *p2 = prev_picref->data[0];
        const int linesize = picref->linesize[0];

        for (sad = y = 0; y < picref->video->h; y += 8)
            for (x = 0; x < linesize; x += 8)
                sad += select->c.sad[1](select,
                                        p1 + y * linesize + x,
                                        p2 + y * linesize + x,
                                        linesize, 8);
        emms_c();
        mafd = sad / (picref->video->h * picref->video->w * 3);
        diff = fabs(mafd - select->prev_mafd);
        ret  = av_clipf(FFMIN(mafd, diff) / 100., 0, 1);
        select->prev_mafd = mafd;
        avfilter_unref_buffer(prev_picref);
    }
    select->prev_picref = avfilter_ref_buffer(picref, ~0);
    return ret;
}
Example #5
static double get_scene_score(AVFilterContext *ctx, AVFrame *frame)
{
    double ret = 0;
    SelectContext *select = ctx->priv;
    AVFrame *prev_picref = select->prev_picref;

    if (prev_picref &&
        frame->height == prev_picref->height &&
        frame->width  == prev_picref->width) {
        int x, y, nb_sad = 0;
        int64_t sad = 0;
        double mafd, diff;
        uint8_t *p1 =      frame->data[0];
        uint8_t *p2 = prev_picref->data[0];
        const int p1_linesize =       frame->linesize[0];
        const int p2_linesize = prev_picref->linesize[0];

        for (y = 0; y < frame->height - 7; y += 8) {
            for (x = 0; x < frame->width*3 - 7; x += 8) {
                sad += select->sad(p1 + x, p1_linesize, p2 + x, p2_linesize);
                nb_sad += 8 * 8;
            }
            p1 += 8 * p1_linesize;
            p2 += 8 * p2_linesize;
        }
        emms_c();
        mafd = nb_sad ? (double)sad / nb_sad : 0;
        diff = fabs(mafd - select->prev_mafd);
        ret  = av_clipf(FFMIN(mafd, diff) / 100., 0, 1);
        select->prev_mafd = mafd;
        av_frame_free(&prev_picref);
    }
    select->prev_picref = av_frame_clone(frame);
    return ret;
}
Example #6
int64_t swr_next_pts(struct SwrContext *s, int64_t pts){
    if(pts == INT64_MIN)
        return s->outpts;

    if (s->firstpts == AV_NOPTS_VALUE)
        s->outpts = s->firstpts = pts;

    if(s->min_compensation >= FLT_MAX) {
        return (s->outpts = pts - swr_get_delay(s, s->in_sample_rate * (int64_t)s->out_sample_rate));
    } else {
        int64_t delta = pts - swr_get_delay(s, s->in_sample_rate * (int64_t)s->out_sample_rate) - s->outpts + s->drop_output*(int64_t)s->in_sample_rate;
        double fdelta = delta /(double)(s->in_sample_rate * (int64_t)s->out_sample_rate);

        if(fabs(fdelta) > s->min_compensation) {
            if(s->outpts == s->firstpts || fabs(fdelta) > s->min_hard_compensation){
                int ret;
                if(delta > 0) ret = swr_inject_silence(s,  delta / s->out_sample_rate);
                else          ret = swr_drop_output   (s, -delta / s-> in_sample_rate);
                if(ret<0){
                    av_log(s, AV_LOG_ERROR, "Failed to compensate for timestamp delta of %f\n", fdelta);
                }
            } else if(s->soft_compensation_duration && s->max_soft_compensation) {
                int duration = s->out_sample_rate * s->soft_compensation_duration;
                double max_soft_compensation = s->max_soft_compensation / (s->max_soft_compensation < 0 ? -s->in_sample_rate : 1);
                int comp = av_clipf(fdelta, -max_soft_compensation, max_soft_compensation) * duration ;
                av_log(s, AV_LOG_VERBOSE, "compensating audio timestamp drift:%f compensation:%d in:%d\n", fdelta, comp, duration);
                swr_set_compensation(s, comp, duration);
            }
        }

        return s->outpts;
    }
}
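In the soft branch above, the measured drift fdelta is clamped to ±max_soft_compensation and the correction is spread over a duration's worth of output samples. A toy computation with made-up numbers (all names and constants here are illustrative, not libswresample's):

#include <math.h>
#include <stdio.h>

static float clipf(float v, float lo, float hi) /* av_clipf stand-in */
{
    return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
    /* Illustrative only: half a second of measured drift, soft
     * compensation capped at 1%, spread over one second of 48 kHz output. */
    double fdelta   = 0.5;        /* drift in seconds */
    double max_soft = 0.01;       /* max_soft_compensation analogue */
    int    duration = 48000;      /* samples to spread the correction over */
    int    comp     = lrintf(clipf(fdelta, -max_soft, max_soft) * duration);

    printf("compensate %d samples over %d samples\n", comp, duration);
    /* 0.5 clipped to 0.01 -> a 480-sample adjustment */
    return 0;
}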
Example #7
static double get_scene_score(AVFilterContext *ctx, AVFrame *crnt, AVFrame *next)
{
    FrameRateContext *s = ctx->priv;
    double ret = 0;

    ff_dlog(ctx, "get_scene_score()\n");

    if (crnt->height == next->height &&
        crnt->width  == next->width) {
        int64_t sad;
        double mafd, diff;

        ff_dlog(ctx, "get_scene_score() process\n");
        if (s->bitdepth == 8)
            sad = scene_sad8(s, crnt->data[0], crnt->linesize[0], next->data[0], next->linesize[0], crnt->width, crnt->height);
        else
            sad = scene_sad16(s, (const uint16_t*)crnt->data[0], crnt->linesize[0] / 2, (const uint16_t*)next->data[0], next->linesize[0] / 2, crnt->width, crnt->height);

        mafd = (double)sad * 100.0 / FFMAX(1, (crnt->height & ~7) * (crnt->width & ~7)) / (1 << s->bitdepth);
        diff = fabs(mafd - s->prev_mafd);
        ret  = av_clipf(FFMIN(mafd, diff), 0, 100.0);
        s->prev_mafd = mafd;
    }
    ff_dlog(ctx, "get_scene_score() result is:%f\n", ret);
    return ret;
}
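Examples #4, #5, #7 (and #23 below) share one technique: a sum of absolute differences (SAD) between successive frames is normalized into a mean absolute frame difference (MAFD), and the scene score is min(mafd, |mafd - prev_mafd|), clamped to the score range. A plain-C sketch of Example #7's 8-bit path, with scene_sad8() replaced by a naive loop and the bit depth fixed at 8:

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

static float clipf(float v, float lo, float hi) /* av_clipf stand-in */
{
    return v < lo ? lo : v > hi ? hi : v;
}

/* Naive stand-in for scene_sad8(): SAD of two 8-bit frames. */
static long long sad8(const unsigned char *a, int a_stride,
                      const unsigned char *b, int b_stride, int w, int h)
{
    long long sad = 0;
    for (int y = 0; y < h; y++)
        for (int x = 0; x < w; x++)
            sad += abs(a[y * a_stride + x] - b[y * b_stride + x]);
    return sad;
}

int main(void)
{
    unsigned char f1[8 * 8], f2[8 * 8];
    double prev_mafd = 0.0;

    for (int i = 0; i < 64; i++) { f1[i] = 16; f2[i] = 80; } /* hard cut */

    long long sad  = sad8(f1, 8, f2, 8, 8, 8);
    double    mafd = (double)sad * 100.0 / (8 * 8) / (1 << 8);
    double    diff = fabs(mafd - prev_mafd);
    double    ret  = clipf(fmin(mafd, diff), 0, 100.0);

    printf("scene score: %f\n", ret);   /* 64*64*100 / (64*256) = 25.0 */
    return 0;
}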
Example #8
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowVolumeContext *s = ctx->priv;
    int c, i, j, k;
    double values[VAR_VARS_NB];

    if (!s->out || s->out->width  != outlink->w ||
                   s->out->height != outlink->h) {
        av_frame_free(&s->out);
        s->out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!s->out) {
            av_frame_free(&insamples);
            return AVERROR(ENOMEM);
        }

        for (i = 0; i < outlink->h; i++)
            memset(s->out->data[0] + i * s->out->linesize[0], 0, outlink->w * 4);
    }
    s->out->pts = insamples->pts;

    for (j = 0; j < outlink->h; j++) {
        uint8_t *dst = s->out->data[0] + j * s->out->linesize[0];
        for (k = 0; k < s->w; k++) {
            dst[k * 4 + 0] = FFMAX(dst[k * 4 + 0] - s->f, 0);
            dst[k * 4 + 1] = FFMAX(dst[k * 4 + 1] - s->f, 0);
            dst[k * 4 + 2] = FFMAX(dst[k * 4 + 2] - s->f, 0);
            dst[k * 4 + 3] = FFMAX(dst[k * 4 + 3] - s->f, 0);
        }
    }

    for (c = 0; c < inlink->channels; c++) {
        float *src = (float *)insamples->extended_data[c];
        float max = 0;
        int color;

        for (i = 0; i < insamples->nb_samples; i++)
            max = FFMAX(max, src[i]);

        max = av_clipf(max, 0, 1);
        values[VAR_VOLUME] = 20.0 * log(max) / M_LN10;
        color = av_expr_eval(s->c_expr, values, NULL);

        for (j = 0; j < s->h; j++) {
            uint8_t *dst = s->out->data[0] + (c * s->h + c * s->b + j) * s->out->linesize[0];

            for (k = 0; k < s->w * max; k++)
                AV_WN32A(dst + k * 4, color);
        }

        if (s->h >= 8 && s->draw_text)
            drawtext(s->out, 2, c * (s->h + s->b) + (s->h - 8) / 2,
                     av_get_channel_name(av_channel_layout_extract_channel(insamples->channel_layout, c)));
    }

    av_frame_free(&insamples);

    return ff_filter_frame(outlink, av_frame_clone(s->out));
}
Example #9
void avfilter_transform(const uint8_t *src, uint8_t *dst,
                        int src_stride, int dst_stride,
                        int width, int height, const float *matrix,
                        enum InterpolateMethod interpolate,
                        enum FillMethod fill)
{
    int x, y;
    float x_s, y_s;
    uint8_t def = 0;
    uint8_t (*func)(float, float, const uint8_t *, int, int, int, uint8_t) = NULL;

    switch(interpolate) {
        case INTERPOLATE_NEAREST:
            func = interpolate_nearest;
            break;
        case INTERPOLATE_BILINEAR:
            func = interpolate_bilinear;
            break;
        case INTERPOLATE_BIQUADRATIC:
            func = interpolate_biquadratic;
            break;
    }

    for (y = 0; y < height; y++) {
        for(x = 0; x < width; x++) {
            x_s = x * matrix[0] + y * matrix[1] + matrix[2];
            y_s = x * matrix[3] + y * matrix[4] + matrix[5];

            switch(fill) {
                case FILL_ORIGINAL:
                    def = src[y * src_stride + x];
                    break;
                case FILL_CLAMP:
                    y_s = av_clipf(y_s, 0, height - 1);
                    x_s = av_clipf(x_s, 0, width - 1);
                    def = src[(int)y_s * src_stride + (int)x_s];
                    break;
                case FILL_MIRROR:
                    y_s = (y_s < 0) ? -y_s : (y_s >= height) ? (height + height - y_s) : y_s;
                    x_s = (x_s < 0) ? -x_s : (x_s >= width) ? (width + width - x_s) : x_s;
                    def = src[(int)y_s * src_stride + (int)x_s];
            }

            dst[y * dst_stride + x] = func(x_s, y_s, src, width, height, src_stride, def);
        }
    }
}
Example #10
static void decode(RA288Context *ractx, float gain, int cb_coef)
{
    int i, j;
    double sumsum;
    float sum, buffer[5];
    float *block = ractx->sp_block + 36; // Current block

    memmove(ractx->sp_block, ractx->sp_block + 5, 36*sizeof(*ractx->sp_block));

    for (i=0; i < 5; i++) {
        block[i] = 0.;
        for (j=0; j < 36; j++)
            block[i] -= block[i-1-j]*ractx->sp_lpc[j];
    }

    /* block 46 of G.728 spec */
    sum = 32.;
    for (i=0; i < 10; i++)
        sum -= ractx->gain_block[9-i] * ractx->gain_lpc[i];

    /* block 47 of G.728 spec */
    sum = av_clipf(sum, 0, 60);

    /* block 48 of G.728 spec */
    sumsum = exp(sum * 0.1151292546497) * gain; /* pow(10.0,sum/20)*gain */

    for (i=0; i < 5; i++)
        buffer[i] = codetable[cb_coef][i] * sumsum;

    sum = scalar_product_float(buffer, buffer, 5) / 5;

    sum = FFMAX(sum, 1);

    /* shift and store */
    memmove(ractx->gain_block, ractx->gain_block + 1,
            9 * sizeof(*ractx->gain_block));

    ractx->gain_block[9] = 10 * log10(sum) - 32;

    for (i=1; i < 5; i++)
        for (j=i-1; j >= 0; j--)
            buffer[i] -= ractx->sp_lpc[i-j-1] * buffer[j];

    /* output */
    for (i=0; i < 5; i++)
        block[i] = av_clipf(block[i] + buffer[i], -4095, 4095);
}
Example #11
File: cook.c Project: supr/ffmpeg
/**
 * Saturate the output signal and interleave.
 *
 * @param q                 pointer to the COOKContext
 * @param chan              channel to saturate
 * @param out               pointer to the output vector
 */
static void saturate_output_float(COOKContext *q, int chan, float *out)
{
    int j;
    float *output = q->mono_mdct_output + q->samples_per_channel;
    for (j = 0; j < q->samples_per_channel; j++) {
        out[chan + q->nb_channels * j] = av_clipf(output[j], -1.0, 1.0);
    }
}
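The same saturate-and-interleave loop detached from the COOKContext (names here are ad hoc): each channel's decoded samples go to every nb_channels-th slot of the output, clamped to the valid float sample range [-1, 1]:

#include <stdio.h>

static float clipf(float v, float lo, float hi) /* av_clipf stand-in */
{
    return v < lo ? lo : v > hi ? hi : v;
}

static void saturate_interleave(float *out, const float *in,
                                int chan, int nb_channels, int nb_samples)
{
    for (int j = 0; j < nb_samples; j++)
        out[chan + nb_channels * j] = clipf(in[j], -1.0f, 1.0f);
}

int main(void)
{
    const float left[3]  = { 0.5f, 1.7f, -2.0f };   /* overshoots get clipped */
    const float right[3] = { -0.25f, 0.0f, 0.9f };
    float out[6];

    saturate_interleave(out, left,  0, 2, 3);
    saturate_interleave(out, right, 1, 2, 3);
    for (int i = 0; i < 6; i++)
        printf("%g ", out[i]);      /* 0.5 -0.25 1 0 -1 0.9 */
    printf("\n");
    return 0;
}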
Example #12
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ExtraStereoContext *s = ctx->priv;
    const float *src = (const float *)in->data[0];
    const float mult = s->mult;
    AVFrame *out;
    float *dst;
    int n;

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(inlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }
    dst = (float *)out->data[0];

    for (n = 0; n < in->nb_samples; n++) {
        float average, left, right;

        left    = src[n * 2    ];
        right   = src[n * 2 + 1];
        average = (left + right) / 2.;
        left    = average + mult * (left  - average);
        right   = average + mult * (right - average);

        if (s->clip) {
            left  = av_clipf(left,  -1, 1);
            right = av_clipf(right, -1, 1);
        }

        dst[n * 2    ] = left;
        dst[n * 2 + 1] = right;
    }

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
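The widening arithmetic of Example #12 on a single stereo sample: the mid (average) component is kept while the side component is scaled by mult, and with clipping enabled the results are clamped to [-1, 1]:

#include <stdio.h>

static float clipf(float v, float lo, float hi) /* av_clipf stand-in */
{
    return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
    float left = 0.8f, right = 0.2f, mult = 2.5f;
    float average = (left + right) / 2.0f;

    left  = clipf(average + mult * (left  - average), -1, 1);
    right = clipf(average + mult * (right - average), -1, 1);
    printf("%g %g\n", left, right);  /* 0.5 + 2.5*0.3 = 1.25 -> 1, and -0.25 */
    return 0;
}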
Example #13
static void decode(RA288Context *ractx, float gain, int cb_coef)
{
    int i;
    double sumsum;
    float sum, buffer[5];
    float *block = ractx->sp_hist + 70 + 36; // current block
    float *gain_block = ractx->gain_hist + 28;

    memmove(ractx->sp_hist + 70, ractx->sp_hist + 75, 36*sizeof(*block));

    /* block 46 of G.728 spec */
    sum = 32.;
    for (i=0; i < 10; i++)
        sum -= gain_block[9-i] * ractx->gain_lpc[i];

    /* block 47 of G.728 spec */
    sum = av_clipf(sum, 0, 60);

    /* block 48 of G.728 spec */
    /* exp(sum * 0.1151292546497) == pow(10.0,sum/20) */
    sumsum = exp(sum * 0.1151292546497) * gain * (1.0/(1<<23));

    for (i=0; i < 5; i++)
        buffer[i] = codetable[cb_coef][i] * sumsum;

    sum = ff_dot_productf(buffer, buffer, 5) * ((1<<24)/5.);

    sum = FFMAX(sum, 1);

    /* shift and store */
    memmove(gain_block, gain_block + 1, 9 * sizeof(*gain_block));

    gain_block[9] = 10 * log10(sum) - 32;

    ff_celp_lp_synthesis_filterf(block, ractx->sp_lpc, buffer, 5, 36);

    /* output */
    for (i=0; i < 5; i++)
        block[i] = av_clipf(block[i], -4095./4096., 4095./4096.);
}
Example #14
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    CrystalizerContext *s = ctx->priv;
    const float *src = (const float *)in->data[0];
    const float mult = s->mult;
    AVFrame *out;
    float *dst, *prv;
    int n, c;

    if (!s->prev) {
        s->prev = ff_get_audio_buffer(inlink, 1);
        if (!s->prev) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
    }

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(inlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    dst = (float *)out->data[0];
    prv = (float *)s->prev->data[0];

    for (n = 0; n < in->nb_samples; n++) {
        for (c = 0; c < in->channels; c++) {
            float current = src[c];

            dst[c] = current + (current - prv[c]) * mult;
            prv[c] = current;
            if (s->clip) {
                dst[c] = av_clipf(dst[c], -1, 1);
            }
        }
        dst += c;
        src += c;
    }

    if (out != in)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
Example #15
static void method2_int16(af_volnorm_t *s, af_data_t *c)
{
  register int i = 0;
  int16_t *data = (int16_t*)c->audio;	// Audio data
  int len = c->len/2;		// Number of samples
  float curavg = 0.0, newavg, avg = 0.0;
  int tmp, totallen = 0;

  for (i = 0; i < len; i++)
  {
    tmp = data[i];
    curavg += tmp * tmp;
  }
  curavg = sqrt(curavg / (float) len);

  // Evaluate an adequate 'mul' coefficient based on previous state and the
  // current sample levels, etc.
  for (i = 0; i < NSAMPLES; i++)
  {
    avg += s->mem[i].avg * (float)s->mem[i].len;
    totallen += s->mem[i].len;
  }

  if (totallen > MIN_SAMPLE_SIZE)
  {
    avg /= (float)totallen;
    if (avg >= SIL_S16)
    {
	s->mul = s->mid_s16 / avg;
	s->mul = av_clipf(s->mul, MUL_MIN, MUL_MAX);
    }
  }

  // Scale & clamp the samples
  for (i = 0; i < len; i++)
  {
    tmp = s->mul * data[i];
    tmp = av_clip_int16(tmp);
    data[i] = tmp;
  }

  // Evaluation of newavg (not 100% accurate because of values clamping)
  newavg = s->mul * curavg;

  // Stores computed values for future smoothing
  s->mem[s->idx].len = len;
  s->mem[s->idx].avg = newavg;
  s->idx = (s->idx + 1) % NSAMPLES;
}
Example #16
static int libopus_decode(AVCodecContext *avc, void *data,
                          int *got_frame_ptr, AVPacket *pkt)
{
    struct libopus_context *opus = avc->priv_data;
    AVFrame *frame               = data;
    int ret, nb_samples;

    frame->nb_samples = MAX_FRAME_SIZE;
    ret = ff_get_buffer(avc, frame);
    if (ret < 0) {
        av_log(avc, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    if (avc->sample_fmt == AV_SAMPLE_FMT_S16)
        nb_samples = opus_multistream_decode(opus->dec, pkt->data, pkt->size,
                                             (opus_int16 *)frame->data[0],
                                             frame->nb_samples, 0);
    else
        nb_samples = opus_multistream_decode_float(opus->dec, pkt->data, pkt->size,
                                                   (float *)frame->data[0],
                                                   frame->nb_samples, 0);

    if (nb_samples < 0) {
        av_log(avc, AV_LOG_ERROR, "Decoding error: %s\n",
               opus_strerror(nb_samples));
        return ff_opus_error_to_averror(nb_samples);
    }

#ifndef OPUS_SET_GAIN
    {
        int i = avc->channels * nb_samples;
        if (avc->sample_fmt == AV_SAMPLE_FMT_FLT) {
            float *pcm = (float *)frame->data[0];
            for (; i > 0; i--, pcm++)
                *pcm = av_clipf(*pcm * opus->gain.d, -1, 1);
        } else {
            int16_t *pcm = (int16_t *)frame->data[0];
            for (; i > 0; i--, pcm++)
                *pcm = av_clip_int16(((int64_t)opus->gain.i * *pcm) >> 16);
        }
    }
#endif

    frame->nb_samples = nb_samples;
    *got_frame_ptr    = 1;

    return pkt->size;
}
Example #17
/*
 * Calculate the ReplayGain value from the specified loudness histogram;
 * clip to -24 / +64 dB.
 */
static float calc_replaygain(uint32_t *histogram)
{
    uint32_t loud_count = 0, total_windows = 0;
    float gain;
    int i;

    for (i = 0; i < HISTOGRAM_SLOTS; i++)
        total_windows += histogram [i];

    while (i--)
        if ((loud_count += histogram [i]) * 20 >= total_windows)
            break;

    gain = (float)(64.54 - i / 100.0);

    return av_clipf(gain, -24.0, 64.0);
}
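The backwards scan stops once the loudest slots account for 5% of all windows (loud_count * 20 >= total_windows), i.e. it finds the 95th-percentile loudness, converts the slot index to dB via 64.54 - i/100, and clamps to the stated [-24, +64] range. A self-contained version (HISTOGRAM_SLOTS is assumed to be 12000 here; the excerpt does not define it):

#include <stdio.h>

#define HISTOGRAM_SLOTS 12000   /* assumed slot count */

static float clipf(float v, float lo, float hi) /* av_clipf stand-in */
{
    return v < lo ? lo : v > hi ? hi : v;
}

static float replaygain_from_histogram(const unsigned *histogram)
{
    unsigned loud_count = 0, total_windows = 0;
    int i;

    for (i = 0; i < HISTOGRAM_SLOTS; i++)
        total_windows += histogram[i];
    while (i--)      /* walk down from the loudest slot */
        if ((loud_count += histogram[i]) * 20 >= total_windows)
            break;
    return clipf((float)(64.54 - i / 100.0), -24.0f, 64.0f);
}

int main(void)
{
    static unsigned histogram[HISTOGRAM_SLOTS];
    histogram[5000] = 95;   /* 95% of windows at one loudness... */
    histogram[6000] = 5;    /* ...and the loudest 5% a bit higher */
    printf("gain = %.2f dB\n", replaygain_from_histogram(histogram));
    /* the 95th percentile lands in slot 6000 -> 64.54 - 60.00 = 4.54 */
    return 0;
}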
Example #18
static void vector_clipf_c(float *dst, const float *src,
                           float min, float max, int len)
{
    int i;

    if (min < 0 && max > 0) {
        vector_clipf_c_opposite_sign(dst, src, &min, &max, len);
    } else {
        for (i = 0; i < len; i += 8) {
            dst[i]     = av_clipf(src[i], min, max);
            dst[i + 1] = av_clipf(src[i + 1], min, max);
            dst[i + 2] = av_clipf(src[i + 2], min, max);
            dst[i + 3] = av_clipf(src[i + 3], min, max);
            dst[i + 4] = av_clipf(src[i + 4], min, max);
            dst[i + 5] = av_clipf(src[i + 5], min, max);
            dst[i + 6] = av_clipf(src[i + 6], min, max);
            dst[i + 7] = av_clipf(src[i + 7], min, max);
        }
    }
}
Example #19
static void method1_int16(af_volnorm_t *s, af_data_t *c)
{
  register int i = 0;
  int16_t *data = (int16_t*)c->audio;	// Audio data
  int len = c->len/2;		// Number of samples
  float curavg = 0.0, newavg, neededmul;
  int tmp;

  for (i = 0; i < len; i++)
  {
    tmp = data[i];
    curavg += tmp * tmp;
  }
  curavg = sqrt(curavg / (float) len);

  // Evaluate an adequate 'mul' coefficient based on previous state and the
  // current sample levels, etc.

  if (curavg > SIL_S16)
  {
    neededmul = s->mid_s16 / (curavg * s->mul);
    s->mul = (1.0 - SMOOTH_MUL) * s->mul + SMOOTH_MUL * neededmul;

    // clamp the mul coefficient
    s->mul = av_clipf(s->mul, MUL_MIN, MUL_MAX);
  }

  // Scale & clamp the samples
  for (i = 0; i < len; i++)
  {
    tmp = s->mul * data[i];
    tmp = av_clip_int16(tmp);
    data[i] = tmp;
  }

  // Evaluation of newavg (not 100% accurate because of values clamping)
  newavg = s->mul * curavg;

  // Stores computed values for future smoothing
  s->lastavg = (1.0 - SMOOTH_LASTAVG) * s->lastavg + SMOOTH_LASTAVG * newavg;
}
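Example #19's gain update in isolation: compute the block's RMS level, move the running gain one smoothing step toward the gain that would bring the block to the target level, then clamp it. The constants below are stand-ins, not the real af_volnorm values:

#include <math.h>
#include <stdio.h>

static float clipf(float v, float lo, float hi) /* av_clipf stand-in */
{
    return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
    /* Hypothetical stand-ins for the af_volnorm constants. */
    const float SMOOTH_MUL = 0.06f, MUL_MIN = 0.1f, MUL_MAX = 5.0f;
    const float target = 16384.0f;               /* s->mid_s16 analogue */
    short data[4] = { 1000, -2000, 1500, -500 };
    float mul = 1.0f;                            /* running gain */
    float curavg = 0.0f;

    for (int i = 0; i < 4; i++)
        curavg += (float)data[i] * data[i];
    curavg = sqrtf(curavg / 4);                  /* RMS of the block */

    /* Step toward the gain that would reach the target, then clamp. */
    float neededmul = target / (curavg * mul);
    mul = (1.0f - SMOOTH_MUL) * mul + SMOOTH_MUL * neededmul;
    mul = clipf(mul, MUL_MIN, MUL_MAX);

    printf("rms=%.1f mul=%.3f\n", curavg, mul);
    return 0;
}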
Example #20
static void filter_fltp(void **d, void **p, const void **s,
                        int nb_samples, int channels,
                        float mult, int clip)
{
    int n, c;

    for (c = 0; c < channels; c++) {
        const float *src = s[c];
        float *dst = d[c];
        float *prv = p[c];

        for (n = 0; n < nb_samples; n++) {
            float current = src[n];

            dst[n] = current + (current - prv[0]) * mult;
            prv[0] = current;
            if (clip) {
                dst[n] = av_clipf(dst[n], -1, 1);
            }
        }
    }
}
Example #21
static void update_context(VignetteContext *s, AVFilterLink *inlink, AVFrame *frame)
{
    int x, y;
    float *dst = s->fmap;
    int dst_linesize = s->fmap_linesize;

    if (frame) {
        s->var_values[VAR_N]   = inlink->frame_count_out;
        s->var_values[VAR_T]   = TS2T(frame->pts, inlink->time_base);
        s->var_values[VAR_PTS] = TS2D(frame->pts);
    } else {
        s->var_values[VAR_N]   = NAN;
        s->var_values[VAR_T]   = NAN;
        s->var_values[VAR_PTS] = NAN;
    }

    s->angle = av_expr_eval(s->angle_pexpr, s->var_values, NULL);
    s->x0 = av_expr_eval(s->x0_pexpr, s->var_values, NULL);
    s->y0 = av_expr_eval(s->y0_pexpr, s->var_values, NULL);

    if (isnan(s->x0) || isnan(s->y0) || isnan(s->angle))
        s->eval_mode = EVAL_MODE_FRAME;

    s->angle = av_clipf(s->angle, 0, M_PI_2);

    if (s->backward) {
        for (y = 0; y < inlink->h; y++) {
            for (x = 0; x < inlink->w; x++)
                dst[x] = 1. / get_natural_factor(s, x, y);
            dst += dst_linesize;
        }
    } else {
        for (y = 0; y < inlink->h; y++) {
            for (x = 0; x < inlink->w; x++)
                dst[x] = get_natural_factor(s, x, y);
            dst += dst_linesize;
        }
    }
}
Example #22
static void quantize_triangular_ns(DitherContext *c, DitherState *state,
                                   int16_t *dst, const float *src,
                                   int nb_samples)
{
    int i, j;
    float *dither = &state->noise_buf[state->noise_buf_ptr];

    if (state->mute > c->mute_reset_threshold)
        memset(state->dither_a, 0, sizeof(state->dither_a));

    for (i = 0; i < nb_samples; i++) {
        float err = 0;
        float sample = src[i] * S16_SCALE;

        for (j = 0; j < 4; j++) {
            err += c->ns_coef_b[j] * state->dither_b[j] -
                   c->ns_coef_a[j] * state->dither_a[j];
        }
        for (j = 3; j > 0; j--) {
            state->dither_a[j] = state->dither_a[j - 1];
            state->dither_b[j] = state->dither_b[j - 1];
        }
        state->dither_a[0] = err;
        sample -= err;

        if (state->mute > c->mute_dither_threshold) {
            dst[i]             = av_clip_int16(lrintf(sample));
            state->dither_b[0] = 0;
        } else {
            dst[i]             = av_clip_int16(lrintf(sample + dither[i]));
            state->dither_b[0] = av_clipf(dst[i] - sample, -1.5f, 1.5f);
        }

        state->mute++;
        if (src[i])
            state->mute = 0;
    }
}
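Example #22 runs the quantization error through a 4-tap noise-shaping filter and clamps the stored error to ±1.5 LSB. The same error-feedback idea in first-order form (S16_SCALE is assumed to be 32768; the excerpt does not define it):

#include <math.h>
#include <stdio.h>

static float clipf(float v, float lo, float hi) /* av_clipf stand-in */
{
    return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
    const float S16_SCALE = 32768.0f;   /* assumed float -> s16 scale */
    float src[4] = { 0.26f, 0.26f, 0.26f, 0.26f };
    float err = 0.0f;

    for (int i = 0; i < 4; i++) {
        float sample = src[i] * S16_SCALE - err;       /* subtract last error */
        short q      = (short)lrintf(sample);          /* quantize to 16 bit */
        err          = clipf(q - sample, -1.5f, 1.5f); /* store clamped error */
        printf("%d ", q);
    }
    printf("\n");   /* the rounding errors alternate instead of accumulating */
    return 0;
}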
Example #23
static double get_scene_score(AVFilterContext *ctx, AVFrame *crnt, AVFrame *next)
{
    FrameRateContext *s = ctx->priv;
    double ret = 0;

    ff_dlog(ctx, "get_scene_score()\n");

    if (crnt &&
        crnt->height == next->height &&
        crnt->width  == next->width) {
        int x, y;
        int64_t sad;
        double mafd, diff;
        uint8_t *p1 = crnt->data[0];
        uint8_t *p2 = next->data[0];
        const int p1_linesize = crnt->linesize[0];
        const int p2_linesize = next->linesize[0];

        ff_dlog(ctx, "get_scene_score() process\n");

        for (sad = y = 0; y < crnt->height; y += 8) {
            for (x = 0; x < p1_linesize; x += 8) {
                sad += s->sad(p1 + y * p1_linesize + x,
                              p1_linesize,
                              p2 + y * p2_linesize + x,
                              p2_linesize);
            }
        }
        emms_c();
        mafd = sad / (crnt->height * crnt->width * 3);
        diff = fabs(mafd - s->prev_mafd);
        ret  = av_clipf(FFMIN(mafd, diff), 0, 100.0);
        s->prev_mafd = mafd;
    }
    ff_dlog(ctx, "get_scene_score() result is:%f\n", ret);
    return ret;
}
Example #24
static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                            const AVFrame *frame, int *got_packet_ptr)
{
    AACEncContext *s = avctx->priv_data;
    float **samples = s->planar_samples, *samples2, *la, *overlap;
    ChannelElement *cpe;
    SingleChannelElement *sce;
    IndividualChannelStream *ics;
    int i, its, ch, w, chans, tag, start_ch, ret, frame_bits;
    int target_bits, rate_bits, too_many_bits, too_few_bits;
    int ms_mode = 0, is_mode = 0, tns_mode = 0, pred_mode = 0;
    int chan_el_counter[4];
    FFPsyWindowInfo windows[AAC_MAX_CHANNELS];

    if (s->last_frame == 2)
        return 0;

    /* add current frame to queue */
    if (frame) {
        if ((ret = ff_af_queue_add(&s->afq, frame)) < 0)
            return ret;
    }

    copy_input_samples(s, frame);
    if (s->psypp)
        ff_psy_preprocess(s->psypp, s->planar_samples, s->channels);

    if (!avctx->frame_number)
        return 0;

    start_ch = 0;
    for (i = 0; i < s->chan_map[0]; i++) {
        FFPsyWindowInfo* wi = windows + start_ch;
        tag      = s->chan_map[i+1];
        chans    = tag == TYPE_CPE ? 2 : 1;
        cpe      = &s->cpe[i];
        for (ch = 0; ch < chans; ch++) {
            float clip_avoidance_factor;
            sce = &cpe->ch[ch];
            ics = &sce->ics;
            s->cur_channel = start_ch + ch;
            overlap  = &samples[s->cur_channel][0];
            samples2 = overlap + 1024;
            la       = samples2 + (448+64);
            if (!frame)
                la = NULL;
            if (tag == TYPE_LFE) {
                wi[ch].window_type[0] = ONLY_LONG_SEQUENCE;
                wi[ch].window_shape   = 0;
                wi[ch].num_windows    = 1;
                wi[ch].grouping[0]    = 1;

                /* Only the lowest 12 coefficients are used in an LFE channel.
                 * The expression below results in only the bottom 8 coefficients
                 * being used for 11.025kHz to 16kHz sample rates.
                 */
                ics->num_swb = s->samplerate_index >= 8 ? 1 : 3;
            } else {
                wi[ch] = s->psy.model->window(&s->psy, samples2, la, s->cur_channel,
                                              ics->window_sequence[0]);
            }
            ics->window_sequence[1] = ics->window_sequence[0];
            ics->window_sequence[0] = wi[ch].window_type[0];
            ics->use_kb_window[1]   = ics->use_kb_window[0];
            ics->use_kb_window[0]   = wi[ch].window_shape;
            ics->num_windows        = wi[ch].num_windows;
            ics->swb_sizes          = s->psy.bands    [ics->num_windows == 8];
            ics->num_swb            = tag == TYPE_LFE ? ics->num_swb : s->psy.num_bands[ics->num_windows == 8];
            ics->max_sfb            = FFMIN(ics->max_sfb, ics->num_swb);
            ics->swb_offset         = wi[ch].window_type[0] == EIGHT_SHORT_SEQUENCE ?
                                        ff_swb_offset_128 [s->samplerate_index]:
                                        ff_swb_offset_1024[s->samplerate_index];
            ics->tns_max_bands      = wi[ch].window_type[0] == EIGHT_SHORT_SEQUENCE ?
                                        ff_tns_max_bands_128 [s->samplerate_index]:
                                        ff_tns_max_bands_1024[s->samplerate_index];
            clip_avoidance_factor = 0.0f;
            for (w = 0; w < ics->num_windows; w++)
                ics->group_len[w] = wi[ch].grouping[w];
            for (w = 0; w < ics->num_windows; w++) {
                if (wi[ch].clipping[w] > CLIP_AVOIDANCE_FACTOR) {
                    ics->window_clipping[w] = 1;
                    clip_avoidance_factor = FFMAX(clip_avoidance_factor, wi[ch].clipping[w]);
                } else {
                    ics->window_clipping[w] = 0;
                }
            }
            if (clip_avoidance_factor > CLIP_AVOIDANCE_FACTOR) {
                ics->clip_avoidance_factor = CLIP_AVOIDANCE_FACTOR / clip_avoidance_factor;
            } else {
                ics->clip_avoidance_factor = 1.0f;
            }

            apply_window_and_mdct(s, sce, overlap);

            if (s->options.ltp && s->coder->update_ltp) {
                s->coder->update_ltp(s, sce);
                apply_window[sce->ics.window_sequence[0]](s->fdsp, sce, &sce->ltp_state[0]);
                s->mdct1024.mdct_calc(&s->mdct1024, sce->lcoeffs, sce->ret_buf);
            }

            if (!(isfinite(cpe->ch[ch].coeffs[    0]) &&
                  isfinite(cpe->ch[ch].coeffs[  128]) &&
                  isfinite(cpe->ch[ch].coeffs[2*128]) &&
                  isfinite(cpe->ch[ch].coeffs[3*128]) &&
                  isfinite(cpe->ch[ch].coeffs[4*128]) &&
                  isfinite(cpe->ch[ch].coeffs[5*128]) &&
                  isfinite(cpe->ch[ch].coeffs[6*128]) &&
                  isfinite(cpe->ch[ch].coeffs[7*128]))
            ) {
                av_log(avctx, AV_LOG_ERROR, "Input contains NaN/+-Inf\n");
                return AVERROR(EINVAL);
            }
            avoid_clipping(s, sce);
        }
        start_ch += chans;
    }
    if ((ret = ff_alloc_packet2(avctx, avpkt, 8192 * s->channels, 0)) < 0)
        return ret;
    frame_bits = its = 0;
    do {
        init_put_bits(&s->pb, avpkt->data, avpkt->size);

        if ((avctx->frame_number & 0xFF)==1 && !(avctx->flags & AV_CODEC_FLAG_BITEXACT))
            put_bitstream_info(s, LIBAVCODEC_IDENT);
        start_ch = 0;
        target_bits = 0;
        memset(chan_el_counter, 0, sizeof(chan_el_counter));
        for (i = 0; i < s->chan_map[0]; i++) {
            FFPsyWindowInfo* wi = windows + start_ch;
            const float *coeffs[2];
            tag      = s->chan_map[i+1];
            chans    = tag == TYPE_CPE ? 2 : 1;
            cpe      = &s->cpe[i];
            cpe->common_window = 0;
            memset(cpe->is_mask, 0, sizeof(cpe->is_mask));
            memset(cpe->ms_mask, 0, sizeof(cpe->ms_mask));
            put_bits(&s->pb, 3, tag);
            put_bits(&s->pb, 4, chan_el_counter[tag]++);
            for (ch = 0; ch < chans; ch++) {
                sce = &cpe->ch[ch];
                coeffs[ch] = sce->coeffs;
                sce->ics.predictor_present = 0;
                sce->ics.ltp.present = 0;
                memset(sce->ics.ltp.used, 0, sizeof(sce->ics.ltp.used));
                memset(sce->ics.prediction_used, 0, sizeof(sce->ics.prediction_used));
                memset(&sce->tns, 0, sizeof(TemporalNoiseShaping));
                for (w = 0; w < 128; w++)
                    if (sce->band_type[w] > RESERVED_BT)
                        sce->band_type[w] = 0;
            }
            s->psy.bitres.alloc = -1;
            s->psy.bitres.bits = s->last_frame_pb_count / s->channels;
            s->psy.model->analyze(&s->psy, start_ch, coeffs, wi);
            if (s->psy.bitres.alloc > 0) {
                /* Lambda unused here on purpose, we need to take psy's unscaled allocation */
                target_bits += s->psy.bitres.alloc
                    * (s->lambda / (avctx->global_quality ? avctx->global_quality : 120));
                s->psy.bitres.alloc /= chans;
            }
            s->cur_type = tag;
            for (ch = 0; ch < chans; ch++) {
                s->cur_channel = start_ch + ch;
                if (s->options.pns && s->coder->mark_pns)
                    s->coder->mark_pns(s, avctx, &cpe->ch[ch]);
                s->coder->search_for_quantizers(avctx, s, &cpe->ch[ch], s->lambda);
            }
            if (chans > 1
                && wi[0].window_type[0] == wi[1].window_type[0]
                && wi[0].window_shape   == wi[1].window_shape) {

                cpe->common_window = 1;
                for (w = 0; w < wi[0].num_windows; w++) {
                    if (wi[0].grouping[w] != wi[1].grouping[w]) {
                        cpe->common_window = 0;
                        break;
                    }
                }
            }
            for (ch = 0; ch < chans; ch++) { /* TNS and PNS */
                sce = &cpe->ch[ch];
                s->cur_channel = start_ch + ch;
                if (s->options.tns && s->coder->search_for_tns)
                    s->coder->search_for_tns(s, sce);
                if (s->options.tns && s->coder->apply_tns_filt)
                    s->coder->apply_tns_filt(s, sce);
                if (sce->tns.present)
                    tns_mode = 1;
                if (s->options.pns && s->coder->search_for_pns)
                    s->coder->search_for_pns(s, avctx, sce);
            }
            s->cur_channel = start_ch;
            if (s->options.intensity_stereo) { /* Intensity Stereo */
                if (s->coder->search_for_is)
                    s->coder->search_for_is(s, avctx, cpe);
                if (cpe->is_mode) is_mode = 1;
                apply_intensity_stereo(cpe);
            }
            if (s->options.pred) { /* Prediction */
                for (ch = 0; ch < chans; ch++) {
                    sce = &cpe->ch[ch];
                    s->cur_channel = start_ch + ch;
                    if (s->options.pred && s->coder->search_for_pred)
                        s->coder->search_for_pred(s, sce);
                    if (cpe->ch[ch].ics.predictor_present) pred_mode = 1;
                }
                if (s->coder->adjust_common_pred)
                    s->coder->adjust_common_pred(s, cpe);
                for (ch = 0; ch < chans; ch++) {
                    sce = &cpe->ch[ch];
                    s->cur_channel = start_ch + ch;
                    if (s->options.pred && s->coder->apply_main_pred)
                        s->coder->apply_main_pred(s, sce);
                }
                s->cur_channel = start_ch;
            }
            if (s->options.mid_side) { /* Mid/Side stereo */
                if (s->options.mid_side == -1 && s->coder->search_for_ms)
                    s->coder->search_for_ms(s, cpe);
                else if (cpe->common_window)
                    memset(cpe->ms_mask, 1, sizeof(cpe->ms_mask));
                apply_mid_side_stereo(cpe);
            }
            adjust_frame_information(cpe, chans);
            if (s->options.ltp) { /* LTP */
                for (ch = 0; ch < chans; ch++) {
                    sce = &cpe->ch[ch];
                    s->cur_channel = start_ch + ch;
                    if (s->coder->search_for_ltp)
                        s->coder->search_for_ltp(s, sce, cpe->common_window);
                    if (sce->ics.ltp.present) pred_mode = 1;
                }
                s->cur_channel = start_ch;
                if (s->coder->adjust_common_ltp)
                    s->coder->adjust_common_ltp(s, cpe);
            }
            if (chans == 2) {
                put_bits(&s->pb, 1, cpe->common_window);
                if (cpe->common_window) {
                    put_ics_info(s, &cpe->ch[0].ics);
                    if (s->coder->encode_main_pred)
                        s->coder->encode_main_pred(s, &cpe->ch[0]);
                    if (s->coder->encode_ltp_info)
                        s->coder->encode_ltp_info(s, &cpe->ch[0], 1);
                    encode_ms_info(&s->pb, cpe);
                    if (cpe->ms_mode) ms_mode = 1;
                }
            }
            for (ch = 0; ch < chans; ch++) {
                s->cur_channel = start_ch + ch;
                encode_individual_channel(avctx, s, &cpe->ch[ch], cpe->common_window);
            }
            start_ch += chans;
        }

        if (avctx->flags & CODEC_FLAG_QSCALE) {
            /* When using a constant Q-scale, don't mess with lambda */
            break;
        }

        /* rate control stuff
         * allow between the nominal bitrate, and what psy's bit reservoir says to target
         * but drift towards the nominal bitrate always
         */
        frame_bits = put_bits_count(&s->pb);
        rate_bits = avctx->bit_rate * 1024 / avctx->sample_rate;
        rate_bits = FFMIN(rate_bits, 6144 * s->channels - 3);
        too_many_bits = FFMAX(target_bits, rate_bits);
        too_many_bits = FFMIN(too_many_bits, 6144 * s->channels - 3);
        too_few_bits = FFMIN(FFMAX(rate_bits - rate_bits/4, target_bits), too_many_bits);

        /* When using ABR, be strict (but only for increasing) */
        too_few_bits = too_few_bits - too_few_bits/8;
        too_many_bits = too_many_bits + too_many_bits/2;

        if (   its == 0 /* for steady-state Q-scale tracking */
            || (its < 5 && (frame_bits < too_few_bits || frame_bits > too_many_bits))
            || frame_bits >= 6144 * s->channels - 3  )
        {
            float ratio = ((float)rate_bits) / frame_bits;

            if (frame_bits >= too_few_bits && frame_bits <= too_many_bits) {
                /*
                 * This path is for steady-state Q-scale tracking
                 * When frame bits fall within the stable range, we still need to adjust
                 * lambda to maintain it like so in a stable fashion (large jumps in lambda
                 * create artifacts and should be avoided), but slowly
                 */
                ratio = sqrtf(sqrtf(ratio));
                ratio = av_clipf(ratio, 0.9f, 1.1f);
            } else {
                /* Not so fast though */
                ratio = sqrtf(ratio);
            }
            s->lambda = FFMIN(s->lambda * ratio, 65536.f);

            /* Keep iterating if we must reduce and lambda is in the sky */
            if (ratio > 0.9f && ratio < 1.1f) {
                break;
            } else {
                if (is_mode || ms_mode || tns_mode || pred_mode) {
                    for (i = 0; i < s->chan_map[0]; i++) {
                        // Must restore coeffs
                        chans = tag == TYPE_CPE ? 2 : 1;
                        cpe = &s->cpe[i];
                        for (ch = 0; ch < chans; ch++)
                            memcpy(cpe->ch[ch].coeffs, cpe->ch[ch].pcoeffs, sizeof(cpe->ch[ch].coeffs));
                    }
                }
                its++;
            }
        } else {
            break;
        }
    } while (1);

    if (s->options.ltp && s->coder->ltp_insert_new_frame)
        s->coder->ltp_insert_new_frame(s);

    put_bits(&s->pb, 3, TYPE_END);
    flush_put_bits(&s->pb);

    s->last_frame_pb_count = put_bits_count(&s->pb);

    s->lambda_sum += s->lambda;
    s->lambda_count++;

    if (!frame)
        s->last_frame++;

    ff_af_queue_remove(&s->afq, avctx->frame_size, &avpkt->pts,
                       &avpkt->duration);

    avpkt->size = put_bits_count(&s->pb) >> 3;
    *got_packet_ptr = 1;
    return 0;
}
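The rate-control step near the end of Example #24 in isolation: when the frame's bit count is already inside the allowed window, the lambda correction ratio is damped twice with sqrtf() and clamped to [0.9, 1.1] so the quantizer scale drifts slowly rather than jumping. With made-up bit counts:

#include <math.h>
#include <stdio.h>

static float clipf(float v, float lo, float hi) /* av_clipf stand-in */
{
    return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
    float lambda = 120.0f;                     /* current quantizer scale */
    int rate_bits = 6000, frame_bits = 9000;   /* illustrative only */

    float ratio = (float)rate_bits / frame_bits;
    ratio  = sqrtf(sqrtf(ratio));              /* steady-state damping */
    ratio  = clipf(ratio, 0.9f, 1.1f);
    lambda = fminf(lambda * ratio, 65536.0f);

    printf("ratio=%.3f lambda=%.1f\n", ratio, lambda);  /* ~0.904, ~108.4 */
    return 0;
}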
Example #25
/**
 * Find the estimated global motion for a scene given the most likely shift
 * for each block in the frame. The global motion is estimated to be the
 * same as the motion from most blocks in the frame, so if most blocks
 * move one pixel to the right and two pixels down, this would yield a
 * motion vector (1, -2).
 */
static void find_motion(DeshakeContext *deshake, uint8_t *src1, uint8_t *src2,
                        int width, int height, int stride, Transform *t)
{
    int x, y;
    IntMotionVector mv = {0, 0};
    int counts[2*MAX_R+1][2*MAX_R+1];
    int count_max_value = 0;
    int contrast;

    int pos;
    double *angles = av_malloc(sizeof(*angles) * width * height / (16 * deshake->blocksize));
    int center_x = 0, center_y = 0;
    double p_x, p_y;

    // Reset counts to zero
    for (x = 0; x < deshake->rx * 2 + 1; x++) {
        for (y = 0; y < deshake->ry * 2 + 1; y++) {
            counts[x][y] = 0;
        }
    }

    pos = 0;
    // Find motion for every block and store the motion vector in the counts
    for (y = deshake->ry; y < height - deshake->ry - (deshake->blocksize * 2); y += deshake->blocksize * 2) {
        // We use a width of 16 here to match the libavcodec sad functions
        for (x = deshake->rx; x < width - deshake->rx - 16; x += 16) {
            // If the contrast is too low, just skip this block as it probably
            // won't be very useful to us.
            contrast = block_contrast(src2, x, y, stride, deshake->blocksize);
            if (contrast > deshake->contrast) {
                //av_log(NULL, AV_LOG_ERROR, "%d\n", contrast);
                find_block_motion(deshake, src1, src2, x, y, stride, &mv);
                if (mv.x != -1 && mv.y != -1) {
                    counts[mv.x + deshake->rx][mv.y + deshake->ry] += 1;
                    if (x > deshake->rx && y > deshake->ry)
                        angles[pos++] = block_angle(x, y, 0, 0, &mv);

                    center_x += mv.x;
                    center_y += mv.y;
                }
            }
        }
    }

    if (pos) {
         center_x /= pos;
         center_y /= pos;
         t->angle = clean_mean(angles, pos);
         if (t->angle < 0.001)
              t->angle = 0;
    } else {
         t->angle = 0;
    }

    // Find the most common motion vector in the frame and use it as the gmv
    for (y = deshake->ry * 2; y >= 0; y--) {
        for (x = 0; x < deshake->rx * 2 + 1; x++) {
            //av_log(NULL, AV_LOG_ERROR, "%5d ", counts[x][y]);
            if (counts[x][y] > count_max_value) {
                t->vector.x = x - deshake->rx;
                t->vector.y = y - deshake->ry;
                count_max_value = counts[x][y];
            }
        }
        //av_log(NULL, AV_LOG_ERROR, "\n");
    }

    p_x = (center_x - width / 2.0);
    p_y = (center_y - height / 2.0);
    t->vector.x += (cos(t->angle)-1)*p_x  - sin(t->angle)*p_y;
    t->vector.y += sin(t->angle)*p_x  + (cos(t->angle)-1)*p_y;

    // Clamp max shift & rotation?
    t->vector.x = av_clipf(t->vector.x, -deshake->rx * 2, deshake->rx * 2);
    t->vector.y = av_clipf(t->vector.y, -deshake->ry * 2, deshake->ry * 2);
    t->angle = av_clipf(t->angle, -0.1, 0.1);

    //av_log(NULL, AV_LOG_ERROR, "%d x %d\n", avg->x, avg->y);
    av_free(angles);
}
Example #26
static void stereo_position(float a, float p, float *x, float *y)
{
      *x = av_clipf(a+FFMAX(0, sinf(p-M_PI_2))*FFDIFFSIGN(a,0), -1, 1);
      *y = av_clipf(cosf(a*M_PI_2+M_PI)*cosf(M_PI_2-p/M_PI)*M_LN10+1, -1, 1);
}
Example #27
av_cold int ff_rate_control_init(MpegEncContext *s)
{
    RateControlContext *rcc = &s->rc_context;
    int i, res;
    static const char * const const_names[] = {
        "PI",
        "E",
        "iTex",
        "pTex",
        "tex",
        "mv",
        "fCode",
        "iCount",
        "mcVar",
        "var",
        "isI",
        "isP",
        "isB",
        "avgQP",
        "qComp",
#if 0
        "lastIQP",
        "lastPQP",
        "lastBQP",
        "nextNonBQP",
#endif
        "avgIITex",
        "avgPITex",
        "avgPPTex",
        "avgBPTex",
        "avgTex",
        NULL
    };
    static double (* const func1[])(void *, double) = {
        (void *)bits2qp,
        (void *)qp2bits,
        NULL
    };
    static const char * const func1_names[] = {
        "bits2qp",
        "qp2bits",
        NULL
    };
    emms_c();

    if (!s->avctx->rc_max_available_vbv_use && s->avctx->rc_buffer_size) {
        if (s->avctx->rc_max_rate) {
            s->avctx->rc_max_available_vbv_use = av_clipf(s->avctx->rc_max_rate/(s->avctx->rc_buffer_size*get_fps(s->avctx)), 1.0/3, 1.0);
        } else
            s->avctx->rc_max_available_vbv_use = 1.0;
    }

    res = av_expr_parse(&rcc->rc_eq_eval,
                        s->avctx->rc_eq ? s->avctx->rc_eq : "tex^qComp",
                        const_names, func1_names, func1,
                        NULL, NULL, 0, s->avctx);
    if (res < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "Error parsing rc_eq \"%s\"\n", s->avctx->rc_eq);
        return res;
    }

    for (i = 0; i < 5; i++) {
        rcc->pred[i].coeff = FF_QP2LAMBDA * 7.0;
        rcc->pred[i].count = 1.0;
        rcc->pred[i].decay = 0.4;

        rcc->i_cplx_sum [i] =
        rcc->p_cplx_sum [i] =
        rcc->mv_bits_sum[i] =
        rcc->qscale_sum [i] =
        rcc->frame_count[i] = 1; // 1 is better because of 1/0 and such

        rcc->last_qscale_for[i] = FF_QP2LAMBDA * 5;
    }
    rcc->buffer_index = s->avctx->rc_initial_buffer_occupancy;
    if (!rcc->buffer_index)
        rcc->buffer_index = s->avctx->rc_buffer_size * 3 / 4;

    if (s->flags & CODEC_FLAG_PASS2) {
        int i;
        char *p;

        /* find number of pics */
        p = s->avctx->stats_in;
        for (i = -1; p; i++)
            p = strchr(p + 1, ';');
        i += s->max_b_frames;
        if (i <= 0 || i >= INT_MAX / sizeof(RateControlEntry))
            return -1;
        rcc->entry       = av_mallocz(i * sizeof(RateControlEntry));
        rcc->num_entries = i;

        /* init all to skipped p frames
         * (with b frames we might have a not encoded frame at the end FIXME) */
        for (i = 0; i < rcc->num_entries; i++) {
            RateControlEntry *rce = &rcc->entry[i];

            rce->pict_type  = rce->new_pict_type = AV_PICTURE_TYPE_P;
            rce->qscale     = rce->new_qscale    = FF_QP2LAMBDA * 2;
            rce->misc_bits  = s->mb_num + 10;
            rce->mb_var_sum = s->mb_num * 100;
        }

        /* read stats */
        p = s->avctx->stats_in;
        for (i = 0; i < rcc->num_entries - s->max_b_frames; i++) {
            RateControlEntry *rce;
            int picture_number;
            int e;
            char *next;

            next = strchr(p, ';');
            if (next) {
                (*next) = 0; // sscanf is unbelievably slow on long strings // FIXME copy / do not write
                next++;
            }
            e = sscanf(p, " in:%d ", &picture_number);

            assert(picture_number >= 0);
            assert(picture_number < rcc->num_entries);
            rce = &rcc->entry[picture_number];

            e += sscanf(p, " in:%*d out:%*d type:%d q:%f itex:%d ptex:%d mv:%d misc:%d fcode:%d bcode:%d mc-var:%"SCNd64" var:%"SCNd64" icount:%d skipcount:%d hbits:%d",
                        &rce->pict_type, &rce->qscale, &rce->i_tex_bits, &rce->p_tex_bits,
                        &rce->mv_bits, &rce->misc_bits,
                        &rce->f_code, &rce->b_code,
                        &rce->mc_mb_var_sum, &rce->mb_var_sum,
                        &rce->i_count, &rce->skip_count, &rce->header_bits);
            if (e != 14) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "statistics are damaged at line %d, parser out=%d\n",
                       i, e);
                return -1;
            }

            p = next;
        }

        if (init_pass2(s) < 0)
            return -1;

        // FIXME maybe move to end
        if ((s->flags & CODEC_FLAG_PASS2) && s->avctx->rc_strategy == FF_RC_STRATEGY_XVID) {
#if CONFIG_LIBXVID
            return ff_xvid_rate_control_init(s);
#else
            av_log(s->avctx, AV_LOG_ERROR,
                   "Xvid ratecontrol requires libavcodec compiled with Xvid support.\n");
            return -1;
#endif
        }
    }

    if (!(s->flags & CODEC_FLAG_PASS2)) {
        rcc->short_term_qsum   = 0.001;
        rcc->short_term_qcount = 0.001;

        rcc->pass1_rc_eq_output_sum = 0.001;
        rcc->pass1_wanted_bits      = 0.001;

        if (s->avctx->qblur > 1.0) {
            av_log(s->avctx, AV_LOG_ERROR, "qblur too large\n");
            return -1;
        }
        /* init stuff with the user specified complexity */
        if (s->avctx->rc_initial_cplx) {
            for (i = 0; i < 60 * 30; i++) {
                double bits = s->avctx->rc_initial_cplx * (i / 10000.0 + 1.0) * s->mb_num;
                RateControlEntry rce;

                if (i % ((s->gop_size + 3) / 4) == 0)
                    rce.pict_type = AV_PICTURE_TYPE_I;
                else if (i % (s->max_b_frames + 1))
                    rce.pict_type = AV_PICTURE_TYPE_B;
                else
                    rce.pict_type = AV_PICTURE_TYPE_P;

                rce.new_pict_type = rce.pict_type;
                rce.mc_mb_var_sum = bits * s->mb_num / 100000;
                rce.mb_var_sum    = s->mb_num;

                rce.qscale    = FF_QP2LAMBDA * 2;
                rce.f_code    = 2;
                rce.b_code    = 1;
                rce.misc_bits = 1;

                if (s->pict_type == AV_PICTURE_TYPE_I) {
                    rce.i_count    = s->mb_num;
                    rce.i_tex_bits = bits;
                    rce.p_tex_bits = 0;
                    rce.mv_bits    = 0;
                } else {
                    rce.i_count    = 0; // FIXME we do know this approx
                    rce.i_tex_bits = 0;
                    rce.p_tex_bits = bits * 0.9;
                    rce.mv_bits    = bits * 0.1;
                }
                rcc->i_cplx_sum[rce.pict_type]  += rce.i_tex_bits * rce.qscale;
                rcc->p_cplx_sum[rce.pict_type]  += rce.p_tex_bits * rce.qscale;
                rcc->mv_bits_sum[rce.pict_type] += rce.mv_bits;
                rcc->frame_count[rce.pict_type]++;

                get_qscale(s, &rce, rcc->pass1_wanted_bits / rcc->pass1_rc_eq_output_sum, i);

                // FIXME misbehaves a little for variable fps
                rcc->pass1_wanted_bits += s->bit_rate / get_fps(s->avctx);
            }
        }
    }

    return 0;
}
Example #28
static void search_for_pns(AACEncContext *s, AVCodecContext *avctx, SingleChannelElement *sce)
{
    FFPsyBand *band;
    int w, g, w2, i;
    int wlen = 1024 / sce->ics.num_windows;
    int bandwidth, cutoff;
    float *PNS = &s->scoefs[0*128], *PNS34 = &s->scoefs[1*128];
    float *NOR34 = &s->scoefs[3*128];
    uint8_t nextband[128];
    const float lambda = s->lambda;
    const float freq_mult = avctx->sample_rate*0.5f/wlen;
    const float thr_mult = NOISE_LAMBDA_REPLACE*(100.0f/lambda);
    const float spread_threshold = FFMIN(0.75f, NOISE_SPREAD_THRESHOLD*FFMAX(0.5f, lambda/100.f));
    const float dist_bias = av_clipf(4.f * 120 / lambda, 0.25f, 4.0f);
    const float pns_transient_energy_r = FFMIN(0.7f, lambda / 140.f);

    int refbits = avctx->bit_rate * 1024.0 / avctx->sample_rate
        / ((avctx->flags & CODEC_FLAG_QSCALE) ? 2.0f : avctx->channels)
        * (lambda / 120.f);

    /** Keep this in sync with twoloop's cutoff selection */
    float rate_bandwidth_multiplier = 1.5f;
    int prev = -1000, prev_sf = -1;
    int frame_bit_rate = (avctx->flags & CODEC_FLAG_QSCALE)
        ? (refbits * rate_bandwidth_multiplier * avctx->sample_rate / 1024)
        : (avctx->bit_rate / avctx->channels);

    frame_bit_rate *= 1.15f;

    if (avctx->cutoff > 0) {
        bandwidth = avctx->cutoff;
    } else {
        bandwidth = FFMAX(3000, AAC_CUTOFF_FROM_BITRATE(frame_bit_rate, 1, avctx->sample_rate));
    }

    cutoff = bandwidth * 2 * wlen / avctx->sample_rate;

    memcpy(sce->band_alt, sce->band_type, sizeof(sce->band_type));
    ff_init_nextband_map(sce, nextband);
    for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
        int wstart = w*128;
        for (g = 0; g < sce->ics.num_swb; g++) {
            int noise_sfi;
            float dist1 = 0.0f, dist2 = 0.0f, noise_amp;
            float pns_energy = 0.0f, pns_tgt_energy, energy_ratio, dist_thresh;
            float sfb_energy = 0.0f, threshold = 0.0f, spread = 2.0f;
            float min_energy = -1.0f, max_energy = 0.0f;
            const int start = wstart+sce->ics.swb_offset[g];
            const float freq = (start-wstart)*freq_mult;
            const float freq_boost = FFMAX(0.88f*freq/NOISE_LOW_LIMIT, 1.0f);
            if (freq < NOISE_LOW_LIMIT || (start-wstart) >= cutoff) {
                if (!sce->zeroes[w*16+g])
                    prev_sf = sce->sf_idx[w*16+g];
                continue;
            }
            for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
                band = &s->psy.ch[s->cur_channel].psy_bands[(w+w2)*16+g];
                sfb_energy += band->energy;
                spread     = FFMIN(spread, band->spread);
                threshold  += band->threshold;
                if (!w2) {
                    min_energy = max_energy = band->energy;
                } else {
                    min_energy = FFMIN(min_energy, band->energy);
                    max_energy = FFMAX(max_energy, band->energy);
                }
            }

            /* Ramps down at ~8000Hz and loosens the dist threshold */
            dist_thresh = av_clipf(2.5f*NOISE_LOW_LIMIT/freq, 0.5f, 2.5f) * dist_bias;

            /* PNS is acceptable when all of these are true:
             * 1. high spread energy (noise-like band)
             * 2. near-threshold energy (high PE means the random nature of PNS content will be noticed)
             * 3. on short window groups, all windows have similar energy (variations in energy would be destroyed by PNS)
             *
             * At this stage, point 2 is relaxed for zeroed bands near the noise threshold (hole avoidance is more important)
             */
            if ((!sce->zeroes[w*16+g] && !ff_sfdelta_can_remove_band(sce, nextband, prev_sf, w*16+g)) ||
                ((sce->zeroes[w*16+g] || !sce->band_alt[w*16+g]) && sfb_energy < threshold*sqrtf(1.0f/freq_boost)) || spread < spread_threshold ||
                (!sce->zeroes[w*16+g] && sce->band_alt[w*16+g] && sfb_energy > threshold*thr_mult*freq_boost) ||
                min_energy < pns_transient_energy_r * max_energy ) {
                sce->pns_ener[w*16+g] = sfb_energy;
                if (!sce->zeroes[w*16+g])
                    prev_sf = sce->sf_idx[w*16+g];
                continue;
            }

            pns_tgt_energy = sfb_energy*FFMIN(1.0f, spread*spread);
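            /* AAC scalefactors step by 2^(1/4) in amplitude, i.e. 2^(1/2) in
             * energy, so log2f(energy) * 2 gives the nearest index; dequantizing
             * through the pow2sf table yields |noise_amp| ~ sqrtf(pns_tgt_energy) */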
            noise_sfi = av_clip(roundf(log2f(pns_tgt_energy)*2), -100, 155); /* Quantize */
            noise_amp = -ff_aac_pow2sf_tab[noise_sfi + POW_SF2_ZERO];    /* Dequantize */
            if (prev != -1000) {
                int noise_sfdiff = noise_sfi - prev + SCALE_DIFF_ZERO;
                if (noise_sfdiff < 0 || noise_sfdiff > 2*SCALE_MAX_DIFF) {
                    if (!sce->zeroes[w*16+g])
                        prev_sf = sce->sf_idx[w*16+g];
                    continue;
                }
            }
            for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
                float band_energy, scale, pns_senergy;
                const int start_c = (w+w2)*128+sce->ics.swb_offset[g];
                band = &s->psy.ch[s->cur_channel].psy_bands[(w+w2)*16+g];
                for (i = 0; i < sce->ics.swb_sizes[g]; i+=2) {
                    double rnd[2];
                    av_bmg_get(&s->lfg, rnd);
                    PNS[i+0] = (float)rnd[0];
                    PNS[i+1] = (float)rnd[1];
                }
                band_energy = s->fdsp->scalarproduct_float(PNS, PNS, sce->ics.swb_sizes[g]);
                scale = noise_amp/sqrtf(band_energy);
                s->fdsp->vector_fmul_scalar(PNS, PNS, scale, sce->ics.swb_sizes[g]);
                pns_senergy = s->fdsp->scalarproduct_float(PNS, PNS, sce->ics.swb_sizes[g]);
                pns_energy += pns_senergy;
                abs_pow34_v(NOR34, &sce->coeffs[start_c], sce->ics.swb_sizes[g]);
                abs_pow34_v(PNS34, PNS, sce->ics.swb_sizes[g]);
                dist1 += quantize_band_cost(s, &sce->coeffs[start_c],
                                            NOR34,
                                            sce->ics.swb_sizes[g],
                                            sce->sf_idx[(w+w2)*16+g],
                                            sce->band_alt[(w+w2)*16+g],
                                            lambda/band->threshold, INFINITY, NULL, NULL, 0);
                /* Estimate rd on average as 5 bits for SF, 4 for the CB, plus spread energy * lambda/thr */
                dist2 += band->energy/(band->spread*band->spread)*lambda*dist_thresh/band->threshold;
            }
            if (g && sce->band_type[w*16+g-1] == NOISE_BT) {
                dist2 += 5;
            } else {
                dist2 += 9;
            }
            energy_ratio = pns_tgt_energy/pns_energy; /* Compensates for quantization error */
            sce->pns_ener[w*16+g] = energy_ratio*pns_tgt_energy;
            if (sce->zeroes[w*16+g] || !sce->band_alt[w*16+g] || (energy_ratio > 0.85f && energy_ratio < 1.25f && dist2 < dist1)) {
                sce->band_type[w*16+g] = NOISE_BT;
                sce->zeroes[w*16+g] = 0;
                prev = noise_sfi;
            } else {
                if (!sce->zeroes[w*16+g])
                    prev_sf = sce->sf_idx[w*16+g];
            }
        }
    }
}
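A toy sketch of the substitution step at the heart of search_for_pns(): draw Gaussian pairs, rescale the noise to the band's target energy, then check the energy ratio against the 0.85..1.25 acceptance window used above. box_muller() stands in for av_bmg_get(), and the scalefactor quantization of noise_amp is skipped, so the ratio here comes out ~1 by construction.

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

static void box_muller(double out[2])
{
    double two_pi = 8.0 * atan(1.0);
    double u1 = (rand() + 1.0) / ((double)RAND_MAX + 2.0); /* in (0,1) */
    double u2 = (rand() + 1.0) / ((double)RAND_MAX + 2.0);
    double r  = sqrt(-2.0 * log(u1));
    out[0] = r * cos(two_pi * u2);
    out[1] = r * sin(two_pi * u2);
}

int main(void)
{
    enum { N = 32 };             /* one scalefactor band of coefficients */
    float pns[N];
    float pns_tgt_energy = 3.5f; /* stands in for sfb_energy * spread^2  */
    float band_energy = 0.0f, pns_energy = 0.0f, scale, energy_ratio;
    double rnd[2];
    int i;

    for (i = 0; i < N; i += 2) { /* same pairwise fill as the loop above */
        box_muller(rnd);
        pns[i + 0] = (float)rnd[0];
        pns[i + 1] = (float)rnd[1];
    }
    for (i = 0; i < N; i++)
        band_energy += pns[i] * pns[i];
    scale = sqrtf(pns_tgt_energy / band_energy);
    for (i = 0; i < N; i++)
        pns[i] *= scale;
    for (i = 0; i < N; i++)
        pns_energy += pns[i] * pns[i];

    energy_ratio = pns_tgt_energy / pns_energy;
    printf("energy_ratio = %f (accepted when 0.85 < r < 1.25)\n", energy_ratio);
    return 0;
}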
Exemplo n.º 29
0
static av_cold int encode_init(AVCodecContext* avc_context)
{
    th_info t_info;
    th_comment t_comment;
    ogg_packet o_packet;
    unsigned int offset;
    TheoraContext *h = avc_context->priv_data;
    uint32_t gop_size = avc_context->gop_size;

    /* Set up the theora_info struct */
    th_info_init(&t_info);
    t_info.frame_width  = FFALIGN(avc_context->width,  16);
    t_info.frame_height = FFALIGN(avc_context->height, 16);
    t_info.pic_width    = avc_context->width;
    t_info.pic_height   = avc_context->height;
    t_info.pic_x        = 0;
    t_info.pic_y        = 0;
    /* Swap numerator and denominator as time_base in AVCodecContext gives the
     * time period between frames, but theora_info needs the framerate.  */
    t_info.fps_numerator   = avc_context->time_base.den;
    t_info.fps_denominator = avc_context->time_base.num;
    if (avc_context->sample_aspect_ratio.num) {
        t_info.aspect_numerator   = avc_context->sample_aspect_ratio.num;
        t_info.aspect_denominator = avc_context->sample_aspect_ratio.den;
    } else {
        t_info.aspect_numerator   = 1;
        t_info.aspect_denominator = 1;
    }

    if (avc_context->color_primaries == AVCOL_PRI_BT470M)
        t_info.colorspace = TH_CS_ITU_REC_470M;
    else if (avc_context->color_primaries == AVCOL_PRI_BT470BG)
        t_info.colorspace = TH_CS_ITU_REC_470BG;
    else
        t_info.colorspace = TH_CS_UNSPECIFIED;

    if (avc_context->pix_fmt == AV_PIX_FMT_YUV420P)
        t_info.pixel_fmt = TH_PF_420;
    else if (avc_context->pix_fmt == AV_PIX_FMT_YUV422P)
        t_info.pixel_fmt = TH_PF_422;
    else if (avc_context->pix_fmt == AV_PIX_FMT_YUV444P)
        t_info.pixel_fmt = TH_PF_444;
    else {
        av_log(avc_context, AV_LOG_ERROR, "Unsupported pix_fmt\n");
        return -1;
    }
    av_pix_fmt_get_chroma_sub_sample(avc_context->pix_fmt,
                                     &h->uv_hshift, &h->uv_vshift);

    if (avc_context->flags & CODEC_FLAG_QSCALE) {
        /* To be consistent with the libvorbis implementation, clip
           global_quality to the range 0 - 10. Theora accepts a quality
           parameter p, which is:
                * 0 <= p <= 63
                * an int value
         */
        t_info.quality        = av_clipf(avc_context->global_quality / (float)FF_QP2LAMBDA, 0, 10) * 6.3;
        t_info.target_bitrate = 0;
    } else {
        t_info.target_bitrate = avc_context->bit_rate;
        t_info.quality        = 0;
    }

    /* Now initialise libtheora */
    h->t_state = th_encode_alloc(&t_info);
    if (!h->t_state) {
        av_log(avc_context, AV_LOG_ERROR, "th_encode_alloc failed\n");
        return -1;
    }

    h->keyframe_mask = (1 << t_info.keyframe_granule_shift) - 1;
    /* Clear up theora_info struct */
    th_info_clear(&t_info);

    if (th_encode_ctl(h->t_state, TH_ENCCTL_SET_KEYFRAME_FREQUENCY_FORCE,
                      &gop_size, sizeof(gop_size))) {
        av_log(avc_context, AV_LOG_ERROR, "Error setting GOP size\n");
        return -1;
    }

    // need to enable 2 pass (via TH_ENCCTL_2PASS_) before encoding headers
    if (avc_context->flags & CODEC_FLAG_PASS1) {
        if (get_stats(avc_context, 0))
            return -1;
    } else if (avc_context->flags & CODEC_FLAG_PASS2) {
        if (submit_stats(avc_context))
            return -1;
    }

    /*
        Output first header packet consisting of theora
        header, comment, and tables.

        Each one is prefixed with a 16bit size, then they
        are concatenated together into libavcodec's extradata.
    */
    offset = 0;

    /* Headers */
    th_comment_init(&t_comment);

    while (th_encode_flushheader(h->t_state, &t_comment, &o_packet))
        if (concatenate_packet(&offset, avc_context, &o_packet))
            return -1;

    th_comment_clear(&t_comment);

    /* Set up the output AVFrame */
    avc_context->coded_frame = av_frame_alloc();
    if (!avc_context->coded_frame)
        return AVERROR(ENOMEM);

    return 0;
}
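concatenate_packet() itself is not shown in this example; below is a sketch of the extradata layout its comment describes, with each header packet prefixed by a 16-bit size and appended into one growing buffer. The byte order is not stated above, so big-endian is assumed here, and append_packet() is a hypothetical stand-in rather than the function from the file.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static int append_packet(uint8_t **buf, unsigned *offset, unsigned *cap,
                         const uint8_t *pkt, uint16_t size)
{
    if (*offset + 2u + size > *cap) {
        unsigned ncap = 2 * (*offset + 2u + size);
        uint8_t *n = realloc(*buf, ncap);
        if (!n)
            return -1;                 /* caller keeps ownership of *buf */
        *buf = n;
        *cap = ncap;
    }
    (*buf)[(*offset)++] = size >> 8;   /* 16-bit size prefix, big-endian */
    (*buf)[(*offset)++] = size & 0xFF;
    memcpy(*buf + *offset, pkt, size); /* then the packet payload itself */
    *offset += size;
    return 0;
}

int main(void)
{
    uint8_t *extradata = NULL;
    unsigned offset = 0, cap = 0;
    const uint8_t hdr[] = { 0x80, 't', 'h', 'e', 'o', 'r', 'a' }; /* dummy */
    const uint8_t cmt[] = { 0x81, 't', 'h', 'e', 'o', 'r', 'a' }; /* dummy */

    if (append_packet(&extradata, &offset, &cap, hdr, sizeof(hdr)) < 0 ||
        append_packet(&extradata, &offset, &cap, cmt, sizeof(cmt)) < 0)
        return 1;
    /* offset is now the final extradata_size */
    free(extradata);
    return 0;
}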
Exemplo n.º 30
0
Arquivo: aacps.c Projeto: Arcen/FFmpeg
static void decorrelation(PSContext *ps, float (*out)[32][2], const float (*s)[32][2], int is34)
{
    float power[34][PS_QMF_TIME_SLOTS] = {{0}};
    float transient_gain[34][PS_QMF_TIME_SLOTS];
    float *peak_decay_nrg = ps->peak_decay_nrg;
    float *power_smooth = ps->power_smooth;
    float *peak_decay_diff_smooth = ps->peak_decay_diff_smooth;
    float (*delay)[PS_QMF_TIME_SLOTS + PS_MAX_DELAY][2] = ps->delay;
    float (*ap_delay)[PS_AP_LINKS][PS_QMF_TIME_SLOTS + PS_MAX_AP_DELAY][2] = ps->ap_delay;
    const int8_t *k_to_i = is34 ? k_to_i_34 : k_to_i_20;
    const float peak_decay_factor = 0.76592833836465f;
    const float transient_impact  = 1.5f;
    const float a_smooth          = 0.25f; ///< Smoothing coefficient
    int i, k, m, n;
    int n0 = 0, nL = 32;
    const int numQMFSlots = nL - n0; /* 32 new QMF time slots per frame; a
                                        file-scope constant in the original,
                                        added here so the snippet is
                                        self-contained */
    static const int link_delay[] = { 3, 4, 5 };
    static const float a[] = { 0.65143905753106f,
                               0.56471812200776f,
                               0.48954165955695f };

    if (is34 != ps->is34bands_old) {
        memset(ps->peak_decay_nrg,         0, sizeof(ps->peak_decay_nrg));
        memset(ps->power_smooth,           0, sizeof(ps->power_smooth));
        memset(ps->peak_decay_diff_smooth, 0, sizeof(ps->peak_decay_diff_smooth));
        memset(ps->delay,                  0, sizeof(ps->delay));
        memset(ps->ap_delay,               0, sizeof(ps->ap_delay));
    }

    for (n = n0; n < nL; n++) {
        for (k = 0; k < NR_BANDS[is34]; k++) {
            int i = k_to_i[k];
            power[i][n] += s[k][n][0] * s[k][n][0] + s[k][n][1] * s[k][n][1];
        }
    }

    //Transient detection
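    //For each parameter band, track a decaying peak of its power alongside a
    //smoothed power; when transient_impact times the smoothed peak-minus-power
    //gap exceeds the smoothed power, duck the band to power_smooth / denom.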
    for (i = 0; i < NR_PAR_BANDS[is34]; i++) {
        for (n = n0; n < nL; n++) {
            float decayed_peak = peak_decay_factor * peak_decay_nrg[i];
            float denom;
            peak_decay_nrg[i] = FFMAX(decayed_peak, power[i][n]);
            power_smooth[i] += a_smooth * (power[i][n] - power_smooth[i]);
            peak_decay_diff_smooth[i] += a_smooth * (peak_decay_nrg[i] - power[i][n] - peak_decay_diff_smooth[i]);
            denom = transient_impact * peak_decay_diff_smooth[i];
            transient_gain[i][n]   = (denom > power_smooth[i]) ?
                                         power_smooth[i] / denom : 1.0f;
        }
    }

    //Decorrelation and transient reduction
    //                         PS_AP_LINKS - 1
    //                               -----
    //                                | |  Q_fract_allpass[k][m]*z^-link_delay[m] - a[m]*g_decay_slope[k]
    //H[k][z] = z^-2 * phi_fract[k] * | | ----------------------------------------------------------------
    //                                | | 1 - a[m]*g_decay_slope[k]*Q_fract_allpass[k][m]*z^-link_delay[m]
    //                               m = 0
    //d[k][z] (out) = transient_gain_mapped[k][z] * H[k][z] * s[k][z]
    for (k = 0; k < NR_ALLPASS_BANDS[is34]; k++) {
        int b = k_to_i[k];
        float g_decay_slope = 1.f - DECAY_SLOPE * (k - DECAY_CUTOFF[is34]);
        float ag[PS_AP_LINKS];
        g_decay_slope = av_clipf(g_decay_slope, 0.f, 1.f);
        memcpy(delay[k], delay[k]+nL, PS_MAX_DELAY*sizeof(delay[k][0]));
        memcpy(delay[k]+PS_MAX_DELAY, s[k], numQMFSlots*sizeof(delay[k][0]));
        for (m = 0; m < PS_AP_LINKS; m++) {
            memcpy(ap_delay[k][m],   ap_delay[k][m]+numQMFSlots,           5*sizeof(ap_delay[k][m][0]));
            ag[m] = a[m] * g_decay_slope;
        }
        for (n = n0; n < nL; n++) {
            float in_re = delay[k][n+PS_MAX_DELAY-2][0] * phi_fract[is34][k][0] -
                          delay[k][n+PS_MAX_DELAY-2][1] * phi_fract[is34][k][1];
            float in_im = delay[k][n+PS_MAX_DELAY-2][0] * phi_fract[is34][k][1] +
                          delay[k][n+PS_MAX_DELAY-2][1] * phi_fract[is34][k][0];
            for (m = 0; m < PS_AP_LINKS; m++) {
                float a_re                = ag[m] * in_re;
                float a_im                = ag[m] * in_im;
                float link_delay_re       = ap_delay[k][m][n+5-link_delay[m]][0];
                float link_delay_im       = ap_delay[k][m][n+5-link_delay[m]][1];
                float fractional_delay_re = Q_fract_allpass[is34][k][m][0];
                float fractional_delay_im = Q_fract_allpass[is34][k][m][1];
                ap_delay[k][m][n+5][0] = in_re;
                ap_delay[k][m][n+5][1] = in_im;
                in_re = link_delay_re * fractional_delay_re - link_delay_im * fractional_delay_im - a_re;
                in_im = link_delay_re * fractional_delay_im + link_delay_im * fractional_delay_re - a_im;
                ap_delay[k][m][n+5][0] += ag[m] * in_re;
                ap_delay[k][m][n+5][1] += ag[m] * in_im;
            }
            out[k][n][0] = transient_gain[b][n] * in_re;
            out[k][n][1] = transient_gain[b][n] * in_im;
        }
    }
    for (; k < SHORT_DELAY_BAND[is34]; k++) {
        memcpy(delay[k], delay[k]+nL, PS_MAX_DELAY*sizeof(delay[k][0]));
        memcpy(delay[k]+PS_MAX_DELAY, s[k], numQMFSlots*sizeof(delay[k][0]));
        for (n = n0; n < nL; n++) {
            //H = delay 14
            out[k][n][0] = transient_gain[k_to_i[k]][n] * delay[k][n+PS_MAX_DELAY-14][0];
            out[k][n][1] = transient_gain[k_to_i[k]][n] * delay[k][n+PS_MAX_DELAY-14][1];
        }
    }
    for (; k < NR_BANDS[is34]; k++) {
        memcpy(delay[k], delay[k]+nL, PS_MAX_DELAY*sizeof(delay[k][0]));
        memcpy(delay[k]+PS_MAX_DELAY, s[k], numQMFSlots*sizeof(delay[k][0]));
        for (n = n0; n < nL; n++) {
            //H = delay 1
            out[k][n][0] = transient_gain[k_to_i[k]][n] * delay[k][n+PS_MAX_DELAY-1][0];
            out[k][n][1] = transient_gain[k_to_i[k]][n] * delay[k][n+PS_MAX_DELAY-1][1];
        }
    }
}
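Each factor of the H[k][z] product above is a first-order allpass link with delay link_delay[m] and gain a[m] * g_decay_slope[k]. Here is a real-valued, single-link sketch (the complex fractional-delay term Q_fract_allpass is dropped, i.e. Q = 1), so H(z) = (z^-D - a) / (1 - a * z^-D); the demo verifies the allpass property via the impulse-response energy.

#include <stdio.h>
#include <string.h>

#define D 3 /* cf. link_delay[0] */

static float allpass_step(float in, float a, float dl[D])
{
    float delayed = dl[0];               /* state from D samples back */
    float out = delayed - a * in;        /* numerator:   z^-D - a     */
    memmove(dl, dl + 1, (D - 1) * sizeof(*dl));
    dl[D - 1] = in + a * out;            /* denominator: 1 - a * z^-D */
    return out;
}

int main(void)
{
    float dl[D] = { 0 };
    const float a = 0.65143905753106f;   /* a[0] from the table above */
    float energy = 0.0f;
    for (int n = 0; n < 64; n++) {
        float y = allpass_step(n == 0 ? 1.0f : 0.0f, a, dl);
        energy += y * y;
    }
    /* an allpass preserves energy: the impulse response sums to ~1 */
    printf("impulse-response energy: %f\n", energy);
    return 0;
}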