Example #1
static int vibrance_slice16(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
{
    VibranceContext *s = avctx->priv;
    AVFrame *frame = arg;
    const int depth = s->depth;
    const float max = (1 << depth) - 1;
    const float scale = 1.f / max;
    const float gc = s->lcoeffs[0];
    const float bc = s->lcoeffs[1];
    const float rc = s->lcoeffs[2];
    const int width = frame->width;
    const int height = frame->height;
    const float intensity = s->intensity;
    const float alternate = s->alternate ? 1.f : -1.f;
    const float gintensity = intensity * s->balance[0];
    const float bintensity = intensity * s->balance[1];
    const float rintensity = intensity * s->balance[2];
    const float sgintensity = alternate * FFSIGN(gintensity);
    const float sbintensity = alternate * FFSIGN(bintensity);
    const float srintensity = alternate * FFSIGN(rintensity);
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr + 1)) / nb_jobs;
    const int glinesize = frame->linesize[0] / 2;
    const int blinesize = frame->linesize[1] / 2;
    const int rlinesize = frame->linesize[2] / 2;
    uint16_t *gptr = (uint16_t *)frame->data[0] + slice_start * glinesize;
    uint16_t *bptr = (uint16_t *)frame->data[1] + slice_start * blinesize;
    uint16_t *rptr = (uint16_t *)frame->data[2] + slice_start * rlinesize;

    for (int y = slice_start; y < slice_end; y++) {
        for (int x = 0; x < width; x++) {
            float g = gptr[x] * scale;
            float b = bptr[x] * scale;
            float r = rptr[x] * scale;
            float max_color = FFMAX3(r, g, b);
            float min_color = FFMIN3(r, g, b);
            float color_saturation = max_color - min_color;
            float luma = g * gc + r * rc + b * bc;
            const float cg = 1.f + gintensity * (1.f - sgintensity * color_saturation);
            const float cb = 1.f + bintensity * (1.f - sbintensity * color_saturation);
            const float cr = 1.f + rintensity * (1.f - srintensity * color_saturation);

            g = lerpf(luma, g, cg);
            b = lerpf(luma, b, cb);
            r = lerpf(luma, r, cr);

            gptr[x] = av_clip_uintp2_c(g * max, depth);
            bptr[x] = av_clip_uintp2_c(b * max, depth);
            rptr[x] = av_clip_uintp2_c(r * max, depth);
        }

        gptr += glinesize;
        bptr += blinesize;
        rptr += rlinesize;
    }

    return 0;
}
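For reference, Example #1 leans on a few small libavutil/libavfilter helpers that are not shown. The following is a minimal sketch of their behaviour, assuming the usual FFmpeg definitions (the upstream code may differ in detail):

#include <stdint.h>

/* Sign as +1/-1; note that 0 maps to -1 with this definition. */
#define FFSIGN(a) ((a) > 0 ? 1 : -1)

/* Linear interpolation between v0 and v1 by factor f. */
static inline float lerpf(float v0, float v1, float f)
{
    return v0 + (v1 - v0) * f;
}

/* Clamp a to the unsigned range representable in p bits, i.e. [0, 2^p - 1]. */
static inline unsigned av_clip_uintp2_c(int a, int p)
{
    const int pmax = (1 << p) - 1;
    return a < 0 ? 0 : (a > pmax ? (unsigned)pmax : (unsigned)a);
}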
Example #2
static int cmp_entry(const void *pa, const void *pb)
{
    const struct find_entry *a = pa, *b = pb;
    // check "similar" filenames first
    int matchdiff = b->matchlen - a->matchlen;
    if (matchdiff)
        return FFSIGN(matchdiff);
    // check small files first
    off_t sizediff = a->size - b->size;
    if (sizediff)
        return FFSIGN(sizediff);
    return 0;
}
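The comparator in Example #2 is meant to be handed to qsort(): entries with longer filename matches sort first, with ties broken by smaller size. A hypothetical, simplified use (the real struct find_entry presumably carries more fields, such as the candidate filename itself):

#include <stdlib.h>
#include <sys/types.h>

/* Simplified stand-in for the real structure; only the fields the
 * comparator reads are kept here. */
struct find_entry {
    int   matchlen;   /* how many characters match the reference filename */
    off_t size;       /* file size in bytes */
};

static void sort_candidates(struct find_entry *entries, size_t nb_entries)
{
    /* best (longest) matches first, then smaller files first */
    qsort(entries, nb_entries, sizeof(*entries), cmp_entry);
}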
Example #3
static inline void doHorizDefFilter_C(uint8_t dst[], int stride, const PPContext *c)
{
    int y;
    for(y=0; y<BLOCK_SIZE; y++){
        const int middleEnergy= 5*(dst[4] - dst[3]) + 2*(dst[2] - dst[5]);

        if(FFABS(middleEnergy) < 8*c->QP){
            const int q=(dst[3] - dst[4])/2;
            const int leftEnergy=  5*(dst[2] - dst[1]) + 2*(dst[0] - dst[3]);
            const int rightEnergy= 5*(dst[6] - dst[5]) + 2*(dst[4] - dst[7]);

            int d= FFABS(middleEnergy) - FFMIN( FFABS(leftEnergy), FFABS(rightEnergy) );
            d= FFMAX(d, 0);

            d= (5*d + 32) >> 6;
            d*= FFSIGN(-middleEnergy);

            if(q>0)
            {
                d = FFMAX(d, 0);
                d = FFMIN(d, q);
            }
            else
            {
                d = FFMIN(d, 0);
                d = FFMAX(d, q);
            }

            dst[3]-= d;
            dst[4]+= d;
        }
        dst+= stride;
    }
}
Example #4
static int32_t parse_gain(const char *gain)
{
    char *fraction;
    int  scale = 10000;
    int32_t mb = 0;
    int db;

    if (!gain)
        return INT32_MIN;

    gain += strspn(gain, " \t");

    db = strtol(gain, &fraction, 0);
    if (*fraction++ == '.') {
        while (av_isdigit(*fraction) && scale) {
            mb += scale * (*fraction - '0');
            scale /= 10;
            fraction++;
        }
    }

    if (abs(db) > (INT32_MAX - mb) / 100000)
        return INT32_MIN;

    return db * 100000 + FFSIGN(db) * mb;
}
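As a quick check of the fixed-point convention in Example #4: the return value is the gain in units of 1/100000 dB, with the fractional digits folded onto the same side of zero as the integer part. A few hypothetical checks, assuming the usual FFSIGN(a) of ((a) > 0 ? 1 : -1):

#include <assert.h>
#include <stdint.h>

static void parse_gain_examples(void)
{
    assert(parse_gain("+2 dB")   ==  200000);    /* 2.0 dB */
    assert(parse_gain("-7.5 dB") == -750000);    /* db = -7, mb = 50000, folded negative */
    assert(parse_gain(NULL)      == INT32_MIN);  /* sentinel for "no gain present" */
    /* Quirk: FFSIGN(0) evaluates to -1, so an input such as "0.25"
     * comes out as -25000 rather than 25000. */
}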
Example #5
File: vc1enc.c Project: Kjir/amsn
/**
 * Transform and quantize a block
 *
 * @param s Encoder context
 * @param block block to encode
 * @param n block index
 * @param qscale quantizer scale
 * @param overflow set to 0 (overflow detection is not implemented)
 *
 * @return last significant coeff in zz order
 */
int vc1_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow)
{
    VC1Context * const t= s->avctx->priv_data;
    const uint8_t *scantable;
    int q, i, j, level, last_non_zero, start_i;

    if( I_TYPE == s->pict_type ) {
        scantable = s->intra_scantable.scantable;
        last_non_zero = 0;
        start_i = 1;
    } else {
        scantable = s->inter_scantable.scantable;
        last_non_zero = -1;
        start_i = 0;
        if (s->mb_intra){
            for(i=0;i<64;i++)
                block[i] -= 128;
        }
    }

    s->dsp.vc1_fwd_trans_8x8(block);

    if (n < 4)
        q = s->y_dc_scale;
    else
        q = s->c_dc_scale;

    block[0] /= q;
    q = 2 * qscale + t->halfpq;

    for(i=63;i>=start_i;i--) {
        j = scantable[i];
        level =  (block[j] - t->pquantizer*(FFSIGN(block[j]) * qscale)) / q;
        if(level){
            last_non_zero = i;
            break;
        }
    }
    for(i=start_i; i<=last_non_zero; i++) {
        j = scantable[i];
        block[j] =  (block[j] - t->pquantizer*(FFSIGN(block[j]) * qscale)) / q ;
    }
    *overflow = 0;
    return last_non_zero;
}
Example #6
static int
set_station_by_step (struct pvr_t *pvr, int step, int v4lAction)
{
  if (!pvr || !pvr->stationlist.list)
    return -1;

  if (pvr->stationlist.enabled >= abs (step))
  {
    int gotcha = 0;
    int chidx = pvr->chan_idx + step;

    while (!gotcha)
    {
      chidx = (chidx + pvr->stationlist.used) % pvr->stationlist.used;

      mp_msg (MSGT_OPEN, MSGL_DBG2,
              "%s Offset switch: current %d, enabled %d, step %d -> %d\n",
              LOG_LEVEL_V4L2, pvr->chan_idx,
              pvr->stationlist.enabled, step, chidx);

      if (!pvr->stationlist.list[chidx].enabled)
      {
        mp_msg (MSGT_OPEN, MSGL_DBG2,
                "%s Switch disabled to user station channel: %8s - freq: %8d - station: %s\n", LOG_LEVEL_V4L2,
                pvr->stationlist.list[chidx].name,
                pvr->stationlist.list[chidx].freq,
                pvr->stationlist.list[chidx].station);
        chidx += FFSIGN (step);
      }
      else
        gotcha = 1;
    }

    pvr->freq = pvr->stationlist.list[chidx].freq;
    pvr->chan_idx_last = pvr->chan_idx;
    pvr->chan_idx = chidx;

    mp_msg (MSGT_OPEN, MSGL_INFO,
            "%s Switch to user station channel: %8s - freq: %8d - station: %s\n", LOG_LEVEL_V4L2,
            pvr->stationlist.list[chidx].name,
            pvr->stationlist.list[chidx].freq,
            pvr->stationlist.list[chidx].station);

    if (v4lAction)
      return set_v4l2_freq (pvr);

    return (pvr->freq > 0) ? 0 : -1;
  }

  mp_msg (MSGT_OPEN, MSGL_ERR,
          "%s Ooops couldn't set freq by channel entry step %d to current %d, enabled %d\n", LOG_LEVEL_V4L2,
          step, pvr->chan_idx, pvr->stationlist.enabled);

  return -1;
}
Example #7
/**
 * Experimental Filter 1 (Horizontal)
 * will not damage linear gradients
 * Flat blocks should look like they were passed through the (1,1,2,2,4,2,2,1,1) 9-Tap filter
 * can only smooth blocks at the expected locations (it cannot smooth them if they did move)
 * MMX2 version does correct clipping, the C version does not
 * not identical with the vertical one
 */
static inline void horizX1Filter(uint8_t *src, int stride, int QP)
{
    int y;
    static uint64_t lut[256];
    if(!lut[255])
    {
        int i;
        for(i=0; i<256; i++)
        {
            int v= i < 128 ? 2*i : 2*(i-256);
/*
//Simulate 112242211 9-Tap filter
            uint64_t a= (v/16)  & 0xFF;
            uint64_t b= (v/8)   & 0xFF;
            uint64_t c= (v/4)   & 0xFF;
            uint64_t d= (3*v/8) & 0xFF;
*/
//Simulate piecewise linear interpolation
            uint64_t a= (v/16)   & 0xFF;
            uint64_t b= (v*3/16) & 0xFF;
            uint64_t c= (v*5/16) & 0xFF;
            uint64_t d= (7*v/16) & 0xFF;
            uint64_t A= (0x100 - a)&0xFF;
            uint64_t B= (0x100 - b)&0xFF;
            uint64_t C= (0x100 - c)&0xFF;
            uint64_t D= (0x100 - c)&0xFF;

            lut[i]   = (a<<56) | (b<<48) | (c<<40) | (d<<32) |
                       (D<<24) | (C<<16) | (B<<8)  | (A);
            //lut[i] = (v<<32) | (v<<24);
        }
    }

    for(y=0; y<BLOCK_SIZE; y++){
        int a= src[1] - src[2];
        int b= src[3] - src[4];
        int c= src[5] - src[6];

        int d= FFMAX(FFABS(b) - (FFABS(a) + FFABS(c))/2, 0);

        if(d < QP){
            int v = d * FFSIGN(-b);

            src[1] +=v/8;
            src[2] +=v/4;
            src[3] +=3*v/8;
            src[4] -=3*v/8;
            src[5] -=v/4;
            src[6] -=v/8;
        }
        src+=stride;
    }
}
Example #8
File: vc1enc.c Project: Kjir/amsn
/**
 * Unquantize a block
 *
 * @param s Encoder context
 * @param block block to unquantize
 * @param n index of block
 * @param qscale quantizer scale
 */
void vc1_unquantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale)
{
    VC1Context * const t= s->avctx->priv_data;
    int i, level, nCoeffs, q;
    ScanTable scantable;

    if(s->pict_type == I_TYPE)
        scantable = s->intra_scantable;
    else {
        scantable = s->inter_scantable;
        if( P_TYPE == s->pict_type )
            for(i=0;i<64;i++)
                block[i] += 128;
    }

    nCoeffs= s->block_last_index[n];

    if (n < 4)
        block[0] *= s->y_dc_scale;
    else
        block[0] *= s->c_dc_scale;

    q = 2 * qscale + t->halfpq;

    for(i=1; i<= nCoeffs; i++) {
        int j= scantable.permutated[i];
        level = block[j];
        if (level) {
            level = level * q + t->pquantizer*(FFSIGN(block[j]) * qscale);
        }
        block[j]=level;
    }

    for(; i< 64; i++) {
        int j= scantable.permutated[i];
        block[j]=0;
    }

}
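Read next to Example #5, this is the matching inverse step. The per-coefficient round trip implied by the two loops, with q = 2 * qscale + t->halfpq:

/* Quantize (Example #5):   level = (coef - pquantizer * FFSIGN(coef)  * qscale) / q
 * Unquantize (Example #8):  coef = level * q + pquantizer * FFSIGN(level) * qscale
 * and every coefficient past block_last_index[n] is cleared to zero. */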
Example #9
/* TODO: merge with VLC tables or use LUT */
static inline int dequant_and_decompand(int level, int quantisation)
{
    int64_t abslevel = abs(level);
    return (abslevel + ((768 * abslevel * abslevel * abslevel) / (255 * 255 * 255))) * FFSIGN(level) * quantisation;
}
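A few worked numbers show the shape of the companding curve in dequant_and_decompand: small coefficients pass through almost linearly (the cubic term rounds to zero in integer arithmetic), while large ones are expanded strongly, and FFSIGN restores the sign at the end:

/* level =   1, quantisation = q  ->  (1   +   0) * q =    1 * q
 * level = 100, quantisation = q  ->  (100 +  46) * q =  146 * q
 * level = 255, quantisation = q  ->  (255 + 768) * q = 1023 * q
 * level = -255                   ->  -1023 * q (sign restored by FFSIGN) */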
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    AudioVectorScopeContext *s = ctx->priv;
    const int hw = s->hw;
    const int hh = s->hh;
    unsigned x, y;
    unsigned prev_x = s->prev_x, prev_y = s->prev_y;
    const double zoom = s->zoom;
    int i;

    if (!s->outpicref || s->outpicref->width  != outlink->w ||
                         s->outpicref->height != outlink->h) {
        av_frame_free(&s->outpicref);
        s->outpicref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!s->outpicref) {
            av_frame_free(&insamples);
            return AVERROR(ENOMEM);
        }

        for (i = 0; i < outlink->h; i++)
            memset(s->outpicref->data[0] + i * s->outpicref->linesize[0], 0, outlink->w * 4);
    }
    s->outpicref->pts = insamples->pts;

    fade(s);

    switch (insamples->format) {
    case AV_SAMPLE_FMT_S16:
        for (i = 0; i < insamples->nb_samples; i++) {
            int16_t *src = (int16_t *)insamples->data[0] + i * 2;

            if (s->mode == LISSAJOUS) {
                x = ((src[1] - src[0]) * zoom / (float)(UINT16_MAX) + 1) * hw;
                y = (1.0 - (src[0] + src[1]) * zoom / (float)UINT16_MAX) * hh;
            } else if (s->mode == LISSAJOUS_XY) {
                x = (src[1] * zoom / (float)INT16_MAX + 1) * hw;
                y = (src[0] * zoom / (float)INT16_MAX + 1) * hh;
            } else {
                float sx, sy, cx, cy;

                sx = src[1] * zoom / (float)INT16_MAX;
                sy = src[0] * zoom / (float)INT16_MAX;
                cx = sx * sqrtf(1 - 0.5*sy*sy);
                cy = sy * sqrtf(1 - 0.5*sx*sx);
                x = hw + hw * FFSIGN(cx + cy) * (cx - cy) * .7;
                y = s->h - s->h * fabsf(cx + cy) * .7;
            }

            if (s->draw == DOT) {
                draw_dot(s, x, y);
            } else {
                draw_line(s, x, y, prev_x, prev_y);
            }
            prev_x = x;
            prev_y = y;
        }
        break;
    case AV_SAMPLE_FMT_FLT:
        for (i = 0; i < insamples->nb_samples; i++) {
            float *src = (float *)insamples->data[0] + i * 2;

            if (s->mode == LISSAJOUS) {
                x = ((src[1] - src[0]) * zoom / 2 + 1) * hw;
                y = (1.0 - (src[0] + src[1]) * zoom / 2) * hh;
            } else if (s->mode == LISSAJOUS_XY){
                x = (src[1] * zoom + 1) * hw;
                y = (src[0] * zoom + 1) * hh;
            } else {
                float sx, sy, cx, cy;

                sx = src[1] * zoom;
                sy = src[0] * zoom;
                cx = sx * sqrtf(1 - 0.5 * sy * sy);
                cy = sy * sqrtf(1 - 0.5 * sx * sx);
                x = hw + hw * FFSIGN(cx + cy) * (cx - cy) * .7;
                y = s->h - s->h * fabsf(cx + cy) * .7;
            }

            if (s->draw == DOT) {
                draw_dot(s, x, y);
            } else {
                draw_line(s, x, y, prev_x, prev_y);
            }
            prev_x = x;
            prev_y = y;
        }
        break;
    }

    s->prev_x = x, s->prev_y = y;
    av_frame_free(&insamples);

    return ff_filter_frame(outlink, av_frame_clone(s->outpicref));
}