Code example #1
File: vf_dctdnoiz.c  Project: markjreed/vice-emu
static av_always_inline void color_correlation(uint8_t *dst, int dst_linesize,
                                               float **src, int src_linesize,
                                               int w, int h,
                                               int r, int g, int b)
{
    int x, y;
    const float *src_r = src[0];
    const float *src_g = src[1];
    const float *src_b = src[2];

    for (y = 0; y < h; y++) {
        uint8_t *dstp = dst;

        for (x = 0; x < w; x++) {
            dstp[r] = av_clip_uint8(src_r[x] * DCT3X3_0_0 + src_g[x] * DCT3X3_1_0 + src_b[x] * DCT3X3_2_0);
            /* the middle coefficient DCT3X3_1_1 is zero, so the src_g term drops out */
            dstp[g] = av_clip_uint8(src_r[x] * DCT3X3_0_1 +                         src_b[x] * DCT3X3_2_1);
            dstp[b] = av_clip_uint8(src_r[x] * DCT3X3_0_2 + src_g[x] * DCT3X3_1_2 + src_b[x] * DCT3X3_2_2);
            dstp += 3;
        }
        dst += dst_linesize;
        src_r += src_linesize;
        src_g += src_linesize;
        src_b += src_linesize;
    }
}
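Every example in this collection saturates its arithmetic through av_clip_uint8(), which clamps an int to the 0..255 range of a uint8_t. Below is a minimal self-contained sketch of that behaviour; the branchless form mirrors FFmpeg's av_clip_uint8_c() in libavutil/common.h, but treat the exact macro wiring as an assumption of this note:

#include <stdint.h>

static uint8_t clip_uint8_ref(int a)
{
    /* plain reference version: clamp to [0, 255] */
    return a < 0 ? 0 : a > 255 ? 255 : (uint8_t)a;
}

static uint8_t clip_uint8_fast(int a)
{
    /* branchless form, as in av_clip_uint8_c(): if any bit above the low
     * eight is set, a is out of range; (-a) >> 31 is then all-ones for
     * a > 255 and all-zeros for a < 0 (assumes arithmetic right shift
     * on a 32-bit int) */
    if (a & ~0xFF)
        return (-a) >> 31;
    return (uint8_t)a;
}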
Code example #2
File: vorbis.c  Project: Samangan/mpc-hc
static void render_line(int x0, int y0, int x1, int y1, float *buf)
{
    int dy  = y1 - y0;
    int adx = x1 - x0;
    int ady = FFABS(dy);
    int sy  = dy < 0 ? -1 : 1;
    buf[x0] = ff_vorbis_floor1_inverse_db_table[av_clip_uint8(y0)];
    if (ady*2 <= adx) { // optimized common case
        render_line_unrolled(x0, y0, x1, sy, ady, adx, buf);
    } else {
        int base  = dy / adx;
        int x     = x0;
        int y     = y0;
        int err   = -adx;
        ady -= FFABS(base) * adx;
        while (++x < x1) {
            y += base;
            err += ady;
            if (err >= 0) {
                err -= adx;
                y   += sy;
            }
            buf[x] = ff_vorbis_floor1_inverse_db_table[av_clip_uint8(y)];
        }
    }
}
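render_line() advances with a Bresenham-style accumulator: base is the integer part of the slope dy/adx, ady is reduced to the remainder, and every x step adds ady to err, taking one extra sy step in y whenever err crosses zero. A standalone sketch of just that stepping logic (trace_line and the printf are illustrative; the real code writes table lookups into buf, and x1 > x0 is assumed as the caller guarantees):

#include <stdio.h>
#include <stdlib.h>

static void trace_line(int x0, int y0, int x1, int y1)
{
    int dy   = y1 - y0;
    int adx  = x1 - x0;          /* > 0 by assumption */
    int ady  = abs(dy);
    int sy   = dy < 0 ? -1 : 1;
    int base = dy / adx;         /* whole part of the slope */
    int x = x0, y = y0, err = -adx;

    ady -= abs(base) * adx;      /* remainder of |dy| / adx */
    printf("(%d,%d)\n", x, y);
    while (++x < x1) {
        y   += base;
        err += ady;
        if (err >= 0) {          /* fractional part wrapped past 1.0 */
            err -= adx;
            y   += sy;
        }
        printf("(%d,%d)\n", x, y);
    }
}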
Code example #3
/** Function used to do motion compensation with bicubic interpolation
 */
static void vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride, int hmode, int vmode, int rnd)
{
    int i, j;
    uint8_t tmp[8*11], *tptr;
    int r;

    r = rnd;
    src -= stride;
    tptr = tmp;
    /* first pass: horizontal filtering (step 1) into a temporary 8x11 buffer */
    for(j = 0; j < 11; j++) {
        for(i = 0; i < 8; i++)
            tptr[i] = av_clip_uint8(vc1_mspel_filter(src + i, 1, hmode, r));
        src += stride;
        tptr += 8;
    }
    r = 1 - rnd;

    tptr = tmp + 8;
    /* second pass: vertical filtering (step 8 = one temp row) of the 8x8 block */
    for(j = 0; j < 8; j++) {
        for(i = 0; i < 8; i++)
            dst[i] = av_clip_uint8(vc1_mspel_filter(tptr + i, 8, vmode, r));
        dst += stride;
        tptr += 8;
    }
}
Code example #4
File: vc1dsp.c  Project: hajuuk/R7000
/**
 * VC-1 in-loop deblocking filter for one line
 * @param src source block type
 * @param stride block stride
 * @param pq block quantizer
 * @return whether the other 3 pairs should be filtered or not
 * @see 8.6
 */
static av_always_inline int vc1_filter_line(uint8_t* src, int stride, int pq) {
    int a0 = (2*(src[-2*stride] - src[ 1*stride]) - 5*(src[-1*stride] - src[ 0*stride]) + 4) >> 3;
    int a0_sign = a0 >> 31;        /* Store sign */
    a0 = (a0 ^ a0_sign) - a0_sign; /* a0 = FFABS(a0); */
    if(a0 < pq) {
        int a1 = FFABS((2*(src[-4*stride] - src[-1*stride]) - 5*(src[-3*stride] - src[-2*stride]) + 4) >> 3);
        int a2 = FFABS((2*(src[ 0*stride] - src[ 3*stride]) - 5*(src[ 1*stride] - src[ 2*stride]) + 4) >> 3);
        if(a1 < a0 || a2 < a0) {
            int clip = src[-1*stride] - src[ 0*stride];
            int clip_sign = clip >> 31;
            clip = ((clip ^ clip_sign) - clip_sign)>>1;
            if(clip) {
                int a3 = FFMIN(a1, a2);
                int d = 5 * (a3 - a0);
                int d_sign = (d >> 31);
                d = ((d ^ d_sign) - d_sign) >> 3;
                d_sign ^= a0_sign;

                if( d_sign ^ clip_sign )
                    d = 0;
                else {
                    d = FFMIN(d, clip);
                    d = (d ^ d_sign) - d_sign;          /* Restore sign */
                    src[-1*stride] = av_clip_uint8(src[-1*stride] - d);
                    src[ 0*stride] = av_clip_uint8(src[ 0*stride] + d);
                }
                return 1;
            }
        }
    }
    return 0;
}
Code example #5
int av_audio_convert(AVAudioConvert *ctx,
                           void * const out[6], const int out_stride[6],
                     const void * const  in[6], const int  in_stride[6], int len)
{
    int ch;

    //FIXME optimize common cases

    for(ch=0; ch<ctx->out_channels; ch++){
        const int is=  in_stride[ch];
        const int os= out_stride[ch];
        const uint8_t *pi=  in[ch];
        uint8_t *po= out[ch];
        uint8_t *end= po + os*len;
        if(!out[ch])
            continue;

#define CONV(ofmt, otype, ifmt, expr)\
if(ctx->fmt_pair == ofmt + AV_SAMPLE_FMT_NB*ifmt){\
    do{\
        *(otype*)po = expr; pi += is; po += os;\
    }while(po < end);\
}

//FIXME put things below under ifdefs so we do not waste space for cases no codec will need
//FIXME rounding ?

             CONV(AV_SAMPLE_FMT_U8 , uint8_t, AV_SAMPLE_FMT_U8 ,  *(const uint8_t*)pi)
        else CONV(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_U8 , (*(const uint8_t*)pi - 0x80)<<8)
        else CONV(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_U8 , (*(const uint8_t*)pi - 0x80)<<24)
        else CONV(AV_SAMPLE_FMT_FLT, float  , AV_SAMPLE_FMT_U8 , (*(const uint8_t*)pi - 0x80)*(1.0 / (1<<7)))
        else CONV(AV_SAMPLE_FMT_DBL, double , AV_SAMPLE_FMT_U8 , (*(const uint8_t*)pi - 0x80)*(1.0 / (1<<7)))
        else CONV(AV_SAMPLE_FMT_U8 , uint8_t, AV_SAMPLE_FMT_S16, (*(const int16_t*)pi>>8) + 0x80)
        else CONV(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_S16,  *(const int16_t*)pi)
        else CONV(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_S16,  *(const int16_t*)pi<<16)
        else CONV(AV_SAMPLE_FMT_FLT, float  , AV_SAMPLE_FMT_S16,  *(const int16_t*)pi*(1.0 / (1<<15)))
        else CONV(AV_SAMPLE_FMT_DBL, double , AV_SAMPLE_FMT_S16,  *(const int16_t*)pi*(1.0 / (1<<15)))
        else CONV(AV_SAMPLE_FMT_U8 , uint8_t, AV_SAMPLE_FMT_S32, (*(const int32_t*)pi>>24) + 0x80)
        else CONV(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_S32,  *(const int32_t*)pi>>16)
        else CONV(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_S32,  *(const int32_t*)pi)
        else CONV(AV_SAMPLE_FMT_FLT, float  , AV_SAMPLE_FMT_S32,  *(const int32_t*)pi*(1.0 / (1U<<31)))
        else CONV(AV_SAMPLE_FMT_DBL, double , AV_SAMPLE_FMT_S32,  *(const int32_t*)pi*(1.0 / (1U<<31)))
        else CONV(AV_SAMPLE_FMT_U8 , uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(  lrintf(*(const float*)pi * (1<<7)) + 0x80))
        else CONV(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(  lrintf(*(const float*)pi * (1<<15))))
        else CONV(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float*)pi * (1U<<31))))
        else CONV(AV_SAMPLE_FMT_FLT, float  , AV_SAMPLE_FMT_FLT, *(const float*)pi)
        else CONV(AV_SAMPLE_FMT_DBL, double , AV_SAMPLE_FMT_FLT, *(const float*)pi)
        else CONV(AV_SAMPLE_FMT_U8 , uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(  lrint(*(const double*)pi * (1<<7)) + 0x80))
        else CONV(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(  lrint(*(const double*)pi * (1<<15))))
        else CONV(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double*)pi * (1U<<31))))
        else CONV(AV_SAMPLE_FMT_FLT, float  , AV_SAMPLE_FMT_DBL, *(const double*)pi)
        else CONV(AV_SAMPLE_FMT_DBL, double , AV_SAMPLE_FMT_DBL, *(const double*)pi)
        else return -1;
    }
    return 0;
}
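The dispatch above hinges on packing the (output, input) format pair into a single integer, ctx->fmt_pair == ofmt + AV_SAMPLE_FMT_NB*ifmt, so each CONV() line tests exactly one combination. A hedged sketch of that index arithmetic, using stand-in enum values rather than libavutil's AVSampleFormat:

#include <stdio.h>

enum SampleFmt { FMT_U8, FMT_S16, FMT_S32, FMT_FLT, FMT_DBL, FMT_NB };

/* pack an (out, in) pair the way av_audio_convert keys its dispatch */
static int make_fmt_pair(enum SampleFmt out, enum SampleFmt in)
{
    return out + FMT_NB * in;
}

int main(void)
{
    int pair = make_fmt_pair(FMT_S16, FMT_FLT);   /* float -> s16 */
    printf("pair=%d out=%d in=%d\n", pair, pair % FMT_NB, pair / FMT_NB);
    return 0;
}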
Code example #6
File: vf_vibrance.c  Project: FFmpeg/FFmpeg
static int vibrance_slice8(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
{
    VibranceContext *s = avctx->priv;
    AVFrame *frame = arg;
    const int width = frame->width;
    const int height = frame->height;
    const float scale = 1.f / 255.f;
    const float gc = s->lcoeffs[0];
    const float bc = s->lcoeffs[1];
    const float rc = s->lcoeffs[2];
    const float intensity = s->intensity;
    const float alternate = s->alternate ? 1.f : -1.f;
    const float gintensity = intensity * s->balance[0];
    const float bintensity = intensity * s->balance[1];
    const float rintensity = intensity * s->balance[2];
    const float sgintensity = alternate * FFSIGN(gintensity);
    const float sbintensity = alternate * FFSIGN(bintensity);
    const float srintensity = alternate * FFSIGN(rintensity);
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr + 1)) / nb_jobs;
    const int glinesize = frame->linesize[0];
    const int blinesize = frame->linesize[1];
    const int rlinesize = frame->linesize[2];
    uint8_t *gptr = frame->data[0] + slice_start * glinesize;
    uint8_t *bptr = frame->data[1] + slice_start * blinesize;
    uint8_t *rptr = frame->data[2] + slice_start * rlinesize;

    for (int y = slice_start; y < slice_end; y++) {
        for (int x = 0; x < width; x++) {
            float g = gptr[x] * scale;
            float b = bptr[x] * scale;
            float r = rptr[x] * scale;
            float max_color = FFMAX3(r, g, b);
            float min_color = FFMIN3(r, g, b);
            float color_saturation = max_color - min_color;
            float luma = g * gc + r * rc + b * bc;
            const float cg = 1.f + gintensity * (1.f - sgintensity * color_saturation);
            const float cb = 1.f + bintensity * (1.f - sbintensity * color_saturation);
            const float cr = 1.f + rintensity * (1.f - srintensity * color_saturation);

            g = lerpf(luma, g, cg);
            b = lerpf(luma, b, cb);
            r = lerpf(luma, r, cr);

            gptr[x] = av_clip_uint8(g * 255.f);
            bptr[x] = av_clip_uint8(b * 255.f);
            rptr[x] = av_clip_uint8(r * 255.f);
        }

        gptr += glinesize;
        bptr += blinesize;
        rptr += rlinesize;
    }

    return 0;
}
Code example #7
File: vp56.c  Project: Fluffiest/splayer
static void vp56_edge_filter(VP56Context *s, uint8_t *yuv,
                             int pix_inc, int line_inc, int t)
{
    int pix2_inc = 2 * pix_inc;
    int i, v;

    for (i=0; i<12; i++) {
        v = (yuv[-pix2_inc] + 3*(yuv[0]-yuv[-pix_inc]) - yuv[pix_inc] + 4) >>3;
        v = s->adjust(v, t);
        yuv[-pix_inc] = av_clip_uint8(yuv[-pix_inc] + v);
        yuv[0] = av_clip_uint8(yuv[0] - v);
        yuv += line_inc;
    }
}
Code example #8
File: diracdsp.c  Project: mingfeisun/FFmpeg
static void put_signed_rect_clamped_8bit_c(uint8_t *dst, int dst_stride, const uint8_t *_src, int src_stride, int width, int height)
{
    int x, y;
    const int16_t *src = (const int16_t *)_src;
    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x+=4) {
            dst[x  ] = av_clip_uint8(src[x  ] + 128);
            dst[x+1] = av_clip_uint8(src[x+1] + 128);
            dst[x+2] = av_clip_uint8(src[x+2] + 128);
            dst[x+3] = av_clip_uint8(src[x+3] + 128);
        }
        dst += dst_stride;
        src += src_stride >> 1;   /* src_stride is in bytes, src is int16_t */
    }
}
Code example #9
File: mss34dsp.c  Project: Vadiza/sage-3.5b
void ff_mss34_dct_put(uint8_t *dst, int stride, int *block)
{
  int i, j;
  int *ptr;

  ptr = block;
  for (i = 0; i < 8; i++) {
    DCT_TEMPLATE(ptr, 1, SOP_ROW, 13);
    ptr += 8;
  }

  ptr = block;
  for (i = 0; i < 8; i++) {
    DCT_TEMPLATE(ptr, 8, SOP_COL, 22);
    ptr++;
  }

  ptr = block;
  for (j = 0; j < 8; j++) {
    for (i = 0; i < 8; i++)
      dst[i] = av_clip_uint8(ptr[i] + 128);
    dst += stride;
    ptr += 8;
  }
}
Code example #10
File: vf_midequalizer.c  Project: DeHackEd/FFmpeg
static void midequalizer8(const uint8_t *in0, const uint8_t *in1,
                          uint8_t *dst,
                          ptrdiff_t linesize1, ptrdiff_t linesize2,
                          ptrdiff_t dlinesize,
                          int w0, int h0,
                          int w1, int h1,
                          float *histogram1, float *histogram2,
                          unsigned *cchange,
                          size_t hsize)
{
    int x, y;

    compute_histogram8(in0, linesize1, w0, h0, histogram1, hsize);
    compute_histogram8(in1, linesize2, w1, h1, histogram2, hsize);

    compute_contrast_change(histogram1, histogram2, cchange, hsize);

    for (y = 0; y < h0; y++) {
        for (x = 0; x < w0; x++) {
            dst[x] = av_clip_uint8(cchange[in0[x]]);
        }
        dst += dlinesize;
        in0 += linesize1;
    }
}
Code example #11
static void idct(uint8_t *dst, int dst_linesize, int src[64])
{
    int i, j, k;
    double tmp[64];

    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            double sum = 0.0;

            for (k = 0; k < 8; k++)
                sum += c[k*8+j] * src[8*i+k];

            tmp[8*i+j] = sum;
        }
    }

    for (j = 0; j < 8; j++) {
        for (i = 0; i < 8; i++) {
            double sum = 0.0;

            for (k = 0; k < 8; k++)
                sum += c[k*8+i]*tmp[8*k+j];

            dst[dst_linesize*i + j] = av_clip_uint8((int)floor(sum+0.5));
        }
    }
}
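The basis table c is defined outside this excerpt. For the two passes above to form the usual orthonormal 8x8 inverse DCT, c[k*8+j] would be s(k)*cos((2j+1)*k*pi/16) with s(0) = sqrt(1/8) and s(k) = 1/2 otherwise; the following initialization sketch is an assumption about that table, not code from the original file:

#include <math.h>

static double c[64];

static void init_idct_basis(void)
{
    for (int k = 0; k < 8; k++) {
        const double s = k == 0 ? sqrt(0.125) : 0.5;
        for (int j = 0; j < 8; j++)
            c[k * 8 + j] = s * cos((2 * j + 1) * k * M_PI / 16.0);
    }
}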
Code example #12
File: eamad.c  Project: 309746069/FFmpeg
static inline void comp(unsigned char *dst, int dst_stride,
                        unsigned char *src, int src_stride, int add)
{
    int j, i;
    for (j=0; j<8; j++)
        for (i=0; i<8; i++)
            dst[j*dst_stride + i] = av_clip_uint8(src[j*src_stride + i] + add);
}
Code example #13
static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int16_t bS[4], int bsi, int qp ) {
    int i;
    const int bit_depth = h->sps.bit_depth_luma;
    const int qp_bd_offset = 6*(bit_depth-8);
    int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset;
    int alpha = alpha_table[index_a] << (bit_depth-8);
    int beta  = beta_table[qp - qp_bd_offset + h->slice_beta_offset] << (bit_depth-8);
    for( i = 0; i < 8; i++, pix += stride) {
        const int bS_index = (i >> 1) * bsi;

        if( bS[bS_index] == 0 ) {
            continue;
        }

        if( bS[bS_index] < 4 ) {
            const int tc0 = tc0_table[index_a][bS[bS_index]] << (bit_depth-8);
            const int p0 = pix[-1];
            const int p1 = pix[-2];
            const int p2 = pix[-3];
            const int q0 = pix[0];
            const int q1 = pix[1];
            const int q2 = pix[2];

            if( FFABS( p0 - q0 ) < alpha &&
                FFABS( p1 - p0 ) < beta &&
                FFABS( q1 - q0 ) < beta ) {
                int tc = tc0;
                int i_delta;

                if( FFABS( p2 - p0 ) < beta ) {
                    if(tc0)
                    pix[-2] = p1 + av_clip( ( p2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( p1 << 1 ) ) >> 1, -tc0, tc0 );
                    tc++;
                }
                if( FFABS( q2 - q0 ) < beta ) {
                    if(tc0)
                    pix[1] = q1 + av_clip( ( q2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( q1 << 1 ) ) >> 1, -tc0, tc0 );
                    tc++;
                }

                i_delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
                pix[-1] = av_clip_uint8( p0 + i_delta );    /* p0' */
                pix[0]  = av_clip_uint8( q0 - i_delta );    /* q0' */
                tprintf(h->s.avctx, "filter_mb_mbaff_edgev i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d, tc:%d\n# bS:%d -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, qp, index_a, alpha, beta, tc, bS[bS_index], pix[-3], p1, p0, q0, q1, pix[2], p1, pix[-1], pix[0], q1);
            }
        } else {
            /* bS[bS_index] == 4: strong-filter path (truncated in this excerpt) */
        }
    }
}
Code example #14
File: ivi_dsp.c  Project: 0day-ci/FFmpeg
void ff_ivi_recompose_haar(const IVIPlaneDesc *plane, uint8_t *dst,
                           const int dst_pitch)
{
    int             x, y, indx, b0, b1, b2, b3, p0, p1, p2, p3;
    const short     *b0_ptr, *b1_ptr, *b2_ptr, *b3_ptr;
    int32_t         pitch;

    /* all bands should have the same pitch */
    pitch = plane->bands[0].pitch;

    /* get pointers to the wavelet bands */
    b0_ptr = plane->bands[0].buf;
    b1_ptr = plane->bands[1].buf;
    b2_ptr = plane->bands[2].buf;
    b3_ptr = plane->bands[3].buf;

    for (y = 0; y < plane->height; y += 2) {
        for (x = 0, indx = 0; x < plane->width; x += 2, indx++) {
            /* load coefficients */
            b0 = b0_ptr[indx]; //should be: b0 = (num_bands > 0) ? b0_ptr[indx] : 0;
            b1 = b1_ptr[indx]; //should be: b1 = (num_bands > 1) ? b1_ptr[indx] : 0;
            b2 = b2_ptr[indx]; //should be: b2 = (num_bands > 2) ? b2_ptr[indx] : 0;
            b3 = b3_ptr[indx]; //should be: b3 = (num_bands > 3) ? b3_ptr[indx] : 0;

            /* haar wavelet recomposition */
            p0 = (b0 + b1 + b2 + b3 + 2) >> 2;
            p1 = (b0 + b1 - b2 - b3 + 2) >> 2;
            p2 = (b0 - b1 + b2 - b3 + 2) >> 2;
            p3 = (b0 - b1 - b2 + b3 + 2) >> 2;

            /* bias, convert and output four pixels */
            dst[x]                 = av_clip_uint8(p0 + 128);
            dst[x + 1]             = av_clip_uint8(p1 + 128);
            dst[dst_pitch + x]     = av_clip_uint8(p2 + 128);
            dst[dst_pitch + x + 1] = av_clip_uint8(p3 + 128);
        }// for x

        dst += dst_pitch << 1;

        b0_ptr += pitch;
        b1_ptr += pitch;
        b2_ptr += pitch;
        b3_ptr += pitch;
    }// for y
}
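The butterfly above is self-inverse up to scaling: applying the same +/- pattern to the four reconstructed pixels returns the band coefficients, exactly when the "+ 2" rounding did not truncate and approximately otherwise. A small forward-direction sketch (haar_forward_2x2 is a hypothetical helper, not part of ivi_dsp.c):

static void haar_forward_2x2(int p0, int p1, int p2, int p3,
                             int *b0, int *b1, int *b2, int *b3)
{
    /* inverse of the p0..p3 butterfly in ff_ivi_recompose_haar */
    *b0 = p0 + p1 + p2 + p3;
    *b1 = p0 + p1 - p2 - p3;
    *b2 = p0 - p1 + p2 - p3;
    *b3 = p0 - p1 - p2 + p3;
}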
Code example #15
File: swscale_vsx.c  Project: lihp1603/ffmpeg
static void yuv2plane1_8_u(const int16_t *src, uint8_t *dest, int dstW,
                           const uint8_t *dither, int offset, int start)
{
    int i;
    for (i = start; i < dstW; i++) {
        int val = (src[i] + dither[(i + offset) & 7]) >> 7;
        dest[i] = av_clip_uint8(val);
    }
}
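Here src holds swscale's intermediate samples with 7 fractional bits, and the 8-entry dither table stands in for a fixed rounding constant so that truncation error is spread across neighbouring pixels rather than biased one way. A standalone sketch of the same dithered downshift (the table values are illustrative, not swscale's):

#include <stdint.h>
#include <stdio.h>

static uint8_t downshift_dithered(int16_t v, const uint8_t dither[8], int i)
{
    int val = (v + dither[i & 7]) >> 7;           /* drop 7 fractional bits */
    return val < 0 ? 0 : val > 255 ? 255 : val;   /* av_clip_uint8 equivalent */
}

int main(void)
{
    static const uint8_t d[8] = { 0, 64, 32, 96, 16, 80, 48, 112 };  /* made-up pattern */
    for (int i = 0; i < 4; i++)
        printf("%u\n", downshift_dithered(12345, d, i));
    return 0;
}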
Code example #16
File: vf_convolution.c  Project: 0day-ci/FFmpeg
static void filter_5x5(ConvolutionContext *s, AVFrame *in, AVFrame *out, int plane)
{
    const uint8_t *src = in->data[plane];
    uint8_t *dst = out->data[plane];
    const int stride = in->linesize[plane];
    const int bstride = s->bstride;
    const int height = s->planeheight[plane];
    const int width  = s->planewidth[plane];
    uint8_t *p0 = s->buffer + 16;
    uint8_t *p1 = p0 + bstride;
    uint8_t *p2 = p1 + bstride;
    uint8_t *p3 = p2 + bstride;
    uint8_t *p4 = p3 + bstride;
    uint8_t *orig = p0, *end = p4;
    const int *matrix = s->matrix[plane];
    float rdiv = s->rdiv[plane];
    float bias = s->bias[plane];
    int y, x, i;

    line_copy8(p0, src + 2 * stride, width, 2);
    line_copy8(p1, src + stride, width, 2);
    line_copy8(p2, src, width, 2);
    src += stride;
    line_copy8(p3, src, width, 2);


    for (y = 0; y < height; y++) {
        uint8_t *array[] = {
            p0 - 2, p0 - 1, p0, p0 + 1, p0 + 2,
            p1 - 2, p1 - 1, p1, p1 + 1, p1 + 2,
            p2 - 2, p2 - 1, p2, p2 + 1, p2 + 2,
            p3 - 2, p3 - 1, p3, p3 + 1, p3 + 2,
            p4 - 2, p4 - 1, p4, p4 + 1, p4 + 2
        };

        src += stride * (y < height - 2 ? 1 : -1);
        line_copy8(p4, src, width, 2);

        for (x = 0; x < width; x++) {
            int sum = 0;

            for (i = 0; i < 25; i++) {
                sum += *(array[i] + x) * matrix[i];
            }
            sum = (int)(sum * rdiv + bias + 0.5f);
            dst[x] = av_clip_uint8(sum);
        }

        p0 = p1;
        p1 = p2;
        p2 = p3;
        p3 = p4;
        p4 = (p4 == end) ? orig: p4 + bstride;
        dst += out->linesize[plane];
    }
}
Code example #17
File: avwrapper.c  Project: TriptychCrypto/hw
AVWRAP_DECL int AVWrapper_WriteFrame(uint8_t *buf)
{
    int x, y, stride = g_Width * 4;
    uint8_t *data[3];

    // copy pointers, prepare source
    memcpy(data, g_pVFrame->data, sizeof(data));
    buf += (g_Height - 1) * stride;

    // convert to YUV 4:2:0
    for (y = 0; y < g_Height; y++) {
        for (x = 0; x < g_Width; x++) {
            int r = buf[x * 4 + 0];
            int g = buf[x * 4 + 1];
            int b = buf[x * 4 + 2];

            int luma = (int)(0.299f * r +  0.587f * g + 0.114f * b);
            data[0][x] = av_clip_uint8(luma);

            if (!(x & 1) && !(y & 1)) {
                int r = (buf[x * 4 + 0]          + buf[(x + 1) * 4 + 0] +
                         buf[x * 4 + 0 + stride] + buf[(x + 1) * 4 + 0 + stride]) / 4;
                int g = (buf[x * 4 + 1]          + buf[(x + 1) * 4 + 1] +
                         buf[x * 4 + 1 + stride] + buf[(x + 1) * 4 + 1 + stride]) / 4;
                int b = (buf[x * 4 + 2]          + buf[(x + 1) * 4 + 2] +
                         buf[x * 4 + 2 + stride] + buf[(x + 1) * 4 + 2 + stride]) / 4;

                /* note: the names are swapped relative to convention --
                 * "cr" holds the BT.601 U (Cb) value and "cb" the V (Cr)
                 * value -- but the stores land in the correct U (data[1])
                 * and V (data[2]) planes */
                int cr = (int)(-0.14713f * r - 0.28886f * g + 0.436f   * b);
                int cb = (int)( 0.615f   * r - 0.51499f * g - 0.10001f * b);
                data[1][x / 2] = av_clip_uint8(128 + cr);
                data[2][x / 2] = av_clip_uint8(128 + cb);
            }
        }
        buf += -stride;
        data[0] += g_pVFrame->linesize[0];
        if (y & 1) {
            data[1] += g_pVFrame->linesize[1];
            data[2] += g_pVFrame->linesize[2];
        }
    }

    return WriteFrame(g_pVFrame);
}
Code example #18
File: vf_convolution.c  Project: LongChair/FFmpeg
static int filter_sobel(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolutionContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int plane = td->plane;
    const int stride = in->linesize[plane];
    const int bstride = s->bstride;
    const int height = s->planeheight[plane];
    const int width  = s->planewidth[plane];
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr+1)) / nb_jobs;
    const uint8_t *src = in->data[plane] + slice_start * stride;
    uint8_t *dst = out->data[plane] + slice_start * out->linesize[plane];
    const float scale = s->scale;
    const float delta = s->delta;
    uint8_t *p0 = s->bptrs[jobnr] + 16;
    uint8_t *p1 = p0 + bstride;
    uint8_t *p2 = p1 + bstride;
    uint8_t *orig = p0, *end = p2;
    int y, x;

    line_copy8(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);
    line_copy8(p1, src, width, 1);

    for (y = slice_start; y < slice_end; y++) {
        src += stride * (y < height - 1 ? 1 : -1);
        line_copy8(p2, src, width, 1);

        for (x = 0; x < width; x++) {
            int suma = p0[x - 1] * -1 +
                       p0[x] *     -2 +
                       p0[x + 1] * -1 +
                       p2[x - 1] *  1 +
                       p2[x] *      2 +
                       p2[x + 1] *  1;
            int sumb = p0[x - 1] * -1 +
                       p0[x + 1] *  1 +
                       p1[x - 1] * -2 +
                       p1[x + 1] *  2 +
                       p2[x - 1] * -1 +
                       p2[x + 1] *  1;

            dst[x] = av_clip_uint8(sqrt(suma*suma + sumb*sumb) * scale + delta);
        }

        p0 = p1;
        p1 = p2;
        p2 = (p2 == end) ? orig: p2 + bstride;
        dst += out->linesize[plane];
    }

    return 0;
}
Code example #19
File: diracdsp.c  Project: mingfeisun/FFmpeg
static void dirac_hpel_filter(uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, const uint8_t *src,
                              int stride, int width, int height)
{
    int x, y;

    for (y = 0; y < height; y++) {
        for (x = -3; x < width+5; x++)
            dstv[x] = av_clip_uint8(FILTER(src+x, stride));

        for (x = 0; x < width; x++)
            dstc[x] = av_clip_uint8(FILTER(dstv+x, 1));

        for (x = 0; x < width; x++)
            dsth[x] = av_clip_uint8(FILTER(src+x, 1));

        src  += stride;
        dsth += stride;
        dstv += stride;
        dstc += stride;
    }
}
Code example #20
File: vf_convolution.c  Project: LongChair/FFmpeg
static int filter_3x3(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolutionContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int plane = td->plane;
    const int stride = in->linesize[plane];
    const int bstride = s->bstride;
    const int height = s->planeheight[plane];
    const int width  = s->planewidth[plane];
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr+1)) / nb_jobs;
    const uint8_t *src = in->data[plane] + slice_start * stride;
    uint8_t *dst = out->data[plane] + slice_start * out->linesize[plane];
    uint8_t *p0 = s->bptrs[jobnr] + 16;
    uint8_t *p1 = p0 + bstride;
    uint8_t *p2 = p1 + bstride;
    uint8_t *orig = p0, *end = p2;
    const int *matrix = s->matrix[plane];
    const float rdiv = s->rdiv[plane];
    const float bias = s->bias[plane];
    int y, x;

    line_copy8(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);
    line_copy8(p1, src, width, 1);

    for (y = slice_start; y < slice_end; y++) {
        src += stride * (y < height - 1 ? 1 : -1);
        line_copy8(p2, src, width, 1);

        for (x = 0; x < width; x++) {
            int sum = p0[x - 1] * matrix[0] +
                      p0[x] *     matrix[1] +
                      p0[x + 1] * matrix[2] +
                      p1[x - 1] * matrix[3] +
                      p1[x] *     matrix[4] +
                      p1[x + 1] * matrix[5] +
                      p2[x - 1] * matrix[6] +
                      p2[x] *     matrix[7] +
                      p2[x + 1] * matrix[8];
            sum = (int)(sum * rdiv + bias + 0.5f);
            dst[x] = av_clip_uint8(sum);
        }

        p0 = p1;
        p1 = p2;
        p2 = (p2 == end) ? orig: p2 + bstride;
        dst += out->linesize[plane];
    }

    return 0;
}
Code example #21
File: vc1dsp.c  Project: hajuuk/R7000
/** Apply overlap transform to vertical edge
*/
static void vc1_h_overlap_c(uint8_t* src, int stride)
{
    int i;
    int a, b, c, d;
    int d1, d2;
    int rnd = 1;
    for(i = 0; i < 8; i++) {
        a = src[-2];
        b = src[-1];
        c = src[0];
        d = src[1];
        d1 = (a - d + 3 + rnd) >> 3;
        d2 = (a - d + b - c + 4 - rnd) >> 3;

        src[-2] = a - d1;
        src[-1] = av_clip_uint8(b - d2);
        src[0] = av_clip_uint8(c + d2);
        src[1] = d + d1;
        src += stride;
        rnd = !rnd;
    }
}
Code example #22
File: vorbis.c  Project: Samangan/mpc-hc
static inline void render_line_unrolled(intptr_t x, int y, int x1,
                                        intptr_t sy, int ady, int adx,
                                        float *buf)
{
    int err = -adx;
    x -= x1 - 1;
    buf += x1 - 1;
    while (++x < 0) {
        err += ady;
        if (err >= 0) {
            err += ady - adx;
            y   += sy;
            buf[x++] = ff_vorbis_floor1_inverse_db_table[av_clip_uint8(y)];
        }
        buf[x] = ff_vorbis_floor1_inverse_db_table[av_clip_uint8(y)];
    }
    if (x <= 0) {
        if (err + ady >= 0)
            y += sy;
        buf[x] = ff_vorbis_floor1_inverse_db_table[av_clip_uint8(y)];
    }
}
Code example #23
File: swresample-test.c  Project: 309746069/FFmpeg
static void  set(uint8_t *a[], int ch, int index, int ch_count, enum AVSampleFormat f, double v){
    uint8_t *p;
    if(av_sample_fmt_is_planar(f)){
        f= av_get_alt_sample_fmt(f, 0);
        p= a[ch];
    }else{
        p= a[0];
        index= ch + index*ch_count;
    }
    switch(f){
    case AV_SAMPLE_FMT_U8 : ((uint8_t*)p)[index]= av_clip_uint8 (lrint((v+1.0)*127));     break;
    case AV_SAMPLE_FMT_S16: ((int16_t*)p)[index]= av_clip_int16 (lrint(v*32767));         break;
    case AV_SAMPLE_FMT_S32: ((int32_t*)p)[index]= av_clipl_int32(llrint(v*2147483647));   break;
    case AV_SAMPLE_FMT_FLT: ((float  *)p)[index]= v;                                      break;
    case AV_SAMPLE_FMT_DBL: ((double *)p)[index]= v;                                      break;
    default: av_assert2(0);
    }
}
Code example #24
File: vf_convolution.c  Project: 0day-ci/FFmpeg
static void filter_3x3(ConvolutionContext *s, AVFrame *in, AVFrame *out, int plane)
{
    const uint8_t *src = in->data[plane];
    uint8_t *dst = out->data[plane];
    const int stride = in->linesize[plane];
    const int bstride = s->bstride;
    const int height = s->planeheight[plane];
    const int width  = s->planewidth[plane];
    uint8_t *p0 = s->buffer + 16;
    uint8_t *p1 = p0 + bstride;
    uint8_t *p2 = p1 + bstride;
    uint8_t *orig = p0, *end = p2;
    const int *matrix = s->matrix[plane];
    const float rdiv = s->rdiv[plane];
    const float bias = s->bias[plane];
    int y, x;

    line_copy8(p0, src + stride, width, 1);
    line_copy8(p1, src, width, 1);

    for (y = 0; y < height; y++) {
        src += stride * (y < height - 1 ? 1 : -1);
        line_copy8(p2, src, width, 1);

        for (x = 0; x < width; x++) {
            int sum = p0[x - 1] * matrix[0] +
                      p0[x] *     matrix[1] +
                      p0[x + 1] * matrix[2] +
                      p1[x - 1] * matrix[3] +
                      p1[x] *     matrix[4] +
                      p1[x + 1] * matrix[5] +
                      p2[x - 1] * matrix[6] +
                      p2[x] *     matrix[7] +
                      p2[x + 1] * matrix[8];
            sum = (int)(sum * rdiv + bias + 0.5f);
            dst[x] = av_clip_uint8(sum);
        }

        p0 = p1;
        p1 = p2;
        p2 = (p2 == end) ? orig: p2 + bstride;
        dst += out->linesize[plane];
    }
}
Code example #25
File: wmv2dsp.c  Project: AlexZhangOG/FFmpeg
static void wmv2_idct_add_c(uint8_t *dest, int line_size, int16_t *block)
{
    int i;

    for (i = 0; i < 64; i += 8)
        wmv2_idct_row(block + i);
    for (i = 0; i < 8; i++)
        wmv2_idct_col(block + i);

    for (i = 0; i < 8; i++) {
        dest[0] = av_clip_uint8(dest[0] + block[0]);
        dest[1] = av_clip_uint8(dest[1] + block[1]);
        dest[2] = av_clip_uint8(dest[2] + block[2]);
        dest[3] = av_clip_uint8(dest[3] + block[3]);
        dest[4] = av_clip_uint8(dest[4] + block[4]);
        dest[5] = av_clip_uint8(dest[5] + block[5]);
        dest[6] = av_clip_uint8(dest[6] + block[6]);
        dest[7] = av_clip_uint8(dest[7] + block[7]);
        dest += line_size;
        block += 8;
    }
}
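This is the "add" flavour of the IDCT, accumulating the transformed residual onto the prediction already in dest. The companion "put" flavour stores the clipped coefficients directly; a sketch of that inner loop, using av_clip_uint8 as above (wmv2dsp.c provides such a variant, but its exact body is assumed here):

static void idct_put_rows(uint8_t *dest, int line_size, const int16_t *block)
{
    for (int i = 0; i < 8; i++) {
        for (int j = 0; j < 8; j++)
            dest[j] = av_clip_uint8(block[j]);   /* store, don't accumulate */
        dest  += line_size;
        block += 8;
    }
}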
Code example #26
File: dds.c  Project: MAXsundai/FFmpeg
static void run_postproc(AVCodecContext *avctx, AVFrame *frame)
{
    DDSContext *ctx = avctx->priv_data;
    int i, x_off;

    switch (ctx->postproc) {
    case DDS_ALPHA_EXP:
        /* Alpha-exponential mode divides each channel by the maximum
         * R, G or B value, and stores the multiplying factor in the
         * alpha channel. */
        av_log(avctx, AV_LOG_DEBUG, "Post-processing alpha exponent.\n");

        for (i = 0; i < frame->linesize[0] * frame->height; i += 4) {
            uint8_t *src = frame->data[0] + i;
            int r = src[0];
            int g = src[1];
            int b = src[2];
            int a = src[3];

            src[0] = r * a / 255;
            src[1] = g * a / 255;
            src[2] = b * a / 255;
            src[3] = 255;
        }
        break;
    case DDS_NORMAL_MAP:
        /* Normal maps work in the XYZ color space and they encode
         * X in R or in A, depending on the texture type, Y in G and
         * derive Z with a square root of the distance.
         *
         * http://www.realtimecollisiondetection.net/blog/?p=28 */
        av_log(avctx, AV_LOG_DEBUG, "Post-processing normal map.\n");

        x_off = ctx->tex_ratio == 8 ? 0 : 3;
        for (i = 0; i < frame->linesize[0] * frame->height; i += 4) {
            uint8_t *src = frame->data[0] + i;
            int x = src[x_off];
            int y = src[1];
            int z = 127;

            int d = (255 * 255 - x * x - y * y) / 2;
            if (d > 0)
                z = lrint(sqrtf(d));

            src[0] = x;
            src[1] = y;
            src[2] = z;
            src[3] = 255;
        }
        break;
    case DDS_RAW_YCOCG:
        /* Data is Y-Co-Cg-A and not RGBA, but they are represented
         * with the same masks in the DDPF header. */
        av_log(avctx, AV_LOG_DEBUG, "Post-processing raw YCoCg.\n");

        for (i = 0; i < frame->linesize[0] * frame->height; i += 4) {
            uint8_t *src = frame->data[0] + i;
            int a  = src[0];
            int cg = src[1] - 128;
            int co = src[2] - 128;
            int y  = src[3];

            src[0] = av_clip_uint8(y + co - cg);
            src[1] = av_clip_uint8(y + cg);
            src[2] = av_clip_uint8(y - co - cg);
            src[3] = a;
        }
        break;
    case DDS_SWAP_ALPHA:
        /* Alpha and Luma are stored swapped. */
        av_log(avctx, AV_LOG_DEBUG, "Post-processing swapped Luma/Alpha.\n");

        for (i = 0; i < frame->linesize[0] * frame->height; i += 2) {
            uint8_t *src = frame->data[0] + i;
            FFSWAP(uint8_t, src[0], src[1]);
        }
        break;
    case DDS_SWIZZLE_A2XY:
        /* Swap R and G, often used to restore a standard RGTC2. */
        av_log(avctx, AV_LOG_DEBUG, "Post-processing A2XY swizzle.\n");
        do_swizzle(frame, 0, 1);
        break;
    case DDS_SWIZZLE_RBXG:
        /* Swap G and A, then B and new A (G). */
        av_log(avctx, AV_LOG_DEBUG, "Post-processing RBXG swizzle.\n");
        do_swizzle(frame, 1, 3);
        do_swizzle(frame, 2, 3);
        break;
    case DDS_SWIZZLE_RGXB:
        /* Swap B and A. */
        av_log(avctx, AV_LOG_DEBUG, "Post-processing RGXB swizzle.\n");
        do_swizzle(frame, 2, 3);
        break;
    case DDS_SWIZZLE_RXBG:
        /* Swap G and A. */
        av_log(avctx, AV_LOG_DEBUG, "Post-processing RXBG swizzle.\n");
        do_swizzle(frame, 1, 3);
        break;
    case DDS_SWIZZLE_RXGB:
        /* Swap R and A (misleading name). */
        av_log(avctx, AV_LOG_DEBUG, "Post-processing RXGB swizzle.\n");
        do_swizzle(frame, 0, 3);
        break;
    case DDS_SWIZZLE_XGBR:
        /* Swap B and A, then R and new A (B). */
        av_log(avctx, AV_LOG_DEBUG, "Post-processing XGBR swizzle.\n");
        do_swizzle(frame, 2, 3);
        do_swizzle(frame, 0, 3);
        break;
    case DDS_SWIZZLE_XGXR:
        /* Swap G and A, then R and new A (G), then new R (G) and new G (A).
         * This variant does not store any B component. */
        av_log(avctx, AV_LOG_DEBUG, "Post-processing XGXR swizzle.\n");
        do_swizzle(frame, 1, 3);
        do_swizzle(frame, 0, 3);
        do_swizzle(frame, 0, 1);
        break;
    case DDS_SWIZZLE_XRBG:
        /* Swap G and A, then R and new A (G). */
        av_log(avctx, AV_LOG_DEBUG, "Post-processing XRBG swizzle.\n");
        do_swizzle(frame, 1, 3);
        do_swizzle(frame, 0, 3);
        break;
    }
}
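The DDS_RAW_YCOCG branch is the standard inverse YCoCg transform, R = Y + Co - Cg, G = Y + Cg, B = Y - Co - Cg, applied after re-centering Co and Cg around zero. A tiny standalone check of that mapping (function name illustrative):

#include <stdio.h>

static void ycocg_to_rgb(int y, int co, int cg, int *r, int *g, int *b)
{
    const int t = y - cg;   /* shared term */
    *r = t + co;
    *g = y + cg;
    *b = t - co;
}

int main(void)
{
    int r, g, b;
    ycocg_to_rgb(128, 10, -5, &r, &g, &b);   /* arbitrary sample values */
    printf("R=%d G=%d B=%d\n", r, g, b);
    return 0;
}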
Code example #27
File: aaccoder.c  Project: Arcen/FFmpeg
static void search_for_quantizers_faac(AVCodecContext *avctx, AACEncContext *s,
                                       SingleChannelElement *sce,
                                       const float lambda)
{
    int start = 0, i, w, w2, g;
    float uplim[128], maxq[128];
    int minq, maxsf;
    float distfact = ((sce->ics.num_windows > 1) ? 85.80 : 147.84) / lambda;
    int last = 0, lastband = 0, curband = 0;
    float avg_energy = 0.0;
    if (sce->ics.num_windows == 1) {
        start = 0;
        for (i = 0; i < 1024; i++) {
            if (i - start >= sce->ics.swb_sizes[curband]) {
                start += sce->ics.swb_sizes[curband];
                curband++;
            }
            if (sce->coeffs[i]) {
                avg_energy += sce->coeffs[i] * sce->coeffs[i];
                last = i;
                lastband = curband;
            }
        }
    } else {
        for (w = 0; w < 8; w++) {
            const float *coeffs = sce->coeffs + w*128;
            start = 0;
            for (i = 0; i < 128; i++) {
                if (i - start >= sce->ics.swb_sizes[curband]) {
                    start += sce->ics.swb_sizes[curband];
                    curband++;
                }
                if (coeffs[i]) {
                    avg_energy += coeffs[i] * coeffs[i];
                    last = FFMAX(last, i);
                    lastband = FFMAX(lastband, curband);
                }
            }
        }
    }
    last++;
    avg_energy /= last;
    if (avg_energy == 0.0f) {
        for (i = 0; i < FF_ARRAY_ELEMS(sce->sf_idx); i++)
            sce->sf_idx[i] = SCALE_ONE_POS;
        return;
    }
    for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
        start = w*128;
        for (g = 0; g < sce->ics.num_swb; g++) {
            float *coefs   = sce->coeffs + start;
            const int size = sce->ics.swb_sizes[g];
            int start2 = start, end2 = start + size, peakpos = start;
            float maxval = -1, thr = 0.0f, t;
            maxq[w*16+g] = 0.0f;
            if (g > lastband) {
                maxq[w*16+g] = 0.0f;
                start += size;
                for (w2 = 0; w2 < sce->ics.group_len[w]; w2++)
                    memset(coefs + w2*128, 0, sizeof(coefs[0])*size);
                continue;
            }
            for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
                for (i = 0; i < size; i++) {
                    float t = coefs[w2*128+i]*coefs[w2*128+i];
                    maxq[w*16+g] = FFMAX(maxq[w*16+g], fabsf(coefs[w2*128 + i]));
                    thr += t;
                    if (sce->ics.num_windows == 1 && maxval < t) {
                        maxval  = t;
                        peakpos = start+i;
                    }
                }
            }
            if (sce->ics.num_windows == 1) {
                start2 = FFMAX(peakpos - 2, start2);
                end2   = FFMIN(peakpos + 3, end2);
            } else {
                start2 -= start;
                end2   -= start;
            }
            start += size;
            thr = pow(thr / (avg_energy * (end2 - start2)), 0.3 + 0.1*(lastband - g) / lastband);
            t   = 1.0 - (1.0 * start2 / last);
            uplim[w*16+g] = distfact / (1.4 * thr + t*t*t + 0.075);
        }
    }
    memset(sce->sf_idx, 0, sizeof(sce->sf_idx));
    abs_pow34_v(s->scoefs, sce->coeffs, 1024);
    for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
        start = w*128;
        for (g = 0;  g < sce->ics.num_swb; g++) {
            const float *coefs  = sce->coeffs + start;
            const float *scaled = s->scoefs   + start;
            const int size      = sce->ics.swb_sizes[g];
            int scf, prev_scf, step;
            int min_scf = -1, max_scf = 256;
            float curdiff;
            if (maxq[w*16+g] < 21.544) {
                sce->zeroes[w*16+g] = 1;
                start += size;
                continue;
            }
            sce->zeroes[w*16+g] = 0;
            scf  = prev_scf = av_clip(SCALE_ONE_POS - SCALE_DIV_512 - log2f(1/maxq[w*16+g])*16/3, 60, 218);
            step = 16;
            for (;;) {
                float dist = 0.0f;
                int quant_max;

                for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
                    int b;
                    dist += quantize_band_cost(s, coefs + w2*128,
                                               scaled + w2*128,
                                               sce->ics.swb_sizes[g],
                                               scf,
                                               ESC_BT,
                                               lambda,
                                               INFINITY,
                                               &b);
                    dist -= b;
                }
                dist *= 1.0f / 512.0f / lambda;
                quant_max = quant(maxq[w*16+g], ff_aac_pow2sf_tab[POW_SF2_ZERO - scf + SCALE_ONE_POS - SCALE_DIV_512]);
                if (quant_max >= 8191) { // too much, return to the previous quantizer
                    sce->sf_idx[w*16+g] = prev_scf;
                    break;
                }
                prev_scf = scf;
                curdiff = fabsf(dist - uplim[w*16+g]);
                if (curdiff <= 1.0f)
                    step = 0;
                else
                    step = log2f(curdiff);
                if (dist > uplim[w*16+g])
                    step = -step;
                scf += step;
                scf = av_clip_uint8(scf);
                step = scf - prev_scf;
                if (FFABS(step) <= 1 || (step > 0 && scf >= max_scf) || (step < 0 && scf <= min_scf)) {
                    sce->sf_idx[w*16+g] = av_clip(scf, min_scf, max_scf);
                    break;
                }
                if (step > 0)
                    min_scf = prev_scf;
                else
                    max_scf = prev_scf;
            }
            start += size;
        }
    }
    minq = sce->sf_idx[0] ? sce->sf_idx[0] : INT_MAX;
    for (i = 1; i < 128; i++) {
        if (!sce->sf_idx[i])
            sce->sf_idx[i] = sce->sf_idx[i-1];
        else
            minq = FFMIN(minq, sce->sf_idx[i]);
    }
    if (minq == INT_MAX)
        minq = 0;
    minq = FFMIN(minq, SCALE_MAX_POS);
    maxsf = FFMIN(minq + SCALE_MAX_DIFF, SCALE_MAX_POS);
    for (i = 126; i >= 0; i--) {
        if (!sce->sf_idx[i])
            sce->sf_idx[i] = sce->sf_idx[i+1];
        sce->sf_idx[i] = av_clip(sce->sf_idx[i], minq, maxsf);
    }
}
Code example #28
static int dpcm_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                             AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    const uint8_t *buf_end = buf + buf_size;
    DPCMContext *s = avctx->priv_data;
    int out = 0;
    int predictor[2];
    int ch = 0;
    int stereo = s->channels - 1;
    int16_t *output_samples = data;

    /* calculate output size */
    switch(avctx->codec->id) {
    case CODEC_ID_ROQ_DPCM:
        out = buf_size - 8;
        break;
    case CODEC_ID_INTERPLAY_DPCM:
        out = buf_size - 6 - s->channels;
        break;
    case CODEC_ID_XAN_DPCM:
        out = buf_size - 2 * s->channels;
        break;
    case CODEC_ID_SOL_DPCM:
        if (avctx->codec_tag != 3)
            out = buf_size * 2;
        else
            out = buf_size;
        break;
    }
    out *= av_get_bytes_per_sample(avctx->sample_fmt);
    if (out <= 0) {
        av_log(avctx, AV_LOG_ERROR, "packet is too small\n");
        return AVERROR(EINVAL);
    }
    if (*data_size < out) {
        av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n");
        return AVERROR(EINVAL);
    }

    switch(avctx->codec->id) {

    case CODEC_ID_ROQ_DPCM:
        buf += 6;

        if (stereo) {
            predictor[1] = (int16_t)(bytestream_get_byte(&buf) << 8);
            predictor[0] = (int16_t)(bytestream_get_byte(&buf) << 8);
        } else {
            predictor[0] = (int16_t)bytestream_get_le16(&buf);
        }

        /* decode the samples */
        while (buf < buf_end) {
            predictor[ch] += s->roq_square_array[*buf++];
            predictor[ch]  = av_clip_int16(predictor[ch]);
            *output_samples++ = predictor[ch];

            /* toggle channel */
            ch ^= stereo;
        }
        break;

    case CODEC_ID_INTERPLAY_DPCM:
        buf += 6;  /* skip over the stream mask and stream length */

        for (ch = 0; ch < s->channels; ch++) {
            predictor[ch] = (int16_t)bytestream_get_le16(&buf);
            *output_samples++ = predictor[ch];
        }

        ch = 0;
        while (buf < buf_end) {
            predictor[ch] += interplay_delta_table[*buf++];
            predictor[ch]  = av_clip_int16(predictor[ch]);
            *output_samples++ = predictor[ch];

            /* toggle channel */
            ch ^= stereo;
        }
        break;

    case CODEC_ID_XAN_DPCM:
    {
        int shift[2] = { 4, 4 };

        for (ch = 0; ch < s->channels; ch++)
            predictor[ch] = (int16_t)bytestream_get_le16(&buf);

        ch = 0;
        while (buf < buf_end) {
            uint8_t n = *buf++;
            int16_t diff = (n & 0xFC) << 8;
            if ((n & 0x03) == 3)
                shift[ch]++;
            else
                shift[ch] -= (2 * (n & 3));
            /* saturate the shifter to a lower limit of 0 */
            if (shift[ch] < 0)
                shift[ch] = 0;

            diff >>= shift[ch];
            predictor[ch] += diff;

            predictor[ch] = av_clip_int16(predictor[ch]);
            *output_samples++ = predictor[ch];

            /* toggle channel */
            ch ^= stereo;
        }
        break;
    }
    case CODEC_ID_SOL_DPCM:
        if (avctx->codec_tag != 3) {
            uint8_t *output_samples_u8 = data;
            while (buf < buf_end) {
                uint8_t n = *buf++;

                s->sample[0] += s->sol_table[n >> 4];
                s->sample[0]  = av_clip_uint8(s->sample[0]);
                *output_samples_u8++ = s->sample[0];

                s->sample[stereo] += s->sol_table[n & 0x0F];
                s->sample[stereo]  = av_clip_uint8(s->sample[stereo]);
                *output_samples_u8++ = s->sample[stereo];
            }
        } else {
            while (buf < buf_end) {
                uint8_t n = *buf++;

                /* high bit selects subtract; low 7 bits index the 16-bit
                 * delta table (sol_table_16 in the original file) */
                if (n & 0x80) s->sample[ch] -= sol_table_16[n & 0x7F];
                else          s->sample[ch] += sol_table_16[n & 0x7F];
                s->sample[ch] = av_clip_int16(s->sample[ch]);
                *output_samples++ = s->sample[ch];

                /* toggle channel */
                ch ^= stereo;
            }
        }
        break;
    }

    *data_size = out;

    return buf_size;
}
Code example #29
File: faanidct.c  Project: Flameeyes/libav
static inline void p8idct(DCTELEM data[64], FLOAT temp[64], uint8_t *dest, int stride, int x, int y, int type){
    int i;
    FLOAT av_unused tmp0;
    FLOAT s04, d04, s17, d17, s26, d26, s53, d53;
    FLOAT os07, os16, os25, os34;
    FLOAT od07, od16, od25, od34;

    for(i=0; i<y*8; i+=y){
        s17= temp[1*x + i] + temp[7*x + i];
        d17= temp[1*x + i] - temp[7*x + i];
        s53= temp[5*x + i] + temp[3*x + i];
        d53= temp[5*x + i] - temp[3*x + i];

        od07=  s17 + s53;
        od25= (s17 - s53)*(2*A4);

#if 0 //these 2 are equivalent
        tmp0= (d17 + d53)*(2*A2);
        od34=  d17*( 2*B6) - tmp0;
        od16=  d53*(-2*B2) + tmp0;
#else
        od34=  d17*(2*(B6-A2)) - d53*(2*A2);
        od16=  d53*(2*(A2-B2)) + d17*(2*A2);
#endif

        od16 -= od07;
        od25 -= od16;
        od34 += od25;

        s26 = temp[2*x + i] + temp[6*x + i];
        d26 = temp[2*x + i] - temp[6*x + i];
        d26*= 2*A4;
        d26-= s26;

        s04= temp[0*x + i] + temp[4*x + i];
        d04= temp[0*x + i] - temp[4*x + i];

        os07= s04 + s26;
        os34= s04 - s26;
        os16= d04 + d26;
        os25= d04 - d26;

        if(type==0){
            temp[0*x + i]= os07 + od07;
            temp[7*x + i]= os07 - od07;
            temp[1*x + i]= os16 + od16;
            temp[6*x + i]= os16 - od16;
            temp[2*x + i]= os25 + od25;
            temp[5*x + i]= os25 - od25;
            temp[3*x + i]= os34 - od34;
            temp[4*x + i]= os34 + od34;
        }else if(type==1){
            data[0*x + i]= lrintf(os07 + od07);
            data[7*x + i]= lrintf(os07 - od07);
            data[1*x + i]= lrintf(os16 + od16);
            data[6*x + i]= lrintf(os16 - od16);
            data[2*x + i]= lrintf(os25 + od25);
            data[5*x + i]= lrintf(os25 - od25);
            data[3*x + i]= lrintf(os34 - od34);
            data[4*x + i]= lrintf(os34 + od34);
        }else if(type==2){
            dest[0*stride + i]= av_clip_uint8(((int)dest[0*stride + i]) + lrintf(os07 + od07));
            dest[7*stride + i]= av_clip_uint8(((int)dest[7*stride + i]) + lrintf(os07 - od07));
            dest[1*stride + i]= av_clip_uint8(((int)dest[1*stride + i]) + lrintf(os16 + od16));
            dest[6*stride + i]= av_clip_uint8(((int)dest[6*stride + i]) + lrintf(os16 - od16));
            dest[2*stride + i]= av_clip_uint8(((int)dest[2*stride + i]) + lrintf(os25 + od25));
            dest[5*stride + i]= av_clip_uint8(((int)dest[5*stride + i]) + lrintf(os25 - od25));
            dest[3*stride + i]= av_clip_uint8(((int)dest[3*stride + i]) + lrintf(os34 - od34));
            dest[4*stride + i]= av_clip_uint8(((int)dest[4*stride + i]) + lrintf(os34 + od34));
        }else{
            dest[0*stride + i]= av_clip_uint8(lrintf(os07 + od07));
            dest[7*stride + i]= av_clip_uint8(lrintf(os07 - od07));
            dest[1*stride + i]= av_clip_uint8(lrintf(os16 + od16));
            dest[6*stride + i]= av_clip_uint8(lrintf(os16 - od16));
            dest[2*stride + i]= av_clip_uint8(lrintf(os25 + od25));
            dest[5*stride + i]= av_clip_uint8(lrintf(os25 - od25));
            dest[3*stride + i]= av_clip_uint8(lrintf(os34 - od34));
            dest[4*stride + i]= av_clip_uint8(lrintf(os34 + od34));
        }
    }
}
Code example #30
CONV_FUNC(AV_SAMPLE_FMT_U8 , uint8_t, AV_SAMPLE_FMT_U8 ,  *(const uint8_t*)pi)
CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_U8 , (*(const uint8_t*)pi - 0x80U)<<8)
CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_U8 , (*(const uint8_t*)pi - 0x80U)<<24)
CONV_FUNC(AV_SAMPLE_FMT_FLT, float  , AV_SAMPLE_FMT_U8 , (*(const uint8_t*)pi - 0x80)*(1.0f/ (1<<7)))
CONV_FUNC(AV_SAMPLE_FMT_DBL, double , AV_SAMPLE_FMT_U8 , (*(const uint8_t*)pi - 0x80)*(1.0 / (1<<7)))
CONV_FUNC(AV_SAMPLE_FMT_U8 , uint8_t, AV_SAMPLE_FMT_S16, (*(const int16_t*)pi>>8) + 0x80)
CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_S16,  *(const int16_t*)pi)
CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_S16,  *(const int16_t*)pi<<16)
CONV_FUNC(AV_SAMPLE_FMT_FLT, float  , AV_SAMPLE_FMT_S16,  *(const int16_t*)pi*(1.0f/ (1<<15)))
CONV_FUNC(AV_SAMPLE_FMT_DBL, double , AV_SAMPLE_FMT_S16,  *(const int16_t*)pi*(1.0 / (1<<15)))
CONV_FUNC(AV_SAMPLE_FMT_U8 , uint8_t, AV_SAMPLE_FMT_S32, (*(const int32_t*)pi>>24) + 0x80)
CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_S32,  *(const int32_t*)pi>>16)
CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_S32,  *(const int32_t*)pi)
CONV_FUNC(AV_SAMPLE_FMT_FLT, float  , AV_SAMPLE_FMT_S32,  *(const int32_t*)pi*(1.0f/ (1U<<31)))
CONV_FUNC(AV_SAMPLE_FMT_DBL, double , AV_SAMPLE_FMT_S32,  *(const int32_t*)pi*(1.0 / (1U<<31)))
CONV_FUNC(AV_SAMPLE_FMT_U8 , uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(  lrintf(*(const float*)pi * (1<<7)) + 0x80))
CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(  lrintf(*(const float*)pi * (1<<15))))
CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float*)pi * (1U<<31))))
CONV_FUNC(AV_SAMPLE_FMT_FLT, float  , AV_SAMPLE_FMT_FLT, *(const float*)pi)
CONV_FUNC(AV_SAMPLE_FMT_DBL, double , AV_SAMPLE_FMT_FLT, *(const float*)pi)
CONV_FUNC(AV_SAMPLE_FMT_U8 , uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(  lrint(*(const double*)pi * (1<<7)) + 0x80))
CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(  lrint(*(const double*)pi * (1<<15))))
CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double*)pi * (1U<<31))))
CONV_FUNC(AV_SAMPLE_FMT_FLT, float  , AV_SAMPLE_FMT_DBL, *(const double*)pi)
CONV_FUNC(AV_SAMPLE_FMT_DBL, double , AV_SAMPLE_FMT_DBL, *(const double*)pi)

#define FMT_PAIR_FUNC(out, in) [(out) + AV_SAMPLE_FMT_NB*(in)] = CONV_FUNC_NAME(out, in)

static conv_func_type * const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB*AV_SAMPLE_FMT_NB] = {
    FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8 , AV_SAMPLE_FMT_U8 ),
    FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8 ),