Example no. 1
/* next one assumes that ((line_size % 8) == 0) */
static void avg_pixels8_altivec(uint8_t * block, const uint8_t * pixels, ptrdiff_t line_size, int h)
{
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    int i;

    for (i = 0; i < h; i++) {
        /* block is 8 bytes-aligned, so we're either in the
           left block (16 bytes-aligned) or in the right block (not) */
        int rightside = ((unsigned long)block & 0x0000000F);

        blockv = vec_ld(0, block);
        pixelsv1 = vec_ld( 0, pixels);
        pixelsv2 = vec_ld(16, pixels);
        pixelsv = vec_perm(pixelsv1, pixelsv2, vec_lvsl(0, pixels));

        if (rightside) {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(0,1,s0,s1));
        } else {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(s0,s1,2,3));
        }

        blockv = vec_avg(blockv, pixelsv);

        vec_st(blockv, 0, block);

        pixels += line_size;
        block += line_size;
    }
}
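
For reference, a scalar sketch of what this kernel computes: vec_avg on unsigned chars is the rounded average (a + b + 1) >> 1, applied to each byte of an 8-byte-wide block. The _c-suffixed helper name is illustrative, not from the original.

#include <stdint.h>
#include <stddef.h>

/* Scalar model of avg_pixels8_altivec: average each dst byte with the
 * corresponding src byte, rounding halfway cases up. */
static void avg_pixels8_c(uint8_t *block, const uint8_t *pixels,
                          ptrdiff_t line_size, int h)
{
    int i, j;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 8; j++)
            block[j] = (block[j] + pixels[j] + 1) >> 1;
        pixels += line_size;
        block  += line_size;
    }
}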
Example no. 2
/* next one assumes that ((line_size % 8) == 0) */
static void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    for (i = 0; i < h; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vcone);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}
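
What the vector bookkeeping above amounts to, written scalar (a sketch; the no_rnd bias of +1 corresponds to vcone, where the rounded variant would use vctwo):

#include <stdint.h>
#include <stddef.h>

/* Scalar model of put_no_rnd_pixels8_xy2: each output byte is the 2x2
 * average of the four neighbouring source bytes. */
static void put_no_rnd_pixels8_xy2_c(uint8_t *block, const uint8_t *pixels,
                                     ptrdiff_t line_size, int h)
{
    int i, j;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 8; j++)
            block[j] = (pixels[j]             + pixels[j + 1] +
                        pixels[j + line_size] + pixels[j + line_size + 1] +
                        1) >> 2;
        pixels += line_size;
        block  += line_size;
    }
}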
Example no. 3
static void vector_fmul_window_altivec(float *dst, const float *src0, const float *src1, const float *win, int len)
{
    vector float zero, t0, t1, s0, s1, wi, wj;
    const vector unsigned char reverse = vcprm(3,2,1,0);
    int i, j;

    dst  += len;
    win  += len;
    src0 += len;

    zero = (vector float)vec_splat_u32(0);

    for (i = -len * 4, j = len * 4 - 16; i < 0; i += 16, j -= 16) {
        s0 = vec_ld(i, src0);
        s1 = vec_ld(j, src1);
        wi = vec_ld(i, win);
        wj = vec_ld(j, win);

        s1 = vec_perm(s1, s1, reverse);
        wj = vec_perm(wj, wj, reverse);

        t0 = vec_madd(s0, wj, zero);
        t0 = vec_nmsub(s1, wi, t0);
        t1 = vec_madd(s0, wi, zero);
        t1 = vec_madd(s1, wj, t1);
        t1 = vec_perm(t1, t1, reverse);

        vec_st(t0, i, dst);
        vec_st(t1, j, dst);
    }
}
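
The scalar operation being vectorized, four elements at a time above (a sketch along the lines of FFmpeg's generic version):

/* Scalar model of vector_fmul_window: the window runs forward over
 * src0 and backward over src1, producing both output halves at once. */
static void vector_fmul_window_c(float *dst, const float *src0,
                                 const float *src1, const float *win, int len)
{
    int i, j;
    dst  += len;
    win  += len;
    src0 += len;
    for (i = -len, j = len - 1; i < 0; i++, j--) {
        float s0 = src0[i];
        float s1 = src1[j];
        float wi = win[i];
        float wj = win[j];
        dst[i] = s0 * wj - s1 * wi;
        dst[j] = s0 * wi + s1 * wj;
    }
}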
Example no. 4
static void ff_imdct_calc_altivec(FFTContext *s, FFTSample *output, const FFTSample *input)
{
    int k;
    int n = 1 << s->mdct_bits;
    int n4 = n >> 2;
    int n16 = n >> 4;
    vec_u32 sign = {1U<<31,1U<<31,1U<<31,1U<<31};
    vec_u32 *p0 = (vec_u32*)(output+n4);
    vec_u32 *p1 = (vec_u32*)(output+n4*3);

    ff_imdct_half_altivec(s, output+n4, input);

    for (k = 0; k < n16; k++) {
        vec_u32 a = p0[k] ^ sign;
        vec_u32 b = p1[-k-1];
        p0[-k-1] = vec_perm(a, a, vcprm(3,2,1,0));
        p1[k]    = vec_perm(b, b, vcprm(3,2,1,0));
    }
}
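
Per float, the loop performs the mirroring below (a sketch matching FFmpeg's generic imdct_calc; FFTSample is assumed to be float): the XOR with 'sign' flips the sign bits and the reversing vec_perm handles the index reflection, four values at a time.

/* Scalar model of the wrapper: ff_imdct_half fills output[n4..3*n4);
 * reflect that half outward, negating the first quarter. */
static void imdct_mirror_c(float *output, int n)
{
    int n2 = n >> 1, n4 = n >> 2, k;
    for (k = 0; k < n4; k++) {
        output[k]         = -output[n2 - k - 1];
        output[n - k - 1] =  output[n2 + k];
    }
}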
Example no. 5
/* AltiVec-enhanced gmc1. ATM this code assumes stride is a multiple of 8
 * to preserve proper dst alignment. */
void ff_gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */,
                     int stride, int h, int x16, int y16, int rounder)
{
    int i;
    const DECLARE_ALIGNED(16, unsigned short, rounder_a) = rounder;
    const DECLARE_ALIGNED(16, unsigned short, ABCD)[8] = {
        (16 - x16) * (16 - y16), /* A */
             (x16) * (16 - y16), /* B */
        (16 - x16) * (y16),      /* C */
             (x16) * (y16),      /* D */
        0, 0, 0, 0               /* padding */
    };
    register const vector unsigned char vczero =
        (const vector unsigned char) vec_splat_u8(0);
    register const vector unsigned short vcsr8 =
        (const vector unsigned short) vec_splat_u16(8);
    register vector unsigned char dstv, dstv2, srcvB, srcvC, srcvD;
    register vector unsigned short tempB, tempC, tempD;
    unsigned long dst_odd        = (unsigned long) dst & 0x0000000F;
    unsigned long src_really_odd = (unsigned long) src & 0x0000000F;
    register vector unsigned short tempA =
        vec_ld(0, (const unsigned short *) ABCD);
    register vector unsigned short Av = vec_splat(tempA, 0);
    register vector unsigned short Bv = vec_splat(tempA, 1);
    register vector unsigned short Cv = vec_splat(tempA, 2);
    register vector unsigned short Dv = vec_splat(tempA, 3);
    register vector unsigned short rounderV =
        vec_splat((vec_u16) vec_lde(0, &rounder_a), 0);

    /* We'll be able to pick up our 9 char elements at src from those
     * 32 bytes. We load the first batch here, as inside the loop we can
     * reuse 'src + stride' from one iteration as the 'src' of the next. */
    register vector unsigned char src_0 = vec_ld(0, src);
    register vector unsigned char src_1 = vec_ld(16, src);
    register vector unsigned char srcvA = vec_perm(src_0, src_1,
                                                   vec_lvsl(0, src));

    if (src_really_odd != 0x0000000F)
        /* If (src & 0xF) == 0xF, then (src + 1) is properly aligned
         * on the second vector. */
        srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
    else
        srcvB = src_1;
    srcvA = vec_mergeh(vczero, srcvA);
    srcvB = vec_mergeh(vczero, srcvB);

    for (i = 0; i < h; i++) {
        dst_odd        =   (unsigned long) dst            & 0x0000000F;
        src_really_odd = (((unsigned long) src) + stride) & 0x0000000F;

        dstv = vec_ld(0, dst);

        /* We'll be able to pick up our 9 char elements at src + stride from
         * those 32 bytes, then reuse the resulting 2 vectors srcvC and srcvD
         * as the next srcvA and srcvB. */
        src_0 = vec_ld(stride +  0, src);
        src_1 = vec_ld(stride + 16, src);
        srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));

        if (src_really_odd != 0x0000000F)
            /* If (src & 0xF) == 0xF, then (src + 1) is properly aligned
             * on the second vector. */
            srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
        else
            srcvD = src_1;

        srcvC = vec_mergeh(vczero, srcvC);
        srcvD = vec_mergeh(vczero, srcvD);

        /* OK, now we (finally) do the math :-)
         * Those four instructions replace 32 int muls & 32 int adds.
         * Isn't AltiVec nice? */
        tempA = vec_mladd((vector unsigned short) srcvA, Av, rounderV);
        tempB = vec_mladd((vector unsigned short) srcvB, Bv, tempA);
        tempC = vec_mladd((vector unsigned short) srcvC, Cv, tempB);
        tempD = vec_mladd((vector unsigned short) srcvD, Dv, tempC);

        srcvA = srcvC;
        srcvB = srcvD;

        tempD = vec_sr(tempD, vcsr8);

        dstv2 = vec_pack(tempD, (vector unsigned short) vczero);

        if (dst_odd)
            dstv2 = vec_perm(dstv, dstv2, vcprm(0, 1, s0, s1));
        else
            dstv2 = vec_perm(dstv, dstv2, vcprm(s0, s1, 2, 3));

        vec_st(dstv2, 0, dst);

        dst += stride;
        src += stride;
    }
}
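
Underneath the permutes, this is plain bilinear interpolation with 1/16-pel weights (a scalar sketch along the lines of FFmpeg's generic gmc1 code; the _c name is illustrative):

#include <stdint.h>

/* Scalar model of gmc1: A..D are the bilinear weights (they sum to
 * 256, hence the >> 8 after adding the caller's rounder). */
static void gmc1_c(uint8_t *dst, const uint8_t *src, int stride, int h,
                   int x16, int y16, int rounder)
{
    const int A = (16 - x16) * (16 - y16);
    const int B = (x16)      * (16 - y16);
    const int C = (16 - x16) * (y16);
    const int D = (x16)      * (y16);
    int i, j;

    for (i = 0; i < h; i++) {
        for (j = 0; j < 8; j++)
            dst[j] = (A * src[j]          + B * src[j + 1] +
                      C * src[j + stride] + D * src[j + stride + 1] +
                      rounder) >> 8;
        dst += stride;
        src += stride;
    }
}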
Example no. 6
An apparently earlier revision of the same gmc1 routine, retaining the POWERPC_PERF instrumentation:
void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */, int stride, int h, int x16, int y16, int rounder)
{
POWERPC_PERF_DECLARE(altivec_gmc1_num, GMC1_PERF_COND);
    const DECLARE_ALIGNED_16(unsigned short, rounder_a[8]) =
      {rounder, rounder, rounder, rounder,
       rounder, rounder, rounder, rounder};
    const DECLARE_ALIGNED_16(unsigned short, ABCD[8]) =
      {
        (16-x16)*(16-y16), /* A */
        (   x16)*(16-y16), /* B */
        (16-x16)*(   y16), /* C */
        (   x16)*(   y16), /* D */
        0, 0, 0, 0         /* padding */
      };
    register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
    register const_vector unsigned short vcsr8 = (const_vector unsigned short)vec_splat_u16(8);
    register vector unsigned char dstv, dstv2, src_0, src_1, srcvA, srcvB, srcvC, srcvD;
    register vector unsigned short Av, Bv, Cv, Dv, rounderV, tempA, tempB, tempC, tempD;
    int i;
    unsigned long dst_odd = (unsigned long)dst & 0x0000000F;
    unsigned long src_really_odd = (unsigned long)src & 0x0000000F;


POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND);

    tempA = vec_ld(0, (unsigned short*)ABCD);
    Av = vec_splat(tempA, 0);
    Bv = vec_splat(tempA, 1);
    Cv = vec_splat(tempA, 2);
    Dv = vec_splat(tempA, 3);

    rounderV = vec_ld(0, (unsigned short*)rounder_a);

    // we'll be able to pick up our 9 char elements
    // at src from those 32 bytes
    // we load the first batch here, as inside the loop
    // we can reuse 'src+stride' from one iteration
    // as the 'src' of the next.
    src_0 = vec_ld(0, src);
    src_1 = vec_ld(16, src);
    srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src));

    if (src_really_odd != 0x0000000F)
    { // if (src & 0xF) == 0xF, then (src+1) is properly aligned on the second vector.
      srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
    }
    else
    {
      srcvB = src_1;
    }
    srcvA = vec_mergeh(vczero, srcvA);
    srcvB = vec_mergeh(vczero, srcvB);

    for(i=0; i<h; i++)
    {
      dst_odd = (unsigned long)dst & 0x0000000F;
      src_really_odd = (((unsigned long)src) + stride) & 0x0000000F;

      dstv = vec_ld(0, dst);

      // we'll be able to pick up our 9 char elements
      // at src + stride from those 32 bytes
      // then reuse the resulting 2 vectors srcvC and srcvD
      // as the next srcvA and srcvB
      src_0 = vec_ld(stride + 0, src);
      src_1 = vec_ld(stride + 16, src);
      srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));

      if (src_really_odd != 0x0000000F)
      { // if (src & 0xF) == 0xF, then (src+1) is properly aligned on the second vector.
        srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
      }
      else
      {
        srcvD = src_1;
      }

      srcvC = vec_mergeh(vczero, srcvC);
      srcvD = vec_mergeh(vczero, srcvD);


      // OK, now we (finally) do the math :-)
      // those four instructions replace 32 int muls & 32 int adds.
      // isn't AltiVec nice?
      tempA = vec_mladd((vector unsigned short)srcvA, Av, rounderV);
      tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA);
      tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB);
      tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC);

      srcvA = srcvC;
      srcvB = srcvD;

      tempD = vec_sr(tempD, vcsr8);

      dstv2 = vec_pack(tempD, (vector unsigned short)vczero);

      if (dst_odd)
      {
        dstv2 = vec_perm(dstv, dstv2, vcprm(0,1,s0,s1));
      }
      else
      {
        dstv2 = vec_perm(dstv, dstv2, vcprm(s0,s1,2,3));
      }

      vec_st(dstv2, 0, dst);

      dst += stride;
      src += stride;
    }

POWERPC_PERF_STOP_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
}
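
Both gmc1 variants, and most of the other examples, lean on the same unaligned-load idiom; pulled out on its own (a sketch, with a hypothetical helper name) it reads:

#include <stdint.h>
#include <altivec.h>

/* Load 16 bytes from an arbitrarily aligned pointer: the two aligned
 * vectors straddling p are shift-merged by the permute vector that
 * vec_lvsl builds from p's low address bits. Like the routines above,
 * this may read up to 15 bytes past the data actually needed. */
static vector unsigned char load_unaligned(const uint8_t *p)
{
    vector unsigned char lo = vec_ld( 0, p);
    vector unsigned char hi = vec_ld(16, p);
    return vec_perm(lo, hi, vec_lvsl(0, p));
}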
Example no. 7
static void ff_imdct_half_altivec(FFTContext *s, FFTSample *output, const FFTSample *input)
{
    int j, k;
    int n = 1 << s->mdct_bits;
    int n4 = n >> 2;
    int n8 = n >> 3;
    int n32 = n >> 5;
    const uint16_t *revtabj = s->revtab;
    const uint16_t *revtabk = s->revtab+n4;
    const vec_f *tcos = (const vec_f*)(s->tcos+n8);
    const vec_f *tsin = (const vec_f*)(s->tsin+n8);
    const vec_f *pin = (const vec_f*)(input+n4);
    vec_f *pout = (vec_f*)(output+n4);

    /* pre rotation */
    k = n32-1;
    do {
        vec_f cos,sin,cos0,sin0,cos1,sin1,re,im,r0,i0,r1,i1,a,b,c,d;
#define CMULA(p,o0,o1,o2,o3)\
        a = pin[ k*2+p];                       /* { z[k].re,    z[k].im,    z[k+1].re,  z[k+1].im  } */\
        b = pin[-k*2-p-1];                     /* { z[-k-2].re, z[-k-2].im, z[-k-1].re, z[-k-1].im } */\
        re = vec_perm(a, b, vcprm(0,2,s0,s2)); /* { z[k].re,    z[k+1].re,  z[-k-2].re, z[-k-1].re } */\
        im = vec_perm(a, b, vcprm(s3,s1,3,1)); /* { z[-k-1].im, z[-k-2].im, z[k+1].im,  z[k].im    } */\
        cos = vec_perm(cos0, cos1, vcprm(o0,o1,s##o2,s##o3)); /* { cos[k], cos[k+1], cos[-k-2], cos[-k-1] } */\
        sin = vec_perm(sin0, sin1, vcprm(o0,o1,s##o2,s##o3));\
        r##p = im*cos - re*sin;\
        i##p = re*cos + im*sin;
#define STORE2(v,dst)\
        j = dst;\
        vec_ste(v, 0, output+j*2);\
        vec_ste(v, 4, output+j*2);
#define STORE8(p)\
        a = vec_perm(r##p, i##p, vcprm(0,s0,0,s0));\
        b = vec_perm(r##p, i##p, vcprm(1,s1,1,s1));\
        c = vec_perm(r##p, i##p, vcprm(2,s2,2,s2));\
        d = vec_perm(r##p, i##p, vcprm(3,s3,3,s3));\
        STORE2(a, revtabk[ p*2-4]);\
        STORE2(b, revtabk[ p*2-3]);\
        STORE2(c, revtabj[-p*2+2]);\
        STORE2(d, revtabj[-p*2+3]);

        cos0 = tcos[k];
        sin0 = tsin[k];
        cos1 = tcos[-k-1];
        sin1 = tsin[-k-1];
        CMULA(0, 0,1,2,3);
        CMULA(1, 2,3,0,1);
        STORE8(0);
        STORE8(1);
        revtabj += 4;
        revtabk -= 4;
        k--;
    } while(k >= 0);

    ff_fft_calc_altivec(s, (FFTComplex*)output);

    /* post rotation + reordering */
    j = -n32;
    k = n32-1;
    do {
        vec_f cos,sin,re,im,a,b,c,d;
#define CMULB(d0,d1,o)\
        re = pout[o*2];\
        im = pout[o*2+1];\
        cos = tcos[o];\
        sin = tsin[o];\
        d0 = im*sin - re*cos;\
        d1 = re*sin + im*cos;

        CMULB(a,b,j);
        CMULB(c,d,k);
        pout[2*j]   = vec_perm(a, d, vcprm(0,s3,1,s2));
        pout[2*j+1] = vec_perm(a, d, vcprm(2,s1,3,s0));
        pout[2*k]   = vec_perm(c, b, vcprm(0,s3,1,s2));
        pout[2*k+1] = vec_perm(c, b, vcprm(2,s1,3,s0));
        j++;
        k--;
    } while(k >= 0);
}
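
Both rotation passes are built from four-wide complex multiplies; one lane of CMULA, written scalar (a sketch; CMULB is the same with cos and sin swapped):

/* One scalar element of CMULA: rotate the (re, im) pair by the
 * twiddle factors c = cos[...], s = sin[...]. */
static inline void cmul_one(float *r, float *i,
                            float re, float im, float c, float s)
{
    *r = im * c - re * s;
    *i = re * c + im * s;
}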
Example no. 8
void dct64_altivec(real *a, real *b, real *c)
{
  real __attribute__ ((aligned(16))) b1[0x20];
  real __attribute__ ((aligned(16))) b2[0x20];

  real *out0 = a;
  real *out1 = b;
  real *samples = c;

  const vector float vczero = (const vector float)FOUROF(0.);
  const vector unsigned char reverse = (const vector unsigned char)vcprm(3,2,1,0);


  if (((unsigned long)b1 & 0x0000000F) ||
      ((unsigned long)b2 & 0x0000000F)) {
    printf("MISALIGNED:\t%p\t%p\t%p\t%p\t%p\n",
           b1, b2, a, b, samples);
  }


#ifdef ALTIVEC_USE_REFERENCE_C_CODE

  {
    register real *costab = mp3lib_pnts[0];

    b1[0x00] = samples[0x00] + samples[0x1F];
    b1[0x01] = samples[0x01] + samples[0x1E];
    b1[0x02] = samples[0x02] + samples[0x1D];
    b1[0x03] = samples[0x03] + samples[0x1C];
    b1[0x04] = samples[0x04] + samples[0x1B];
    b1[0x05] = samples[0x05] + samples[0x1A];
    b1[0x06] = samples[0x06] + samples[0x19];
    b1[0x07] = samples[0x07] + samples[0x18];
    b1[0x08] = samples[0x08] + samples[0x17];
    b1[0x09] = samples[0x09] + samples[0x16];
    b1[0x0A] = samples[0x0A] + samples[0x15];
    b1[0x0B] = samples[0x0B] + samples[0x14];
    b1[0x0C] = samples[0x0C] + samples[0x13];
    b1[0x0D] = samples[0x0D] + samples[0x12];
    b1[0x0E] = samples[0x0E] + samples[0x11];
    b1[0x0F] = samples[0x0F] + samples[0x10];
    b1[0x10] = (samples[0x0F] - samples[0x10]) * costab[0xF];
    b1[0x11] = (samples[0x0E] - samples[0x11]) * costab[0xE];
    b1[0x12] = (samples[0x0D] - samples[0x12]) * costab[0xD];
    b1[0x13] = (samples[0x0C] - samples[0x13]) * costab[0xC];
    b1[0x14] = (samples[0x0B] - samples[0x14]) * costab[0xB];
    b1[0x15] = (samples[0x0A] - samples[0x15]) * costab[0xA];
    b1[0x16] = (samples[0x09] - samples[0x16]) * costab[0x9];
    b1[0x17] = (samples[0x08] - samples[0x17]) * costab[0x8];
    b1[0x18] = (samples[0x07] - samples[0x18]) * costab[0x7];
    b1[0x19] = (samples[0x06] - samples[0x19]) * costab[0x6];
    b1[0x1A] = (samples[0x05] - samples[0x1A]) * costab[0x5];
    b1[0x1B] = (samples[0x04] - samples[0x1B]) * costab[0x4];
    b1[0x1C] = (samples[0x03] - samples[0x1C]) * costab[0x3];
    b1[0x1D] = (samples[0x02] - samples[0x1D]) * costab[0x2];
    b1[0x1E] = (samples[0x01] - samples[0x1E]) * costab[0x1];
    b1[0x1F] = (samples[0x00] - samples[0x1F]) * costab[0x0];

  }
  {
    register real *costab = mp3lib_pnts[1];

    b2[0x00] = b1[0x00] + b1[0x0F];
    b2[0x01] = b1[0x01] + b1[0x0E];
    b2[0x02] = b1[0x02] + b1[0x0D];
    b2[0x03] = b1[0x03] + b1[0x0C];
    b2[0x04] = b1[0x04] + b1[0x0B];
    b2[0x05] = b1[0x05] + b1[0x0A];
    b2[0x06] = b1[0x06] + b1[0x09];
    b2[0x07] = b1[0x07] + b1[0x08];
    b2[0x08] = (b1[0x07] - b1[0x08]) * costab[7];
    b2[0x09] = (b1[0x06] - b1[0x09]) * costab[6];
    b2[0x0A] = (b1[0x05] - b1[0x0A]) * costab[5];
    b2[0x0B] = (b1[0x04] - b1[0x0B]) * costab[4];
    b2[0x0C] = (b1[0x03] - b1[0x0C]) * costab[3];
    b2[0x0D] = (b1[0x02] - b1[0x0D]) * costab[2];
    b2[0x0E] = (b1[0x01] - b1[0x0E]) * costab[1];
    b2[0x0F] = (b1[0x00] - b1[0x0F]) * costab[0];
    b2[0x10] = b1[0x10] + b1[0x1F];
    b2[0x11] = b1[0x11] + b1[0x1E];
    b2[0x12] = b1[0x12] + b1[0x1D];
    b2[0x13] = b1[0x13] + b1[0x1C];
    b2[0x14] = b1[0x14] + b1[0x1B];
    b2[0x15] = b1[0x15] + b1[0x1A];
    b2[0x16] = b1[0x16] + b1[0x19];
    b2[0x17] = b1[0x17] + b1[0x18];
    b2[0x18] = (b1[0x18] - b1[0x17]) * costab[7];
    b2[0x19] = (b1[0x19] - b1[0x16]) * costab[6];
    b2[0x1A] = (b1[0x1A] - b1[0x15]) * costab[5];
    b2[0x1B] = (b1[0x1B] - b1[0x14]) * costab[4];
    b2[0x1C] = (b1[0x1C] - b1[0x13]) * costab[3];
    b2[0x1D] = (b1[0x1D] - b1[0x12]) * costab[2];
    b2[0x1E] = (b1[0x1E] - b1[0x11]) * costab[1];
    b2[0x1F] = (b1[0x1F] - b1[0x10]) * costab[0];

  }

  {
    register real *costab = mp3lib_pnts[2];

    b1[0x00] = b2[0x00] + b2[0x07];
    b1[0x01] = b2[0x01] + b2[0x06];
    b1[0x02] = b2[0x02] + b2[0x05];
    b1[0x03] = b2[0x03] + b2[0x04];
    b1[0x04] = (b2[0x03] - b2[0x04]) * costab[3];
    b1[0x05] = (b2[0x02] - b2[0x05]) * costab[2];
    b1[0x06] = (b2[0x01] - b2[0x06]) * costab[1];
    b1[0x07] = (b2[0x00] - b2[0x07]) * costab[0];
    b1[0x08] = b2[0x08] + b2[0x0F];
    b1[0x09] = b2[0x09] + b2[0x0E];
    b1[0x0A] = b2[0x0A] + b2[0x0D];
    b1[0x0B] = b2[0x0B] + b2[0x0C];
    b1[0x0C] = (b2[0x0C] - b2[0x0B]) * costab[3];
    b1[0x0D] = (b2[0x0D] - b2[0x0A]) * costab[2];
    b1[0x0E] = (b2[0x0E] - b2[0x09]) * costab[1];
    b1[0x0F] = (b2[0x0F] - b2[0x08]) * costab[0];
    b1[0x10] = b2[0x10] + b2[0x17];
    b1[0x11] = b2[0x11] + b2[0x16];
    b1[0x12] = b2[0x12] + b2[0x15];
    b1[0x13] = b2[0x13] + b2[0x14];
    b1[0x14] = (b2[0x13] - b2[0x14]) * costab[3];
    b1[0x15] = (b2[0x12] - b2[0x15]) * costab[2];
    b1[0x16] = (b2[0x11] - b2[0x16]) * costab[1];
    b1[0x17] = (b2[0x10] - b2[0x17]) * costab[0];
    b1[0x18] = b2[0x18] + b2[0x1F];
    b1[0x19] = b2[0x19] + b2[0x1E];
    b1[0x1A] = b2[0x1A] + b2[0x1D];
    b1[0x1B] = b2[0x1B] + b2[0x1C];
    b1[0x1C] = (b2[0x1C] - b2[0x1B]) * costab[3];
    b1[0x1D] = (b2[0x1D] - b2[0x1A]) * costab[2];
    b1[0x1E] = (b2[0x1E] - b2[0x19]) * costab[1];
    b1[0x1F] = (b2[0x1F] - b2[0x18]) * costab[0];
  }

#else /* ALTIVEC_USE_REFERENCE_C_CODE */

  // How does it work?
  // The first three passes are reproduced in the three blocks below.
  // All computations are done on 4-element vectors.
  // 'reverse' is a special permutation vector used to reverse
  // the order of the elements inside a vector.
  // Note that all loads/stores to b1 (b2) between passes 1 and 2 (2 and 3)
  // have been removed; all elements are kept inside b1vX (b2vX).
  {
    register vector float
      b1v0, b1v1, b1v2, b1v3,
      b1v4, b1v5, b1v6, b1v7;
    register vector float
      temp1, temp2;

    {
      register real *costab = mp3lib_pnts[0];

      register vector float
        samplesv1, samplesv2, samplesv3, samplesv4,
        samplesv5, samplesv6, samplesv7, samplesv8,
        samplesv9;
      register vector unsigned char samples_perm = vec_lvsl(0, samples);
      register vector float costabv1, costabv2, costabv3, costabv4, costabv5;
      register vector unsigned char costab_perm = vec_lvsl(0, costab);

      samplesv1 = vec_ld(0, samples);
      samplesv2 = vec_ld(16, samples);
      samplesv1 = vec_perm(samplesv1, samplesv2, samples_perm);
      samplesv3 = vec_ld(32, samples);
      samplesv2 = vec_perm(samplesv2, samplesv3, samples_perm);
      samplesv4 = vec_ld(48, samples);
      samplesv3 = vec_perm(samplesv3, samplesv4, samples_perm);
      samplesv5 = vec_ld(64, samples);
      samplesv4 = vec_perm(samplesv4, samplesv5, samples_perm);
      samplesv6 = vec_ld(80, samples);
      samplesv5 = vec_perm(samplesv5, samplesv6, samples_perm);
      samplesv7 = vec_ld(96, samples);
      samplesv6 = vec_perm(samplesv6, samplesv7, samples_perm);
      samplesv8 = vec_ld(112, samples);
      samplesv7 = vec_perm(samplesv7, samplesv8, samples_perm);
      samplesv9 = vec_ld(128, samples);
      samplesv8 = vec_perm(samplesv8, samplesv9, samples_perm);

      temp1 = vec_add(samplesv1,
                      vec_perm(samplesv8, samplesv8, reverse));
      //vec_st(temp1, 0, b1);
      b1v0 = temp1;
      temp1 = vec_add(samplesv2,
                      vec_perm(samplesv7, samplesv7, reverse));
      //vec_st(temp1, 16, b1);
      b1v1 = temp1;
      temp1 = vec_add(samplesv3,
                      vec_perm(samplesv6, samplesv6, reverse));
      //vec_st(temp1, 32, b1);
      b1v2 = temp1;
      temp1 = vec_add(samplesv4,
                      vec_perm(samplesv5, samplesv5, reverse));
      //vec_st(temp1, 48, b1);
      b1v3 = temp1;

      costabv1 = vec_ld(0, costab);
      costabv2 = vec_ld(16, costab);
      costabv1 = vec_perm(costabv1, costabv2, costab_perm);
      costabv3 = vec_ld(32, costab);
      costabv2 = vec_perm(costabv2, costabv3, costab_perm);
      costabv4 = vec_ld(48, costab);
      costabv3 = vec_perm(costabv3, costabv4, costab_perm);
      costabv5 = vec_ld(64, costab);
      costabv4 = vec_perm(costabv4, costabv5, costab_perm);

      temp1 = vec_sub(vec_perm(samplesv4, samplesv4, reverse),
                      samplesv5);
      temp2 = vec_madd(temp1,
                       vec_perm(costabv4, costabv4, reverse),
                       vczero);
      //vec_st(temp2, 64, b1);
      b1v4 = temp2;

      temp1 = vec_sub(vec_perm(samplesv3, samplesv3, reverse),
                      samplesv6);
      temp2 = vec_madd(temp1,
                       vec_perm(costabv3, costabv3, reverse),
                       vczero);
      //vec_st(temp2, 80, b1);
      b1v5 = temp2;
      temp1 = vec_sub(vec_perm(samplesv2, samplesv2, reverse),
                      samplesv7);
      temp2 = vec_madd(temp1,
                       vec_perm(costabv2, costabv2, reverse),
                       vczero);
      //vec_st(temp2, 96, b1);
      b1v6 = temp2;

      temp1 = vec_sub(vec_perm(samplesv1, samplesv1, reverse),
                      samplesv8);
      temp2 = vec_madd(temp1,
                       vec_perm(costabv1, costabv1, reverse),
                       vczero);
      //vec_st(temp2, 112, b1);
      b1v7 = temp2;

    }

    {
      register vector float
        b2v0, b2v1, b2v2, b2v3,
        b2v4, b2v5, b2v6, b2v7;
      {
        register real *costab = mp3lib_pnts[1];
        register vector float costabv1r, costabv2r, costabv1, costabv2, costabv3;
        register vector unsigned char costab_perm = vec_lvsl(0, costab);

        costabv1 = vec_ld(0, costab);
        costabv2 = vec_ld(16, costab);
        costabv1 = vec_perm(costabv1, costabv2, costab_perm);
        costabv3 = vec_ld(32, costab);
        costabv2 = vec_perm(costabv2, costabv3, costab_perm);
        costabv1r = vec_perm(costabv1, costabv1, reverse);
        costabv2r = vec_perm(costabv2, costabv2, reverse);

        temp1 = vec_add(b1v0, vec_perm(b1v3, b1v3, reverse));
        //vec_st(temp1, 0, b2);
        b2v0 = temp1;
        temp1 = vec_add(b1v1, vec_perm(b1v2, b1v2, reverse));
        //vec_st(temp1, 16, b2);
        b2v1 = temp1;
        temp2 = vec_sub(vec_perm(b1v1, b1v1, reverse), b1v2);
        temp1 = vec_madd(temp2, costabv2r, vczero);
        //vec_st(temp1, 32, b2);
        b2v2 = temp1;
        temp2 = vec_sub(vec_perm(b1v0, b1v0, reverse), b1v3);
        temp1 = vec_madd(temp2, costabv1r, vczero);
        //vec_st(temp1, 48, b2);
        b2v3 = temp1;
        temp1 = vec_add(b1v4, vec_perm(b1v7, b1v7, reverse));
        //vec_st(temp1, 64, b2);
        b2v4 = temp1;
        temp1 = vec_add(b1v5, vec_perm(b1v6, b1v6, reverse));
        //vec_st(temp1, 80, b2);
        b2v5 = temp1;
        temp2 = vec_sub(b1v6, vec_perm(b1v5, b1v5, reverse));
        temp1 = vec_madd(temp2, costabv2r, vczero);
        //vec_st(temp1, 96, b2);
        b2v6 = temp1;
        temp2 = vec_sub(b1v7, vec_perm(b1v4, b1v4, reverse));
        temp1 = vec_madd(temp2, costabv1r, vczero);
        //vec_st(temp1, 112, b2);
        b2v7 = temp1;
      }

      {
        register real *costab = mp3lib_pnts[2];


        vector float costabv1r, costabv1, costabv2;
        vector unsigned char costab_perm = vec_lvsl(0, costab);

        costabv1 = vec_ld(0, costab);
        costabv2 = vec_ld(16, costab);
        costabv1 = vec_perm(costabv1, costabv2, costab_perm);
        costabv1r = vec_perm(costabv1, costabv1, reverse);

        temp1 = vec_add(b2v0, vec_perm(b2v1, b2v1, reverse));
        vec_st(temp1, 0, b1);
        temp2 = vec_sub(vec_perm(b2v0, b2v0, reverse), b2v1);
        temp1 = vec_madd(temp2, costabv1r, vczero);
        vec_st(temp1, 16, b1);

        temp1 = vec_add(b2v2, vec_perm(b2v3, b2v3, reverse));
        vec_st(temp1, 32, b1);
        temp2 = vec_sub(b2v3, vec_perm(b2v2, b2v2, reverse));
        temp1 = vec_madd(temp2, costabv1r, vczero);
        vec_st(temp1, 48, b1);

        temp1 = vec_add(b2v4, vec_perm(b2v5, b2v5, reverse));
        vec_st(temp1, 64, b1);
        temp2 = vec_sub(vec_perm(b2v4, b2v4, reverse), b2v5);
        temp1 = vec_madd(temp2, costabv1r, vczero);
        vec_st(temp1, 80, b1);

        temp1 = vec_add(b2v6, vec_perm(b2v7, b2v7, reverse));
        vec_st(temp1, 96, b1);
        temp2 = vec_sub(b2v7, vec_perm(b2v6, b2v6, reverse));
        temp1 = vec_madd(temp2, costabv1r, vczero);
        vec_st(temp1, 112, b1);

      }
    }
  }

#endif /* ALTIVEC_USE_REFERENCE_C_CODE */

  {
    register real const cos0 = mp3lib_pnts[3][0];
    register real const cos1 = mp3lib_pnts[3][1];

    b2[0x00] = b1[0x00] + b1[0x03];
    b2[0x01] = b1[0x01] + b1[0x02];
    b2[0x02] = (b1[0x01] - b1[0x02]) * cos1;
    b2[0x03] = (b1[0x00] - b1[0x03]) * cos0;
    b2[0x04] = b1[0x04] + b1[0x07];
    b2[0x05] = b1[0x05] + b1[0x06];
    b2[0x06] = (b1[0x06] - b1[0x05]) * cos1;
    b2[0x07] = (b1[0x07] - b1[0x04]) * cos0;
    b2[0x08] = b1[0x08] + b1[0x0B];
    b2[0x09] = b1[0x09] + b1[0x0A];
    b2[0x0A] = (b1[0x09] - b1[0x0A]) * cos1;
    b2[0x0B] = (b1[0x08] - b1[0x0B]) * cos0;
    b2[0x0C] = b1[0x0C] + b1[0x0F];
    b2[0x0D] = b1[0x0D] + b1[0x0E];
    b2[0x0E] = (b1[0x0E] - b1[0x0D]) * cos1;
    b2[0x0F] = (b1[0x0F] - b1[0x0C]) * cos0;
    b2[0x10] = b1[0x10] + b1[0x13];
    b2[0x11] = b1[0x11] + b1[0x12];
    b2[0x12] = (b1[0x11] - b1[0x12]) * cos1;
    b2[0x13] = (b1[0x10] - b1[0x13]) * cos0;
    b2[0x14] = b1[0x14] + b1[0x17];
    b2[0x15] = b1[0x15] + b1[0x16];
    b2[0x16] = (b1[0x16] - b1[0x15]) * cos1;
    b2[0x17] = (b1[0x17] - b1[0x14]) * cos0;
    b2[0x18] = b1[0x18] + b1[0x1B];
    b2[0x19] = b1[0x19] + b1[0x1A];
    b2[0x1A] = (b1[0x19] - b1[0x1A]) * cos1;
    b2[0x1B] = (b1[0x18] - b1[0x1B]) * cos0;
    b2[0x1C] = b1[0x1C] + b1[0x1F];
    b2[0x1D] = b1[0x1D] + b1[0x1E];
    b2[0x1E] = (b1[0x1E] - b1[0x1D]) * cos1;
    b2[0x1F] = (b1[0x1F] - b1[0x1C]) * cos0;
  }

  {
    register real const cos0 = mp3lib_pnts[4][0];

    b1[0x00] = b2[0x00] + b2[0x01];
    b1[0x01] = (b2[0x00] - b2[0x01]) * cos0;
    b1[0x02] = b2[0x02] + b2[0x03];
    b1[0x03] = (b2[0x03] - b2[0x02]) * cos0;
    b1[0x02] += b1[0x03];

    b1[0x04] = b2[0x04] + b2[0x05];
    b1[0x05] = (b2[0x04] - b2[0x05]) * cos0;
    b1[0x06] = b2[0x06] + b2[0x07];
    b1[0x07] = (b2[0x07] - b2[0x06]) * cos0;
    b1[0x06] += b1[0x07];
    b1[0x04] += b1[0x06];
    b1[0x06] += b1[0x05];
    b1[0x05] += b1[0x07];

    b1[0x08] = b2[0x08] + b2[0x09];
    b1[0x09] = (b2[0x08] - b2[0x09]) * cos0;
    b1[0x0A] = b2[0x0A] + b2[0x0B];
    b1[0x0B] = (b2[0x0B] - b2[0x0A]) * cos0;
    b1[0x0A] += b1[0x0B];

    b1[0x0C] = b2[0x0C] + b2[0x0D];
    b1[0x0D] = (b2[0x0C] - b2[0x0D]) * cos0;
    b1[0x0E] = b2[0x0E] + b2[0x0F];
    b1[0x0F] = (b2[0x0F] - b2[0x0E]) * cos0;
    b1[0x0E] += b1[0x0F];
    b1[0x0C] += b1[0x0E];
    b1[0x0E] += b1[0x0D];
    b1[0x0D] += b1[0x0F];

    b1[0x10] = b2[0x10] + b2[0x11];
    b1[0x11] = (b2[0x10] - b2[0x11]) * cos0;
    b1[0x12] = b2[0x12] + b2[0x13];
    b1[0x13] = (b2[0x13] - b2[0x12]) * cos0;
    b1[0x12] += b1[0x13];

    b1[0x14] = b2[0x14] + b2[0x15];
    b1[0x15] = (b2[0x14] - b2[0x15]) * cos0;
    b1[0x16] = b2[0x16] + b2[0x17];
    b1[0x17] = (b2[0x17] - b2[0x16]) * cos0;
    b1[0x16] += b1[0x17];
    b1[0x14] += b1[0x16];
    b1[0x16] += b1[0x15];
    b1[0x15] += b1[0x17];

    b1[0x18] = b2[0x18] + b2[0x19];
    b1[0x19] = (b2[0x18] - b2[0x19]) * cos0;
    b1[0x1A] = b2[0x1A] + b2[0x1B];
    b1[0x1B] = (b2[0x1B] - b2[0x1A]) * cos0;
    b1[0x1A] += b1[0x1B];

    b1[0x1C] = b2[0x1C] + b2[0x1D];
    b1[0x1D] = (b2[0x1C] - b2[0x1D]) * cos0;
    b1[0x1E] = b2[0x1E] + b2[0x1F];
    b1[0x1F] = (b2[0x1F] - b2[0x1E]) * cos0;
    b1[0x1E] += b1[0x1F];
    b1[0x1C] += b1[0x1E];
    b1[0x1E] += b1[0x1D];
    b1[0x1D] += b1[0x1F];
  }

  out0[0x10*16] = b1[0x00];
  out0[0x10*12] = b1[0x04];
  out0[0x10* 8] = b1[0x02];
  out0[0x10* 4] = b1[0x06];
  out0[0x10* 0] = b1[0x01];
  out1[0x10* 0] = b1[0x01];
  out1[0x10* 4] = b1[0x05];
  out1[0x10* 8] = b1[0x03];
  out1[0x10*12] = b1[0x07];

  b1[0x08] += b1[0x0C];
  out0[0x10*14] = b1[0x08];
  b1[0x0C] += b1[0x0A];
  out0[0x10*10] = b1[0x0C];
  b1[0x0A] += b1[0x0E];
  out0[0x10* 6] = b1[0x0A];
  b1[0x0E] += b1[0x09];
  out0[0x10* 2] = b1[0x0E];
  b1[0x09] += b1[0x0D];
  out1[0x10* 2] = b1[0x09];
  b1[0x0D] += b1[0x0B];
  out1[0x10* 6] = b1[0x0D];
  b1[0x0B] += b1[0x0F];
  out1[0x10*10] = b1[0x0B];
  out1[0x10*14] = b1[0x0F];

  b1[0x18] += b1[0x1C];
  out0[0x10*15] = b1[0x10] + b1[0x18];
  out0[0x10*13] = b1[0x18] + b1[0x14];
  b1[0x1C] += b1[0x1A];
  out0[0x10*11] = b1[0x14] + b1[0x1C];
  out0[0x10* 9] = b1[0x1C] + b1[0x12];
  b1[0x1A] += b1[0x1E];
  out0[0x10* 7] = b1[0x12] + b1[0x1A];
  out0[0x10* 5] = b1[0x1A] + b1[0x16];
  b1[0x1E] += b1[0x19];
  out0[0x10* 3] = b1[0x16] + b1[0x1E];
  out0[0x10* 1] = b1[0x1E] + b1[0x11];
  b1[0x19] += b1[0x1D];
  out1[0x10* 1] = b1[0x11] + b1[0x19];
  out1[0x10* 3] = b1[0x19] + b1[0x15];
  b1[0x1D] += b1[0x1B];
  out1[0x10* 5] = b1[0x15] + b1[0x1D];
  out1[0x10* 7] = b1[0x1D] + b1[0x13];
  b1[0x1B] += b1[0x1F];
  out1[0x10* 9] = b1[0x13] + b1[0x1B];
  out1[0x10*11] = b1[0x1B] + b1[0x17];
  out1[0x10*13] = b1[0x17] + b1[0x1F];
  out1[0x10*15] = b1[0x1F];
}
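
Each of the vectorized passes implements the same butterfly shape as the reference C path above; one pass in generic form (a sketch; 'real' is mp3lib's sample type, and later passes alternate the sign of the difference term between sub-blocks, as the reference code shows):

/* One DCT64 butterfly pass over 2*n values: the first half sums
 * mirrored pairs, the second half scales their differences by the
 * cosine table. Pass 1 runs with n = 16 over the input; passes 2 and
 * 3 run with n = 8 and n = 4 over each sub-block. */
static void dct64_pass(real *out, const real *in,
                       const real *costab, int n)
{
    int i;
    for (i = 0; i < n; i++) {
        out[i]     = in[i] + in[2 * n - 1 - i];
        out[n + i] = (in[n - 1 - i] - in[n + i]) * costab[n - 1 - i];
    }
}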