Example #1
void mpir_ifft_trunc(mp_ptr * ii, mp_size_t n, mp_bitcnt_t w, 
                   mp_ptr * t1, mp_ptr * t2, mp_size_t trunc)
{
    mp_size_t i;
    mp_size_t limbs = (w*n)/GMP_LIMB_BITS;
   
    if (trunc == 2*n)
        mpir_ifft_radix2(ii, n, w, t1, t2);
    else if (trunc <= n)
    {
        mpir_ifft_trunc(ii, n/2, 2*w, t1, t2, trunc);

        for (i = 0; i < trunc; i++)
            mpn_add_n(ii[i], ii[i], ii[i], limbs + 1);
    } else
    {
        mpir_ifft_radix2(ii, n/2, 2*w, t1, t2);

        for (i = trunc - n; i < n; i++)
            mpir_fft_adjust(ii[i+n], ii[i], i, limbs, w);
        
        mpir_ifft_trunc1(ii+n, n/2, 2*w, t1, t2, trunc - n);

        for (i = 0; i < trunc - n; i++) 
        {   
            mpir_ifft_butterfly(*t1, *t2, ii[i], ii[n+i], i, limbs, w);
   
            MP_PTR_SWAP(ii[i],   *t1);
            MP_PTR_SWAP(ii[n+i], *t2);
        }

        for (i = trunc - n; i < n; i++)
            mpn_add_n(ii[i], ii[i], ii[i], limbs + 1);
    }
}
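Every example on this page leans on MP_PTR_SWAP to swap whole buffers by pointer instead of copying limbs. For orientation, a minimal sketch of such a macro (the real definition lives in GMP/MPIR's gmp-impl.h; this paraphrase assumes only the mp_ptr type):

/* Pointer-swap sketch in the style of gmp-impl.h's MP_PTR_SWAP
   (illustration, not the verbatim definition). */
#define PTR_SWAP_SKETCH(x, y)   \
  do {                          \
    mp_ptr __swap_tmp = (x);    \
    (x) = (y);                  \
    (y) = __swap_tmp;           \
  } while (0)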
Example #2
void mpir_fft_trunc1_twiddle(mp_ptr * ii, mp_size_t is,
      mp_size_t n, mp_bitcnt_t w, mp_ptr * t1, mp_ptr * t2,
      mp_size_t ws, mp_size_t r, mp_size_t c, mp_size_t rs, mp_size_t trunc)
{
   mp_size_t i;
   mp_size_t limbs = (w*n)/GMP_LIMB_BITS;
   
   if (trunc == 2*n)
      mpir_fft_radix2_twiddle(ii, is, n, w, t1, t2, ws, r, c, rs);
   else if (trunc <= n)
   {
      for (i = 0; i < n; i++)
         mpn_add_n(ii[i*is], ii[i*is], ii[(i+n)*is], limbs + 1);
      
      mpir_fft_trunc1_twiddle(ii, is, n/2, 2*w, t1, t2, ws, r, c, 2*rs, trunc);
   } else
   {
      for (i = 0; i < n; i++) 
      {   
         mpir_fft_butterfly(*t1, *t2, ii[i*is], ii[(n+i)*is], i, limbs, w);
   
         MP_PTR_SWAP(ii[i*is],     *t1);
         MP_PTR_SWAP(ii[(n+i)*is], *t2);
      }

      mpir_fft_radix2_twiddle(ii, is, n/2, 2*w, t1, t2, ws, r, c, 2*rs);  
      mpir_fft_trunc1_twiddle(ii + n*is, is, n/2, 2*w, 
                                     t1, t2, ws, r + rs, c, 2*rs, trunc - n);
   }
}
Example #3
void mpir_ifft_trunc1(mp_ptr * ii, mp_size_t n, mp_bitcnt_t w, 
                    mp_ptr * t1, mp_ptr * t2, mp_size_t trunc)
{
    mp_size_t i;
    mp_size_t limbs = (w*n)/GMP_LIMB_BITS;
    
    if (trunc == 2*n)
        mpir_ifft_radix2(ii, n, w, t1, t2);
    else if (trunc <= n)
    {
        for (i = trunc; i < n; i++)
        {
            mpn_add_n(ii[i], ii[i], ii[i+n], limbs + 1);
            mpn_div_2expmod_2expp1(ii[i], ii[i], limbs, 1);
        }
      
        mpir_ifft_trunc1(ii, n/2, 2*w, t1, t2, trunc);

        for (i = 0; i < trunc; i++)
        {
#if  HAVE_NATIVE_mpn_addsub_n
            mpn_addsub_n(ii[i], ii[i], ii[i], ii[n+i], limbs + 1);
#else
            mpn_add_n(ii[i], ii[i], ii[i], limbs + 1);
            mpn_sub_n(ii[i], ii[i], ii[n+i], limbs + 1);
#endif
        }
    } else
    {
        mpir_ifft_radix2(ii, n/2, 2*w, t1, t2);

        for (i = trunc - n; i < n; i++)
        {
            mpn_sub_n(ii[i+n], ii[i], ii[i+n], limbs + 1);
            mpir_fft_adjust(*t1, ii[i+n], i, limbs, w);
            mpn_add_n(ii[i], ii[i], ii[i+n], limbs + 1);
            MP_PTR_SWAP(ii[i+n], *t1);
        }
   
        mpir_ifft_trunc1(ii+n, n/2, 2*w, t1, t2, trunc - n);

        for (i = 0; i < trunc - n; i++) 
        {   
            mpir_ifft_butterfly(*t1, *t2, ii[i], ii[n+i], i, limbs, w);
   
            MP_PTR_SWAP(ii[i],   *t1);
            MP_PTR_SWAP(ii[n+i], *t2);
        }
    }
}
Example #4
static __inline__ void
_20141102_shift_rows(tmod_mat_t R, tmod_mat_t Q, mp_limb_t* P, slong n,
  int P_parity)
// multiply R by a permutation matrix on the left; use Q as scratch
 {
  // if P_parity is negative then P is not the identity
  if( (1==P_parity) && is_identity_permutation(P,n) )
   return;
  {
   mp_limb_t** C=Q->rows;
   mp_limb_t** B=R->rows;
   slong i;
   slong size=n*sizeof(mp_limb_t);
   // copy the row pointers, then permute them: source row i becomes row P[i]
   memcpy(C,B,n*sizeof(mp_limb_t*));
   for(i=0;i<n;i++)
    B[ P[i] ]=C[ i ];
   // C destroyed
   tmod_mat_virginize(Q);
   // C restored to virgin state
   for(i=0;i<n;i++)
    memcpy(C[i],B[i],size);
   // matrix contents copied
   R->rows=C; Q->rows=B;
  }
  MP_PTR_SWAP(R->entries,Q->entries);
 }
Example #5
void mpir_fft_radix2_twiddle(mp_ptr * ii, mp_size_t is,
      mp_size_t n, mp_bitcnt_t w, mp_ptr * t1, mp_ptr * t2,
      mp_size_t ws, mp_size_t r, mp_size_t c, mp_size_t rs)
{
   mp_size_t i;
   mp_size_t limbs;
start:
   limbs = (w*n)/GMP_LIMB_BITS;
   
   if (n == 1) 
   {
      mp_size_t tw1 = r*c;
      mp_size_t tw2 = tw1 + rs*c;

      mpir_fft_butterfly_twiddle(*t1, *t2, ii[0], ii[is], limbs, tw1*ws, tw2*ws);
      MP_PTR_SWAP(ii[0],  *t1);
      MP_PTR_SWAP(ii[is], *t2);

      return;
   }

   for (i = 0; i < n; i++) 
   {   
      mpir_fft_butterfly(*t1, *t2, ii[i*is], ii[(n+i)*is], i, limbs, w);
   
      MP_PTR_SWAP(ii[i*is],     *t1);
      MP_PTR_SWAP(ii[(n+i)*is], *t2);
   }

   mpir_fft_radix2_twiddle(ii, is, n/2, 2*w, t1, t2, ws, r, c, 2*rs);
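   /* The disabled branch below shows the equivalent iterative form of
      this tail call: adjust the arguments in place and jump back to
      the "start" label instead of recursing. */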
#if 0
   ii += n * is;
   n /= 2;
   w += w;
   r += rs;
   rs += rs;
   goto start;
#else
   mpir_fft_radix2_twiddle(ii+n*is, is, n/2, 2*w, t1, t2, ws, r + rs, c, 2*rs);
#endif
}
Example #6
void
mpq_inv (mpq_ptr dest, mpq_srcptr src)
{
  mp_size_t num_size = SIZ(NUM(src));
  mp_size_t den_size = SIZ(DEN(src));

  if (num_size < 0)
    {
      num_size = -num_size;
      den_size = -den_size;
    }
  else if (UNLIKELY (num_size == 0))
    DIVIDE_BY_ZERO;

  SIZ(DEN(dest)) = num_size;
  SIZ(NUM(dest)) = den_size;

  /* If dest == src we may just swap the numerator and denominator;
     we ensured that the new denominator is positive.  */

  if (dest == src)
    {
      MP_PTR_SWAP (PTR(NUM(dest)), PTR(DEN(dest)));
      MP_SIZE_T_SWAP (ALLOC(NUM(dest)), ALLOC(DEN(dest)));
    }
  else
    {
      mp_ptr dp;

      den_size = ABS (den_size);
      dp = MPZ_NEWALLOC (NUM(dest), den_size);
      MPN_COPY (dp, PTR(DEN(src)), den_size);

      dp = MPZ_NEWALLOC (DEN(dest), num_size);
      MPN_COPY (dp, PTR(NUM(src)), num_size);
    }
}
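mpq_inv belongs to GMP's public mpq interface, so the dest == src branch above can be exercised directly. A small usage sketch (assumes a GMP install; link with -lgmp):

#include <stdio.h>
#include <gmp.h>

int main (void)
{
  mpq_t q;
  mpq_init (q);
  mpq_set_si (q, -3, 7);   /* q = -3/7 */

  mpq_inv (q, q);          /* in place, so this takes the dest == src path */
  gmp_printf ("%Qd\n", q); /* prints -7/3; the sign stays on the numerator */

  mpq_clear (q);
  return 0;
}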
Example #7
void
mpn_toom_interpolate_12pts (mp_ptr pp, mp_ptr r1, mp_ptr r3, mp_ptr r5,
			mp_size_t n, mp_size_t spt, int half, mp_ptr wsi)
{
  mp_limb_t cy;
  mp_size_t n3;
  mp_size_t n3p1;
  n3 = 3 * n;
  n3p1 = n3 + 1;

#define   r4    (pp + n3)			/* 3n+1 */
#define   r2    (pp + 7 * n)			/* 3n+1 */
#define   r0    (pp +11 * n)			/* s+t <= 2*n */

  /******************************* interpolation *****************************/
  if (half != 0) {
    cy = mpn_sub_n (r3, r3, r0, spt);
    MPN_DECR_U (r3 + spt, n3p1 - spt, cy);

    cy = DO_mpn_sublsh_n (r2, r0, spt, 10, wsi);
    MPN_DECR_U (r2 + spt, n3p1 - spt, cy);
    DO_mpn_subrsh(r5, n3p1, r0, spt, 2, wsi);

    cy = DO_mpn_sublsh_n (r1, r0, spt, 20, wsi);
    MPN_DECR_U (r1 + spt, n3p1 - spt, cy);
    DO_mpn_subrsh(r4, n3p1, r0, spt, 4, wsi);
  }

  r4[n3] -= DO_mpn_sublsh_n (r4 + n, pp, 2 * n, 20, wsi);
  DO_mpn_subrsh(r1 + n, 2 * n + 1, pp, 2 * n, 4, wsi);

#if HAVE_NATIVE_mpn_add_n_sub_n
  mpn_add_n_sub_n (r1, r4, r4, r1, n3p1);
#else
  ASSERT_NOCARRY(mpn_add_n (wsi, r1, r4, n3p1));
  mpn_sub_n (r4, r4, r1, n3p1); /* can be negative */
  MP_PTR_SWAP(r1, wsi);
#endif

  r5[n3] -= DO_mpn_sublsh_n (r5 + n, pp, 2 * n, 10, wsi);
  DO_mpn_subrsh(r2 + n, 2 * n + 1, pp, 2 * n, 2, wsi);

#if HAVE_NATIVE_mpn_add_n_sub_n
  mpn_add_n_sub_n (r2, r5, r5, r2, n3p1);
#else
  mpn_sub_n (wsi, r5, r2, n3p1); /* can be negative */
  ASSERT_NOCARRY(mpn_add_n (r2, r2, r5, n3p1));
  MP_PTR_SWAP(r5, wsi);
#endif

  r3[n3] -= mpn_sub_n (r3+n, r3+n, pp, 2 * n);

#if AORSMUL_FASTER_AORS_AORSLSH
  mpn_submul_1 (r4, r5, n3p1, 257); /* can be negative */
#else
  mpn_sub_n (r4, r4, r5, n3p1); /* can be negative */
  DO_mpn_sublsh_n (r4, r5, n3p1, 8, wsi); /* can be negative */
#endif
  /* A division by 2835x4 follows. Warning: the operand can be negative! */
  mpn_divexact_by2835x4(r4, r4, n3p1);
  if ((r4[n3] & (GMP_NUMB_MAX << (GMP_NUMB_BITS-3))) != 0)
    r4[n3] |= (GMP_NUMB_MAX << (GMP_NUMB_BITS-2));

#if AORSMUL_FASTER_2AORSLSH
  mpn_addmul_1 (r5, r4, n3p1, 60); /* can be negative */
#else
  DO_mpn_sublsh_n (r5, r4, n3p1, 2, wsi); /* can be negative */
  DO_mpn_addlsh_n (r5, r4, n3p1, 6, wsi); /* can give a carry */
#endif
  mpn_divexact_by255(r5, r5, n3p1);

  ASSERT_NOCARRY(DO_mpn_sublsh_n (r2, r3, n3p1, 5, wsi));

#if AORSMUL_FASTER_3AORSLSH
  ASSERT_NOCARRY(mpn_submul_1 (r1, r2, n3p1, 100));
#else
  ASSERT_NOCARRY(DO_mpn_sublsh_n (r1, r2, n3p1, 6, wsi));
  ASSERT_NOCARRY(DO_mpn_sublsh_n (r1, r2, n3p1, 5, wsi));
  ASSERT_NOCARRY(DO_mpn_sublsh_n (r1, r2, n3p1, 2, wsi));
#endif
  ASSERT_NOCARRY(DO_mpn_sublsh_n (r1, r3, n3p1, 9, wsi));
  mpn_divexact_by42525(r1, r1, n3p1);

#if AORSMUL_FASTER_AORS_2AORSLSH
  ASSERT_NOCARRY(mpn_submul_1 (r2, r1, n3p1, 225));
#else
  ASSERT_NOCARRY(mpn_sub_n (r2, r2, r1, n3p1));
  ASSERT_NOCARRY(DO_mpn_addlsh_n (r2, r1, n3p1, 5, wsi));
  ASSERT_NOCARRY(DO_mpn_sublsh_n (r2, r1, n3p1, 8, wsi));
#endif
  mpn_divexact_by9x4(r2, r2, n3p1);

  ASSERT_NOCARRY(mpn_sub_n (r3, r3, r2, n3p1));

  mpn_sub_n (r4, r2, r4, n3p1);
  ASSERT_NOCARRY(mpn_rshift(r4, r4, n3p1, 1));
  ASSERT_NOCARRY(mpn_sub_n (r2, r2, r4, n3p1));

  mpn_add_n (r5, r5, r1, n3p1);
  ASSERT_NOCARRY(mpn_rshift(r5, r5, n3p1, 1));

  /* last interpolation steps... */
  ASSERT_NOCARRY(mpn_sub_n (r3, r3, r1, n3p1));
  ASSERT_NOCARRY(mpn_sub_n (r1, r1, r5, n3p1));
  /* ... could be mixed with recomposition
	||H-r5|M-r5|L-r5|   ||H-r1|M-r1|L-r1|
  */

  /***************************** recomposition *******************************/
  /*
    pp[] prior to operations:
    |M r0|L r0|___||H r2|M r2|L r2|___||H r4|M r4|L r4|____|H_r6|L r6|pp

    summation scheme for remaining operations:
    |__12|n_11|n_10|n__9|n__8|n__7|n__6|n__5|n__4|n__3|n__2|n___|n___|pp
    |M r0|L r0|___||H r2|M r2|L r2|___||H r4|M r4|L r4|____|H_r6|L r6|pp
	||H r1|M r1|L r1|   ||H r3|M r3|L r3|   ||H_r5|M_r5|L_r5|
  */

  cy = mpn_add_n (pp + n, pp + n, r5, n);
  cy = mpn_add_1 (pp + 2 * n, r5 + n, n, cy);
#if HAVE_NATIVE_mpn_add_nc
  cy = r5[n3] + mpn_add_nc(pp + n3, pp + n3, r5 + 2 * n, n, cy);
#else
  MPN_INCR_U (r5 + 2 * n, n + 1, cy);
  cy = r5[n3] + mpn_add_n (pp + n3, pp + n3, r5 + 2 * n, n);
#endif
  MPN_INCR_U (pp + n3 + n, 2 * n + 1, cy);

  pp[2 * n3]+= mpn_add_n (pp + 5 * n, pp + 5 * n, r3, n);
  cy = mpn_add_1 (pp + 2 * n3, r3 + n, n, pp[2 * n3]);
#if HAVE_NATIVE_mpn_add_nc
  cy = r3[n3] + mpn_add_nc(pp + 7 * n, pp + 7 * n, r3 + 2 * n, n, cy);
#else
  MPN_INCR_U (r3 + 2 * n, n + 1, cy);
  cy = r3[n3] + mpn_add_n (pp + 7 * n, pp + 7 * n, r3 + 2 * n, n);
#endif
  MPN_INCR_U (pp + 8 * n, 2 * n + 1, cy);

  pp[10*n]+=mpn_add_n (pp + 9 * n, pp + 9 * n, r1, n);
  if (half) {
    cy = mpn_add_1 (pp + 10 * n, r1 + n, n, pp[10 * n]);
#if HAVE_NATIVE_mpn_add_nc
    if (LIKELY (spt > n)) {
      cy = r1[n3] + mpn_add_nc(pp + 11 * n, pp + 11 * n, r1 + 2 * n, n, cy);
      MPN_INCR_U (pp + 4 * n3, spt - n, cy);
    } else {
      ASSERT_NOCARRY(mpn_add_nc(pp + 11 * n, pp + 11 * n, r1 + 2 * n, spt, cy));
    }
#else
    MPN_INCR_U (r1 + 2 * n, n + 1, cy);
    if (LIKELY (spt > n)) {
      cy = r1[n3] + mpn_add_n (pp + 11 * n, pp + 11 * n, r1 + 2 * n, n);
      MPN_INCR_U (pp + 4 * n3, spt - n, cy);
    } else {
      ASSERT_NOCARRY(mpn_add_n (pp + 11 * n, pp + 11 * n, r1 + 2 * n, spt));
    }
#endif
  } else {
    ASSERT_NOCARRY(mpn_add_1 (pp + 10 * n, r1 + n, spt, pp[10 * n]));
  }

#undef   r0
#undef   r2
#undef   r4
}
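Throughout the interpolation above, DO_mpn_sublsh_n(dst, src, n, s, ws) subtracts 2^s * src from dst and returns the borrow. Where no native mpn_sublsh_n exists, the generic fallback is essentially the shift-then-subtract pair sketched here (a paraphrase assuming the gmp.h mpn types, not the verbatim GMP helper):

/* dst[0..n-1] -= (src[0..n-1] << s), using ws as scratch for the
   shifted copy; returns the borrow. Assumes 0 < s < GMP_NUMB_BITS. */
static mp_limb_t
sublsh_n_sketch (mp_ptr dst, mp_srcptr src, mp_size_t n, unsigned int s, mp_ptr ws)
{
  mp_limb_t hi = mpn_lshift (ws, src, n, s); /* ws = src << s, hi = spilled bits */
  return hi + mpn_sub_n (dst, dst, ws, n);   /* borrow out of the low n limbs */
}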
Example #8
/* Perform a few steps, using some of mpn_nhgcd2, subtraction and division.
   Reduces the size by almost one limb or more, but never below the
   given size s. Return new size for a and b, or 0 if no more steps
   are possible. M = NULL is allowed, if M is not needed.

   Needs temporary space for division, n + 1 limbs, and for
   ngcd_matrix1_vector, n limbs. */
mp_size_t
mpn_ngcd_step (mp_size_t n, mp_ptr ap, mp_ptr bp, mp_size_t s,
	       struct ngcd_matrix *M, mp_ptr tp)
{
  struct ngcd_matrix1 M1;
  mp_limb_t mask;
  mp_limb_t ah, al, bh, bl;
  mp_size_t an, bn, qn;
  mp_ptr qp;
  mp_ptr rp;
  int col;

  ASSERT (n > s);

  mask = ap[n-1] | bp[n-1];
  ASSERT (mask > 0);

  if (n == s + 1)
    {
      if (mask < 4)
	goto subtract;

      ah = ap[n-1]; al = ap[n-2];
      bh = bp[n-1]; bl = bp[n-2];
    }
  else if (mask & GMP_NUMB_HIGHBIT)
    {
      ah = ap[n-1]; al = ap[n-2];
      bh = bp[n-1]; bl = bp[n-2];
    }
  else
    {
      int shift;

      count_leading_zeros (shift, mask);
      ah = MPN_EXTRACT_LIMB (shift, ap[n-1], ap[n-2]);
      al = MPN_EXTRACT_LIMB (shift, ap[n-2], ap[n-3]);
      bh = MPN_EXTRACT_LIMB (shift, bp[n-1], bp[n-2]);
      bl = MPN_EXTRACT_LIMB (shift, bp[n-2], bp[n-3]);
    }

  /* Try an mpn_nhgcd2 step */
  if (mpn_nhgcd2 (ah, al, bh, bl, &M1))
    {
      /* Multiply M <- M * M1 */
      if (M)
	ngcd_matrix_mul_1 (M, &M1);

      /* Multiply M1^{-1} (a;b) */
      return mpn_ngcd_matrix1_vector (&M1, n, ap, bp, tp);
    }

 subtract:
  /* There are two ways in which mpn_nhgcd2 can fail. Either one of ah and
     bh was too small, or ah, bh were (almost) equal. Perform one
     subtraction step (for possible cancellation of high limbs),
     followed by one division. */

  /* Since we must ensure that #(a-b) > s, we handle cancellation of
     high limbs explicitly up front. (FIXME: Or is it better to just
     subtract, normalize, and use an addition to undo if it turns out
     that the difference is too small?) */
  for (an = n; an > s; an--)
    if (ap[an-1] != bp[an-1])
      break;

  if (an == s)
    return 0;

  /* Maintain a > b. When needed, swap a and b, and let col keep track
     of how to update M. */
  if (ap[an-1] > bp[an-1])
    {
      /* a is largest. In the subtraction step, we need to update
	 column 1 of M */
      col = 1;
    }
  else
    {
      MP_PTR_SWAP (ap, bp);
      col = 0;
    }

  bn = n;
  MPN_NORMALIZE (bp, bn);  
  if (bn <= s)
    return 0;
  
  /* We have #a, #b > s. When is it possible that #(a-b) < s? For
     cancellation to happen, the numbers must be of the form

       a = x + 1, 0,            ..., 0,            al
       b = x    , GMP_NUMB_MAX, ..., GMP_NUMB_MAX, bl

     where al, bl denotes the least significant k limbs. If al < bl,
     then #(a-b) < k, and if also high(al) != 0, high(bl) != GMP_NUMB_MAX,
     then #(a-b) = k. If al >= bl, then #(a-b) = k + 1. */
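  /* Concrete instance with 8-bit limbs (least significant limb first),
     k = 1, s = 0: a = {al, 0, 6}, b = {bl, 255, 5}, so a - b = 256 + al - bl.
     If al < bl the difference fits in one limb; if al >= bl it needs two. */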

  if (ap[an-1] == bp[an-1] + 1)
    {
      mp_size_t k;
      int c;
      for (k = an-1; k > s; k--)
	if (ap[k-1] != 0 || bp[k-1] != GMP_NUMB_MAX)
	  break;

      MPN_CMP (c, ap, bp, k);
      if (c < 0)
	{
	  mp_limb_t cy;
	  
	  /* The limbs from k and up are cancelled. */
	  if (k == s)
	    return 0;
	  cy = mpn_sub_n (ap, ap, bp, k);
	  ASSERT (cy == 1);
	  an = k;
	}
      else
	{
	  ASSERT_NOCARRY (mpn_sub_n (ap, ap, bp, k));
	  ap[k] = 1;
	  an = k + 1;
	}
    }
  else
    ASSERT_NOCARRY (mpn_sub_n (ap, ap, bp, an));
  
  ASSERT (an > s);
  ASSERT (ap[an-1] > 0);
  ASSERT (bn > s);
  ASSERT (bp[bn-1] > 0);
  
  if (M)
    ngcd_matrix_update_1 (M, col);

  if (an < bn)
    {
      MPN_PTR_SWAP (ap, an, bp, bn);
      col ^= 1;
    }
  else if (an == bn)
    {
      int c;
      MPN_CMP (c, ap, bp, an);
      if (c < 0)
	{
	  MP_PTR_SWAP (ap, bp);
	  col ^= 1;
	}
    }

  /* Divide a / b. Store first the quotient (qn limbs) and then the
     remainder (bn limbs) starting at tp. */
  qn = an + 1 - bn;
  qp = tp;
  rp = tp + qn;

  /* FIXME: We could use an approximate division, that may return a
     too small quotient, and only guarantees that the size of r is
     almost the size of b. */
  mpn_tdiv_qr (qp, rp, 0, ap, an, bp, bn);
  qn -= (qp[qn -1] == 0);

  /* Normalize remainder */
  an = bn;
  for ( ; an > s; an--)
    if (rp[an-1] > 0)
      break;

  if (an > s)
    /* Include leading zero limbs */
    MPN_COPY (ap, rp, bn);
  else
    {
      /* Quotient is too large */
      mp_limb_t cy;

      cy = mpn_add (ap, bp, bn, rp, an);

      if (cy > 0)
	{
	  ASSERT (bn < n);
	  ap[bn] = cy;
	  bp[bn] = 0;
	  bn++;
	}

      MPN_DECR_U (qp, qn, 1);
      qn -= (qp[qn-1] == 0);
    }

  if (qn > 0 && M)
    ngcd_matrix_update_q (M, qp, qn, col);

  return bn;
}
Example #9
mp_size_t
mpn_pow_1 (mp_ptr rp, mp_srcptr bp, mp_size_t bn, mp_limb_t exp, mp_ptr tp)
{
    mp_limb_t x;
    int cnt, i;
    mp_size_t rn;
    int par;

    if (exp <= 1)
    {
        if (exp == 0)
        {
            rp[0] = 1;
            return 1;
        }
        else
        {
            MPN_COPY (rp, bp, bn);
            return bn;
        }
    }

    /* Count number of bits in exp, and compute where to put initial square in
       order to magically get results in the entry rp.  Use simple code,
       optimized for small exp.  For large exp, the bignum operations will take
       so much time that the slowness of this code will be negligible.  */
    par = 0;
    cnt = GMP_LIMB_BITS;
    for (x = exp; x != 0; x >>= 1)
    {
        par ^= x & 1;
        cnt--;
    }
    exp <<= cnt;

    if (bn == 1)
    {
        mp_limb_t bl = bp[0];

        if ((cnt & 1) != 0)
            MP_PTR_SWAP (rp, tp);

        mpn_sqr_n (rp, bp, bn);
        rn = 2 * bn;
        rn -= rp[rn - 1] == 0;

        for (i = GMP_LIMB_BITS - cnt - 1;;)
        {
            exp <<= 1;
            if ((exp & GMP_LIMB_HIGHBIT) != 0)
            {
                rp[rn] = mpn_mul_1 (rp, rp, rn, bl);
                rn += rp[rn] != 0;
            }

            if (--i == 0)
                break;

            mpn_sqr_n (tp, rp, rn);
            rn = 2 * rn;
            rn -= tp[rn - 1] == 0;
            MP_PTR_SWAP (rp, tp);
        }
    }
    else
    {
        if (((par ^ cnt) & 1) == 0)
            MP_PTR_SWAP (rp, tp);

        mpn_sqr_n (rp, bp, bn);
        rn = 2 * bn;
        rn -= rp[rn - 1] == 0;

        for (i = GMP_LIMB_BITS - cnt - 1;;)
        {
            exp <<= 1;
            if ((exp & GMP_LIMB_HIGHBIT) != 0)
            {
                rn = rn + bn - (mpn_mul (tp, rp, rn, bp, bn) == 0);
                MP_PTR_SWAP (rp, tp);
            }

            if (--i == 0)
                break;

            mpn_sqr_n (tp, rp, rn);
            rn = 2 * rn;
            rn -= tp[rn - 1] == 0;
            MP_PTR_SWAP (rp, tp);
        }
    }

    return rn;
}
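The par/cnt bookkeeping above decides, before any arithmetic happens, whether the squarings' ping-pong between rp and tp will leave the final result in rp; when the parity is wrong, the buffers are pre-swapped. The same buffer-alternation argument in a self-contained toy (plain unsigned arithmetic, hypothetical helper name, illustrative only):

#include <assert.h>
#include <stdio.h>

/* Left-to-right binary powering with two buffers, squaring into the
   other buffer and swapping each step, as mpn_pow_1 does with rp/tp.
   Assumes e >= 1 and no overflow. */
static unsigned long
pow_pingpong (unsigned long b, unsigned long e)
{
  unsigned long buf[2], x;
  int cur = 0, bits = 0, i;

  for (x = e; x != 0; x >>= 1)
    bits++;                               /* bit length of e */

  buf[cur] = b;
  for (i = bits - 2; i >= 0; i--)
    {
      buf[cur ^ 1] = buf[cur] * buf[cur]; /* square into the other buffer */
      cur ^= 1;                           /* ... and swap, like MP_PTR_SWAP */
      if ((e >> i) & 1)
        buf[cur] *= b;                    /* multiply in place */
    }
  return buf[cur];
}

int main (void)
{
  assert (pow_pingpong (3, 5) == 243);
  printf ("%lu\n", pow_pingpong (2, 10)); /* 1024 */
  return 0;
}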
Example #10
/* Temporary storage: Needs n limbs for the quotient, at qp. tp must
   point to an area large enough for the resulting cofactor, plus one
   limb extra. All in all, 2N + 1 if N is a bound for both inputs and
   outputs. */
mp_size_t
mpn_gcdext_subdiv_step (mp_ptr gp, mp_size_t *gn, mp_ptr up, mp_size_t *usizep,
			mp_ptr ap, mp_ptr bp, mp_size_t n,
			mp_ptr u0, mp_ptr u1, mp_size_t *unp,
			mp_ptr qp, mp_ptr tp)
{
  mp_size_t an, bn, un;
  mp_size_t qn;
  mp_size_t u0n;

  int swapped;

  an = bn = n;

  ASSERT (an > 0);
  ASSERT (ap[an-1] > 0 || bp[an-1] > 0);

  MPN_NORMALIZE (ap, an);
  MPN_NORMALIZE (bp, bn);

  un = *unp;

  swapped = 0;

  if (UNLIKELY (an == 0))
    {
    return_b:
      MPN_COPY (gp, bp, bn);
      *gn = bn;

      MPN_NORMALIZE (u0, un);
      MPN_COPY (up, u0, un);

      *usizep = swapped ? un : -un;

      return 0;
    }
  else if (UNLIKELY (bn == 0))
    {
      MPN_COPY (gp, ap, an);
      *gn = an;

      MPN_NORMALIZE (u1, un);
      MPN_COPY (up, u1, un);

      *usizep = swapped ? -un : un;

      return 0;
    }

  /* Arrange so that a > b, subtract an -= bn, and maintain
     normalization. */
  if (an < bn)
    {
      MPN_PTR_SWAP (ap, an, bp, bn);
      MP_PTR_SWAP (u0, u1);
      swapped ^= 1;
    }
  else if (an == bn)
    {
      int c;
      MPN_CMP (c, ap, bp, an);
      if (UNLIKELY (c == 0))
	{
	  MPN_COPY (gp, ap, an);
	  *gn = an;

	  /* Must return the smallest cofactor, +u1 or -u0 */
	  MPN_CMP (c, u0, u1, un);
	  ASSERT (c != 0 || (un == 1 && u0[0] == 1 && u1[0] == 1));

	  if (c < 0)
	    {
	      MPN_NORMALIZE (u0, un);
	      MPN_COPY (up, u0, un);
	      swapped ^= 1;
	    }
	  else
	    {
	      MPN_NORMALIZE_NOT_ZERO (u1, un);
	      MPN_COPY (up, u1, un);
	    }

	  *usizep = swapped ? -un : un;
	  return 0;
	}
      else if (c < 0)
	{
	  MP_PTR_SWAP (ap, bp);
	  MP_PTR_SWAP (u0, u1);
	  swapped ^= 1;
	}
    }
  /* Reduce a -= b, u1 += u0 */
  ASSERT_NOCARRY (mpn_sub (ap, ap, an, bp, bn));
  MPN_NORMALIZE (ap, an);
  ASSERT (an > 0);

  u1[un] = mpn_add_n (u1, u1, u0, un);
  un += (u1[un] > 0);

  /* Arrange so that a > b, and divide a = q b + r */
  if (an < bn)
    {
      MPN_PTR_SWAP (ap, an, bp, bn);
      MP_PTR_SWAP (u0, u1);
      swapped ^= 1;
    }
  else if (an == bn)
    {
      int c;
      MPN_CMP (c, ap, bp, an);
      if (UNLIKELY (c == 0))
	goto return_b;
      else if (c < 0)
	{
	  MP_PTR_SWAP (ap, bp);
	  MP_PTR_SWAP (u0, u1);
	  swapped ^= 1;
	}
    }

  /* Reduce a -= q b, u1 += q u0 */
  qn = an - bn + 1;
  mpn_tdiv_qr (qp, ap, 0, ap, an, bp, bn);

  if (mpn_zero_p (ap, bn))
    goto return_b;

  n = bn;

  /* Update u1 += q u0 */
  u0n = un;
  MPN_NORMALIZE (u0, u0n);

  if (u0n > 0)
    {
      qn -= (qp[qn - 1] == 0);

      if (qn > u0n)
	mpn_mul (tp, qp, qn, u0, u0n);
      else
	mpn_mul (tp, u0, u0n, qp, qn);

      if (qn + u0n > un)
	{
	  mp_size_t u1n = un;
	  un = qn + u0n;
	  un -= (tp[un-1] == 0);
	  u1[un] = mpn_add (u1, tp, un, u1, u1n);
	}
      else
	{
	  u1[un] = mpn_add (u1, u1, un, tp, qn + u0n);
	}

      un += (u1[un] > 0);
    }

  *unp = un;
  return n;
}
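The heart of the step above is that every a -= q*b is mirrored by u1 += q*u0, so the cofactors stay in lock-step with the operands while `swapped` tracks the sign. The same invariant in a toy word-sized gcdext (an illustrative sketch, not the mpn code; assumes A >= B > 0):

#include <stdio.h>

/* Toy subtract-and-divide gcdext mirroring the u0/u1 updates above:
   magnitudes only, with the sign of the returned cofactor recovered
   from the swap parity, as in mpn_gcdext_subdiv_step. */
static long
toy_gcdext (long A, long B, long *u)
{
  long a = A, b = B, u1 = 1, u0 = 0, q, t;
  int swapped = 0;

  while (b != 0)
    {
      q = a / b;
      a -= q * b;                /* reduce a -= q b ... */
      u1 += q * u0;              /* ... and keep u1 += q u0 in step */
      t = a; a = b; b = t;       /* remainder < b, so swap operands */
      t = u0; u0 = u1; u1 = t;   /* swap cofactors with them */
      swapped ^= 1;              /* and track the sign parity */
    }
  *u = swapped ? -u1 : u1;       /* now (*u) * A == gcd (mod B) */
  return a;
}

int main (void)
{
  long u, g = toy_gcdext (240, 46, &u);
  printf ("g=%ld u=%ld\n", g, u); /* g=2, u=-9: (-9)*240 == 2 (mod 46) */
  return 0;
}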
Example #11
mp_bitcnt_t
mpn_remove (mp_ptr wp, mp_size_t *wn,
	    mp_ptr up, mp_size_t un, mp_ptr vp, mp_size_t vn,
	    mp_bitcnt_t cap)
{
  mp_ptr    pwpsp[LOG];
  mp_size_t pwpsn[LOG];
  mp_size_t npowers;
  mp_ptr tp, qp, np, pp, qp2;
  mp_size_t pn, nn, qn, i;
  mp_bitcnt_t pwr;
  TMP_DECL;

  ASSERT (un > 0);
  ASSERT (vn > 0);
  ASSERT (vp[0] % 2 != 0);	/* 2-adic division wants odd numbers */
  ASSERT (vn > 1 || vp[0] > 1);	/* else we would loop indefinitely */

  TMP_MARK;

  tp = TMP_ALLOC_LIMBS ((un + 1 + vn) / 2); /* remainder */
  qp = TMP_ALLOC_LIMBS (un + 1);	/* quotient, alternating */
  qp2 = TMP_ALLOC_LIMBS (un + 1);	/* quotient, alternating */
  np = TMP_ALLOC_LIMBS (un + LOG);	/* powers of V */
  pp = vp;
  pn = vn;

  MPN_COPY (qp, up, un);
  qn = un;

  npowers = 0;
  while (qn >= pn)
    {
      qp[qn] = 0;
      mpn_bdiv_qr_wrap (qp2, tp, qp, qn + 1, pp, pn);
      if (!mpn_zero_p (tp, pn))
	break;			/* could not divide by V^npowers */

      MP_PTR_SWAP (qp, qp2);
      qn = qn - pn;
      qn += qp[qn] != 0;

      pwpsp[npowers] = pp;
      pwpsn[npowers] = pn;
      npowers++;

      if (((mp_bitcnt_t) 2 << npowers) - 1 > cap)
	break;

      nn = 2 * pn - 1;		/* next power will be at least this large */
      if (nn > qn)
	break;			/* next power would be overlarge */

      mpn_sqr (np, pp, pn);
      nn += np[nn] != 0;
      pp = np;
      pn = nn;
      np += nn;
    }

  pwr = ((mp_bitcnt_t) 1 << npowers) - 1;

  for (i = npowers - 1; i >= 0; i--)
    {
      pp = pwpsp[i];
      pn = pwpsn[i];
      if (qn < pn)
	continue;

      if (pwr + ((mp_bitcnt_t) 1 << i) > cap)
	continue;		/* V^i would bring us past cap */

      qp[qn] = 0;
      mpn_bdiv_qr_wrap (qp2, tp, qp, qn + 1, pp, pn);
      if (!mpn_zero_p (tp, pn))
	continue;		/* could not divide by V^i */

      MP_PTR_SWAP (qp, qp2);
      qn = qn - pn;
      qn += qp[qn] != 0;

      pwr += (mp_bitcnt_t) 1 << i;
    }

  MPN_COPY (wp, qp, qn);
  *wn = qn;

  TMP_FREE;

  return pwr;
}
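In recent GMP this mpn_remove is the engine behind the public mpz_remove, which makes the power-of-V strategy easy to check from user code (sketch; assumes -lgmp):

#include <stdio.h>
#include <gmp.h>

int main (void)
{
  mpz_t n, rem, f;
  mp_bitcnt_t e;

  mpz_init_set_ui (f, 3);
  mpz_init (n);
  mpz_init (rem);
  mpz_ui_pow_ui (n, 3, 10);    /* n = 3^10 */
  mpz_mul_ui (n, n, 5);        /* n = 3^10 * 5 */

  e = mpz_remove (rem, n, f);  /* strips every factor of 3 */
  gmp_printf ("%Zd = 3^%lu * %Zd\n", n, (unsigned long) e, rem); /* e = 10, rem = 5 */

  mpz_clear (n);
  mpz_clear (rem);
  mpz_clear (f);
  return 0;
}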
Example #12
void mpir_fft_mfa_trunc_sqrt2_outer(mp_ptr * ii, mp_size_t n, 
                   mp_bitcnt_t w, mp_ptr * t1, mp_ptr * t2, 
                             mp_ptr * temp, mp_size_t n1, mp_size_t trunc)
{
   mp_size_t i, j;
   mp_size_t n2 = (2*n)/n1;
   mp_size_t trunc2 = (trunc - 2*n)/n1;
   mp_size_t limbs = (n*w)/GMP_LIMB_BITS;
   mp_bitcnt_t depth = 0;
   mp_bitcnt_t depth2 = 0;
   
   while ((((mp_size_t)1)<<depth) < n2) depth++;
   while ((((mp_size_t)1)<<depth2) < n1) depth2++;

   /* first half matrix fourier FFT : n2 rows, n1 cols */
   
   /* FFTs on columns */
   for (i = 0; i < n1; i++)
   {   
      /* relevant part of first layer of full sqrt2 FFT */
      if (w & 1)
      {
         for (j = i; j < trunc - 2*n; j+=n1) 
         {   
            if (j & 1)
               mpir_fft_butterfly_sqrt2(*t1, *t2, ii[j], ii[2*n+j], j, limbs, w, *temp);
            else
               mpir_fft_butterfly(*t1, *t2, ii[j], ii[2*n+j], j/2, limbs, w);     

            MP_PTR_SWAP(ii[j],     *t1);
            MP_PTR_SWAP(ii[2*n+j], *t2);
         }

         for ( ; j < 2*n; j+=n1)
         {
             if (i & 1)
                mpir_fft_adjust_sqrt2(ii[j + 2*n], ii[j], j, limbs, w, *temp); 
             else
                mpir_fft_adjust(ii[j + 2*n], ii[j], j/2, limbs, w); 
         }
      } else
      {
         for (j = i; j < trunc - 2*n; j+=n1) 
         {   
            mpir_fft_butterfly(*t1, *t2, ii[j], ii[2*n+j], j, limbs, w/2);
   
            MP_PTR_SWAP(ii[j],     *t1);
            MP_PTR_SWAP(ii[2*n+j], *t2);
         }

         for ( ; j < 2*n; j+=n1)
            mpir_fft_adjust(ii[j + 2*n], ii[j], j, limbs, w/2);
      }
   
      /* 
         FFT of length n2 on column i, applying z^{r*i} for rows going up in steps 
         of 1 starting at row 0, where z => w bits
      */
      
      mpir_fft_radix2_twiddle(ii + i, n1, n2/2, w*n1, t1, t2, w, 0, i, 1);
      for (j = 0; j < n2; j++)
      {
         mp_size_t s = mpir_revbin(j, depth);
         if (j < s) MP_PTR_SWAP(ii[i+j*n1], ii[i+s*n1]);
      }
   }
      
   /* second half matrix fourier FFT : n2 rows, n1 cols */
   ii += 2*n;

   /* FFTs on columns */
   for (i = 0; i < n1; i++)
   {   
      /*
         FFT of length n2 on column i, applying z^{r*i} for rows going up in steps 
         of 1 starting at row 0, where z => w bits
      */
      
      mpir_fft_trunc1_twiddle(ii + i, n1, n2/2, w*n1, t1, t2, w, 0, i, 1, trunc2);
      for (j = 0; j < n2; j++)
      {
         mp_size_t s = mpir_revbin(j, depth);
         if (j < s) MP_PTR_SWAP(ii[i+j*n1], ii[i+s*n1]);
      }
   }
}
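The `if (j < s)` guard in the loops above swaps each (j, revbin(j)) pair exactly once. A standalone sketch of the bit-reversal helper this relies on (hypothetical name, assuming the gmp.h types; the MPIR routine is mpir_revbin):

/* Reverse the low `depth` bits of x, as used for the bit-reversal
   permutation applied after each column FFT (illustrative sketch). */
static mp_size_t
revbin_sketch (mp_size_t x, mp_bitcnt_t depth)
{
   mp_size_t r = 0;
   mp_bitcnt_t i;
   for (i = 0; i < depth; i++)
   {
      r = (r << 1) | (x & 1); /* shift bits of x into r in reverse order */
      x >>= 1;
   }
   return r;
}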
Example #13
mp_size_t
mpn_ngcdext_subdiv_step (mp_ptr gp, mp_size_t *gn, mp_ptr s0p, mp_ptr u0, mp_ptr u1, 
		             mp_size_t *un, mp_ptr ap, mp_ptr bp, mp_size_t n, mp_ptr tp)
{
  /* Called when nhgcd or mpn_nhgcd2 has failed. Then either one of a or b
     is very small, or the difference is very small. Perform one
     subtraction followed by one division. */

  mp_size_t an, bn, cy, qn, qn2, u0n, u1n;
  int negate = 0;
  int c;

  ASSERT (n > 0);
  ASSERT (ap[n-1] > 0 || bp[n-1] > 0);

  /* See to what extent ap and bp are the same */
  for (an = n; an > 0; an--)
    if (ap[an-1] != bp[an-1])
      break;

  if (an == 0)
    {
      /* ap OR bp is the gcd; two possible cofactor normalisations,
	     u1 or -u0, so pick the smaller
	  */
      MPN_COPY (gp, ap, n);
	  (*gn) = n;

      MPN_CMP(c, u1, u0, *un);
	  if (c <= 0) // u1 is the smaller
	  {
		 MPN_NORMALIZE(u1, (*un));
         MPN_COPY (s0p, u1, (*un));
	  } else // -u0 is the smaller
	  {
		 MPN_NORMALIZE(u0, (*un));
         MPN_COPY (s0p, u0, (*un));
		 (*un) = -(*un);
	  }
	  
	  return 0;
    }

  if (ap[an-1] < bp[an-1]) /* swap so that ap >= bp */
  {
	 MP_PTR_SWAP (ap, bp);
    MP_PTR_SWAP (u0, u1);
	 negate = ~negate;
  }

  bn = n;
  MPN_NORMALIZE (bp, bn);
  if (bn == 0)
    {
      /* ap is the gcd */
		MPN_COPY (gp, ap, n);
      MPN_NORMALIZE(u1, (*un));
      MPN_COPY (s0p, u1, (*un));
		if (negate) (*un) = -(*un);
      (*gn) = n;
	  
	  return 0;
    }

  ASSERT_NOCARRY (mpn_sub_n (ap, ap, bp, an)); /* ap -= bp, u1 += u0 */
  MPN_NORMALIZE (ap, an);
  
  ASSERT (an > 0);
	
  cy = mpn_add_n(u1, u1, u0, *un);
  if (cy) u1[(*un)++] = cy;

  if (an < bn) /* make an >= bn */
  {
	  MPN_PTR_SWAP (ap, an, bp, bn);
	  MP_PTR_SWAP(u0, u1);
	  negate = ~negate;
  }
  else if (an == bn)
    {
      MPN_CMP (c, ap, bp, an);
      if (c < 0)
		{
			MP_PTR_SWAP (ap, bp);
		   MP_PTR_SWAP(u0, u1);
	      negate = ~negate;
      } else if (c == 0) /* gcd is ap OR bp */
		{
		 /* this case seems never to occur;
			it could happen only if ap = 2*bp
		 */
		 MPN_COPY (gp, ap, an);
         (*gn) = an;
		 /* As the gcd is ap OR bp, there are two possible 
		    cofactors here u1 or -u0, and we want the 
			least of the two.
		 */
		 MPN_CMP(c, u1, u0, *un);
		 if (c < 0) // u1 is less
		 {
			MPN_NORMALIZE(u1, (*un));
            MPN_COPY (s0p, u1, (*un));
            if (negate) (*un) = -(*un);
		 } else if (c > 0) // -u0 is less
		 {
			MPN_NORMALIZE(u0, (*un));
            MPN_COPY (s0p, u0, (*un));
            if (!negate) (*un) = -(*un);
		 } else // same
		 {
		    MPN_NORMALIZE(u0, (*un));
            MPN_COPY (s0p, u0, (*un));
		 }
         
		 return 0;
		}
    }

  ASSERT (an >= bn);

  qn = an - bn + 1;
  mpn_tdiv_qr (tp, ap, 0, ap, an, bp, bn); /* ap -= q * bp, u1 += q * u0 */

  /* Normalizing seems to be the simplest way to test if the remainder
     is zero. */
  an = bn;
  MPN_NORMALIZE (ap, an);
  if (an == 0)
    {
      /* this case never seems to occur */
	  /* gcd = bp */
	  MPN_COPY (gp, bp, bn);
      MPN_NORMALIZE(u0, (*un));
      MPN_COPY (s0p, u0, (*un));
      if (!negate) (*un) = -(*un);
      (*gn) = bn;
      
	  return 0;
    }

  qn2 = qn;
  u0n = (*un);
  MPN_NORMALIZE (tp, qn2);
  MPN_NORMALIZE (u0, u0n);

  if (u0n > 0)
  {

  if (qn2 > u0n) mpn_mul(tp + qn, tp, qn2, u0, u0n);
  else mpn_mul(tp + qn, u0, u0n, tp, qn2);

  u0n += qn2;
  MPN_NORMALIZE(tp + qn, u0n);

  if ((*un) >= u0n) 
  {
	  cy = mpn_add(u1, u1, (*un), tp + qn, u0n);
	  if (cy) u1[(*un)++] = cy;
  } else
  {
	  cy = mpn_add(u1, tp + qn, u0n, u1, (*un));
	  (*un) = u0n;
	  if (cy) u1[(*un)++] = cy;
  }
  }

  return bn;
}
Example #14
mp_size_t
mpn_gcdext (mp_ptr gp, mp_ptr s0p, mp_size_t *s0size,
	    mp_ptr ap, mp_size_t an, mp_ptr bp, mp_size_t n)
{
  mp_size_t init_scratch, orig_n = n;
  mp_size_t scratch, un, u0n, u1n;
  mp_limb_t t;
  mp_ptr tp, u0, u1;
  int swapped = 0;
    struct ngcd_matrix M;
    mp_size_t p;
    mp_size_t nn;
  mp_limb_signed_t a;
  int c;
  TMP_DECL;
  
  ASSERT (an >= n);
  
  if (an == 1)
  {
    if (!n)
    {
       /* shouldn't ever occur, but we include for completeness */
		gp[0] = ap[0];
       s0p[0] = 1;
       *s0size = 1;
       
	   return 1;
    }
    
	gp[0] = mpn_gcdinv_1(&a, ap[0], bp[0]);
    if (a < (mp_limb_signed_t) 0)
	{
	   s0p[0] = -a;
       (*s0size) = -1;
	} else
    {
	   s0p[0] = a;
       (*s0size) = 1 - (s0p[0] == 0);
	}
	
	return 1;
  }

  init_scratch = MPN_NGCD_MATRIX_INIT_ITCH (n-P_SIZE(n));
  scratch = mpn_nhgcd_itch ((n+1)/2);

  /* Space needed for mpn_ngcd_matrix_adjust */
  if (scratch < 2*n)
    scratch = 2*n;
  if (scratch < an - n + 1) /* the first division can sometimes be selfish!! */
	 scratch = an - n + 1;

 /* Space needed for cofactor adjust */
  scratch = MAX(scratch, 2*(n+1) + P_SIZE(n) + 1);

  TMP_MARK;
  
  if (5*n + 2 + MPN_GCD_LEHMER_N_ITCH(n) > init_scratch + scratch) 
    tp = TMP_ALLOC_LIMBS (7*n+4+MPN_GCD_LEHMER_N_ITCH(n)); /* 2n+2 for u0, u1, 5*n+2 + MPN_GCD_LEHMER_N_ITCH(n) for Lehmer
                                                              and copies of ap and bp and s (and finally 3*n+1 for t and get_t) */
  else
    tp = TMP_ALLOC_LIMBS (2*(n+1) + init_scratch + scratch);
    
  if (an > n)
    {
      mp_ptr qp = tp;

      mpn_tdiv_qr (qp, ap, 0, ap, an, bp, n);
      
      an = n;
      MPN_NORMALIZE (ap, an);
      if (an == 0)
	{	  
	  MPN_COPY (gp, bp, n);
	  TMP_FREE;
	  (*s0size) = 0;
	  
	  return n;
	}
    }
    
    if (BELOW_THRESHOLD (n, GCDEXT_THRESHOLD))
    {
      n = mpn_ngcdext_lehmer (gp, s0p, s0size, ap, bp, n, tp);
      TMP_FREE;
      
	  return n;
    }
  
    u0 = tp; /* Cofactor space */
    u1 = tp + n + 1;

    MPN_ZERO(tp, 2*(n+1));

    tp += 2*(n+1);
  
    /* First iteration, setup u0 and u1 */

    p = P_SIZE(n);
  
    mpn_ngcd_matrix_init (&M, n - p, tp);
	 ASSERT(tp + init_scratch > M.p[1][1] + M.n);
	 nn = mpn_nhgcd (ap + p, bp + p, n - p, &M, tp + init_scratch);
  if (nn > 0)
	 {
		 n = mpn_ngcd_matrix_adjust (&M, p + nn, ap, bp, p, tp + init_scratch);
		 
		 /* 
            (ap'', bp'')^T = M^-1(ap', bp')^T 
		    and (ap', bp') = (1*ap + ?*bp, 0*ap + ?*bp) 
		    We let u0 be minus the factor of ap appearing 
            in the expression for bp'' and u1 be the 
            factor of ap appearing in the expression for ap''
        */

       MPN_COPY(u0, M.p[1][0], M.n);
	    MPN_COPY(u1, M.p[1][1], M.n);

	    un = M.n;
	    while ((u0[un-1] == 0) && (u1[un-1] == 0)) un--; /* normalise u0, u1; they cannot both be zero as det = 1 */
     }
  else	
	 {
	   mp_size_t gn;

		un = 1;
	   u0[0] = 0; /* bp = 0*ap + ?*bp, thus u0 = -0 */
	   u1[0] = 1; /* ap = 1*ap + ?*bp, thus u1 = 1 */
   
	   n = mpn_ngcdext_subdiv_step (gp, &gn, s0p, u0, u1, &un, ap, bp, n, tp);
	 if (n == 0)
	   {
	      /* never observed to occur */
		   (*s0size) = un;
			ASSERT(s0p[*s0size - 1] != 0);
		   TMP_FREE;
	       
		   return gn;
	   }
	 } 

  while (ABOVE_THRESHOLD (n, GCDEXT_THRESHOLD))
    {
      struct ngcd_matrix M;
      mp_size_t p = P_SIZE(n);
      mp_size_t nn;
      
      mpn_ngcd_matrix_init (&M, n - p, tp);
      nn = mpn_nhgcd (ap + p, bp + p, n - p, &M, tp + init_scratch);
		if (nn > 0)
	{
	   n = mpn_ngcd_matrix_adjust (&M, p + nn, ap, bp, p, tp + init_scratch);

		ngcdext_cofactor_adjust(u0, u1, &un, &M, tp + init_scratch);
		
		/* 
            (ap'', bp'')^T = M^-1(ap', bp')^T 
		    and (ap', bp') = (u1*ap + ?*bp, -u0*ap + ?*bp) 
		    So we need u0' = -(-c*u1 + a*-u0) = a*u0 + c*u1
            and we need u1' = (d*u1 -b*-u0) = b*u0 + d*u1 
        */

     
		ASSERT(un <= orig_n + 1);

	}  else	
	{
	  mp_size_t gn;
	  n = mpn_ngcdext_subdiv_step (gp, &gn, s0p, u0, u1, &un, ap, bp, n, tp);
	  ASSERT(un <= orig_n + 1);
	  if (n == 0)
	    {
	      (*s0size) = un;
			ASSERT(((*s0size) == 0) || (s0p[ABS(*s0size) - 1] != 0));
		   TMP_FREE;
		   
		   return gn;
	    }
	}
    }

  ASSERT (ap[n-1] > 0 || bp[n-1] > 0);
  ASSERT (u0[un-1] > 0 || u1[un-1] > 0);

  if (ap[n-1] < bp[n-1])
  {
	  MP_PTR_SWAP (ap, bp);
	  MP_PTR_SWAP (u0, u1);
	  swapped = 1;
  }
   
  an = n; /* {ap, an} and {bp, bn} are normalised, {ap, an} >= {bp, bn} */
  MPN_NORMALIZE (bp, n);

  if (n == 0)
    {
      /* If bp == 0 then gp = ap
		   with cofactor u1
			If we swapped then cofactor is -u1
			This case never seems to happen
		*/
		MPN_COPY (gp, ap, an);
		MPN_NORMALIZE(u1, un);
		MPN_COPY(s0p, u1, un);
      (*s0size) = un;
		if (swapped) (*s0size) = -(*s0size);
      TMP_FREE;
      
	  return an;
    }

  /* 
     If at this point we have s*ap' + t*bp' = gp where gp is the gcd
	  and (ap', bp') = (u1*ap + ?*bp, -u0*ap + ?*bp)
	  then gp = s*u1*ap - t*u0*ap + ?*bp
	  and the cofactor we want is (s*u1-t*u0).

	  First there is the special case u0 = 0, u1 = 1 in which case we do not need 
	  to compute t...
  */
    
  ASSERT(u1 + un <= tp);
  u0n = un;
  MPN_NORMALIZE(u0, u0n);  /* {u0, u0n} is now normalised */

  if (u0n == 0) /* u1 = 1 case is rare */
  {
	  mp_size_t gn;
	 
	  gn = mpn_ngcdext_lehmer (gp, s0p, s0size, ap, bp, n, tp);
	  if (swapped) (*s0size) = -(*s0size);
	  TMP_FREE;
	  
	  return gn;
  }
  else
  {
	  /* Compute final gcd. */
  
	  mp_size_t gn, sn, tn;
	  mp_ptr s, t;
	  mp_limb_t cy;
	  int negate = 0;
	  
      /* Save an, bn first as gcdext destroys inputs */
	  s = tp;
	  tp += an;
	  
     MPN_COPY(tp, ap, an);
	  MPN_COPY(tp + an, bp, an);
	  
	  if (mpn_cmp(tp, tp + an, an) == 0) 
	  {
	     /* gcd is tp or tp + an 
		    return smallest cofactor, either -u0 or u1
		 */
	     gn = an;
		 MPN_NORMALIZE(tp, gn);
		 MPN_COPY(gp, tp, gn);
		 
		 MPN_CMP(c, u0, u1, un);
		 if (c < (mp_limb_signed_t) 0)
		 {
		    MPN_COPY(s0p, u0, u0n);
			(*s0size) = -u0n;
		 } else
		 {
		    MPN_NORMALIZE(u1, un);
			MPN_COPY(s0p, u1, un);
			(*s0size) = un;
		 }
		 TMP_FREE;
		  
		 return gn;
	  }

      gn = mpn_ngcdext_lehmer (gp, s, &sn, tp, tp + an, an, tp + 2*an);
      
	  /* Special case, s == 0, t == 1, cofactor = -u0; case is rare */

	  if (sn == 0)
	  {
		  MPN_COPY(s0p, u0, u0n);
		  (*s0size) = -u0n;
		  if (swapped) (*s0size) = -(*s0size);
		  TMP_FREE;
		  
		  return gn;
	  }

	  /* We'll need the other cofactor t = (gp - s*ap)/bp 
		*/

	  t = tp;
	  tp += (an + 1);
		 
	  gcdext_get_t(t, &tn, gp, gn, ap, an, bp, n, s, sn, tp);

	  ASSERT((tn == 0) || (t[tn - 1] > 0)); /* {t, tn} is normalised */

	  ASSERT(tn <= an + 1);

	  /* We want to compute s*u1 - t*u0, so if s is negative
	     t will be positive, so we'd be dealing with negative
		  numbers. We fix that here.
	  */

	  if (sn < 0)
	  {
		  sn = -sn;
		  negate = 1;
	  }

	  /* Now we can deal with the special case u1 = 0 */

	  u1n = un; 
	  MPN_NORMALIZE(u1, u1n); /* {u1, u1n} is now normalised */
     
	  if (u1n == 0) /* case is rare */
	  {
		  MPN_COPY(s0p, t, tn);
		  (*s0size) = -tn;
		  if (swapped ^ negate) (*s0size) = -(*s0size);
		  TMP_FREE;
		  
		  return gn;
	  }

	  /* t may be zero, but we need to compute s*u1 anyway */
	  if (sn >= u1n)
		  mpn_mul(s0p, s, sn, u1, u1n);
	  else
		  mpn_mul(s0p, u1, u1n, s, sn);

	  (*s0size) = sn + u1n;
	  (*s0size) -= (s0p[sn + u1n - 1] == 0);

	  ASSERT(s0p[*s0size - 1] > 0); /* {s0p, *s0size} is normalised now */

	  if (tn == 0) /* case is rare */
	  {
		  if (swapped ^ negate) (*s0size) = -(*s0size);
        TMP_FREE;
	    
		return gn;
	  }

	  /* Now compute the rest of the cofactor, t*u0
	     and subtract it
		  We're done with u1 and s which happen to be
		  consecutive, so use that space
	  */

	  ASSERT(u1 + tn + u0n <= t);

     if (tn > u0n)
		  mpn_mul(u1, t, tn, u0, u0n);
	  else
		  mpn_mul(u1, u0, u0n, t, tn);

	  u1n = tn + u0n;
	  u1n -= (u1[tn + u0n - 1] == 0);

	  ASSERT(u1[u1n - 1] > 0);

	  /* Recall t is now negated so s*u1 - t*u0 
	     involves an *addition* 
	  */

	  if ((*s0size) >= u1n)
	  {
		  cy = mpn_add(s0p, s0p, *s0size, u1, u1n);
		  if (cy) s0p[(*s0size)++] = cy;
	  }
	  else
	  {
		  cy = mpn_add(s0p, u1, u1n, s0p, *s0size);
        (*s0size) = u1n;
	     if (cy) s0p[(*s0size)++] = cy;
	  }

	  if (swapped ^ negate) (*s0size) = -(*s0size);
     TMP_FREE;  
     
	 return gn;
  }
}
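mpn_gcdext is what ultimately services the public mpz_gcdext, so the cofactor conventions above can be sanity-checked at the mpz level (sketch; assumes -lgmp):

#include <stdio.h>
#include <gmp.h>

int main (void)
{
  mpz_t a, b, g, s, t;
  mpz_inits (a, b, g, s, t, NULL);
  mpz_set_ui (a, 240);
  mpz_set_ui (b, 46);

  mpz_gcdext (g, s, t, a, b);                  /* g = s*a + t*b */
  gmp_printf ("g=%Zd s=%Zd t=%Zd\n", g, s, t); /* 2 = (-9)*240 + 47*46 */

  mpz_clears (a, b, g, s, t, NULL);
  return 0;
}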
Example #15
int
mpn_jacobi_n (mp_ptr ap, mp_ptr bp, mp_size_t n, unsigned bits)
{
  mp_size_t scratch;
  mp_size_t matrix_scratch;
  mp_ptr tp;

  TMP_DECL;

  ASSERT (n > 0);
  ASSERT ( (ap[n-1] | bp[n-1]) > 0);
  ASSERT ( (bp[0] | ap[0]) & 1);

  /* FIXME: Check for small sizes first, before setting up temporary
     storage etc. */
  scratch = MPN_GCD_SUBDIV_STEP_ITCH(n);

  if (ABOVE_THRESHOLD (n, GCD_DC_THRESHOLD))
    {
      mp_size_t hgcd_scratch;
      mp_size_t update_scratch;
      mp_size_t p = CHOOSE_P (n);
      mp_size_t dc_scratch;

      matrix_scratch = MPN_HGCD_MATRIX_INIT_ITCH (n - p);
      hgcd_scratch = mpn_hgcd_itch (n - p);
      update_scratch = p + n - 1;

      dc_scratch = matrix_scratch + MAX(hgcd_scratch, update_scratch);
      if (dc_scratch > scratch)
	scratch = dc_scratch;
    }

  TMP_MARK;
  tp = TMP_ALLOC_LIMBS(scratch);

  while (ABOVE_THRESHOLD (n, JACOBI_DC_THRESHOLD))
    {
      struct hgcd_matrix M;
      mp_size_t p = 2*n/3;
      mp_size_t matrix_scratch = MPN_HGCD_MATRIX_INIT_ITCH (n - p);
      mp_size_t nn;
      mpn_hgcd_matrix_init (&M, n - p, tp);

      nn = mpn_hgcd_jacobi (ap + p, bp + p, n - p, &M, &bits,
			    tp + matrix_scratch);
      if (nn > 0)
	{
	  ASSERT (M.n <= (n - p - 1)/2);
	  ASSERT (M.n + p <= (p + n - 1) / 2);
	  /* Temporary storage 2 (p + M->n) <= p + n - 1. */
	  n = mpn_hgcd_matrix_adjust (&M, p + nn, ap, bp, p, tp + matrix_scratch);
	}
      else
	{
	  /* Temporary storage n */
	  n = mpn_gcd_subdiv_step (ap, bp, n, 0, jacobi_hook, &bits, tp);
	  if (!n)
	    {
	      TMP_FREE;
	      return bits == BITS_FAIL ? 0 : mpn_jacobi_finish (bits);
	    }
	}
    }

  while (n > 2)
    {
      struct hgcd_matrix1 M;
      mp_limb_t ah, al, bh, bl;
      mp_limb_t mask;

      mask = ap[n-1] | bp[n-1];
      ASSERT (mask > 0);

      if (mask & GMP_NUMB_HIGHBIT)
	{
	  ah = ap[n-1]; al = ap[n-2];
	  bh = bp[n-1]; bl = bp[n-2];
	}
      else
	{
	  int shift;

	  count_leading_zeros (shift, mask);
	  ah = MPN_EXTRACT_NUMB (shift, ap[n-1], ap[n-2]);
	  al = MPN_EXTRACT_NUMB (shift, ap[n-2], ap[n-3]);
	  bh = MPN_EXTRACT_NUMB (shift, bp[n-1], bp[n-2]);
	  bl = MPN_EXTRACT_NUMB (shift, bp[n-2], bp[n-3]);
	}

      /* Try an mpn_hgcd2 step */
      if (mpn_hgcd2_jacobi (ah, al, bh, bl, &M, &bits))
	{
	  n = mpn_matrix22_mul1_inverse_vector (&M, tp, ap, bp, n);
	  MP_PTR_SWAP (ap, tp);
	}
      else
	{
	  /* mpn_hgcd2 has failed. Then either one of a or b is very
	     small, or the difference is very small. Perform one
	     subtraction followed by one division. */
	  n = mpn_gcd_subdiv_step (ap, bp, n, 0, &jacobi_hook, &bits, tp);
	  if (!n)
	    {
	      TMP_FREE;
	      return bits == BITS_FAIL ? 0 : mpn_jacobi_finish (bits);
	    }
	}
    }

  if (bits >= 16)
    MP_PTR_SWAP (ap, bp);

  ASSERT (bp[0] & 1);

  if (n == 1)
    {
      mp_limb_t al, bl;
      al = ap[0];
      bl = bp[0];

      TMP_FREE;
      if (bl == 1)
	return 1 - 2*(bits & 1);
      else
	return mpn_jacobi_base (al, bl, bits << 1);
    }

  else
    {
      int res = mpn_jacobi_2 (ap, bp, bits & 1);
      TMP_FREE;
      return res;
    }
}
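mpn_jacobi_n sits underneath the public mpz_jacobi. A small check (sketch; assumes -lgmp), which also shows that a Jacobi symbol of 1 does not by itself certify a quadratic residue:

#include <stdio.h>
#include <gmp.h>

int main (void)
{
  mpz_t a, n;
  mpz_init_set_ui (a, 5);
  mpz_init_set_ui (n, 21);

  /* (5/21) = (5/3)(5/7) = (-1)(-1) = 1, yet 5 is not a square mod 21 */
  printf ("(5/21) = %d\n", mpz_jacobi (a, n));

  mpz_clear (a);
  mpz_clear (n);
  return 0;
}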