Example #1
void
mpn_redc_n (mp_ptr rp, mp_ptr up, mp_srcptr mp, mp_size_t n, mp_srcptr ip)
{
  mp_ptr xp, yp, scratch;
  mp_limb_t cy;
  mp_size_t rn;
  TMP_DECL;
  TMP_MARK;

  ASSERT (n > 8);

  rn = mpn_mulmod_bnm1_next_size (n);

  scratch = TMP_ALLOC_LIMBS (n + rn + mpn_mulmod_bnm1_itch (rn, n, n));

  xp = scratch;
  mpn_mullo_n (xp, up, ip, n);

  yp = scratch + n;
  mpn_mulmod_bnm1 (yp, rn, xp, n, mp, n, scratch + n + rn);

  ASSERT_ALWAYS (2 * n > rn);				/* could handle this */

  cy = mpn_sub_n (yp + rn, yp, up, 2*n - rn);		/* undo wrap around */
  MPN_DECR_U (yp + 2*n - rn, rn, cy);

  cy = mpn_sub_n (rp, up + n, yp + n, n);
  if (cy != 0)
    mpn_add_n (rp, rp, mp, n);

  TMP_FREE;
}
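For orientation: the routine above is Montgomery reduction (REDC). With B = 2^GMP_NUMB_BITS it returns {rp,n} congruent to {up,2n} * B^(-n) mod {mp,n}, where {ip,n} is the inverse of the modulus mod B^n (as produced by mpn_binvert). A minimal mpz-level sketch of that transformation, useful as a test reference (redc_reference is an illustrative name, not a library function):

#include <gmp.h>

/* Reference: r = u * B^(-n) mod m, with B = 2^GMP_NUMB_BITS and m odd. */
static void
redc_reference (mpz_ptr r, mpz_srcptr u, mpz_srcptr m, mp_size_t n)
{
  mpz_t bpow;
  mpz_init_set_ui (bpow, 0);
  mpz_setbit (bpow, n * GMP_NUMB_BITS);	/* bpow = B^n */
  mpz_invert (bpow, bpow, m);		/* bpow = B^(-n) mod m (m odd) */
  mpz_mul (r, u, bpow);
  mpz_mod (r, r, m);
  mpz_clear (bpow);
}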
Example #2
void tc4_addmul_1(mp_ptr wp, mp_size_t * wn, mp_srcptr xp, mp_size_t xn, mp_limb_t y)
{
  mp_size_t  sign, wu, xu, ws, new_wn, min_size, dsize;
  mp_limb_t  cy;

  /* w unaffected if x==0 or y==0 */
  if (xn == 0 || y == 0)
    return;

  sign = xn;
  xu = ABS (xn);

  ws = *wn;
  if (*wn == 0)
  {
      /* nothing to add to, just set x*y, "sign" gives the sign */
      cy = mpn_mul_1 (wp, xp, xu, y);
      if (cy)
	{
	  wp[xu] = cy;
	  xu = xu + 1;
	}
      *wn = (sign >= 0 ? xu : -xu);
      return;
  }
  
  sign ^= *wn;
  wu = ABS (*wn);

  new_wn = MAX (wu, xu);
  min_size = MIN (wu, xu);

  if (sign >= 0)
  {
      /* addmul of absolute values */

      cy = mpn_addmul_1 (wp, xp, min_size, y);
      
      dsize = xu - wu;
#if HAVE_NATIVE_mpn_mul_1c
      if (dsize > 0)
        cy = mpn_mul_1c (wp + min_size, xp + min_size, dsize, y, cy);
      else if (dsize < 0)
      {
          dsize = -dsize;
          cy = mpn_add_1 (wp + min_size, wp + min_size, dsize, cy);
      }
#else
      if (dsize != 0)
      {
          mp_limb_t cy2;
          if (dsize > 0)
            cy2 = mpn_mul_1 (wp + min_size, xp + min_size, dsize, y);
          else
          {
              dsize = -dsize;
              cy2 = 0;
          }
          cy = cy2 + mpn_add_1 (wp + min_size, wp + min_size, dsize, cy);
      }
#endif

      if (cy)
	{
	  wp[dsize + min_size] = cy;
	  new_wn++;
	}
   } else
   {
      /* submul of absolute values */

      cy = mpn_submul_1 (wp, xp, min_size, y);
      if (wu >= xu)
      {
          /* if w bigger than x, then propagate borrow through it */
          if (wu != xu)
            cy = mpn_sub_1 (wp + xu, wp + xu, wu - xu, cy);

          if (cy != 0)
          {
              /* Borrow out of w, take twos complement negative to get
                 absolute value, flip sign of w.  */
              wp[new_wn] = ~-cy;  /* extra limb is 0-cy */
              mpn_not (wp, new_wn);
              new_wn++;
              MPN_INCR_U (wp, new_wn, CNST_LIMB(1));
              ws = -*wn;
          }
      } else /* wu < xu */
      {
          /* x bigger than w, so want x*y-w.  Submul has given w-x*y, so
             take twos complement and use an mpn_mul_1 for the rest.  */

          mp_limb_t  cy2;

          /* -(-cy*b^n + w-x*y) = (cy-1)*b^n + ~(w-x*y) + 1 */
          mpn_not (wp, wu);
          cy += mpn_add_1 (wp, wp, wu, CNST_LIMB(1));
          cy -= 1;

          /* If cy-1 == -1 then hold that -1 for later.  mpn_submul_1 never
             returns cy==MP_LIMB_T_MAX so that value always indicates a -1. */
          cy2 = (cy == MP_LIMB_T_MAX);
          cy += cy2;
          MPN_MUL_1C (cy, wp + wu, xp + wu, xu - wu, y, cy);
          wp[new_wn] = cy;
          new_wn += (cy != 0);

          /* Apply any -1 from above.  The value at wp+wu is non-zero
             because y!=0 and the high limb of x will be non-zero.  */
          if (cy2)
            MPN_DECR_U (wp+wu, new_wn - wu, CNST_LIMB(1));

          ws = -*wn;
        }

      /* submul can produce high zero limbs due to cancellation, both when w
         has more limbs or x has more  */
      MPN_NORMALIZE (wp, new_wn);
  }

  *wn = (ws >= 0 ? new_wn : -new_wn);

  ASSERT (new_wn == 0 || wp[new_wn - 1] != 0);
}
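tc4_addmul_1 stores signed numbers in the same sign-magnitude convention as mpz internals: a limb array plus a signed size, whose sign is the sign of the value. A hedged helper for cross-checking w += x*y against mpz arithmetic in tests (signed_mpn_to_mpz is an illustrative name):

#include <gmp.h>

/* Lift a (limb array, signed size) pair into an mpz for reference checks. */
static void
signed_mpn_to_mpz (mpz_ptr z, mp_srcptr p, mp_size_t sn)
{
  mp_size_t n = sn >= 0 ? sn : -sn;
  mpz_import (z, n, -1, sizeof (mp_limb_t), 0, GMP_NAIL_BITS, p);
  if (sn < 0)
    mpz_neg (z, z);
}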
Example #3
/* FIXME:
    x Take scratch parameter, and figure out scratch need.

    x Use some fallback for small M->n?
*/
static mp_size_t
hgcd_matrix_apply (const struct hgcd_matrix *M,
		   mp_ptr ap, mp_ptr bp,
		   mp_size_t n)
{
  mp_size_t an, bn, un, vn, nn;
  mp_size_t mn[2][2];
  mp_size_t modn;
  mp_ptr tp, sp, scratch;
  mp_limb_t cy;
  unsigned i, j;

  TMP_DECL;

  ASSERT ( (ap[n-1] | bp[n-1]) > 0);

  an = n;
  MPN_NORMALIZE (ap, an);
  bn = n;
  MPN_NORMALIZE (bp, bn);

  for (i = 0; i < 2; i++)
    for (j = 0; j < 2; j++)
      {
	mp_size_t k;
	k = M->n;
	MPN_NORMALIZE (M->p[i][j], k);
	mn[i][j] = k;
      }

  ASSERT (mn[0][0] > 0);
  ASSERT (mn[1][1] > 0);
  ASSERT ( (mn[0][1] | mn[1][0]) > 0);

  TMP_MARK;

  if (mn[0][1] == 0)
    {
      /* A unchanged, M = (1, 0; q, 1) */
      ASSERT (mn[0][0] == 1);
      ASSERT (M->p[0][0][0] == 1);
      ASSERT (mn[1][1] == 1);
      ASSERT (M->p[1][1][0] == 1);

      /* Put B <-- B - q A */
      nn = submul (bp, bn, ap, an, M->p[1][0], mn[1][0]);
    }
  else if (mn[1][0] == 0)
    {
      /* B unchanged, M = (1, q; 0, 1) */
      ASSERT (mn[0][0] == 1);
      ASSERT (M->p[0][0][0] == 1);
      ASSERT (mn[1][1] == 1);
      ASSERT (M->p[1][1][0] == 1);

      /* Put A  <-- A - q * B */
      nn = submul (ap, an, bp, bn, M->p[0][1], mn[0][1]);
    }
  else
    {
      /* A = m00 a + m01 b  ==> a <= A / m00, b <= A / m01.
	 B = m10 a + m11 b  ==> a <= B / m10, b <= B / m11. */
      un = MIN (an - mn[0][0], bn - mn[1][0]) + 1;
      vn = MIN (an - mn[0][1], bn - mn[1][1]) + 1;

      nn = MAX (un, vn);
      /* In the range of interest, mulmod_bnm1 should always beat mullo. */
      modn = mpn_mulmod_bnm1_next_size (nn + 1);

      scratch = TMP_ALLOC_LIMBS (mpn_mulmod_bnm1_itch (modn, modn, M->n));
      tp = TMP_ALLOC_LIMBS (modn);
      sp = TMP_ALLOC_LIMBS (modn);

      ASSERT (n <= 2*modn);

      if (n > modn)
	{
	  cy = mpn_add (ap, ap, modn, ap + modn, n - modn);
	  MPN_INCR_U (ap, modn, cy);

	  cy = mpn_add (bp, bp, modn, bp + modn, n - modn);
	  MPN_INCR_U (bp, modn, cy);

	  n = modn;
	}

      mpn_mulmod_bnm1 (tp, modn, ap, n, M->p[1][1], mn[1][1], scratch);
      mpn_mulmod_bnm1 (sp, modn, bp, n, M->p[0][1], mn[0][1], scratch);

      /* FIXME: Handle the small n case in some better way. */
      if (n + mn[1][1] < modn)
	MPN_ZERO (tp + n + mn[1][1], modn - n - mn[1][1]);
      if (n + mn[0][1] < modn)
	MPN_ZERO (sp + n + mn[0][1], modn - n - mn[0][1]);

      cy = mpn_sub_n (tp, tp, sp, modn);
      MPN_DECR_U (tp, modn, cy);

      ASSERT (mpn_zero_p (tp + nn, modn - nn));

      mpn_mulmod_bnm1 (sp, modn, ap, n, M->p[1][0], mn[1][0], scratch);
      MPN_COPY (ap, tp, nn);
      mpn_mulmod_bnm1 (tp, modn, bp, n, M->p[0][0], mn[0][0], scratch);

      if (n + mn[1][0] < modn)
	MPN_ZERO (sp + n + mn[1][0], modn - n - mn[1][0]);
      if (n + mn[0][0] < modn)
	MPN_ZERO (tp + n + mn[0][0], modn - n - mn[0][0]);

      cy = mpn_sub_n (tp, tp, sp, modn);
      MPN_DECR_U (tp, modn, cy);

      ASSERT (mpn_zero_p (tp + nn, modn - nn));
      MPN_COPY (bp, tp, nn);

      while ( (ap[nn-1] | bp[nn-1]) == 0)
	{
	  nn--;
	  ASSERT (nn > 0);
	}
    }
  TMP_FREE;

  return nn;
}
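In matrix form, the update implemented above applies the inverse of the HGCD matrix; assuming det M = 1, as HGCD guarantees:

$$ \begin{pmatrix} a' \\ b' \end{pmatrix} = M^{-1} \begin{pmatrix} a \\ b \end{pmatrix} = \begin{pmatrix} m_{11} & -m_{01} \\ -m_{10} & m_{00} \end{pmatrix} \begin{pmatrix} a \\ b \end{pmatrix}, \qquad M = \begin{pmatrix} m_{00} & m_{01} \\ m_{10} & m_{11} \end{pmatrix}. $$

This matches the two subtraction pairs in the code, a' = m11*a - m01*b and b' = m00*b - m10*a, computed mod B^modn - 1, which is harmless because the true results are known to fit in nn < modn limbs (see the mpn_zero_p assertions).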
void
mpn_toom_interpolate_8pts (mp_ptr pp, mp_size_t n,
			   mp_ptr r3, mp_ptr r7,
			   mp_size_t spt, mp_ptr ws)
{
  mp_limb_signed_t cy;
  mp_ptr r5, r1;
  r5 = (pp + 3 * n);			/* 3n+1 */
  r1 = (pp + 7 * n);			/* spt */

  /******************************* interpolation *****************************/

  DO_mpn_subrsh(r3+n, 2 * n + 1, pp, 2 * n, 4, ws);
  cy = DO_mpn_sublsh_n (r3, r1, spt, 12, ws);
  MPN_DECR_U (r3 + spt, 3 * n + 1 - spt, cy);

  DO_mpn_subrsh(r5+n, 2 * n + 1, pp, 2 * n, 2, ws);
  cy = DO_mpn_sublsh_n (r5, r1, spt, 6, ws);
  MPN_DECR_U (r5 + spt, 3 * n + 1 - spt, cy);

  r7[3*n] -= mpn_sub_n (r7+n, r7+n, pp, 2 * n);
  cy = mpn_sub_n (r7, r7, r1, spt);
  MPN_DECR_U (r7 + spt, 3 * n + 1 - spt, cy);

  ASSERT_NOCARRY(mpn_sub_n (r3, r3, r5, 3 * n + 1));
  ASSERT_NOCARRY(mpn_rshift(r3, r3, 3 * n + 1, 2));

  ASSERT_NOCARRY(mpn_sub_n (r5, r5, r7, 3 * n + 1));

  ASSERT_NOCARRY(mpn_sub_n (r3, r3, r5, 3 * n + 1));

  mpn_divexact_by45 (r3, r3, 3 * n + 1);

  ASSERT_NOCARRY(mpn_divexact_by3 (r5, r5, 3 * n + 1));

  ASSERT_NOCARRY(DO_mpn_sublsh2_n (r5, r3, 3 * n + 1, ws));

  /* last interpolation steps... */
  /* ... are mixed with recomposition */

  /***************************** recomposition *******************************/
  /*
    pp[] prior to operations:
     |_H r1|_L r1|____||_H r5|_M_r5|_L r5|_____|_H r8|_L r8|pp

    summation scheme for remaining operations:
     |____8|n___7|n___6|n___5|n___4|n___3|n___2|n____|n____|pp
     |_H r1|_L r1|____||_H*r5|_M r5|_L r5|_____|_H_r8|_L r8|pp
	  ||_H r3|_M r3|_L*r3|
				  ||_H_r7|_M_r7|_L_r7|
		      ||-H r3|-M r3|-L*r3|
				  ||-H*r5|-M_r5|-L_r5|
  */

  cy = mpn_add_n (pp + n, pp + n, r7, n); /* Hr8+Lr7-Lr5 */
  cy-= mpn_sub_n (pp + n, pp + n, r5, n);
  if (0 > cy)
    MPN_DECR_U (r7 + n, 2*n + 1, 1);
  else
    MPN_INCR_U (r7 + n, 2*n + 1, cy);

  cy = mpn_sub_n (pp + 2*n, r7 + n, r5 + n, n); /* Mr7-Mr5 */
  MPN_DECR_U (r7 + 2*n, n + 1, cy);

  cy = mpn_add_n (pp + 3*n, r5, r7+ 2*n, n+1); /* Hr7+Lr5 */
  r5[3*n]+= mpn_add_n (r5 + 2*n, r5 + 2*n, r3, n); /* Hr5+Lr3 */
  cy-= mpn_sub_n (pp + 3*n, pp + 3*n, r5 + 2*n, n+1); /* Hr7-Hr5+Lr5-Lr3 */
  if (UNLIKELY(0 > cy))
    MPN_DECR_U (r5 + n + 1, 2*n, 1);
  else
    MPN_INCR_U (r5 + n + 1, 2*n, cy);

  ASSERT_NOCARRY(mpn_sub_n(pp + 4*n, r5 + n, r3 + n, 2*n +1)); /* Mr5-Mr3,Hr5-Hr3 */

  cy = mpn_add_1 (pp + 6*n, r3 + n, n, pp[6*n]);
  MPN_INCR_U (r3 + 2*n, n + 1, cy);
  cy = mpn_add_n (pp + 7*n, pp + 7*n, r3 + 2*n, n);
  if (LIKELY(spt != n))
    MPN_INCR_U (pp + 8*n, spt - n, cy + r3[3*n]);
  else
    ASSERT ((r3[3*n] | cy) == 0);
}
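DO_mpn_sublsh_n (dst, src, n, s, ws), used throughout the interpolation above, behaves as dst -= (src << s) over n limbs and returns the borrow; DO_mpn_subrsh is the analogous right-shift form. A generic sketch of the left-shift helper, assuming 0 < s < GMP_NUMB_BITS and {ws,n} of scratch (real builds may substitute a native mpn_sublsh_n):

static mp_limb_t
DO_mpn_sublsh_n (mp_ptr dst, mp_srcptr src, mp_size_t n, unsigned int s,
		 mp_ptr ws)
{
  mp_limb_t cy;
  cy = mpn_lshift (ws, src, n, s);	    /* ws = src << s, cy = shifted-out bits */
  return cy + mpn_sub_n (dst, dst, ws, n);  /* total borrow out of dst -= ws */
}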
Example #5
/* Computes {rp,MIN(rn,an+bn)} <- {ap,an}*{bp,bn} Mod(B^rn-1)
 *
 * The result is expected to be ZERO if and only if one of the operands
 * already is. Otherwise the class [0] Mod(B^rn-1) is represented by
 * B^rn-1. This should not be a problem if mulmod_bnm1 is used to
 * combine results and obtain a natural number when one knows in
 * advance that the final value is less than (B^rn-1).
 * Moreover it should not be a problem if mulmod_bnm1 is used to
 * compute the full product with an+bn <= rn, because this condition
 * implies (B^an-1)(B^bn-1) < (B^rn-1) .
 *
 * Requires 0 < bn <= an <= rn and an + bn > rn/2
 * Scratch need: rn + (need for recursive call OR rn + 4). This gives
 *
 * S(n) <= rn + MAX (rn + 4, S(n/2)) <= 2rn + 4
 */
void
mpn_mulmod_bnm1 (mp_ptr rp, mp_size_t rn, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn, mp_ptr tp)
{
  ASSERT (0 < bn);
  ASSERT (bn <= an);
  ASSERT (an <= rn);

  if ((rn & 1) != 0 || BELOW_THRESHOLD (rn, MULMOD_BNM1_THRESHOLD))
    {
      if (UNLIKELY (bn < rn))
	{
	  if (UNLIKELY (an + bn <= rn))
	    {
	      mpn_mul (rp, ap, an, bp, bn);
	    }
	  else
	    {
	      mp_limb_t cy;
	      mpn_mul (tp, ap, an, bp, bn);
	      cy = mpn_add (rp, tp, rn, tp + rn, an + bn - rn);
	      MPN_INCR_U (rp, rn, cy);
	    }
	}
      else
	mpn_bc_mulmod_bnm1 (rp, ap, bp, rn, tp);
    }
  else
    {
      mp_size_t n;
      mp_limb_t cy;
      mp_limb_t hi;

      n = rn >> 1;

      /* We need at least an + bn >= n, to be able to fit one of the
	 recursive products at rp. Requiring strict inequality makes
	 the coded slightly simpler. If desired, we could avoid this
	 restriction by initially halving rn as long as rn is even and
	 an + bn <= rn/2. */

      ASSERT (an + bn > n);

      /* Compute xm = a*b mod (B^n - 1), xp = a*b mod (B^n + 1)
	 and crt together as

	 x = -xp * B^n + (B^n + 1) * [ (xp + xm)/2 mod (B^n-1)]
      */

#define a0 ap
#define a1 (ap + n)
#define b0 bp
#define b1 (bp + n)

#define xp  tp	/* 2n + 2 */
      /* am1  maybe in {xp, n} */
      /* bm1  maybe in {xp + n, n} */
#define sp1 (tp + 2*n + 2)
      /* ap1  maybe in {sp1, n + 1} */
      /* bp1  maybe in {sp1 + n + 1, n + 1} */

      {
	mp_srcptr am1, bm1;
	mp_size_t anm, bnm;
	mp_ptr so;

	bm1 = b0;
	bnm = bn;
	if (LIKELY (an > n))
	  {
	    am1 = xp;
	    cy = mpn_add (xp, a0, n, a1, an - n);
	    MPN_INCR_U (xp, n, cy);
	    anm = n;
	    so = xp + n;
	    if (LIKELY (bn > n))
	      {
		bm1 = so;
		cy = mpn_add (so, b0, n, b1, bn - n);
		MPN_INCR_U (so, n, cy);
		bnm = n;
		so += n;
	      }
	  }
	else
	  {
	    so = xp;
	    am1 = a0;
	    anm = an;
	  }

	mpn_mulmod_bnm1 (rp, n, am1, anm, bm1, bnm, so);
      }

      {
	int       k;
	mp_srcptr ap1, bp1;
	mp_size_t anp, bnp;

	bp1 = b0;
	bnp = bn;
	if (LIKELY (an > n)) {
	  ap1 = sp1;
	  cy = mpn_sub (sp1, a0, n, a1, an - n);
	  sp1[n] = 0;
	  MPN_INCR_U (sp1, n + 1, cy);
	  anp = n + ap1[n];
	  if (LIKELY (bn > n)) {
	    bp1 = sp1 + n + 1;
	    cy = mpn_sub (sp1 + n + 1, b0, n, b1, bn - n);
	    sp1[2*n+1] = 0;
	    MPN_INCR_U (sp1 + n + 1, n + 1, cy);
	    bnp = n + bp1[n];
	  }
	} else {
	  ap1 = a0;
	  anp = an;
	}

	if (BELOW_THRESHOLD (n, MUL_FFT_MODF_THRESHOLD))
	  k=0;
	else
	  {
	    int mask;
	    k = mpn_fft_best_k (n, 0);
	    mask = (1<<k) - 1;
	    while (n & mask) { k--; mask >>= 1; }
	  }
	if (k >= FFT_FIRST_K)
	  xp[n] = mpn_mul_fft (xp, n, ap1, anp, bp1, bnp, k);
	else if (UNLIKELY (bp1 == b0))
	  {
	    ASSERT (anp + bnp <= 2*n+1);
	    ASSERT (anp + bnp > n);
	    ASSERT (anp >= bnp);
	    mpn_mul (xp, ap1, anp, bp1, bnp);
	    anp = anp + bnp - n;
	    ASSERT (anp <= n || xp[2*n]==0);
	    anp-= anp > n;
	    cy = mpn_sub (xp, xp, n, xp + n, anp);
	    xp[n] = 0;
	    MPN_INCR_U (xp, n+1, cy);
	  }
	else
	  mpn_bc_mulmod_bnp1 (xp, ap1, bp1, n, xp);
      }

      /* Here the CRT recomposition begins.

	 xm <- (xp + xm)/2 = (xp + xm)B^n/2 mod (B^n-1)
	 Division by 2 is a bitwise rotation.

	 Assumes xp normalised mod (B^n+1).

	 The residue class [0] is represented by [B^n-1]; except when
	 both inputs are ZERO.
      */

#if HAVE_NATIVE_mpn_rsh1add_n || HAVE_NATIVE_mpn_rsh1add_nc
#if HAVE_NATIVE_mpn_rsh1add_nc
      cy = mpn_rsh1add_nc(rp, rp, xp, n, xp[n]); /* B^n = 1 */
      hi = cy << (GMP_NUMB_BITS - 1);
      cy = 0;
      /* next update of rp[n-1] will set cy = 1 only if rp[n-1]+=hi
	 overflows, i.e. a further increment will not overflow again. */
#else /* ! _nc */
      cy = xp[n] + mpn_rsh1add_n(rp, rp, xp, n); /* B^n = 1 */
      hi = (cy<<(GMP_NUMB_BITS-1))&GMP_NUMB_MASK; /* (cy&1) << ... */
      cy >>= 1;
      /* cy = 1 only if xp[n] = 1 i.e. {xp,n} = ZERO, this implies that
	 the rsh1add was a simple rshift: the top bit is 0. cy=1 => hi=0. */
#endif
#if GMP_NAIL_BITS == 0
      add_ssaaaa(cy, rp[n-1], cy, rp[n-1], 0, hi);
#else
      cy += (hi & rp[n-1]) >> (GMP_NUMB_BITS-1);
      rp[n-1] ^= hi;
#endif
#else /* ! HAVE_NATIVE_mpn_rsh1add_n */
#if HAVE_NATIVE_mpn_add_nc
      cy = mpn_add_nc(rp, rp, xp, n, xp[n]);
#else /* ! _nc */
      cy = xp[n] + mpn_add_n(rp, rp, xp, n); /* xp[n] == 1 implies {xp,n} == ZERO */
#endif
      cy += (rp[0]&1);
      mpn_rshift(rp, rp, n, 1);
      ASSERT (cy <= 2);
      hi = (cy<<(GMP_NUMB_BITS-1))&GMP_NUMB_MASK; /* (cy&1) << ... */
      cy >>= 1;
      /* We can have cy != 0 only if hi = 0... */
      ASSERT ((rp[n-1] & GMP_NUMB_HIGHBIT) == 0);
      rp[n-1] |= hi;
      /* ... rp[n-1] + cy can not overflow, the following INCR is correct. */
#endif
      ASSERT (cy <= 1);
      /* Next increment can not overflow, read the previous comments about cy. */
      ASSERT ((cy == 0) || ((rp[n-1] & GMP_NUMB_HIGHBIT) == 0));
      MPN_INCR_U(rp, n, cy);

      /* Compute the highest half:
	 ([(xp + xm)/2 mod (B^n-1)] - xp ) * B^n
       */
      if (UNLIKELY (an + bn < rn))
	{
	  /* Note that in this case, the only way the result can equal
	     zero mod B^{rn} - 1 is if one of the inputs is zero, and
	     then the output of both the recursive calls and this CRT
	     reconstruction is zero, not B^{rn} - 1. Which is good,
	     since the latter representation doesn't fit in the output
	     area.*/
	  cy = mpn_sub_n (rp + n, rp, xp, an + bn - n);

	  /* FIXME: This subtraction of the high parts is not really
	     necessary, we do it to get the carry out, and for sanity
	     checking. */
	  cy = xp[n] + mpn_sub_nc (xp + an + bn - n, rp + an + bn - n,
				   xp + an + bn - n, rn - (an + bn), cy);
	  ASSERT (an + bn == rn - 1 ||
		  mpn_zero_p (xp + an + bn - n + 1, rn - 1 - (an + bn)));
	  cy = mpn_sub_1 (rp, rp, an + bn, cy);
	  ASSERT (cy == (xp + an + bn - n)[0]);
	}
      else
	{
	  cy = xp[n] + mpn_sub_n (rp + n, rp, xp, n);
	  /* cy = 1 only if {xp,n+1} is not ZERO, i.e. {rp,n} is not ZERO.
	     DECR will affect _at most_ the lowest n limbs. */
	  MPN_DECR_U (rp, 2*n, cy);
	}
#undef a0
#undef a1
#undef b0
#undef b1
#undef xp
#undef sp1
    }
}
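A slow mpz-level reference for the semantics in the header comment, ignoring the truncation of the result to MIN(rn, an+bn) limbs (a test-only sketch; the name is illustrative):

#include <gmp.h>

/* r = a*b mod (B^rn - 1), with the class [0] represented by B^rn - 1
   whenever both operands are nonzero. */
static void
mulmod_bnm1_reference (mpz_ptr r, mpz_srcptr a, mpz_srcptr b, mp_size_t rn)
{
  mpz_t m;
  mpz_init_set_ui (m, 0);
  mpz_setbit (m, rn * GMP_NUMB_BITS);
  mpz_sub_ui (m, m, 1);			/* m = B^rn - 1 */
  mpz_mul (r, a, b);
  mpz_mod (r, r, m);
  if (mpz_sgn (r) == 0 && mpz_sgn (a) != 0 && mpz_sgn (b) != 0)
    mpz_set (r, m);			/* [0] is represented by B^rn - 1 */
  mpz_clear (m);
}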
Example #6
/*
   We have

	{v0,2k} {v1,2k+1} {c+4k+1,r+r2-1} 
		v0 	v1       {-}vinf

	vinf0 is the first limb of vinf, which is overwritten by v1

	{vm1,2k+1} {v2, 2k+1}

	ws is temporary space

	sa is the sign of vm1

	rr2 is r+r2

	We want to compute

     t1   <- (3*v0+2*vm1+v2)/6-2*vinf
     t2   <- (v1+vm1)/2
  then the result is c0+c1*t+c2*t^2+c3*t^3+c4*t^4 where
     c0   <- v0
     c1   <- v1 - t1
     c2   <- t2 - v0 - vinf
     c3   <- t1 - t2
     c4   <- vinf
*/ 
void
mpn_toom3_interpolate (mp_ptr c, mp_ptr v1, mp_ptr v2, mp_ptr vm1,
		                 mp_ptr vinf, mp_size_t k, mp_size_t rr2, int sa,
		                                       mp_limb_t vinf0, mp_ptr ws)
{
  mp_limb_t cy, saved;
  mp_size_t twok = k + k;
  mp_size_t kk1 = twok + 1;
  mp_ptr c1, c2, c3, c4, c5;
  mp_limb_t cout; /* final carry, should be zero at the end */

  c1 = c + k;
  c2 = c1 + k;
  c3 = c2 + k;
  c4 = c3 + k;
  c5 = c4 + k;

#define v0 (c)

/* {c,2k} {c+2k,2k+1} {c+4k+1,r+r2-1} 
		v0 	 v1          {-}vinf

	{vm1,2k+1} {v2, 2k+1}
*/

  /* v2 <- v2 - vm1 */
  if (sa < 0)
    mpn_add_n (v2, v2, vm1, kk1);
  else
    mpn_sub_n (v2, v2, vm1, kk1);

  ASSERT_NOCARRY (mpn_divexact_by3 (v2, v2, kk1));    /* v2 <- v2 / 3 */

 /* vm1 <- t2 := (v1 - sa*vm1) / 2
 */
  if (sa < 0)
    {
#ifdef HAVE_NATIVE_mpn_rsh1add_n
      mpn_rsh1add_n (vm1, v1, vm1, kk1);
#else
      mpn_add_n (vm1, vm1, v1, kk1);
      mpn_half (vm1, kk1);
#endif
    }
  else
    {
#ifdef HAVE_NATIVE_mpn_rsh1sub_n
      mpn_rsh1sub_n (vm1, v1, vm1, kk1);
#else
      mpn_sub_n (vm1, v1, vm1, kk1);
      mpn_half (vm1, kk1);
#endif
    }

  /* v1 <- v1 - v0 - vinf */

  saved = c4[0];
  c4[0] = vinf0;
#if HAVE_NATIVE_mpn_subadd_n
  cy = mpn_subadd_n(v1, v1, v0, c4, rr2);
#else
  cy = mpn_sub_n(v1, v1, v0, rr2);
  cy += mpn_sub_n(v1, v1, c4, rr2);
#endif
  c4[0] = saved;
  if (rr2 < twok)
    {
      v1[twok] -= mpn_sub_n (v1 + rr2, v1 + rr2, v0 + rr2, twok - rr2);
      MPN_DECR_U (v1 + rr2, kk1 - rr2, cy);
    }
  else
    v1[twok] -= cy;

  saved = c4[0];
  c4[0] = vinf0;
  /* subtract 5*vinf from v2 */
  cy = mpn_submul_1 (v2, c4, rr2, CNST_LIMB(5));
  MPN_DECR_U (v2 + rr2, kk1 - rr2, cy);
  c4[0] = saved;

  /* v2 = (v2 - v1)/2 (exact)
  */
#ifdef HAVE_NATIVE_mpn_rsh1sub_n
  mpn_rsh1sub_n (v2, v2, v1, kk1);
#else
  mpn_sub_n (v2, v2, v1, kk1);
  mpn_half (v2, kk1);
#endif

  /* v1 = v1 - vm1
  */
  mpn_sub_n(v1, v1, vm1, kk1);

  /* vm1 = vm1 - v2 and add vm1 in {c+k, ...} */
#if HAVE_NATIVE_mpn_addsub_n
  cy = mpn_addsub_n(c1, c1, vm1, v2, kk1);
#else
  mpn_sub_n(vm1, vm1, v2, kk1);
  cy = mpn_add_n (c1, c1, vm1, kk1);
#endif
  ASSERT_NOCARRY (mpn_add_1(c3 + 1, c3 + 1, rr2 + k - 1, cy)); /* 4k+rr2-(3k+1) = rr2+k-1 */

  /* don't forget to add vinf0 in {c+4k, ...} */
  ASSERT_NOCARRY (mpn_add_1(c4, c4, rr2, vinf0));

  /* add v2 in {c+3k, ...} */
  if (rr2 <= k + 1)
    ASSERT_NOCARRY (mpn_add_n (c3, c3, v2, k + rr2));
  else
    {
      cy = mpn_add_n (c3, c3, v2, kk1);
      if (cy)
	ASSERT_NOCARRY (mpn_add_1 (c5 + 1, c5 + 1, rr2 - k - 1, cy)); /* 4k+rr2-(5k+1) = rr2-k-1 */
    }

#undef v0
}
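The header formulas can be checked against the five evaluations of c(t) = c0 + c1*t + c2*t^2 + c3*t^3 + c4*t^4, assuming the standard Toom-3 point set t = 0, 1, -1, 2, infinity (v0 = c(0), v1 = c(1), vm1 = c(-1), v2 = c(2), vinf = c4):

$$ t_2 = \tfrac{1}{2}(v_1 + v_{-1}) = c_0 + c_2 + c_4, \qquad t_1 = \tfrac{1}{6}(3v_0 + 2v_{-1} + v_2) - 2v_\infty = c_0 + c_2 + c_3 + c_4, $$

so that v1 - t1 = c1, t2 - v0 - vinf = c2 and t1 - t2 = c3, exactly as listed above.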
Example #7
void
mpn_toom_interpolate_5pts (mp_ptr c, mp_ptr v2, mp_ptr vm1,
			   mp_size_t k, mp_size_t twor, int sa,
			   mp_limb_t vinf0)
{
  mp_limb_t cy, saved;
  mp_size_t twok;
  mp_size_t kk1;
  mp_ptr c1, v1, c3, vinf;

  twok = k + k;
  kk1 = twok + 1;

  c1 = c  + k;
  v1 = c1 + k;
  c3 = v1 + k;
  vinf = c3 + k;

#define v0 (c)
  /* (1) v2 <- v2-vm1 < v2+|vm1|,       (16 8 4 2 1) - (1 -1 1 -1  1) =
     thus 0 <= v2 < 50*B^(2k) < 2^6*B^(2k)             (15 9 3  3  0)
  */
  if (sa)
    ASSERT_NOCARRY (mpn_add_n (v2, v2, vm1, kk1));
  else
    ASSERT_NOCARRY (mpn_sub_n (v2, v2, vm1, kk1));

  /* {c,2k} {c+2k,2k+1} {c+4k+1,2r-1} {t,2k+1} {t+2k+1,2k+1} {t+4k+2,2r}
       v0       v1       hi(vinf)       |vm1|     v2-vm1      EMPTY */

  ASSERT_NOCARRY (mpn_divexact_by3 (v2, v2, kk1));    /* v2 <- v2 / 3 */
						      /* (5 3 1 1 0)*/

  /* {c,2k} {c+2k,2k+1} {c+4k+1,2r-1} {t,2k+1} {t+2k+1,2k+1} {t+4k+2,2r}
       v0       v1      hi(vinf)       |vm1|     (v2-vm1)/3    EMPTY */

  /* (2) vm1 <- tm1 := (v1 - vm1) / 2  [(1 1 1 1 1) - (1 -1 1 -1 1)] / 2 =
     tm1 >= 0                                         (0  1 0  1 0)
     No carry comes out from {v1, kk1} +/- {vm1, kk1},
     and the division by two is exact.
     If (sa!=0) the sign of vm1 is negative */
  if (sa)
    {
#ifdef HAVE_NATIVE_mpn_rsh1add_n
      mpn_rsh1add_n (vm1, v1, vm1, kk1);
#else
      ASSERT_NOCARRY (mpn_add_n (vm1, v1, vm1, kk1));
      ASSERT_NOCARRY (mpn_rshift (vm1, vm1, kk1, 1));
#endif
    }
  else
    {
#ifdef HAVE_NATIVE_mpn_rsh1sub_n
      mpn_rsh1sub_n (vm1, v1, vm1, kk1);
#else
      ASSERT_NOCARRY (mpn_sub_n (vm1, v1, vm1, kk1));
      ASSERT_NOCARRY (mpn_rshift (vm1, vm1, kk1, 1));
#endif
    }

  /* {c,2k} {c+2k,2k+1} {c+4k+1,2r-1} {t,2k+1} {t+2k+1,2k+1} {t+4k+2,2r}
       v0       v1        hi(vinf)       tm1     (v2-vm1)/3    EMPTY */

  /* (3) v1 <- t1 := v1 - v0    (1 1 1 1 1) - (0 0 0 0 1) = (1 1 1 1 0)
     t1 >= 0
  */
  vinf[0] -= mpn_sub_n (v1, v1, c, twok);

  /* {c,2k} {c+2k,2k+1} {c+4k+1,2r-1} {t,2k+1} {t+2k+1,2k+1} {t+4k+2,2r}
       v0     v1-v0        hi(vinf)       tm1     (v2-vm1)/3    EMPTY */

  /* (4) v2 <- t2 := ((v2-vm1)/3-t1)/2 = (v2-vm1-3*t1)/6
     t2 >= 0                  [(5 3 1 1 0) - (1 1 1 1 0)]/2 = (2 1 0 0 0)
  */
#ifdef HAVE_NATIVE_mpn_rsh1sub_n
  mpn_rsh1sub_n (v2, v2, v1, kk1);
#else
  ASSERT_NOCARRY (mpn_sub_n (v2, v2, v1, kk1));
  ASSERT_NOCARRY (mpn_rshift (v2, v2, kk1, 1));
#endif

  /* {c,2k} {c+2k,2k+1} {c+4k+1,2r-1} {t,2k+1} {t+2k+1,2k+1} {t+4k+2,2r}
       v0     v1-v0        hi(vinf)     tm1    (v2-vm1-3t1)/6    EMPTY */

  /* (5) v1 <- t1-tm1           (1 1 1 1 0) - (0 1 0 1 0) = (1 0 1 0 0)
     result is v1 >= 0
  */
  ASSERT_NOCARRY (mpn_sub_n (v1, v1, vm1, kk1));

  /* We do not need to read the value in vm1, so we add it in {c+k, ...} */
  cy = mpn_add_n (c1, c1, vm1, kk1);
  MPN_INCR_U (c3 + 1, twor + k - 1, cy); /* 2n-(3k+1) = 2r+k-1 */
  /* Memory allocated for vm1 is now free, it can be recycled ...*/

  /* (6) v2 <- v2 - 2*vinf,     (2 1 0 0 0) - 2*(1 0 0 0 0) = (0 1 0 0 0)
     result is v2 >= 0 */
  saved = vinf[0];       /* Remember v1's highest limb (will be overwritten). */
  vinf[0] = vinf0;       /* Set the right value for vinf0                     */
#ifdef HAVE_NATIVE_mpn_sublsh1_n
  cy = mpn_sublsh1_n (v2, v2, vinf, twor);
#else
  /* Overwrite unused vm1 */
  cy = mpn_lshift (vm1, vinf, twor, 1);
  cy += mpn_sub_n (v2, v2, vm1, twor);
#endif
  MPN_DECR_U (v2 + twor, kk1 - twor, cy);

  /* Current matrix is
     [1 0 0 0 0; vinf
      0 1 0 0 0; v2
      1 0 1 0 0; v1
      0 1 0 1 0; vm1
      0 0 0 0 1] v0
     Some values are already in place (we added vm1 in the correct position)
     | vinf|  v1 |  v0 |
	      | vm1 |
     One still is in a separate area
	| +v2 |
     We have to compute v1 -= vinf; vm1 -= v2,
	   |-vinf|
	      | -v2 |
     By carefully reordering the operations we can avoid computing the sum
     of the high half of v2 plus the low half of vinf twice.
  */

  /* Add the high half of t2 in {vinf} */
  if ( LIKELY(twor > k + 1) ) { /* This is the expected flow  */
    cy = mpn_add_n (vinf, vinf, v2 + k, k + 1);
    MPN_INCR_U (c3 + kk1, twor - k - 1, cy); /* 2n-(5k+1) = 2r-k-1 */
  } else { /* triggered only by very unbalanced cases like
	      (k+k+(k-2))x(k+k+1) , should be handled by toom32 */
    ASSERT_NOCARRY (mpn_add_n (vinf, vinf, v2 + k, twor));
  }
  /* (7) v1 <- v1 - vinf,       (1 0 1 0 0) - (1 0 0 0 0) = (0 0 1 0 0)
     result is >= 0 */
  /* Side effect: we also subtracted (high half) vm1 -= v2 */
  cy = mpn_sub_n (v1, v1, vinf, twor);          /* vinf is at most twor long.  */
  vinf0 = vinf[0];                     /* Save again the right value for vinf0 */
  vinf[0] = saved;
  MPN_DECR_U (v1 + twor, kk1 - twor, cy);       /* Treat the last limbs.       */

  /* (8) vm1 <- vm1-v2          (0 1 0 1 0) - (0 1 0 0 0) = (0 0 0 1 0)
     Operate only on the low half.
  */
  cy = mpn_sub_n (c1, c1, v2, k);
  MPN_DECR_U (v1, kk1, cy);

  /********************* Beginning the final phase **********************/

  /* Most of the recomposition was done */

  /* add t2 in {c+3k, ...}, but only the low half */
  cy = mpn_add_n (c3, c3, v2, k);
  vinf[0] += cy;
  ASSERT(vinf[0] >= cy); /* No carry */
  MPN_INCR_U (vinf, twor, vinf0); /* Add vinf0, propagate carry. */

#undef v0
}
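For reference, the parenthesized row vectors in the comments are the coefficient patterns of the five evaluations of the product polynomial; assuming the standard point set 2, -1, 1, 0, infinity (with sa recording the sign of the stored |vm1|):

$$ \begin{pmatrix} v_2 \\ v_{-1} \\ v_1 \\ v_0 \\ v_\infty \end{pmatrix} = \begin{pmatrix} 16 & 8 & 4 & 2 & 1 \\ 1 & -1 & 1 & -1 & 1 \\ 1 & 1 & 1 & 1 & 1 \\ 0 & 0 & 0 & 0 & 1 \\ 1 & 0 & 0 & 0 & 0 \end{pmatrix} \begin{pmatrix} c_4 \\ c_3 \\ c_2 \\ c_1 \\ c_0 \end{pmatrix} $$

The vector shown beside each numbered step is the combination of coefficients held in that variable after the step.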
Example #8
void
mpn_toom22_mul (mp_ptr pp,
                mp_srcptr ap, mp_size_t an,
                mp_srcptr bp, mp_size_t bn,
                mp_ptr scratch)
{
    const int __gmpn_cpuvec_initialized = 1;
    mp_size_t n, s, t;
    int vm1_neg;
    mp_limb_t cy, cy2;
    mp_ptr asm1;
    mp_ptr bsm1;

#define a0  ap
#define a1  (ap + n)
#define b0  bp
#define b1  (bp + n)

    s = an >> 1;
    n = an - s;
    t = bn - n;

    ASSERT (an >= bn);

    ASSERT (0 < s && s <= n && s >= n - 1);
    ASSERT (0 < t && t <= s);

    asm1 = pp;
    bsm1 = pp + n;

    vm1_neg = 0;

    /* Compute asm1.  */
    if (s == n)
    {
        if (mpn_cmp (a0, a1, n) < 0)
        {
            mpn_sub_n (asm1, a1, a0, n);
            vm1_neg = 1;
        }
        else
        {
            mpn_sub_n (asm1, a0, a1, n);
        }
    }
    else /* n - s == 1 */
    {
        if (a0[s] == 0 && mpn_cmp (a0, a1, s) < 0)
        {
            mpn_sub_n (asm1, a1, a0, s);
            asm1[s] = 0;
            vm1_neg = 1;
        }
        else
        {
            asm1[s] = a0[s] - mpn_sub_n (asm1, a0, a1, s);
        }
    }

    /* Compute bsm1.  */
    if (t == n)
    {
        if (mpn_cmp (b0, b1, n) < 0)
        {
            mpn_sub_n (bsm1, b1, b0, n);
            vm1_neg ^= 1;
        }
        else
        {
            mpn_sub_n (bsm1, b0, b1, n);
        }
    }
    else
    {
        if (mpn_zero_p (b0 + t, n - t) && mpn_cmp (b0, b1, t) < 0)
        {
            mpn_sub_n (bsm1, b1, b0, t);
            MPN_ZERO (bsm1 + t, n - t);
            vm1_neg ^= 1;
        }
        else
        {
            mpn_sub (bsm1, b0, n, b1, t);
        }
    }

#define v0	pp				/* 2n */
#define vinf	(pp + 2 * n)			/* s+t */
#define vm1	scratch				/* 2n */
#define scratch_out	scratch + 2 * n

    /* vm1, 2n limbs */
    TOOM22_MUL_N_REC (vm1, asm1, bsm1, n, scratch_out);

    if (s > t)  TOOM22_MUL_REC (vinf, a1, s, b1, t, scratch_out);
    else        TOOM22_MUL_N_REC (vinf, a1, b1, s, scratch_out);

    /* v0, 2n limbs */
    TOOM22_MUL_N_REC (v0, ap, bp, n, scratch_out);

    /* H(v0) + L(vinf) */
    cy = mpn_add_n (pp + 2 * n, v0 + n, vinf, n);

    /* L(v0) + H(v0) */
    cy2 = cy + mpn_add_n (pp + n, pp + 2 * n, v0, n);

    /* L(vinf) + H(vinf) */
    cy += mpn_add (pp + 2 * n, pp + 2 * n, n, vinf + n, s + t - n);

    if (vm1_neg)
        cy += mpn_add_n (pp + n, pp + n, vm1, 2 * n);
    else
        cy -= mpn_sub_n (pp + n, pp + n, vm1, 2 * n);

    ASSERT (cy + 1  <= 3);
    ASSERT (cy2 <= 2);

    MPN_INCR_U (pp + 2 * n, s + t, cy2);
    if (LIKELY (cy <= 2))
        /* if s+t==n, cy is zero, but we should not access pp[3*n] at all. */
        MPN_INCR_U (pp + 3 * n, s + t - n, cy);
    else
        MPN_DECR_U (pp + 3 * n, s + t - n, 1);
}
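The recomposition above follows the Toom-2 (Karatsuba) identity. Writing a = a1*B^n + a0 and b = b1*B^n + b0, with v0 = a0*b0, vinf = a1*b1 and vm1 = |a0 - a1| * |b0 - b1| whose true sign is recorded in vm1_neg:

$$ ab = v_0 + \bigl(v_0 + v_\infty - (-1)^{\mathtt{vm1\_neg}}\, v_{-1}\bigr)\, B^{n} + v_\infty B^{2n}, $$

which is why vm1 is added into the middle part when vm1_neg is set and subtracted otherwise.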
Example #9
/* Perform a few steps, using a combination of mpn_nhgcd2, subtraction and division.
   Reduces the size by almost one limb or more, but never below the
   given size s. Return new size for a and b, or 0 if no more steps
   are possible. M = NULL is allowed, if M is not needed.

   Needs temporary space for division, n + 1 limbs, and for
   ngcd_matrix1_vector, n limbs. */
mp_size_t
mpn_ngcd_step (mp_size_t n, mp_ptr ap, mp_ptr bp, mp_size_t s,
	       struct ngcd_matrix *M, mp_ptr tp)
{
  struct ngcd_matrix1 M1;
  mp_limb_t mask;
  mp_limb_t ah, al, bh, bl;
  mp_size_t an, bn, qn;
  mp_ptr qp;
  mp_ptr rp;
  int col;

  ASSERT (n > s);

  mask = ap[n-1] | bp[n-1];
  ASSERT (mask > 0);

  if (n == s + 1)
    {
      if (mask < 4)
	goto subtract;

      ah = ap[n-1]; al = ap[n-2];
      bh = bp[n-1]; bl = bp[n-2];
    }
  else if (mask & GMP_NUMB_HIGHBIT)
    {
      ah = ap[n-1]; al = ap[n-2];
      bh = bp[n-1]; bl = bp[n-2];
    }
  else
    {
      int shift;

      count_leading_zeros (shift, mask);
      ah = MPN_EXTRACT_LIMB (shift, ap[n-1], ap[n-2]);
      al = MPN_EXTRACT_LIMB (shift, ap[n-2], ap[n-3]);
      bh = MPN_EXTRACT_LIMB (shift, bp[n-1], bp[n-2]);
      bl = MPN_EXTRACT_LIMB (shift, bp[n-2], bp[n-3]);
    }

  /* Try an mpn_nhgcd2 step */
  if (mpn_nhgcd2 (ah, al, bh, bl, &M1))
    {
      /* Multiply M <- M * M1 */
      if (M)
	ngcd_matrix_mul_1 (M, &M1);

      /* Multiply M1^{-1} (a;b) */
      return mpn_ngcd_matrix1_vector (&M1, n, ap, bp, tp);
    }

 subtract:
  /* There are two ways in which mpn_nhgcd2 can fail. Either one of ah and
     bh was too small, or ah, bh were (almost) equal. Perform one
     subtraction step (for possible cancellation of high limbs),
     followed by one division. */

  /* Since we must ensure that #(a-b) > s, we handle cancellation of
     high limbs explicitly up front. (FIXME: Or is it better to just
     subtract, normalize, and use an addition to undo if it turns out
     the difference is too small?) */
  for (an = n; an > s; an--)
    if (ap[an-1] != bp[an-1])
      break;

  if (an == s)
    return 0;

  /* Maintain a > b. When needed, swap a and b, and let col keep track
     of how to update M. */
  if (ap[an-1] > bp[an-1])
    {
      /* a is largest. In the subtraction step, we need to update
	 column 1 of M */
      col = 1;
    }
  else
    {
      MP_PTR_SWAP (ap, bp);
      col = 0;
    }

  bn = n;
  MPN_NORMALIZE (bp, bn);  
  if (bn <= s)
    return 0;
  
  /* We have #a, #b > s. When is it possible that #(a-b) < s? For
     cancellation to happen, the numbers must be of the form

       a = x + 1, 0,            ..., 0,            al
       b = x    , GMP_NUMB_MAX, ..., GMP_NUMB_MAX, bl

     where al, bl denotes the least significant k limbs. If al < bl,
     then #(a-b) < k, and if also high(al) != 0, high(bl) != GMP_NUMB_MAX,
     then #(a-b) = k. If al >= bl, then #(a-b) = k + 1. */

  if (ap[an-1] == bp[an-1] + 1)
    {
      mp_size_t k;
      int c;
      for (k = an-1; k > s; k--)
	if (ap[k-1] != 0 || bp[k-1] != GMP_NUMB_MAX)
	  break;

      MPN_CMP (c, ap, bp, k);
      if (c < 0)
	{
	  mp_limb_t cy;
	  
	  /* The limbs from k and up are cancelled. */
	  if (k == s)
	    return 0;
	  cy = mpn_sub_n (ap, ap, bp, k);
	  ASSERT (cy == 1);
	  an = k;
	}
      else
	{
	  ASSERT_NOCARRY (mpn_sub_n (ap, ap, bp, k));
	  ap[k] = 1;
	  an = k + 1;
	}
    }
  else
    ASSERT_NOCARRY (mpn_sub_n (ap, ap, bp, an));
  
  ASSERT (an > s);
  ASSERT (ap[an-1] > 0);
  ASSERT (bn > s);
  ASSERT (bp[bn-1] > 0);
  
  if (M)
    ngcd_matrix_update_1 (M, col);

  if (an < bn)
    {
      MPN_PTR_SWAP (ap, an, bp, bn);
      col ^= 1;
    }
  else if (an == bn)
    {
      int c;
      MPN_CMP (c, ap, bp, an);
      if (c < 0)
	{
	  MP_PTR_SWAP (ap, bp);
	  col ^= 1;
	}
    }

  /* Divide a / b. Store first the quotient (qn limbs) and then the
     remainder (bn limbs) starting at tp. */
  qn = an + 1 - bn;
  qp = tp;
  rp = tp + qn;

  /* FIXME: We could use an approximate division, which may return a
     too-small quotient and only guarantees that the size of r is
     almost the size of b. */
  mpn_tdiv_qr (qp, rp, 0, ap, an, bp, bn);
  qn -= (qp[qn -1] == 0);

  /* Normalize remainder */
  an = bn;
  for ( ; an > s; an--)
    if (rp[an-1] > 0)
      break;

  if (an > s)
    /* Include leading zero limbs */
    MPN_COPY (ap, rp, bn);
  else
    {
      /* Quotient is too large */
      mp_limb_t cy;

      cy = mpn_add (ap, bp, bn, rp, an);

      if (cy > 0)
	{
	  ASSERT (bn < n);
	  ap[bn] = cy;
	  bp[bn] = 0;
	  bn++;
	}

      MPN_DECR_U (qp, qn, 1);
      qn -= (qp[qn-1] == 0);
    }

  if (qn > 0 && M)
    ngcd_matrix_update_q (M, qp, qn, col);

  return bn;
}
Example #10
void
mpn_toom_interpolate_5pts (mp_ptr c, mp_ptr v2, mp_ptr vm1,
			   mp_size_t k, mp_size_t twor, int sa,
			   mp_limb_t vinf0, mp_ptr ws)
{
  mp_limb_t cy, saved;
  mp_size_t twok = k + k;
  mp_size_t kk1 = twok + 1;
  mp_ptr c1, v1, c3, vinf, c5;
  mp_limb_t cout; /* final carry, should be zero at the end */

  c1 = c  + k;
  v1 = c1 + k;
  c3 = v1 + k;
  vinf = c3 + k;
  c5 = vinf + k;

#define v0 (c)
  /* (1) v2 <- v2-vm1 < v2+|vm1|,       (16 8 4 2 1) - (1 -1 1 -1  1) =
     thus 0 <= v2 < 50*B^(2k) < 2^6*B^(2k)             (15 9 3  3  0)
  */
  if (sa <= 0)
    mpn_add_n (v2, v2, vm1, kk1);
  else
    mpn_sub_n (v2, v2, vm1, kk1);

  /* {c,2k} {c+2k,2k+1} {c+4k+1,2r-1} {t,2k+1} {t+2k+1,2k+1} {t+4k+2,2r}
       v0       v1       hi(vinf)       |vm1|     v2-vm1      EMPTY */

  ASSERT_NOCARRY (mpn_divexact_by3 (v2, v2, kk1));    /* v2 <- v2 / 3 */
						      /* (5 3 1 1 0)*/

  /* {c,2k} {c+2k,2k+1} {c+4k+1,2r-1} {t,2k+1} {t+2k+1,2k+1} {t+4k+2,2r}
       v0       v1      hi(vinf)       |vm1|     (v2-vm1)/3    EMPTY */

  /* (2) vm1 <- tm1 := (v1 - sa*vm1) / 2  [(1 1 1 1 1) - (1 -1 1 -1 1)] / 2 =
     tm1 >= 0                                            (0  1 0  1 0)
     No carry comes out from {v1, kk1} +/- {vm1, kk1},
     and the division by two is exact */
  if (sa <= 0)
    {
#ifdef HAVE_NATIVE_mpn_rsh1add_n
      mpn_rsh1add_n (vm1, v1, vm1, kk1);
#else
      mpn_add_n (vm1, v1, vm1, kk1);
      mpn_rshift (vm1, vm1, kk1, 1);
#endif
    }
  else
    {
#ifdef HAVE_NATIVE_mpn_rsh1sub_n
      mpn_rsh1sub_n (vm1, v1, vm1, kk1);
#else
      mpn_sub_n (vm1, v1, vm1, kk1);
      mpn_rshift (vm1, vm1, kk1, 1);
#endif
    }

  /* {c,2k} {c+2k,2k+1} {c+4k+1,2r-1} {t,2k+1} {t+2k+1,2k+1} {t+4k+2,2r}
       v0       v1        hi(vinf)       tm1     (v2-vm1)/3    EMPTY */

  /* (3) v1 <- t1 := v1 - v0    (1 1 1 1 1) - (0 0 0 0 1) = (1 1 1 1 0)
     t1 >= 0
  */
  vinf[0] -= mpn_sub_n (v1, v1, c, twok);

  /* {c,2k} {c+2k,2k+1} {c+4k+1,2r-1} {t,2k+1} {t+2k+1,2k+1} {t+4k+2,2r}
       v0     v1-v0        hi(vinf)       tm1     (v2-vm1)/3    EMPTY */

  /* (4) v2 <- t2 := ((v2-vm1)/3-t1)/2 = (v2-vm1-3*t1)/6
     t2 >= 0                  [(5 3 1 1 0) - (1 1 1 1 0)]/2 = (2 1 0 0 0)
  */
#ifdef HAVE_NATIVE_mpn_rsh1sub_n
  mpn_rsh1sub_n (v2, v2, v1, kk1);
#else
  mpn_sub_n (v2, v2, v1, kk1);
  mpn_rshift (v2, v2, kk1, 1);
#endif

  /* {c,2k} {c+2k,2k+1} {c+4k+1,2r-1} {t,2k+1} {t+2k+1,2k+1} {t+4k+2,2r}
       v0     v1-v0        hi(vinf)     tm1    (v2-vm1-3t1)/6    EMPTY */

  /* (5) v1 <- t1-tm1           (1 1 1 1 0) - (0 1 0 1 0) = (1 0 1 0 0)
     result is v1 >= 0
  */
  mpn_sub_n (v1, v1, vm1, kk1);

  /* {c,2k} {c+2k,2k+1} {c+4k+1,2r-1} {t,2k+1} {t+2k+1,2k+1} {t+4k+2,2r}
       v0   v1-v0-tm1      hi(vinf)     tm1    (v2-vm1-3t1)/6    EMPTY */

  /* (6) v2 <- v2 - 2*vinf,     (2 1 0 0 0) - 2*(1 0 0 0 0) = (0 1 0 0 0)
     result is v2 >= 0 */
  saved = vinf[0];       /* Remember v1's highest limb (will be overwritten). */
  vinf[0] = vinf0;       /* Set the right value for vinf0                     */
#ifdef HAVE_NATIVE_mpn_sublsh1_n
  cy = mpn_sublsh1_n (v2, v2, vinf, twor);
#else
  cy = mpn_lshift (ws, vinf, twor, 1);
  cy += mpn_sub_n (v2, v2, ws, twor);
#endif
  MPN_DECR_U (v2 + twor, kk1 - twor, cy);

  /* (7) v1 <- v1 - vinf,       (1 0 1 0 0) - (1 0 0 0 0) = (0 0 1 0 0)
     result is >= 0 */
  cy = mpn_sub_n (v1, v1, vinf, twor);          /* vinf is at most twor long.  */
  vinf[0] = saved;
  MPN_DECR_U (v1 + twor, kk1 - twor, cy);       /* Treat the last limbs.       */
  __GMPN_ADD_1 (cout, vinf, vinf, twor, vinf0); /* Add vinf0, propagate carry. */

  /* (8) vm1 <- vm1-t2          (0 1 0 1 0) - (0 1 0 0 0) = (0 0 0 1 0)
     vm1 >= 0
  */
  mpn_sub_n (vm1, vm1, v2, kk1);            /* No overlapping here.        */

  /********************* Beginning the final phase **********************/

  /* {c,2k} {c+2k,2k  } {c+4k ,2r } {t,2k+1} {t+2k+1,2k+1} {t+4k+2,2r}
       v0       t1      hi(t1)+vinf   tm1    (v2-vm1-3t1)/6    EMPTY */

  /* (9) add t2 in {c+3k, ...} */
  cy = mpn_add_n (c3, c3, v2, kk1);
  __GMPN_ADD_1 (cout, c5 + 1, c5 + 1, twor - k - 1, cy); /* 2n-(5k+1) = 2r-k-1 */

  /* {c,2k} {c+2k,2k  } {c+4k ,2r } {t,2k+1} {t+2k+1,2k+1} {t+4k+2,2r}
       v0       t1      hi(t1)+vinf   tm1    (v2-vm1-3t1)/6    EMPTY */
  /* c   c+k  c+2k  c+3k  c+4k      t   t+2k+1  t+4k+2
     v0       t1         vinf      tm1  t2
		    +t2 */

  /* add vm1 in {c+k, ...} */
  cy = mpn_add_n (c1, c1, vm1, kk1);
  __GMPN_ADD_1 (cout, c3 + 1, c3 + 1, twor + k - 1, cy); /* 2n-(3k+1) = 2r+k-1 */

  /* c   c+k  c+2k  c+3k  c+4k      t   t+2k+1  t+4k+2
     v0       t1         vinf      tm1  t2
	  +tm1      +t2    */

#undef v0
}
Example #11
void
mpz_combit (mpz_ptr d, mp_bitcnt_t bit_index)
{
  mp_size_t dsize = SIZ(d);
  mp_ptr dp = PTR(d);

  mp_size_t limb_index = bit_index / GMP_NUMB_BITS;
  mp_limb_t bit = (CNST_LIMB (1) << (bit_index % GMP_NUMB_BITS));

  /* Check for the most common case: Positive input, no realloc or
     normalization needed. */
  if (limb_index + 1 < dsize)
    dp[limb_index] ^= bit;

  /* Check for the hairy case. d < 0, and we have all zero bits to the
     right of the bit to toggle. */
  else if (limb_index < -dsize
	   && (limb_index == 0 || mpn_zero_p (dp, limb_index))
	   && (dp[limb_index] & (bit - 1)) == 0)
    {
      ASSERT (dsize < 0);
      dsize = -dsize;

      if (dp[limb_index] & bit)
	{
	  /* We toggle the least significant one bit. Corresponds to
	     an add, with potential carry propagation, on the absolute
	     value. */
	  dp = MPZ_REALLOC (d, 1 + dsize);
	  dp[dsize] = 0;
	  MPN_INCR_U (dp + limb_index, 1 + dsize - limb_index, bit);
	  SIZ(d) = - dsize - dp[dsize];
	}
      else
	{
	  /* We toggle a zero bit, subtract from the absolute value. */
	  MPN_DECR_U (dp + limb_index, dsize - limb_index, bit);
	  /* The absolute value shrank by at most one bit. */
	  dsize -= dp[dsize - 1] == 0;
	  ASSERT (dsize > 0 && dp[dsize - 1] != 0);
	  SIZ (d) = -dsize;
	}
    }
  else
    {
      /* Simple case: Toggle the bit in the absolute value. */
      dsize = ABS(dsize);
      if (limb_index < dsize)
	{
	  mp_limb_t	 dlimb;
	  dlimb = dp[limb_index] ^ bit;
	  dp[limb_index] = dlimb;

	  /* Can happen only when limb_index = dsize - 1. Avoid SIZ(d)
	     bookkeeping in the common case. */
	  if (UNLIKELY ((dlimb == 0) + limb_index == dsize)) /* dsize == limb_index + 1 */
	    {
	      /* high limb became zero, must normalize */
	      MPN_NORMALIZE (dp, limb_index);
	      SIZ (d) = SIZ (d) >= 0 ? limb_index : -limb_index;
	    }
	}
      else
	{
	  dp = MPZ_REALLOC (d, limb_index + 1);
	  MPN_ZERO(dp + dsize, limb_index - dsize);
	  dp[limb_index++] = bit;
	  SIZ(d) = SIZ(d) >= 0 ? limb_index : -limb_index;
	}
    }
}
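Usage sketch: mpz_combit toggles a single bit of d, with negative operands behaving as if they carried infinitely many leading one bits (two's complement):

#include <gmp.h>

int
main (void)
{
  mpz_t x;
  mpz_init_set_si (x, -2);	/* ...11110 in two's complement */
  mpz_combit (x, 0);		/* toggle bit 0: ...11111, i.e. -1 */
  mpz_combit (x, 0);		/* toggle it back: x is -2 again */
  mpz_clear (x);
  return 0;
}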
Example #12
void
mpn_toom_interpolate_6pts (mp_ptr pp, mp_size_t n, enum toom6_flags flags,
			   mp_ptr w4, mp_ptr w2, mp_ptr w1,
			   mp_size_t w0n)
{
  mp_limb_t cy;
  /* cy6 can be stored in w1[2*n], cy4 in w4[0], embankment in w2[0] */
  mp_limb_t cy4, cy6, embankment;

  ASSERT( n > 0 );
  ASSERT( 2*n >= w0n && w0n > 0 );

#define w5  pp					/* 2n   */
#define w3  (pp + 2 * n)			/* 2n+1 */
#define w0  (pp + 5 * n)			/* w0n  */

  /* Interpolate with sequence:
     W2 =(W1 - W2)>>2
     W1 =(W1 - W5)>>1
     W1 =(W1 - W2)>>1
     W4 =(W3 - W4)>>1
     W2 =(W2 - W4)/3
     W3 = W3 - W4 - W5
     W1 =(W1 - W3)/3
     // Last steps are mixed with recomposition...
     W2 = W2 - W0<<2
     W4 = W4 - W2
     W3 = W3 - W1
     W2 = W2 - W0
  */

  /* W2 =(W1 - W2)>>2 */
  if (flags & toom6_vm2_neg)
    mpn_add_n (w2, w1, w2, 2 * n + 1);
  else
    mpn_sub_n (w2, w1, w2, 2 * n + 1);
  mpn_rshift (w2, w2, 2 * n + 1, 2);

  /* W1 =(W1 - W5)>>1 */
  w1[2*n] -= mpn_sub_n (w1, w1, w5, 2*n);
  mpn_rshift (w1, w1, 2 * n + 1, 1);

  /* W1 =(W1 - W2)>>1 */
#if HAVE_NATIVE_mpn_rsh1sub_n
  mpn_rsh1sub_n (w1, w1, w2, 2 * n + 1);
#else
  mpn_sub_n (w1, w1, w2, 2 * n + 1);
  mpn_rshift (w1, w1, 2 * n + 1, 1);
#endif

  /* W4 =(W3 - W4)>>1 */
  if (flags & toom6_vm1_neg)
    {
#if HAVE_NATIVE_mpn_rsh1add_n
      mpn_rsh1add_n (w4, w3, w4, 2 * n + 1);
#else
      mpn_add_n (w4, w3, w4, 2 * n + 1);
      mpn_rshift (w4, w4, 2 * n + 1, 1);
#endif
    }
  else
    {
#if HAVE_NATIVE_mpn_rsh1sub_n
      mpn_rsh1sub_n (w4, w3, w4, 2 * n + 1);
#else
      mpn_sub_n (w4, w3, w4, 2 * n + 1);
      mpn_rshift (w4, w4, 2 * n + 1, 1);
#endif
    }

  /* W2 =(W2 - W4)/3 */
  mpn_sub_n (w2, w2, w4, 2 * n + 1);
  mpn_divexact_by3 (w2, w2, 2 * n + 1);

  /* W3 = W3 - W4 - W5 */
  mpn_sub_n (w3, w3, w4, 2 * n + 1);
  w3[2 * n] -= mpn_sub_n (w3, w3, w5, 2 * n);

  /* W1 =(W1 - W3)/3 */
  mpn_sub_n (w1, w1, w3, 2 * n + 1);
  mpn_divexact_by3 (w1, w1, 2 * n + 1);

  /*
    [1 0 0 0 0 0;
     0 1 0 0 0 0;
     1 0 1 0 0 0;
     0 1 0 1 0 0;
     1 0 1 0 1 0;
     0 0 0 0 0 1]

    pp[] prior to operations:
     |_H w0__|_L w0__|______||_H w3__|_L w3__|_H w5__|_L w5__|

    summation scheme for remaining operations:
     |______________5|n_____4|n_____3|n_____2|n______|n______|pp
     |_H w0__|_L w0__|______||_H w3__|_L w3__|_H w5__|_L w5__|
				    || H w4  | L w4  |
		    || H w2  | L w2  |
	    || H w1  | L w1  |
			    ||-H w1  |-L w1  |
		     |-H w0  |-L w0 ||-H w2  |-L w2  |
  */
  cy = mpn_add_n (pp + n, pp + n, w4, 2 * n + 1);
  MPN_INCR_U (pp + 3 * n + 1, n, cy);

  /* W2 -= W0<<2 */
#if HAVE_NATIVE_mpn_sublsh_n || HAVE_NATIVE_mpn_sublsh2_n_ip1
#if HAVE_NATIVE_mpn_sublsh2_n_ip1
  cy = mpn_sublsh2_n_ip1 (w2, w0, w0n);
#else
  cy = mpn_sublsh_n (w2, w2, w0, w0n, 2);
#endif
#else
  /* {W4,2*n+1} is now free and can be overwritten. */
  cy = mpn_lshift(w4, w0, w0n, 2);
  cy+= mpn_sub_n(w2, w2, w4, w0n);
#endif
  MPN_DECR_U (w2 + w0n, 2 * n + 1 - w0n, cy);

  /* W4L = W4L - W2L */
  cy = mpn_sub_n (pp + n, pp + n, w2, n);
  MPN_DECR_U (w3, 2 * n + 1, cy);

  /* W3H = W3H + W2L */
  cy4 = w3[2 * n] + mpn_add_n (pp + 3 * n, pp + 3 * n, w2, n);
  /* W1L + W2H */
  cy = w2[2 * n] + mpn_add_n (pp + 4 * n, w1, w2 + n, n);
  MPN_INCR_U (w1 + n, n + 1, cy);

  /* W0 = W0 + W1H */
  if (LIKELY (w0n > n))
    cy6 = w1[2 * n] + mpn_add_n (w0, w0, w1 + n, n);
  else
    cy6 = mpn_add_n (w0, w0, w1 + n, w0n);

  /*
    summation scheme for the next operation:
     |...____5|n_____4|n_____3|n_____2|n______|n______|pp
     |...w0___|_w1_w2_|_H w3__|_L w3__|_H w5__|_L w5__|
		     ...-w0___|-w1_w2 |
  */
  /* if(LIKELY(w0n>n)) the two operands below DO overlap! */
  cy = mpn_sub_n (pp + 2 * n, pp + 2 * n, pp + 4 * n, n + w0n);

  /* embankment is a "dirty trick" to avoid carry/borrow propagation
     beyond allocated memory */
  embankment = w0[w0n - 1] - 1;
  w0[w0n - 1] = 1;
  if (LIKELY (w0n > n)) {
    if (cy4 > cy6)
      MPN_INCR_U (pp + 4 * n, w0n + n, cy4 - cy6);
    else
      MPN_DECR_U (pp + 4 * n, w0n + n, cy6 - cy4);
    MPN_DECR_U (pp + 3 * n + w0n, 2 * n, cy);
    MPN_INCR_U (w0 + n, w0n - n, cy6);
  } else {
    MPN_INCR_U (pp + 4 * n, w0n + n, cy4);
    MPN_DECR_U (pp + 3 * n + w0n, 2 * n, cy + cy6);
  }
  w0[w0n - 1] += embankment;

#undef w5
#undef w3
#undef w0

}
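The "embankment" used above, shown in isolation: forcing the top limb of w0 to 1 means a borrow stops in that limb and an incoming carry cannot overflow it, so none of the MPN_INCR_U/MPN_DECR_U calls can run past the allocated area; the final re-addition of the saved difference wraps mod B and so folds any absorbed carry or borrow back in. A sketch of the pattern, assuming the gmp-impl.h internals used throughout these examples:

static void
decr_with_embankment (mp_ptr p, mp_size_t n, mp_limb_t d)
{
  mp_limb_t embankment = p[n - 1] - 1;	/* save top limb, minus the sentinel */
  p[n - 1] = 1;				/* nonzero sentinel: borrows stop here */
  MPN_DECR_U (p, n, d);			/* may borrow, but never past p[n-1] */
  p[n - 1] += embankment;		/* restore; addition wraps mod B, so a
					   borrow taken by the sentinel is kept */
}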
Example #13
void
mpz_setbit (mpz_ptr d, mp_bitcnt_t bit_idx)
{
  mp_size_t dsize = SIZ (d);
  mp_ptr dp = PTR (d);
  mp_size_t limb_idx;
  mp_limb_t mask;

  limb_idx = bit_idx / GMP_NUMB_BITS;
  mask = CNST_LIMB(1) << (bit_idx % GMP_NUMB_BITS);
  if (dsize >= 0)
    {
      if (limb_idx < dsize)
	{
	  dp[limb_idx] |= mask;
	}
      else
	{
	  /* Ugh.  The bit should be set outside of the end of the
	     number.  We have to increase the size of the number.  */
	  dp = MPZ_REALLOC (d, limb_idx + 1);
	  SIZ (d) = limb_idx + 1;
	  MPN_ZERO (dp + dsize, limb_idx - dsize);
	  dp[limb_idx] = mask;
	}
    }
  else
    {
      /* Simulate two's complement arithmetic, i.e. simulate
	 1. Set OP = ~(OP - 1) [with infinitely many leading ones].
	 2. Set the bit.
	 3. Set OP = ~OP + 1.  */

      dsize = -dsize;

      if (limb_idx < dsize)
	{
	  mp_size_t zero_bound;
	  /* No index upper bound on this loop, we're sure there's a non-zero limb
	     sooner or later.  */
	  zero_bound = 0;
	  while (dp[zero_bound] == 0)
	    zero_bound++;

	  if (limb_idx > zero_bound)
	    {
	      mp_limb_t	 dlimb;
	      dlimb = dp[limb_idx] & ~mask;
	      dp[limb_idx] = dlimb;

	      if (UNLIKELY ((dlimb == 0) + limb_idx == dsize)) /* dsize == limb_idx + 1 */
		{
		  /* high limb became zero, must normalize */
		  MPN_NORMALIZE (dp, limb_idx);
		  SIZ (d) = -limb_idx;
		}
	    }
	  else if (limb_idx == zero_bound)
	    {
	      dp[limb_idx] = ((dp[limb_idx] - 1) & ~mask) + 1;
	      ASSERT (dp[limb_idx] != 0);
	    }
	  else
	    {
	      MPN_DECR_U (dp + limb_idx, dsize - limb_idx, mask);
	      dsize -= dp[dsize - 1] == 0;
	      SIZ (d) = -dsize;
	    }
	}
    }
}
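Usage sketch for the negative branch: like mpz_combit above, mpz_setbit views a negative operand as having infinitely many leading one bits:

#include <gmp.h>

int
main (void)
{
  mpz_t x;
  mpz_init_set_si (x, -5);	/* ...111011 in two's complement */
  mpz_setbit (x, 2);		/* set bit 2: ...111111, i.e. -1 */
  mpz_clear (x);
  return 0;
}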
Example #14
/* if approx is non-zero, does not compute the final remainder */
static mp_size_t
mpn_rootrem_internal (mp_ptr rootp, mp_ptr remp, mp_srcptr up, mp_size_t un,
		      mp_limb_t k, int approx)
{
  mp_ptr qp, rp, sp, wp, scratch;
  mp_size_t qn, rn, sn, wn, nl, bn;
  mp_limb_t save, save2, cy;
  unsigned long int unb; /* number of significant bits of {up,un} */
  unsigned long int xnb; /* number of significant bits of the result */
  unsigned int cnt;
  unsigned long b, kk;
  unsigned long sizes[GMP_NUMB_BITS + 1];
  int ni, i;
  int c;
  int logk;
  TMP_DECL;

  TMP_MARK;

  /* qp and wp need enough space to store S'^k where S' is an approximate
     root. Since S' can be as large as S+2, the worst case is when S=2 and
     S'=4. But then since we know the number of bits of S in advance, S'
     can only be 3 at most. Similarly for S=4, then S' can be 6 at most.
     So the worst case is S'/S=3/2, thus S'^k <= (3/2)^k * S^k. Since S^k
     fits in un limbs, the number of extra limbs needed is bounded by
     ceil(k*log2(3/2)/GMP_NUMB_BITS). */
#define EXTRA (2 + (mp_size_t) (0.585 * (double) k / (double) GMP_NUMB_BITS))
  qp = TMP_ALLOC_LIMBS (un + EXTRA); /* will contain quotient and remainder
					of R/(k*S^(k-1)), and S^k */
  if (remp == NULL)
    {
      rp = TMP_ALLOC_LIMBS (un + 1);     /* will contain the remainder */
      scratch = rp;			 /* used by mpn_div_q */
    }
  else
    {
      scratch = TMP_ALLOC_LIMBS (un + 1); /* used by mpn_div_q */
      rp = remp;
    }
  sp = rootp;
  wp = TMP_ALLOC_LIMBS (un + EXTRA); /* will contain S^(k-1), k*S^(k-1),
					and temporary for mpn_pow_1 */
  count_leading_zeros (cnt, up[un - 1]);
  unb = un * GMP_NUMB_BITS - cnt + GMP_NAIL_BITS;
  /* unb is the number of bits of the input U */

  xnb = (unb - 1) / k + 1;	/* ceil (unb / k) */
  /* xnb is the number of bits of the root R */

  if (xnb == 1) /* root is 1 */
    {
      if (remp == NULL)
	remp = rp;
      mpn_sub_1 (remp, up, un, (mp_limb_t) 1);
      MPN_NORMALIZE (remp, un);	/* There should be at most one zero limb,
				   if we demand u to be normalized  */
      rootp[0] = 1;
      TMP_FREE;
      return un;
    }

  /* We initialize the algorithm with a 1-bit approximation to zero: since we
     know the root has exactly xnb bits, we write r0 = 2^(xnb-1), so that
     r0^k = 2^(k*(xnb-1)), which we subtract from the input. */
  kk = k * (xnb - 1);		/* number of truncated bits in the input */
  rn = un - kk / GMP_NUMB_BITS; /* number of limbs of the non-truncated part */
  MPN_RSHIFT (cy, rp, up + kk / GMP_NUMB_BITS, rn, kk % GMP_NUMB_BITS);
  mpn_sub_1 (rp, rp, rn, 1);	/* subtract the initial approximation: since
				   the non-truncated part is less than 2^k, it
				   is <= k bits: rn <= ceil(k/GMP_NUMB_BITS) */
  sp[0] = 1;			/* initial approximation */
  sn = 1;			/* it has one limb */

  for (logk = 1; ((k - 1) >> logk) != 0; logk++)
    ;
  /* logk = ceil(log(k)/log(2)) */

  b = xnb - 1; /* number of remaining bits to determine in the kth root */
  ni = 0;
  while (b != 0)
    {
      /* invariant: here we want b+1 total bits for the kth root */
      sizes[ni] = b;
      /* if c is the new value of b, this means that we'll go from a root
	 of c+1 bits (say s') to a root of b+1 bits.
	 It is proved in the book "Modern Computer Arithmetic" from Brent
	 and Zimmermann, Chapter 1, that
	 if s' >= k*beta, then at most one correction is necessary.
	 Here beta = 2^(b-c), and s' >= 2^c, thus it suffices that
	 c >= ceil((b + log2(k))/2). */
      b = (b + logk + 1) / 2;
      if (b >= sizes[ni])
	b = sizes[ni] - 1;	/* add just one bit at a time */
      ni++;
    }
  sizes[ni] = 0;
  ASSERT_ALWAYS (ni < GMP_NUMB_BITS + 1);
  /* We have sizes[0] = b > sizes[1] > ... > sizes[ni] = 0 with
     sizes[i] <= 2 * sizes[i+1].
     Newton iteration will first compute sizes[ni-1] extra bits,
     then sizes[ni-2], ..., then sizes[0] = b. */

  wp[0] = 1; /* {sp,sn}^(k-1) = 1 */
  wn = 1;
  for (i = ni; i != 0; i--)
    {
      /* 1: loop invariant:
	 {sp, sn} is the current approximation of the root, which has
		  exactly 1 + sizes[ni] bits.
	 {rp, rn} is the current remainder
	 {wp, wn} = {sp, sn}^(k-1)
	 kk = number of truncated bits of the input
      */
      b = sizes[i - 1] - sizes[i]; /* number of bits to compute in that
				      iteration */

      /* Reinsert a low zero limb if we normalized away the entire remainder */
      if (rn == 0)
	{
	  rp[0] = 0;
	  rn = 1;
	}

      /* first multiply the remainder by 2^b */
      MPN_LSHIFT (cy, rp + b / GMP_NUMB_BITS, rp, rn, b % GMP_NUMB_BITS);
      rn = rn + b / GMP_NUMB_BITS;
      if (cy != 0)
	{
	  rp[rn] = cy;
	  rn++;
	}

      kk = kk - b;

      /* 2: current buffers: {sp,sn}, {rp,rn}, {wp,wn} */

      /* Now insert bits [kk,kk+b-1] from the input U */
      bn = b / GMP_NUMB_BITS; /* lowest limb from high part of rp[] */
      save = rp[bn];
      /* nl is the number of limbs in U which contain bits [kk,kk+b-1] */
      nl = 1 + (kk + b - 1) / GMP_NUMB_BITS - (kk / GMP_NUMB_BITS);
      /* nl  = 1 + floor((kk + b - 1) / GMP_NUMB_BITS)
		 - floor(kk / GMP_NUMB_BITS)
	     <= 1 + (kk + b - 1) / GMP_NUMB_BITS
		  - (kk - GMP_NUMB_BITS + 1) / GMP_NUMB_BITS
	     = 2 + (b - 2) / GMP_NUMB_BITS
	 thus since nl is an integer:
	 nl <= 2 + floor(b/GMP_NUMB_BITS) <= 2 + bn. */
      /* we have to save rp[bn] up to rp[nl-1], i.e. 1 or 2 limbs */
      if (nl - 1 > bn)
	save2 = rp[bn + 1];
      MPN_RSHIFT (cy, rp, up + kk / GMP_NUMB_BITS, nl, kk % GMP_NUMB_BITS);
      /* set to zero high bits of rp[bn] */
      rp[bn] &= ((mp_limb_t) 1 << (b % GMP_NUMB_BITS)) - 1;
      /* restore corresponding bits */
      rp[bn] |= save;
      if (nl - 1 > bn)
	rp[bn + 1] = save2; /* the low b bits go in rp[0..bn] only, since
			       they start by bit 0 in rp[0], so they use
			       at most ceil(b/GMP_NUMB_BITS) limbs */

      /* 3: current buffers: {sp,sn}, {rp,rn}, {wp,wn} */

      /* compute {wp, wn} = k * {sp, sn}^(k-1) */
      cy = mpn_mul_1 (wp, wp, wn, k);
      wp[wn] = cy;
      wn += cy != 0;

      /* 4: current buffers: {sp,sn}, {rp,rn}, {wp,wn} */

      /* now divide {rp, rn} by {wp, wn} to get the low part of the root */
      if (rn < wn)
	{
	  qn = 0;
	}
      else
	{
	  qn = rn - wn; /* expected quotient size */
	  /* The quotient needs rn-wn+1 limbs; mpn_div_q uses the scratch
	     area allocated above as temporary space. */
	  mpn_div_q (qp, rp, rn, wp, wn, scratch);
	  qn += qp[qn] != 0;
	}

      /* 5: current buffers: {sp,sn}, {qp,qn}.
	 Note: {rp,rn} is not needed any more since we'll compute it from
	 scratch at the end of the loop.
       */

      /* Number of limbs used by b bits, when least significant bit is
	 aligned to least limb */
      bn = (b - 1) / GMP_NUMB_BITS + 1;

      /* the quotient should be smaller than 2^b, since the previous
	 approximation was correctly rounded toward zero */
      if (qn > bn || (qn == bn && (b % GMP_NUMB_BITS != 0) &&
		      qp[qn - 1] >= ((mp_limb_t) 1 << (b % GMP_NUMB_BITS))))
	{
	  qn = b / GMP_NUMB_BITS + 1; /* b+1 bits */
	  MPN_ZERO (qp, qn);
	  qp[qn - 1] = (mp_limb_t) 1 << (b % GMP_NUMB_BITS);
	  MPN_DECR_U (qp, qn, 1);
	  qn -= qp[qn - 1] == 0;
	}

      /* 6: current buffers: {sp,sn}, {qp,qn} */

      /* multiply the root approximation by 2^b */
      MPN_LSHIFT (cy, sp + b / GMP_NUMB_BITS, sp, sn, b % GMP_NUMB_BITS);
      sn = sn + b / GMP_NUMB_BITS;
      if (cy != 0)
	{
	  sp[sn] = cy;
	  sn++;
	}

      /* 7: current buffers: {sp,sn}, {qp,qn} */

      ASSERT_ALWAYS (bn >= qn); /* this is ok since in the case qn > bn
				   above, q is set to 2^b-1, which has
				   exactly bn limbs */

      /* Combine sB and q to form sB + q.  */
      save = sp[b / GMP_NUMB_BITS];
      MPN_COPY (sp, qp, qn);
      MPN_ZERO (sp + qn, bn - qn);
      sp[b / GMP_NUMB_BITS] |= save;

      /* 8: current buffer: {sp,sn} */

      /* Since each iteration treats b bits from the root and thus k*b bits
	 from the input, and we already considered b bits from the input,
	 we now have to take another (k-1)*b bits from the input. */
      kk -= (k - 1) * b; /* remaining input bits */
      /* {rp, rn} = floor({up, un} / 2^kk) */
      MPN_RSHIFT (cy, rp, up + kk / GMP_NUMB_BITS, un - kk / GMP_NUMB_BITS, kk % GMP_NUMB_BITS);
      rn = un - kk / GMP_NUMB_BITS;
      rn -= rp[rn - 1] == 0;

      /* 9: current buffers: {sp,sn}, {rp,rn} */

     for (c = 0;; c++)
	{
	  /* Compute S^k in {qp,qn}. */
	  if (i == 1)
	    {
	      /* Last iteration: we don't need W anymore. */
	      /* mpn_pow_1 requires that both qp and wp have enough space to
		 store the result {sp,sn}^k + 1 limb */
	      approx = approx && (sp[0] > 1);
	      qn = (approx == 0) ? mpn_pow_1 (qp, sp, sn, k, wp) : 0;
	    }
	  else
	    {
	      /* W <- S^(k-1) for the next iteration,
		 and S^k = W * S. */
	      wn = mpn_pow_1 (wp, sp, sn, k - 1, qp);
	      mpn_mul (qp, wp, wn, sp, sn);
	      qn = wn + sn;
	      qn -= qp[qn - 1] == 0;
	    }

	  /* if S^k > floor(U/2^kk), the root approximation was too large */
	  if (qn > rn || (qn == rn && mpn_cmp (qp, rp, rn) > 0))
	    MPN_DECR_U (sp, sn, 1);
	  else
	    break;
	}

      /* 10: current buffers: {sp,sn}, {rp,rn}, {qp,qn}, {wp,wn} */

      ASSERT_ALWAYS (c <= 1);
      ASSERT_ALWAYS (rn >= qn);

      /* R = R - Q = floor(U/2^kk) - S^k */
      if ((i > 1) || (approx == 0))
	{
	  mpn_sub (rp, rp, rn, qp, qn);
	  MPN_NORMALIZE (rp, rn);
	}
      /* otherwise we have rn > 0, thus the return value is ok */

      /* 11: current buffers: {sp,sn}, {rp,rn}, {wp,wn} */
    }

  TMP_FREE;
  return rn;
}
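Each pass of the loop above is one discrete Newton step for the k-th root, gaining b low bits at a time: with s the current approximation, r the current remainder and r' the remainder after shifting in the next b input bits,

$$ s' = s \cdot 2^{b} + \left\lfloor \frac{r'}{k\, s^{k-1}} \right\rfloor, $$

followed by at most one downward correction of s' (the inner loop on c), per the bound from Brent and Zimmermann cited in the comments.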
void
mpn_toom_interpolate_12pts (mp_ptr pp, mp_ptr r1, mp_ptr r3, mp_ptr r5,
			mp_size_t n, mp_size_t spt, int half, mp_ptr wsi)
{
  mp_limb_t cy;
  mp_size_t n3;
  mp_size_t n3p1;
  n3 = 3 * n;
  n3p1 = n3 + 1;

#define   r4    (pp + n3)			/* 3n+1 */
#define   r2    (pp + 7 * n)			/* 3n+1 */
#define   r0    (pp +11 * n)			/* s+t <= 2*n */

  /******************************* interpolation *****************************/
  if (half != 0) {
    cy = mpn_sub_n (r3, r3, r0, spt);
    MPN_DECR_U (r3 + spt, n3p1 - spt, cy);

    cy = DO_mpn_sublsh_n (r2, r0, spt, 10, wsi);
    MPN_DECR_U (r2 + spt, n3p1 - spt, cy);
    DO_mpn_subrsh(r5, n3p1, r0, spt, 2, wsi);

    cy = DO_mpn_sublsh_n (r1, r0, spt, 20, wsi);
    MPN_DECR_U (r1 + spt, n3p1 - spt, cy);
    DO_mpn_subrsh(r4, n3p1, r0, spt, 4, wsi);
  }

  r4[n3] -= DO_mpn_sublsh_n (r4 + n, pp, 2 * n, 20, wsi);
  DO_mpn_subrsh(r1 + n, 2 * n + 1, pp, 2 * n, 4, wsi);

#if HAVE_NATIVE_mpn_add_n_sub_n
  mpn_add_n_sub_n (r1, r4, r4, r1, n3p1);
#else
  ASSERT_NOCARRY(mpn_add_n (wsi, r1, r4, n3p1));
  mpn_sub_n (r4, r4, r1, n3p1); /* can be negative */
  MP_PTR_SWAP(r1, wsi);
#endif

  r5[n3] -= DO_mpn_sublsh_n (r5 + n, pp, 2 * n, 10, wsi);
  DO_mpn_subrsh(r2 + n, 2 * n + 1, pp, 2 * n, 2, wsi);

#if HAVE_NATIVE_mpn_add_n_sub_n
  mpn_add_n_sub_n (r2, r5, r5, r2, n3p1);
#else
  mpn_sub_n (wsi, r5, r2, n3p1); /* can be negative */
  ASSERT_NOCARRY(mpn_add_n (r2, r2, r5, n3p1));
  MP_PTR_SWAP(r5, wsi);
#endif

  r3[n3] -= mpn_sub_n (r3+n, r3+n, pp, 2 * n);

#if AORSMUL_FASTER_AORS_AORSLSH
  mpn_submul_1 (r4, r5, n3p1, 257); /* can be negative */
#else
  mpn_sub_n (r4, r4, r5, n3p1); /* can be negative */
  DO_mpn_sublsh_n (r4, r5, n3p1, 8, wsi); /* can be negative */
#endif
  /* A division by 2835x4 follows. Warning: the operand can be negative! */
  mpn_divexact_by2835x4(r4, r4, n3p1);
  if ((r4[n3] & (GMP_NUMB_MAX << (GMP_NUMB_BITS-3))) != 0)
    r4[n3] |= (GMP_NUMB_MAX << (GMP_NUMB_BITS-2));

#if AORSMUL_FASTER_2AORSLSH
  mpn_addmul_1 (r5, r4, n3p1, 60); /* can be negative */
#else
  DO_mpn_sublsh_n (r5, r4, n3p1, 2, wsi); /* can be negative */
  DO_mpn_addlsh_n (r5, r4, n3p1, 6, wsi); /* can give a carry */
#endif
  mpn_divexact_by255(r5, r5, n3p1);

  ASSERT_NOCARRY(DO_mpn_sublsh_n (r2, r3, n3p1, 5, wsi));

#if AORSMUL_FASTER_3AORSLSH
  ASSERT_NOCARRY(mpn_submul_1 (r1, r2, n3p1, 100));
#else
  ASSERT_NOCARRY(DO_mpn_sublsh_n (r1, r2, n3p1, 6, wsi));
  ASSERT_NOCARRY(DO_mpn_sublsh_n (r1, r2, n3p1, 5, wsi));
  ASSERT_NOCARRY(DO_mpn_sublsh_n (r1, r2, n3p1, 2, wsi));
#endif
  ASSERT_NOCARRY(DO_mpn_sublsh_n (r1, r3, n3p1, 9, wsi));
  mpn_divexact_by42525(r1, r1, n3p1);

#if AORSMUL_FASTER_AORS_2AORSLSH
  ASSERT_NOCARRY(mpn_submul_1 (r2, r1, n3p1, 225));
#else
  ASSERT_NOCARRY(mpn_sub_n (r2, r2, r1, n3p1));
  ASSERT_NOCARRY(DO_mpn_addlsh_n (r2, r1, n3p1, 5, wsi));
  ASSERT_NOCARRY(DO_mpn_sublsh_n (r2, r1, n3p1, 8, wsi));
#endif
  mpn_divexact_by9x4(r2, r2, n3p1);

  ASSERT_NOCARRY(mpn_sub_n (r3, r3, r2, n3p1));

  mpn_sub_n (r4, r2, r4, n3p1);
  ASSERT_NOCARRY(mpn_rshift(r4, r4, n3p1, 1));
  ASSERT_NOCARRY(mpn_sub_n (r2, r2, r4, n3p1));

  mpn_add_n (r5, r5, r1, n3p1);
  ASSERT_NOCARRY(mpn_rshift(r5, r5, n3p1, 1));

  /* last interpolation steps... */
  ASSERT_NOCARRY(mpn_sub_n (r3, r3, r1, n3p1));
  ASSERT_NOCARRY(mpn_sub_n (r1, r1, r5, n3p1));
  /* ... could be mixed with recomposition
	||H-r5|M-r5|L-r5|   ||H-r1|M-r1|L-r1|
  */

  /***************************** recomposition *******************************/
  /*
    pp[] prior to operations:
    |M r0|L r0|___||H r2|M r2|L r2|___||H r4|M r4|L r4|____|H_r6|L r6|pp

    summation scheme for remaining operations:
    |__12|n_11|n_10|n__9|n__8|n__7|n__6|n__5|n__4|n__3|n__2|n___|n___|pp
    |M r0|L r0|___||H r2|M r2|L r2|___||H r4|M r4|L r4|____|H_r6|L r6|pp
	||H r1|M r1|L r1|   ||H r3|M r3|L r3|   ||H_r5|M_r5|L_r5|
  */

  cy = mpn_add_n (pp + n, pp + n, r5, n);
  cy = mpn_add_1 (pp + 2 * n, r5 + n, n, cy);
#if HAVE_NATIVE_mpn_add_nc
  cy = r5[n3] + mpn_add_nc(pp + n3, pp + n3, r5 + 2 * n, n, cy);
#else
  MPN_INCR_U (r5 + 2 * n, n + 1, cy);
  cy = r5[n3] + mpn_add_n (pp + n3, pp + n3, r5 + 2 * n, n);
#endif
  MPN_INCR_U (pp + n3 + n, 2 * n + 1, cy);

  pp[2 * n3] += mpn_add_n (pp + 5 * n, pp + 5 * n, r3, n);
  cy = mpn_add_1 (pp + 2 * n3, r3 + n, n, pp[2 * n3]);
#if HAVE_NATIVE_mpn_add_nc
  cy = r3[n3] + mpn_add_nc(pp + 7 * n, pp + 7 * n, r3 + 2 * n, n, cy);
#else
  MPN_INCR_U (r3 + 2 * n, n + 1, cy);
  cy = r3[n3] + mpn_add_n (pp + 7 * n, pp + 7 * n, r3 + 2 * n, n);
#endif
  MPN_INCR_U (pp + 8 * n, 2 * n + 1, cy);

  pp[10 * n] += mpn_add_n (pp + 9 * n, pp + 9 * n, r1, n);
  if (half) {
    cy = mpn_add_1 (pp + 10 * n, r1 + n, n, pp[10 * n]);
#if HAVE_NATIVE_mpn_add_nc
    if (LIKELY (spt > n)) {
      cy = r1[n3] + mpn_add_nc(pp + 11 * n, pp + 11 * n, r1 + 2 * n, n, cy);
      MPN_INCR_U (pp + 4 * n3, spt - n, cy);
    } else {
      ASSERT_NOCARRY(mpn_add_nc(pp + 11 * n, pp + 11 * n, r1 + 2 * n, spt, cy));
    }
#else
    MPN_INCR_U (r1 + 2 * n, n + 1, cy);
    if (LIKELY (spt > n)) {
      cy = r1[n3] + mpn_add_n (pp + 11 * n, pp + 11 * n, r1 + 2 * n, n);
      MPN_INCR_U (pp + 4 * n3, spt - n, cy);
    } else {
      ASSERT_NOCARRY(mpn_add_n (pp + 11 * n, pp + 11 * n, r1 + 2 * n, spt));
    }
#endif
  } else {
    ASSERT_NOCARRY(mpn_add_1 (pp + 10 * n, r1 + n, spt, pp[10 * n]));
  }

#undef   r0
#undef   r2
#undef   r4
}
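/* Illustrative aside (not GMP source): the divexact_by255/by9x4/by42525/
   by2835x4 steps above are exact divisions by known odd factors (times a
   power of 2).  For an odd divisor d, exact division mod 2^64 is just
   multiplication by d's inverse mod 2^64, which a short Newton iteration
   computes; a hedged single-word sketch: */
#include <stdint.h>
#include <assert.h>

static uint64_t inv_mod_2_64 (uint64_t d)	/* d must be odd */
{
  uint64_t x = d;		/* d*d == 1 mod 8, so x is correct to 3 bits */
  int i;
  for (i = 0; i < 5; i++)
    x *= 2 - d * x;		/* each step doubles the number of good bits */
  return x;
}

int main (void)
{
  uint64_t d = 255, m = 1234567;	/* m*d is a known multiple of d */
  assert ((m * d) * inv_mod_2_64 (d) == m);	/* exact quotient recovered */
  return 0;
}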
Example #16
void
mpn_toom2_sqr (mp_ptr pp,
	       mp_srcptr ap, mp_size_t an,
	       mp_ptr scratch)
{
  const int __gmpn_cpuvec_initialized = 1;
  mp_size_t n, s;
  mp_limb_t cy, cy2;
  mp_ptr asm1;

#define a0  ap
#define a1  (ap + n)

  s = an >> 1;
  n = an - s;

  ASSERT (0 < s && s <= n && s >= n - 1);

  asm1 = pp;

  /* Compute asm1.  */
  if (s == n)
    {
      if (mpn_cmp (a0, a1, n) < 0)
	{
	  mpn_sub_n (asm1, a1, a0, n);
	}
      else
	{
	  mpn_sub_n (asm1, a0, a1, n);
	}
    }
  else /* n - s == 1 */
    {
      if (a0[s] == 0 && mpn_cmp (a0, a1, s) < 0)
	{
	  mpn_sub_n (asm1, a1, a0, s);
	  asm1[s] = 0;
	}
      else
	{
	  asm1[s] = a0[s] - mpn_sub_n (asm1, a0, a1, s);
	}
    }

#define v0	pp				/* 2n */
#define vinf	(pp + 2 * n)			/* s+s */
#define vm1	scratch				/* 2n */
#define scratch_out	(scratch + 2 * n)

  /* vm1, 2n limbs */
  TOOM2_SQR_REC (vm1, asm1, n, scratch_out);

  /* vinf, s+s limbs */
  TOOM2_SQR_REC (vinf, a1, s, scratch_out);

  /* v0, 2n limbs */
  TOOM2_SQR_REC (v0, ap, n, scratch_out);

  /* H(v0) + L(vinf) */
  cy = mpn_add_n (pp + 2 * n, v0 + n, vinf, n);

  /* L(v0) + H(v0) */
  cy2 = cy + mpn_add_n (pp + n, pp + 2 * n, v0, n);

  /* L(vinf) + H(vinf) */
  cy += mpn_add (pp + 2 * n, pp + 2 * n, n, vinf + n, s + s - n);

  cy -= mpn_sub_n (pp + n, pp + n, vm1, 2 * n);

  ASSERT (cy + 1 <= 3);
  ASSERT (cy2 <= 2);

  MPN_INCR_U (pp + 2 * n, s + s, cy2);
  if (LIKELY (cy <= 2))
    MPN_INCR_U (pp + 3 * n, s + s - n, cy);
  else
    MPN_DECR_U (pp + 3 * n, s + s - n, 1);
}
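/* Illustrative aside (not GMP source): the recombination above relies on the
   Toom-2 squaring identity
     (a1*B + a0)^2 = a1^2*B^2 + (a0^2 + a1^2 - (a0 - a1)^2)*B + a0^2,
   i.e. the middle term 2*a0*a1 is recovered from the three squares v0, vinf
   and vm1.  A hedged single-word check: */
#include <stdint.h>
#include <assert.h>

int main (void)
{
  uint32_t a0 = 0xBEEF, a1 = 0xDEAD;		/* the two halves of the input */
  uint64_t v0   = (uint64_t) a0 * a0;		/* low square */
  uint64_t vinf = (uint64_t) a1 * a1;		/* high square */
  uint64_t d    = a0 > a1 ? a0 - a1 : a1 - a0;	/* |a0 - a1|; sign cancels */
  uint64_t vm1  = d * d;			/* (a0 - a1)^2 */
  assert (v0 + vinf - vm1 == 2 * (uint64_t) a0 * a1);	/* middle term */
  return 0;
}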
Example #17
/* put in {c, 2n} where n = 2k+r the value of {v0,2k} (already in place)
   + B^k * [{v1, 2k+1} - {t1, 2k+1}]
   + B^(2k) * [{t2, 2k+1} - {v0+vinf, 2k}]
   + B^(3k) * [{t1, 2k+1} - {t2, 2k+1}]
   + B^(4k) * {vinf,2r} (high 2r-1 limbs already in place)
   where {t1, 2k+1} = (3*{v0,2k}+2*sa*{vm1,2k+1}+{v2,2k+1})/6-2*{vinf,2r}
	 {t2, 2k+1} = ({v1, 2k+1} + sa * {vm1, 2k+1})/2
   (sa is the sign of {vm1, 2k+1}).

   {vinf, 2r} stores the content of {v0, 2r} + {vinf, 2r}, with carry in cinf0.
   vinf0 is the low limb of vinf.

   ws is temporary space, and should have at least 2r limbs.

   Think about:

   The evaluated point a-b+c stands a good chance of having a zero carry
   limb, a+b+c would have a 1/4 chance, and 4*a+2*b+c a 1/8 chance, roughly.
   Perhaps this could be tested and stripped.  Doing so before recursing
   would be better than stripping at the start of mpn_toom3_mul_n/sqr_n,
   since then the recursion could be based on the new size.  Although in
   truth the kara vs toom3 crossover is never so exact that one limb either
   way makes a difference.

   A small value like 1 or 2 for the carry could perhaps also be handled
   with an add_n or addlsh1_n.  Would that be faster than an extra limb on a
   (recursed) multiply/square?
*/
static void
toom3_interpolate (mp_ptr c, mp_srcptr v1, mp_ptr v2, mp_ptr vm1,
		   mp_ptr vinf, mp_size_t k, mp_size_t r, int sa,
		   mp_limb_t vinf0, mp_limb_t cinf0, mp_ptr ws)
{
  mp_limb_t cy, saved;
  unsigned long twok = k + k;
  unsigned long kk1 = twok + 1;
  unsigned long twor = r + r;
  mp_ptr c1, c2, c3, c4, c5;
  mp_limb_t cout; /* final carry, should be zero at the end */

  c1 = c + k;
  c2 = c1 + k;
  c3 = c2 + k;
  c4 = c3 + k;
  c5 = c4 + k;

#define v0 (c)
  /* {c,2k} {c+2k,2k+1} {c+4k+1,2r-1} {t,2k+1} {t+2k+1,2k+1} {t+4k+2,2r}
       v0      |vm1|       hi(vinf)       v1       v2+2vm1      vinf
							      +lo(v0) */

  ASSERT_NOCARRY (mpn_divexact_by3 (v2, v2, kk1));    /* v2 <- v2 / 3 */
#ifdef HAVE_NATIVE_mpn_rsh1add_n
  mpn_rsh1add_n (v2, v2, v0, twok); /* v2 <- (lo(v2)+v0) / 2, exact */
  cy = v2[twok] & 1; /* add high limb of v2 divided by 2 */
  v2[twok] >>= 1;
  MPN_INCR_U (v2 + twok - 1, 2, cy << (GMP_NUMB_BITS - 1));
#else
  v2[twok] += mpn_add_n (v2, v2, v0, twok);
  mpn_rshift (v2, v2, kk1, 1);
#endif

  /* {c,2k} {c+2k,2k+1} {c+4k+1,2r-1} {t,2k+1} {t+2k+1,2k+1} {t+4k+2,2r}
       v0      |vm1|      hi(vinf)       v1    (3v0+2vm1+v2)    vinf
						    /6         +lo(v0) */

  /* vm1 <- t2 := (v1 + sa*vm1) / 2
     t2 = a0*b0+a0*b2+a1*b1+a2*b0+a2*b2 >= 0
     No carry comes out from {v1, kk1} +/- {vm1, kk1},
     and the division by two is exact */
  if (sa >= 0)
    {
#ifdef HAVE_NATIVE_mpn_rsh1add_n
      mpn_rsh1add_n (vm1, v1, vm1, kk1);
#else
      mpn_add_n (vm1, vm1, v1, kk1);
      mpn_rshift (vm1, vm1, kk1, 1);
#endif
    }
  else
    {
#ifdef HAVE_NATIVE_mpn_rsh1sub_n
      mpn_rsh1sub_n (vm1, v1, vm1, kk1);
#else
      mpn_sub_n (vm1, v1, vm1, kk1);
      mpn_rshift (vm1, vm1, kk1, 1);
#endif
    }

  /* {c,2k} {c+2k,2k+1} {c+4k+1,2r-1} {t,2k+1} {t+2k+1,2k+1} {t+4k+2,2r}
       v0       t2        hi(vinf)       v1         t1       vinf+lo(v0) */

  /* subtract 2*vinf to v2,
     result is t1 := a0*b0+a0*b2+a1*b1+a1*b2+a2*b0+a2*b1+a2*b2 >= 0 */
  saved = c4[0];
  c4[0] = vinf0;
#ifdef HAVE_NATIVE_mpn_sublsh1_n
  cy = mpn_sublsh1_n (v2, v2, c4, twor);
#else
  cy = mpn_lshift (ws, c4, twor, 1);
  cy += mpn_sub_n (v2, v2, ws, twor);
#endif
  MPN_DECR_U (v2 + twor, kk1 - twor, cy);
  c4[0] = saved;

  /* subtract {t2, 2k+1} in {c+3k, 2k+1} i.e. in {t2+k, 2k+1}:
     by chunks of k limbs from right to left to avoid overlap */
#define t2 (vm1)
  /* a borrow may occur in one of the 2 following __GMPN_SUB_1 calls, but since
     the final result is nonnegative, it will be compensated later on */
  __GMPN_SUB_1 (cout, c5, c5, twor - k, t2[twok]);
  cy = mpn_sub_n (c4, c4, t2 + k, k);
  __GMPN_SUB_1 (cout, c5, c5, twor - k, cy);
  cy = mpn_sub_n (c3, c3, t2, k);
  __GMPN_SUB_1 (cout, c4, c4, twor, cy);

  /* don't forget to add vinf0 in {c+4k, ...} */
  __GMPN_ADD_1 (cout, c4, c4, twor, vinf0);

  /* c  c+k c+2k c+3k c+4k+1   t  t+2k+1 t+4k+2
     v0     t2        hi(vinf) v1 t1     vinf
		 -t2                    +lo(v0)
  */

  /* c  c+k c+2k c+3k c+4k     t  t+2k+1 t+4k+2
     v0     t2        vinf     v1 t1     vinf
		 -t2                    +lo(v0)
  */

  /* subtract v0+vinf in {c+2k, ...} */
  cy = cinf0 + mpn_sub_n (c2, c2, vinf, twor);
  if (twor < twok)
    {
      __GMPN_SUB_1 (cy, c2 + twor, c2 + twor, twok - twor, cy);
      cy += mpn_sub_n (c2 + twor, c2 + twor, v0 + twor, twok - twor);
    }
  __GMPN_SUB_1 (cout, c4, c4, twor, cy); /* 2n-4k = 2r */

  /* c   c+k  c+2k  c+3k  c+4k      t   t+2k+1  t+4k+2
     v0       t2          vinf      v1  t1      vinf
	      -v0   -t2                        +lo(v0)
	      -vinf                                    */

  /* subtract t1 in {c+k, ...} */
  cy = mpn_sub_n (c1, c1, v2, kk1);
  __GMPN_SUB_1 (cout, c3 + 1, c3 + 1, twor + k - 1, cy); /* 2n-(3k+1)=k+2r-1 */

  /* c   c+k  c+2k  c+3k  c+4k      t   t+2k+1  t+4k+2
     v0       t2          vinf      v1  t1      vinf
	 -t1  -v0   -t2
	      -vinf                                    */

  /* add t1 in {c+3k, ...} */
  cy = mpn_add_n (c3, c3, v2, kk1);
  __GMPN_ADD_1 (cout, c5 + 1, c5 + 1, twor - k - 1, cy); /* 2n-(5k+1) = 2r-k-1 */

  /* c   c+k  c+2k  c+3k  c+4k      t   t+2k+1  t+4k+2
     v0       t2    t1    vinf      v1  t1      vinf
	 -t1  -v0   -t2
	      -vinf                                    */

  /* add v1 in {c+k, ...} */
  cy = mpn_add_n (c1, c1, v1, kk1);
  __GMPN_ADD_1 (cout, c3 + 1, c3 + 1, twor + k - 1, cy); /* 2n-(3k+1) = 2r+k-1 */

  /* c   c+k  c+2k  c+3k  c+4k      t   t+2k+1  t+4k+2
     v0  v1   t2    t1    vinf      v1  t1      vinf
	 -t1  -v0   -t2
	      -vinf                                    */
#undef v0
#undef t2
}
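/* Illustrative aside (not GMP source): the t1/t2 formulas in the comment
   above can be checked on a small polynomial c(x) = c0 + c1 x + ... + c4 x^4
   with v0 = c(0), v1 = c(1), vm1 = c(-1), v2 = c(2), vinf = c4 (taking
   sa = +1, i.e. vm1 >= 0); the coefficient values here are arbitrary: */
#include <stdint.h>
#include <assert.h>

int main (void)
{
  int64_t c0 = 5, c1 = 7, c2 = 11, c3 = 13, c4 = 17;
  int64_t v0   = c0;
  int64_t v1   = c0 + c1 + c2 + c3 + c4;
  int64_t vm1  = c0 - c1 + c2 - c3 + c4;
  int64_t v2   = c0 + 2*c1 + 4*c2 + 8*c3 + 16*c4;
  int64_t vinf = c4;
  /* t2 = (v1 + vm1)/2 picks out the even coefficients c0 + c2 + c4 */
  assert ((v1 + vm1) / 2 == c0 + c2 + c4);
  /* t1 = (3*v0 + 2*vm1 + v2)/6 - 2*vinf equals c0 + c2 + c3 + c4 */
  assert ((3*v0 + 2*vm1 + v2) / 6 - 2*vinf == c0 + c2 + c3 + c4);
  return 0;
}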
Example #18
void
check_decr_data (void)
{
  static const struct {
    mp_limb_t        n;
    const mp_limb_t  src[SIZE];
    const mp_limb_t  want[SIZE];
  } data[] = {
    { 1,   { 1 },   { 0   } },
    { 1,   { 123 }, { 122 } },
    { 1,   { M },   { M-1 } },
    { 2,   { 2 },   { 0   } },
    { 2,   { 123 }, { 121 } },
    { M,   { M },   { 0   } },
    { M-1, { M },   { 1   } },

    { 1,   { 0,   1   }, { M,   0   } },
    { 1,   { 0,   123 }, { M,   122 } },
    { 1,   { 0,   M   }, { M,   M-1 } },
    { 2,   { 0,   123 }, { M-1, 122 } },
    { 2,   { 1,   123 }, { M,   122 } },
    { M,   { 0,   123 }, { 1,   122 } },
    { M,   { M-1, M   }, { M,   M-1 } },

    { 1,   { 0,   0, 1   }, { M,   M, 0   } },
    { 1,   { 0,   0, 123 }, { M,   M, 122 } },
    { 1,   { 0,   0, M   }, { M,   M, M-1 } },
    { 2,   { 0,   0, 123 }, { M-1, M, 122 } },
    { 2,   { 1,   0, 123 }, { M,   M, 122 } },
    { M,   { 0,   0, 123 }, { 1,   M, 122 } },
    { M,   { M-1, 0, M   }, { M,   M, M-1 } },

    { 1,   { 0,   0, 0, 1   }, { M,   M, M, 0   } },
    { 1,   { 0,   0, 0, 123 }, { M,   M, M, 122 } },
    { 1,   { 0,   0, 0, M   }, { M,   M, M, M-1 } },
    { 2,   { 0,   0, 0, 123 }, { M-1, M, M, 122 } },
    { 2,   { 1,   0, 0, 123 }, { M,   M, M, 122 } },
    { M,   { 0,   0, 0, 123 }, { 1,   M, M, 122 } },
    { M,   { M-1, 0, 0, M   }, { M,   M, M, M-1 } },

    { 1,   { 0,   0, 0, 0, 1   }, { M,   M, M, M, 0   } },
    { 1,   { 0,   0, 0, 0, 123 }, { M,   M, M, M, 122 } },
    { 1,   { 0,   0, 0, 0, M   }, { M,   M, M, M, M-1 } },
    { 2,   { 0,   0, 0, 0, 123 }, { M-1, M, M, M, 122 } },
    { 2,   { 1,   0, 0, 0, 123 }, { M,   M, M, M, 122 } },
    { M,   { 0,   0, 0, 0, 123 }, { 1,   M, M, M, 122 } },
    { M,   { M-1, 0, 0, 0, M   }, { M,   M, M, M, M-1 } },

    { 1,   { 0,   0, 0, 0, 0, 1   }, { M,   M, M, M, M, 0   } },
    { 1,   { 0,   0, 0, 0, 0, 123 }, { M,   M, M, M, M, 122 } },
    { 1,   { 0,   0, 0, 0, 0, M   }, { M,   M, M, M, M, M-1 } },
    { 2,   { 0,   0, 0, 0, 0, 123 }, { M-1, M, M, M, M, 122 } },
    { 2,   { 1,   0, 0, 0, 0, 123 }, { M,   M, M, M, M, 122 } },
    { M,   { 0,   0, 0, 0, 0, 123 }, { 1,   M, M, M, M, 122 } },
    { M,   { M-1, 0, 0, 0, 0, M   }, { M,   M, M, M, M, M-1
#if defined (__hpux) && ! defined (__GNUC__)
    /* For explanation of this garbage, see previous function.  */
				       , 0, 0, 0, 0
#endif
    } }
  };

  mp_limb_t  got[SIZE];
  int   i;

  for (i = 0; i < numberof (data); i++)
    {
      refmpn_copyi (got, data[i].src, SIZE);
      MPN_DECR_U (got, SIZE, data[i].n);
      check_one ("check_decr_data", i,
                 data[i].src, data[i].n,
                 got, data[i].want, SIZE);

      if (data[i].n == 1)
        {
          refmpn_copyi (got, data[i].src, SIZE);
          MPN_DECR_U (got, SIZE, CNST_LIMB(1));
          check_one ("check_decr (const 1)", i,
                     data[i].src, data[i].n,
                     got, data[i].want, SIZE);
        }
    }
}
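/* Illustrative aside (not GMP source): a hedged reference model of the
   operation these vectors exercise.  MPN_DECR_U subtracts a single limb n
   from {p, size} in place and propagates the borrow upward; the test data
   never underflows the full operand.  mpn_decr_ref is a hypothetical name: */
#include <stdint.h>
#include <stddef.h>

static void mpn_decr_ref (uint64_t *p, size_t size, uint64_t n)
{
  size_t i;
  int borrow = p[0] < n;	/* did the low limb underflow? */
  p[0] -= n;
  for (i = 1; borrow && i < size; i++)
    {
      borrow = p[i] == 0;	/* subtracting the borrow underflows iff 0 */
      p[i] -= 1;		/* wraps to the all-ones limb M in that case */
    }
}
/* e.g. the { 2, { 1, 123 } } case above: 1 - 2 borrows, giving { M, 122 } */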