Example #1
void
mpz_set_f (mpz_ptr w, mpf_srcptr u)
{
  mp_ptr    wp, up;
  mp_size_t size;
  mp_exp_t  exp;

  /* abs(u)<1 truncates to zero */
  exp = EXP (u);
  if (exp <= 0)
    {
      SIZ(w) = 0;
      return;
    }

  wp = MPZ_REALLOC (w, exp);
  up = PTR(u);

  size = SIZ (u);
  SIZ(w) = (size >= 0 ? exp : -exp);
  size = ABS (size);

  if (exp > size)
    {
      /* pad with low zeros to get a total "exp" many limbs */
      mp_size_t  zeros = exp - size;
      MPN_ZERO (wp, zeros);
      wp += zeros;
    }
  else
    {
      /* exp<=size, truncate to the high "exp" many limbs */
      up += (size - exp);
      size = exp;
    }

  MPN_COPY (wp, up, size);
}
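
The routine above is the internal body of GMP's public mpz_set_f, which truncates an mpf_t toward zero. Below is a minimal usage sketch through the documented API, assuming GMP is installed and the program is linked with -lgmp:

#include <stdio.h>
#include <gmp.h>

int main (void)
{
  mpf_t f;
  mpz_t z;

  mpf_init_set_d (f, 3.75);
  mpz_init (z);

  mpz_set_f (z, f);            /* truncates toward zero: prints 3 */
  gmp_printf ("%Zd\n", z);

  mpf_set_d (f, 0.99);         /* |f| < 1, i.e. EXP(f) <= 0: prints 0 */
  mpz_set_f (z, f);
  gmp_printf ("%Zd\n", z);

  mpf_clear (f);
  mpz_clear (z);
  return 0;
}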
Example #2
void
mpfr_extract (mpz_ptr y, mpfr_srcptr p, unsigned int i)
{
  unsigned long two_i = 1UL << i;
  unsigned long two_i_2 = i ? two_i / 2 : 1;
  mp_size_t size_p = MPFR_LIMB_SIZE (p);

  /* as 0 <= |p| < 1, we don't have to care about infinities, NaN, ... */
  MPFR_ASSERTD (!MPFR_IS_SINGULAR (p));

  _mpz_realloc (y, two_i_2);
  if ((mpfr_uexp_t) size_p < two_i)
    {
      MPN_ZERO (PTR(y), two_i_2);
      if ((mpfr_uexp_t) size_p >= two_i_2)
        MPN_COPY (PTR(y) + two_i - size_p, MPFR_MANT(p), size_p - two_i_2);
    }
  else
    MPN_COPY (PTR(y), MPFR_MANT(p) + size_p - two_i, two_i_2);

  MPN_NORMALIZE (PTR(y), two_i_2);
  SIZ(y) = (MPFR_IS_NEG (p)) ? -two_i_2 : two_i_2;
}
Example #3
void tc4_addlsh1_unsigned (mp_ptr rp, mp_size_t *rn, mp_srcptr xp, mp_size_t xn)
{
  if (xn)
    {
      if (xn >= *rn)
        {
          mp_limb_t cy;
          if (xn > *rn)
            MPN_ZERO (rp + *rn, xn - *rn);
#if HAVE_NATIVE_mpn_addlsh1_n
          cy = mpn_addlsh1_n (rp, rp, xp, xn);
#else
          cy = mpn_add_n (rp, rp, xp, xn);
          cy += mpn_add_n (rp, rp, xp, xn);
#endif
          if (cy)
            {
              rp[xn] = cy;
              *rn = xn + 1;
            }
          else
            *rn = xn;
        }
      else
        {
          mp_limb_t cy;
#if HAVE_NATIVE_mpn_addlsh1_n
          cy = mpn_addlsh1_n (rp, rp, xp, xn);
#else
          cy = mpn_add_n (rp, rp, xp, xn);
          cy += mpn_add_n (rp, rp, xp, xn);
#endif
          if (cy)
            cy = mpn_add_1 (rp + xn, rp + xn, *rn - xn, cy);
          if (cy)
            {
              rp[*rn] = cy;
              (*rn)++;
            }
        }
    }
}
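
In the #else branches above, r <- r + 2*x is emulated by adding x twice with mpn_add_n, since mpn_addlsh1_n ("add left-shifted by 1") exists only as a native assembly routine on some CPUs. A self-contained sketch of that identity using only GMP's public mpn layer (the limb values are arbitrary):

#include <stdio.h>
#include <gmp.h>

/* r[0..n-1] += 2*x[0..n-1]; returns the carry (0, 1 or 2),
   mirroring the fallback branch above */
static mp_limb_t
addlsh1_fallback (mp_ptr rp, mp_srcptr xp, mp_size_t n)
{
  mp_limb_t cy;
  cy = mpn_add_n (rp, rp, xp, n);    /* r += x */
  cy += mpn_add_n (rp, rp, xp, n);   /* r += x again */
  return cy;
}

int main (void)
{
  mp_limb_t r[2] = { 7, 0 };
  mp_limb_t x[2] = { 5, 0 };
  mp_limb_t cy = addlsh1_fallback (r, x, 2);
  printf ("low limb = %lu, carry = %lu\n",   /* 17, 0 */
          (unsigned long) r[0], (unsigned long) cy);
  return 0;
}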
Example #4
int
mpfr_div (mpfr_ptr q, mpfr_srcptr u, mpfr_srcptr v, mp_rnd_t rnd_mode)
{
  mp_srcptr up, vp, bp;
  mp_size_t usize, vsize;

  mp_ptr ap, qp, rp;
  mp_size_t asize, bsize, qsize, rsize;
  mp_exp_t qexp;

  mp_size_t err, k;
  mp_limb_t tonearest;
  int inex, sh, can_round = 0, sign_quotient;
  unsigned int cc = 0, rw;

  TMP_DECL (marker);


  /**************************************************************************
   *                                                                        *
   *              This part of the code deals with special cases            *
   *                                                                        *
   **************************************************************************/

  if (MPFR_ARE_SINGULAR(u,v))
    {
      if (MPFR_IS_NAN(u) || MPFR_IS_NAN(v))
	{
	  MPFR_SET_NAN(q);
	  MPFR_RET_NAN;
	}
      sign_quotient = MPFR_MULT_SIGN( MPFR_SIGN(u) , MPFR_SIGN(v) );
      MPFR_SET_SIGN(q, sign_quotient);
      if (MPFR_IS_INF(u))
	{
	  if (MPFR_IS_INF(v))
	    {
	      MPFR_SET_NAN(q);
	      MPFR_RET_NAN;
	    }
	  else
	    {
	      MPFR_SET_INF(q);
	      MPFR_RET(0);
	    }
	}
      else if (MPFR_IS_INF(v))
	{
	  MPFR_SET_ZERO(q);
	  MPFR_RET(0);
	}
      else if (MPFR_IS_ZERO(v))
	{
	  if (MPFR_IS_ZERO(u))
	    {
	      MPFR_SET_NAN(q);
	      MPFR_RET_NAN;
	    }
	  else
	    {
	      MPFR_SET_INF(q);
	      MPFR_RET(0);
	    }
	}
      else
	{
	  MPFR_ASSERTD(MPFR_IS_ZERO(u));
	  MPFR_SET_ZERO(q);
	  MPFR_RET(0);
	}
    }
  MPFR_CLEAR_FLAGS(q);

  /**************************************************************************
   *                                                                        *
   *              End of the part concerning special values.                *
   *                                                                        *
   **************************************************************************/

  sign_quotient = MPFR_MULT_SIGN( MPFR_SIGN(u) , MPFR_SIGN(v) );
  up = MPFR_MANT(u);
  vp = MPFR_MANT(v);
  MPFR_SET_SIGN(q, sign_quotient);

  TMP_MARK (marker);
  usize = MPFR_LIMB_SIZE(u);
  vsize = MPFR_LIMB_SIZE(v);

  /**************************************************************************
   *                                                                        *
   *   First try to use only part of u, v. If this is not sufficient,       *
   *   use the full u and v, to avoid long computations eg. in the case     *
   *   u = v.                                                               *
   *                                                                        *
   **************************************************************************/

  /* The dividend is a, length asize. The divisor is b, length bsize. */

  qsize = (MPFR_PREC(q) + 3) / BITS_PER_MP_LIMB + 1;

  /* in case PREC(q)=PREC(v), then vsize=qsize with probability 1-4/b
     where b is the number of bits per limb */
  if (MPFR_LIKELY(vsize <= qsize))
    {
      bsize = vsize;
      bp = vp;
    }
  else /* qsize < vsize: take only the qsize high limbs of the divisor */
    {
      bsize = qsize;
      bp = (mp_srcptr) vp + (vsize - qsize);
    }

  /* we have {bp, bsize} * (1 + errb) = (true divisor)
     with 0 <= errb < 2^(-qsize*BITS_PER_MP_LIMB+1) */

  asize = bsize + qsize;
  ap = (mp_ptr) TMP_ALLOC (asize * BYTES_PER_MP_LIMB);
  /* if all arguments have same precision, then asize will be about 2*usize */
  if (MPFR_LIKELY(asize > usize))
    {
      /* copy u into the high limbs of {ap, asize}, and pad with zeroes */
      /* FIXME: could we copy only the qsize high limbs of the dividend? */
      MPN_COPY (ap + asize - usize, up, usize);
      MPN_ZERO (ap, asize - usize);
    }
  else /* truncate the high asize limbs of u into {ap, asize} */
    MPN_COPY (ap, up + usize - asize, asize);

  /* we have {ap, asize} = (true dividend) * (1 - erra)
     with 0 <= erra < 2^(-asize*BITS_PER_MP_LIMB).
     This {ap, asize} / {bp, bsize} =
     (true dividend) / (true divisor) * (1 - erra) (1 + errb) */

  /* Allocate limbs for quotient and remainder. */
  qp = (mp_ptr) TMP_ALLOC ((qsize + 1) * BYTES_PER_MP_LIMB);
  rp = (mp_ptr) TMP_ALLOC (bsize * BYTES_PER_MP_LIMB);
  rsize = bsize;

  mpn_tdiv_qr (qp, rp, 0, ap, asize, bp, bsize);
  sh = - (int) qp[qsize];
  /* since u and v are normalized, sh is 0 or -1 */

  /* we have {qp, qsize + 1} = {ap, asize} / {bp, bsize} (1 - errq)
     with 0 <= errq < 2^(-qsize*BITS_PER_MP_LIMB+1+sh)
     thus {qp, qsize + 1} =
     (true dividend) / (true divisor) * (1 - erra) (1 + errb) (1 - errq).
     
     In fact, since the truncated dividend and {rp, bsize} do not overlap,
     we have: {qp, qsize + 1} =
     (true dividend) / (true divisor) * (1 - erra') (1 + errb)
     where 0 <= erra' < 2^(-qsize*BITS_PER_MP_LIMB+sh) */

  /* Estimate number of correct bits. */

  err = qsize * BITS_PER_MP_LIMB;

  /* We want to check whether rounding is possible, but without normalizing,
     because we might have to divide again if rounding is impossible or if
     the result might be exact. We do, however, have to mimic normalization. */

  /*
     To detect as soon as possible whether the result is inexact, so as
     to avoid doing the division completely, we perform the following
     check:

     - if rnd_mode != GMP_RNDN, and the result is exact, we are unable
     to round simultaneously to zero and to infinity;

     - if rnd_mode == GMP_RNDN, and if we can round to zero with one extra
     bit of precision, we can decide rounding. Hence in that case, check
     as in the case of GMP_RNDN, with one extra bit. Note that in the case
     of close-to-even rounding we shall do the division completely, but
     this is necessary anyway: we need to know whether this is really
     even rounding or not.
  */

  if (MPFR_UNLIKELY(asize < usize || bsize < vsize))
    {
      {
	mp_rnd_t  rnd_mode1, rnd_mode2;
	mp_exp_t  tmp_exp;
	mp_prec_t tmp_prec;

        if (bsize < vsize)
          err -= 2; /* divisor is truncated */
#if 0 /* commented this out since the truncation of the dividend is already
         taken into account in {rp, bsize}, which does not overlap with the
         neglected part of the dividend */
        else if (asize < usize)
          err --;   /* dividend is truncated */
#endif

	if (MPFR_LIKELY(rnd_mode == GMP_RNDN))
	  {
	    rnd_mode1 = GMP_RNDZ;
	    rnd_mode2 = MPFR_IS_POS_SIGN(sign_quotient) ? GMP_RNDU : GMP_RNDD;
	    sh++;
	  }
	else
	  {
	    rnd_mode1 = rnd_mode;
	    switch (rnd_mode)
	      {
	      case GMP_RNDU:
		rnd_mode2 = GMP_RNDD; break;
	      case GMP_RNDD:
		rnd_mode2 = GMP_RNDU; break;
	      default:
		rnd_mode2 = MPFR_IS_POS_SIGN(sign_quotient) ?
		  GMP_RNDU : GMP_RNDD;
		break;
	      }
	  }

	tmp_exp  = err + sh + BITS_PER_MP_LIMB;
	tmp_prec = MPFR_PREC(q) + sh + BITS_PER_MP_LIMB;
	
	can_round =
	  mpfr_can_round_raw (qp, qsize + 1, sign_quotient, tmp_exp,
                              GMP_RNDN, rnd_mode1, tmp_prec)
	  & mpfr_can_round_raw (qp, qsize + 1, sign_quotient, tmp_exp,
                                GMP_RNDN, rnd_mode2, tmp_prec);

        /* restore original value of sh, i.e. sh = - qp[qsize] */
	sh -= (rnd_mode == GMP_RNDN);
      }
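
(The listing above is truncated before the end of the function.) mpfr_div itself is MPFR's public correctly-rounded division. A minimal usage sketch, assuming MPFR is linked with -lmpfr -lgmp and using the newer MPFR_RNDN spelling of the rounding modes:

#include <stdio.h>
#include <mpfr.h>

int main (void)
{
  mpfr_t q, u, v;
  int inex;

  mpfr_inits2 (53, q, u, v, (mpfr_ptr) 0);
  mpfr_set_ui (u, 1, MPFR_RNDN);
  mpfr_set_ui (v, 3, MPFR_RNDN);

  inex = mpfr_div (q, u, v, MPFR_RNDN);   /* 1/3, correctly rounded */
  mpfr_printf ("q = %.20Rg, ternary = %d\n", q, inex);

  mpfr_clears (q, u, v, (mpfr_ptr) 0);
  return 0;
}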
Example #5
/* Since MPFR 3.0, return the usual inexact value.
   The erange flag is set if an error occurred in the conversion
   (y is NaN, +Inf, or -Inf, which have no equivalent in mpf).
*/
int
mpfr_get_f (mpf_ptr x, mpfr_srcptr y, mpfr_rnd_t rnd_mode)
{
  int inex;
  mp_size_t sx, sy;
  mpfr_prec_t precx, precy;
  mp_limb_t *xp;
  int sh;

  if (MPFR_UNLIKELY(MPFR_IS_SINGULAR(y)))
    {
      if (MPFR_IS_ZERO(y))
        {
          mpf_set_ui (x, 0);
          return 0;
        }
      else if (MPFR_IS_NAN (y))
        {
          MPFR_SET_ERANGEFLAG ();
          return 0;
        }
      else /* y is plus infinity (resp. minus infinity), set x to the maximum
              value (resp. the minimum value) in precision PREC(x) */
        {
          int i;
          mp_limb_t *xp;

          MPFR_SET_ERANGEFLAG ();

          /* To this day, [mp_exp_t] and mp_size_t are #defined as the same
             type */
          EXP (x) = MP_SIZE_T_MAX;

          sx = PREC (x);
          SIZ (x) = sx;
          xp = PTR (x);
          for (i = 0; i < sx; i++)
            xp[i] = MPFR_LIMB_MAX;

          if (MPFR_IS_POS (y))
            return -1;
          else
            {
              mpf_neg (x, x);
              return +1;
            }
        }
    }

  sx = PREC(x); /* number of limbs of the mantissa of x */

  precy = MPFR_PREC(y);
  precx = (mpfr_prec_t) sx * GMP_NUMB_BITS;
  sy = MPFR_LIMB_SIZE (y);

  xp = PTR (x);

  /* since mpf numbers are represented in base 2^GMP_NUMB_BITS,
     we lose -EXP(y) % GMP_NUMB_BITS bits in the most significant limb */
  sh = MPFR_GET_EXP(y) % GMP_NUMB_BITS;
  sh = sh <= 0 ? - sh : GMP_NUMB_BITS - sh;
  MPFR_ASSERTD (sh >= 0);
  if (precy + sh <= precx) /* we can copy directly */
    {
      mp_size_t ds;

      MPFR_ASSERTN (sx >= sy);
      ds = sx - sy;

      if (sh != 0)
        {
          mp_limb_t out;
          out = mpn_rshift (xp + ds, MPFR_MANT(y), sy, sh);
          MPFR_ASSERTN (ds > 0 || out == 0);
          if (ds > 0)
            xp[--ds] = out;
        }
      else
        MPN_COPY (xp + ds, MPFR_MANT (y), sy);
      if (ds > 0)
        MPN_ZERO (xp, ds);
      EXP(x) = (MPFR_GET_EXP(y) + sh) / GMP_NUMB_BITS;
      inex = 0;
    }
  else /* we have to round to precx - sh bits */
    {
      mpfr_t z;
      mp_size_t sz;

      /* Recall that precx = (mpfr_prec_t) sx * GMP_NUMB_BITS, thus removing
         sh bits (sh < GMP_NUMB_BITS) won't reduce the number of limbs. */
      mpfr_init2 (z, precx - sh);
      sz = MPFR_LIMB_SIZE (z);
      MPFR_ASSERTN (sx == sz);

      inex = mpfr_set (z, y, rnd_mode);
      /* warning, sh may change due to rounding, but then z is a power of two,
         thus we can safely ignore its last bit which is 0 */
      sh = MPFR_GET_EXP(z) % GMP_NUMB_BITS;
      sh = sh <= 0 ? - sh : GMP_NUMB_BITS - sh;
      MPFR_ASSERTD (sh >= 0);
      if (sh != 0)
        {
          mp_limb_t out;
          out = mpn_rshift (xp, MPFR_MANT(z), sz, sh);
          /* If sh hasn't changed, it is the number of the non-significant
             bits in the lowest limb of z. Therefore out == 0. */
          MPFR_ASSERTD (out == 0);  (void) out; /* avoid a warning */
        }
      else
        MPN_COPY (xp, MPFR_MANT(z), sz);
      EXP(x) = (MPFR_GET_EXP(z) + sh) / GMP_NUMB_BITS;
      mpfr_clear (z);
    }

  /* set size and sign */
  SIZ(x) = (MPFR_FROM_SIGN_TO_INT(MPFR_SIGN(y)) < 0) ? -sx : sx;

  return inex;
}
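
A minimal sketch of calling mpfr_get_f through the public API, assuming both GMP and MPFR are available; the ternary return value is 0 exactly when the conversion is exact:

#include <stdio.h>
#include <gmp.h>
#include <mpfr.h>

int main (void)
{
  mpfr_t y;
  mpf_t x;
  int inex;

  mpfr_init2 (y, 100);
  mpf_init2 (x, 64);

  mpfr_set_str (y, "3.14159265358979323846", 10, MPFR_RNDN);
  inex = mpfr_get_f (x, y, MPFR_RNDN);   /* round y to x's precision */
  gmp_printf ("x = %.20Ff, ternary = %d\n", x, inex);

  mpf_clear (x);
  mpfr_clear (y);
  return 0;
}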
Example #6
void
mpn_toom3_sqr_n (mp_ptr c, mp_srcptr a, mp_size_t n, mp_ptr t)
{
  mp_size_t k, k1, kk1, r, twok, twor;
  mp_limb_t cy, saved, vinf0, cinf0;
  mp_ptr trec;
  int sa;
  mp_ptr c1, c2, c3, c4;

  ASSERT(GMP_NUMB_BITS >= 6);
  ASSERT(n >= 17); /* so that r <> 0 and 5k+3 <= 2n */

  /* the algorithm is the same as mpn_mul_n_tc3, with b=a */

  k = (n + 2) / 3; /* ceil(n/3) */
  twok = 2 * k;
  k1 = k + 1;
  kk1 = k + k1;
  r = n - twok;   /* last chunk */
  twor = 2 * r;

  c1 = c + k;
  c2 = c1 + k;
  c3 = c2 + k;
  c4 = c3 + k;

  trec = t + 4 * k + 3; /* trec = v2 + (2k+2) */

  cy = mpn_add_n (c, a, a + twok, r);
  if (r < k)
    __GMPN_ADD_1 (cy, c + r, a + r, k - r, cy);
  c3[2] = (c1[0] = cy) + mpn_add_n (c2 + 2, c, a + k, k);

#define v2 (t+2*k+1)
#define vinf (t+4*k+2)

  TOOM3_SQR_REC (t, c2 + 2, k1, trec);

  sa = (c[k] != 0) ? 1 : mpn_cmp (c, a + k, k);
  c[k] = (sa >= 0) ? c[k] - mpn_sub_n (c, c, a + k, k)
    : mpn_sub_n (c, a + k, c, k);

  TOOM3_SQR_REC (c2, c, k1, trec);

#ifdef HAVE_NATIVE_mpn_addlsh1_n
  c1[0] = mpn_addlsh1_n (c, a + k, a + twok, r);
  if (r < k)
    __GMPN_ADD_1 (c1[0], c + r, a + k + r, k - r, c1[0]);
  c1[0] = 2 * c1[0] + mpn_addlsh1_n (c, a, c, k);
#else
  c[r] = mpn_lshift (c, a + twok, r, 1);
  if (r < k)
    MPN_ZERO(c + r + 1, k - r);
  c1[0] += mpn_add_n (c, c, a + k, k);
  mpn_lshift (c, c, k1, 1);
  c1[0] += mpn_add_n (c, c, a, k);
#endif

  TOOM3_SQR_REC (v2, c, k1, trec);

  TOOM3_SQR_REC (c, a, k, trec);

#ifdef HAVE_NATIVE_mpn_addlsh1_n
  mpn_addlsh1_n (v2, v2, c2, kk1);
#else
  mpn_lshift (t + 4 * k + 2, c2, kk1, 1);
  mpn_add_n (v2, v2, t + 4 * k + 2, kk1);
#endif

  saved = c4[0];
  TOOM3_SQR_REC (c4, a + twok, r, trec);
  cinf0 = mpn_add_n (vinf, c4, c, twor);
  vinf0 = c4[0];
  c4[0] = saved;

  toom3_interpolate (c, t, v2, c2, vinf, k, r, 1, vinf0, cinf0, vinf + twor);

#undef v2
#undef vinf
}
Example #7

void
mpq_set_d (mpq_ptr dest, double d)
{
  int negative;
  mp_exp_t exp;
  mp_limb_t tp[LIMBS_PER_DOUBLE];
  mp_ptr np, dp;
  mp_size_t nn, dn;
  int c;

  negative = d < 0;
  d = ABS (d);

  exp = __gmp_extract_double (tp, d);

  /* There are two main versions of the conversion.  The `then' arm handles
     things that have a fractional part, while the `else' part handles
     only integers.  */
#if BITS_PER_MP_LIMB == 32
  if (exp <= 1 || (exp == 2 && tp[0] != 0))
#else
  if (exp <= 1)
#endif
    {
      if (d == 0.0)
	{
	  SIZ(&(dest->_mp_num)) = 0;
	  SIZ(&(dest->_mp_den)) = 1;
	  PTR(&(dest->_mp_den))[0] = 1;
	  return;
	}

      dn = -exp;
      if (dest->_mp_num._mp_alloc < 3)
	_mpz_realloc (&(dest->_mp_num), 3);
      np = PTR(&(dest->_mp_num));
#if BITS_PER_MP_LIMB == 32
      if ((tp[0] | tp[1]) == 0)
	np[0] = tp[2], nn = 1;
      else if (tp[0] == 0)
	np[1] = tp[2], np[0] = tp[1], nn = 2;
      else
	np[2] = tp[2], np[1] = tp[1], np[0] = tp[0], nn = 3;
#else
      if (tp[0] == 0)
	np[0] = tp[1], nn = 1;
      else
	np[1] = tp[1], np[0] = tp[0], nn = 2;
#endif
      dn += nn + 1;
      if (dest->_mp_den._mp_alloc < dn)
	_mpz_realloc (&(dest->_mp_den), dn);
      dp = PTR(&(dest->_mp_den));
      MPN_ZERO (dp, dn - 1);
      dp[dn - 1] = 1;
      count_trailing_zeros (c, np[0] | dp[0]);
      if (c != 0)
	{
	  mpn_rshift (np, np, nn, c);
	  nn -= np[nn - 1] == 0;
	  mpn_rshift (dp, dp, dn, c);
	  dn -= dp[dn - 1] == 0;
	}
      SIZ(&(dest->_mp_den)) = dn;
      SIZ(&(dest->_mp_num)) = negative ? -nn : nn;
    }
  else
    {
      nn = exp;
      if (dest->_mp_num._mp_alloc < nn)
	_mpz_realloc (&(dest->_mp_num), nn);
      np = PTR(&(dest->_mp_num));
#if BITS_PER_MP_LIMB == 32
      switch (nn)
        {
	default:
          MPN_ZERO (np, nn - 3);
          np += nn - 3;
	  /* fall through */
	case 3:
	  np[2] = tp[2], np[1] = tp[1], np[0] = tp[0];
	  break;
	case 2:
	  np[1] = tp[2], np[0] = tp[1];
	  break;
	}
#else
      switch (nn)
        {
	default:
	  MPN_ZERO (np, nn - 2);
	  np += nn - 2;
	  /* fall through */
	case 2:
	  np[1] = tp[1], np[0] = tp[0];
	  break;
	}
#endif
      dp = PTR(&(dest->_mp_den));
      dp[0] = 1;
      SIZ(&(dest->_mp_den)) = 1;
      SIZ(&(dest->_mp_num)) = negative ? -nn : nn;
    }
}
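
mpq_set_d converts a double to a rational exactly: every finite double is a dyadic fraction, and the trailing-zero stripping above leaves the result canonical. A minimal sketch (values chosen arbitrarily):

#include <stdio.h>
#include <gmp.h>

int main (void)
{
  mpq_t q;
  mpq_init (q);

  mpq_set_d (q, -6.25);       /* exactly -25/4 */
  gmp_printf ("%Qd\n", q);

  mpq_set_d (q, 0.1);         /* the exact dyadic value of the double
                                 0.1 on IEEE 754 hardware */
  gmp_printf ("%Qd\n", q);

  mpq_clear (q);
  return 0;
}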
Example #8
/* FIXME:
    x Take scratch parameter, and figure out scratch need.

    x Use some fallback for small M->n?
*/
static mp_size_t
hgcd_matrix_apply (const struct hgcd_matrix *M,
		   mp_ptr ap, mp_ptr bp,
		   mp_size_t n)
{
  mp_size_t an, bn, un, vn, nn;
  mp_size_t mn[2][2];
  mp_size_t modn;
  mp_ptr tp, sp, scratch;
  mp_limb_t cy;
  unsigned i, j;

  TMP_DECL;

  ASSERT ( (ap[n-1] | bp[n-1]) > 0);

  an = n;
  MPN_NORMALIZE (ap, an);
  bn = n;
  MPN_NORMALIZE (bp, bn);

  for (i = 0; i < 2; i++)
    for (j = 0; j < 2; j++)
      {
	mp_size_t k;
	k = M->n;
	MPN_NORMALIZE (M->p[i][j], k);
	mn[i][j] = k;
      }

  ASSERT (mn[0][0] > 0);
  ASSERT (mn[1][1] > 0);
  ASSERT ( (mn[0][1] | mn[1][0]) > 0);

  TMP_MARK;

  if (mn[0][1] == 0)
    {
      /* A unchanged, M = (1, 0; q, 1) */
      ASSERT (mn[0][0] == 1);
      ASSERT (M->p[0][0][0] == 1);
      ASSERT (mn[1][1] == 1);
      ASSERT (M->p[1][1][0] == 1);

      /* Put B <-- B - q A */
      nn = submul (bp, bn, ap, an, M->p[1][0], mn[1][0]);
    }
  else if (mn[1][0] == 0)
    {
      /* B unchanged, M = (1, q; 0, 1) */
      ASSERT (mn[0][0] == 1);
      ASSERT (M->p[0][0][0] == 1);
      ASSERT (mn[1][1] == 1);
      ASSERT (M->p[1][1][0] == 1);

      /* Put A  <-- A - q * B */
      nn = submul (ap, an, bp, bn, M->p[0][1], mn[0][1]);
    }
  else
    {
      /* A = m00 a + m01 b  ==> a <= A / m00, b <= A / m01.
	 B = m10 a + m11 b  ==> a <= B / m10, b <= B / m11. */
      un = MIN (an - mn[0][0], bn - mn[1][0]) + 1;
      vn = MIN (an - mn[0][1], bn - mn[1][1]) + 1;

      nn = MAX (un, vn);
      /* In the range of interest, mulmod_bnm1 should always beat mullo. */
      modn = mpn_mulmod_bnm1_next_size (nn + 1);

      scratch = TMP_ALLOC_LIMBS (mpn_mulmod_bnm1_itch (modn, modn, M->n));
      tp = TMP_ALLOC_LIMBS (modn);
      sp = TMP_ALLOC_LIMBS (modn);

      ASSERT (n <= 2*modn);

      if (n > modn)
	{
	  cy = mpn_add (ap, ap, modn, ap + modn, n - modn);
	  MPN_INCR_U (ap, modn, cy);

	  cy = mpn_add (bp, bp, modn, bp + modn, n - modn);
	  MPN_INCR_U (bp, modn, cy);

	  n = modn;
	}

      mpn_mulmod_bnm1 (tp, modn, ap, n, M->p[1][1], mn[1][1], scratch);
      mpn_mulmod_bnm1 (sp, modn, bp, n, M->p[0][1], mn[0][1], scratch);

      /* FIXME: Handle the small n case in some better way. */
      if (n + mn[1][1] < modn)
	MPN_ZERO (tp + n + mn[1][1], modn - n - mn[1][1]);
      if (n + mn[0][1] < modn)
	MPN_ZERO (sp + n + mn[0][1], modn - n - mn[0][1]);

      cy = mpn_sub_n (tp, tp, sp, modn);
      MPN_DECR_U (tp, modn, cy);

      ASSERT (mpn_zero_p (tp + nn, modn - nn));

      mpn_mulmod_bnm1 (sp, modn, ap, n, M->p[1][0], mn[1][0], scratch);
      MPN_COPY (ap, tp, nn);
      mpn_mulmod_bnm1 (tp, modn, bp, n, M->p[0][0], mn[0][0], scratch);

      if (n + mn[1][0] < modn)
	MPN_ZERO (sp + n + mn[1][0], modn - n - mn[1][0]);
      if (n + mn[0][0] < modn)
	MPN_ZERO (tp + n + mn[0][0], modn - n - mn[0][0]);

      cy = mpn_sub_n (tp, tp, sp, modn);
      MPN_DECR_U (tp, modn, cy);

      ASSERT (mpn_zero_p (tp + nn, modn - nn));
      MPN_COPY (bp, tp, nn);

      while ( (ap[nn-1] | bp[nn-1]) == 0)
	{
	  nn--;
	  ASSERT (nn > 0);
	}
    }
  TMP_FREE;

  return nn;
}
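
hgcd_matrix_apply is internal plumbing for GMP's subquadratic GCD; user code reaches this machinery through mpz_gcd. A minimal sketch at the public level (the operands are arbitrary):

#include <stdio.h>
#include <gmp.h>

int main (void)
{
  mpz_t g, u, v;
  mpz_inits (g, u, v, NULL);

  mpz_set_str (u, "123456789012345678901234567890", 10);
  mpz_set_str (v, "987654321098765432109876543210", 10);

  mpz_gcd (g, u, v);          /* result is always non-negative */
  gmp_printf ("gcd = %Zd\n", g);

  mpz_clears (g, u, v, NULL);
  return 0;
}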
Example #9
int
mpfr_sqrt (mpfr_ptr r, mpfr_srcptr u, mpfr_rnd_t rnd_mode)
{
  mp_size_t rsize; /* number of limbs of r (plus 1 if exact limb multiple) */
  mp_size_t rrsize;
  mp_size_t usize; /* number of limbs of u */
  mp_size_t tsize; /* number of limbs of the sqrtrem remainder */
  mp_size_t k;
  mp_size_t l;
  mpfr_limb_ptr rp, rp0;
  mpfr_limb_ptr up;
  mpfr_limb_ptr sp;
  mp_limb_t sticky0; /* truncated part of input */
  mp_limb_t sticky1; /* truncated part of rp[0] */
  mp_limb_t sticky;
  int odd_exp;
  int sh; /* number of extra bits in rp[0] */
  int inexact; /* return ternary flag */
  mpfr_exp_t expr;
  MPFR_TMP_DECL(marker);

  MPFR_LOG_FUNC
    (("x[%Pu]=%.*Rg rnd=%d", mpfr_get_prec (u), mpfr_log_prec, u, rnd_mode),
     ("y[%Pu]=%.*Rg inexact=%d",
      mpfr_get_prec (r), mpfr_log_prec, r, inexact));

  if (MPFR_UNLIKELY(MPFR_IS_SINGULAR(u)))
    {
      if (MPFR_IS_NAN(u))
        {
          MPFR_SET_NAN(r);
          MPFR_RET_NAN;
        }
      else if (MPFR_IS_ZERO(u))
        {
          /* 0+ or 0- */
          MPFR_SET_SAME_SIGN(r, u);
          MPFR_SET_ZERO(r);
          MPFR_RET(0); /* zero is exact */
        }
      else
        {
          MPFR_ASSERTD(MPFR_IS_INF(u));
          /* sqrt(-Inf) = NAN */
          if (MPFR_IS_NEG(u))
            {
              MPFR_SET_NAN(r);
              MPFR_RET_NAN;
            }
          MPFR_SET_POS(r);
          MPFR_SET_INF(r);
          MPFR_RET(0);
        }
    }
  if (MPFR_UNLIKELY(MPFR_IS_NEG(u)))
    {
      MPFR_SET_NAN(r);
      MPFR_RET_NAN;
    }
  MPFR_SET_POS(r);

  MPFR_TMP_MARK (marker);
  MPFR_UNSIGNED_MINUS_MODULO(sh,MPFR_PREC(r));
  if (sh == 0 && rnd_mode == MPFR_RNDN)
    sh = GMP_NUMB_BITS; /* ugly case */
  rsize = MPFR_LIMB_SIZE(r) + (sh == GMP_NUMB_BITS);
  /* rsize is the number of limbs of r + 1 if exact limb multiple and rounding
     to nearest, this is the number of wanted limbs for the square root */
  rrsize = rsize + rsize;
  usize = MPFR_LIMB_SIZE(u); /* number of limbs of u */
  rp0 = MPFR_MANT(r);
  rp = (sh < GMP_NUMB_BITS) ? rp0 : MPFR_TMP_LIMBS_ALLOC (rsize);
  up = MPFR_MANT(u);
  sticky0 = MPFR_LIMB_ZERO; /* truncated part of input */
  sticky1 = MPFR_LIMB_ZERO; /* truncated part of rp[0] */
  odd_exp = (unsigned int) MPFR_GET_EXP (u) & 1;
  inexact = -1; /* return ternary flag */

  sp = MPFR_TMP_LIMBS_ALLOC (rrsize);

  /* copy the most significant limbs of u to {sp, rrsize} */
  if (MPFR_LIKELY(usize <= rrsize)) /* in case r and u have the same precision,
                                       we have indeed rrsize = 2 * usize */
    {
      k = rrsize - usize;
      if (MPFR_LIKELY(k))
        MPN_ZERO (sp, k);
      if (odd_exp)
        {
          if (MPFR_LIKELY(k))
            sp[k - 1] = mpn_rshift (sp + k, up, usize, 1);
          else
            sticky0 = mpn_rshift (sp, up, usize, 1);
        }
      else
        MPN_COPY (sp + rrsize - usize, up, usize);
    }
  else /* usize > rrsize: truncate the input */
    {
      k = usize - rrsize;
      if (odd_exp)
        sticky0 = mpn_rshift (sp, up + k, rrsize, 1);
      else
        MPN_COPY (sp, up + k, rrsize);
      l = k;
      while (sticky0 == MPFR_LIMB_ZERO && l != 0)
        sticky0 = up[--l];
    }

  /* sticky0 is non-zero iff the truncated part of the input is non-zero */

  /* mpn_rootrem with NULL 2nd argument is faster than mpn_sqrtrem, thus use
     it if available and if the user asked to use GMP internal functions */
#if defined(WANT_GMP_INTERNALS) && defined(HAVE___GMPN_ROOTREM)
  tsize = __gmpn_rootrem (rp, NULL, sp, rrsize, 2);
#else
  tsize = mpn_sqrtrem (rp, NULL, sp, rrsize);
#endif

  /* a return value of zero in mpn_sqrtrem indicates a perfect square */
  sticky = sticky0 || tsize != 0;

  /* truncate low bits of rp[0] */
  sticky1 = rp[0] & ((sh < GMP_NUMB_BITS) ? MPFR_LIMB_MASK(sh)
                     : ~MPFR_LIMB_ZERO);
  rp[0] -= sticky1;

  sticky = sticky || sticky1;

  expr = (MPFR_GET_EXP(u) + odd_exp) / 2;  /* exact */

  if (rnd_mode == MPFR_RNDZ || rnd_mode == MPFR_RNDD || sticky == MPFR_LIMB_ZERO)
    {
      inexact = (sticky == MPFR_LIMB_ZERO) ? 0 : -1;
      goto truncate;
    }
  else if (rnd_mode == MPFR_RNDN)
    {
      /* if sh < GMP_NUMB_BITS, the round bit is bit (sh-1) of sticky1,
         and the sticky bit is formed by the low sh-1 bits from sticky1,
         together with the sqrtrem remainder and sticky0. */
      if (sh < GMP_NUMB_BITS)
        {
          if (sticky1 & (MPFR_LIMB_ONE << (sh - 1)))
            { /* round bit is set */
              if (sticky1 == (MPFR_LIMB_ONE << (sh - 1)) && tsize == 0
                  && sticky0 == 0)
                goto even_rule;
              else
                goto add_one_ulp;
            }
          else /* round bit is zero */
            goto truncate; /* with the default inexact=-1 */
        }
      else /* sh = GMP_NUMB_BITS: the round bit is the most significant bit
              of rp[0], and the remaining GMP_NUMB_BITS-1 bits contribute to
              the sticky bit */
        {
          if (sticky1 & MPFR_LIMB_HIGHBIT)
            { /* round bit is set */
              if (sticky1 == MPFR_LIMB_HIGHBIT && tsize == 0 && sticky0 == 0)
                goto even_rule;
              else
                goto add_one_ulp;
            }
          else /* round bit is zero */
            goto truncate; /* with the default inexact=-1 */
        }
    }
  else /* rnd_mode=GMP_RNDU, necessarily sticky <> 0, thus add 1 ulp */
    goto add_one_ulp;

 even_rule: /* has to set inexact */
  if (sh < GMP_NUMB_BITS)
    inexact = (rp[0] & (MPFR_LIMB_ONE << sh)) ? 1 : -1;
  else
    inexact = (rp[1] & MPFR_LIMB_ONE) ? 1 : -1;
  if (inexact == -1)
    goto truncate;
  /* else go through add_one_ulp */

 add_one_ulp:
  inexact = 1; /* always here */
  if (sh == GMP_NUMB_BITS)
    {
      rp ++;
      rsize --;
      sh = 0;
    }
  if (mpn_add_1 (rp0, rp, rsize, MPFR_LIMB_ONE << sh))
    {
      expr ++;
      rp[rsize - 1] = MPFR_LIMB_HIGHBIT;
    }
  goto end;

 truncate: /* inexact = 0 or -1 */
  if (sh == GMP_NUMB_BITS)
    MPN_COPY (rp0, rp + 1, rsize - 1);

 end:
  MPFR_ASSERTN (expr >= MPFR_EMIN_MIN && expr <= MPFR_EMAX_MAX);
  MPFR_EXP (r) = expr;
  MPFR_TMP_FREE(marker);

  return mpfr_check_range (r, inexact, rnd_mode);
}
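
A minimal sketch of mpfr_sqrt at the public level; the ternary return value distinguishes exact results (perfect squares) from rounded ones:

#include <stdio.h>
#include <mpfr.h>

int main (void)
{
  mpfr_t r, u;
  int inex;

  mpfr_inits2 (53, r, u, (mpfr_ptr) 0);

  mpfr_set_ui (u, 2, MPFR_RNDN);
  inex = mpfr_sqrt (r, u, MPFR_RNDN);   /* sqrt(2), inexact */
  mpfr_printf ("%.17Rg (ternary %d)\n", r, inex);

  mpfr_set_ui (u, 9, MPFR_RNDN);
  inex = mpfr_sqrt (r, u, MPFR_RNDN);   /* perfect square: ternary 0 */
  mpfr_printf ("%Rg (ternary %d)\n", r, inex);

  mpfr_clears (r, u, (mpfr_ptr) 0);
  return 0;
}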
Example #10
/* rp[n-1..0] = bp[bn-1..0] ^ ep[en-1..0] mod mp[n-1..0]
   Requires that mp[n-1..0] is odd.  FIXME: is this true?
   Requires that ep[en-1..0] is > 1.
   Uses scratch space at tp of 3n+1 limbs.  */
void
mpn_powm_sec (mp_ptr rp, mp_srcptr bp, mp_size_t bn,
	      mp_srcptr ep, mp_size_t en,
	      mp_srcptr mp, mp_size_t n, mp_ptr tp)
{
  mp_limb_t minv;
  int cnt;
  mp_bitcnt_t ebi;
  int windowsize, this_windowsize;
  mp_limb_t expbits;
  mp_ptr pp, this_pp;
  long i;
  int cnd;

  ASSERT (en > 1 || (en == 1 && ep[0] > 0));
  ASSERT (n >= 1 && ((mp[0] & 1) != 0));

  count_leading_zeros (cnt, ep[en - 1]);
  ebi = (mp_bitcnt_t) en * GMP_LIMB_BITS - cnt;

  windowsize = win_size (ebi);

  binvert_limb (minv, mp[0]);
  minv = -minv;

  pp = tp + 4 * n;

  this_pp = pp;
  this_pp[n] = 1;
  redcify (this_pp, this_pp + n, 1, mp, n, tp + 6 * n);
  this_pp += n;
  redcify (this_pp, bp, bn, mp, n, tp + 6 * n);

  /* Precompute powers of b and put them in the temporary area at pp.  */
  for (i = (1 << windowsize) - 2; i > 0; i--)
    {
      mpn_mul_basecase (tp, this_pp, n, pp + n, n);
      this_pp += n;
      mpn_redc_1_sec (this_pp, tp, mp, n, minv);
    }

  expbits = getbits (ep, ebi, windowsize);
  if (ebi < windowsize)
    ebi = 0;
  else
    ebi -= windowsize;

#if WANT_CACHE_SECURITY
  mpn_tabselect (rp, pp, n, 1 << windowsize, expbits);
#else
  MPN_COPY (rp, pp + n * expbits, n);
#endif

  while (ebi != 0)
    {
      expbits = getbits (ep, ebi, windowsize);
      this_windowsize = windowsize;
      if (ebi < windowsize)
	{
	  this_windowsize -= windowsize - ebi;
	  ebi = 0;
	}
      else
	ebi -= windowsize;

      do
	{
	  mpn_local_sqr (tp, rp, n, tp + 2 * n);
	  mpn_redc_1_sec (rp, tp, mp, n, minv);
	  this_windowsize--;
	}
      while (this_windowsize != 0);

#if WANT_CACHE_SECURITY
      mpn_tabselect (tp + 2*n, pp, n, 1 << windowsize, expbits);
      mpn_mul_basecase (tp, rp, n, tp + 2*n, n);
#else
      mpn_mul_basecase (tp, rp, n, pp + n * expbits, n);
#endif
      mpn_redc_1_sec (rp, tp, mp, n, minv);
    }

  MPN_COPY (tp, rp, n);
  MPN_ZERO (tp + n, n);
  mpn_redc_1_sec (rp, tp, mp, n, minv);
  cnd = mpn_sub_n (tp, rp, mp, n);	/* we need just retval */
  mpn_subcnd_n (rp, rp, mp, n, !cnd);
}
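
mpn_powm_sec is not a public entry point; since GMP 5 the same side-channel-hardened exponentiation is reachable through mpz_powm_sec, which likewise requires an exponent > 0 and an odd modulus. A minimal sketch (561 = 3*11*17 is a Carmichael number, so the result here is 1):

#include <stdio.h>
#include <gmp.h>

int main (void)
{
  mpz_t r, b, e, m;
  mpz_inits (r, b, e, m, NULL);

  mpz_set_ui (b, 7);
  mpz_set_ui (e, 560);         /* must be > 0 */
  mpz_set_ui (m, 561);         /* must be odd */

  mpz_powm_sec (r, b, e, m);   /* 7^560 mod 561 = 1 */
  gmp_printf ("%Zd\n", r);

  mpz_clears (r, b, e, m, NULL);
  return 0;
}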
Example #11
void
mpq_set_f (mpq_ptr q, mpf_srcptr f)
{
  mp_size_t  fexp = EXP(f);
  mp_ptr     fptr = PTR(f);
  mp_size_t  fsize = SIZ(f);
  mp_size_t  abs_fsize = ABS(fsize);
  mp_limb_t  flow;

  if (fsize == 0)
    {
      /* set q=0 */
      SIZ(NUM(q)) = 0;
      SIZ(DEN(q)) = 1;
      PTR(DEN(q))[0] = 1;
      return;
    }

  /* strip low zero limbs from f */
  flow = *fptr;
  MPN_STRIP_LOW_ZEROS_NOT_ZERO (fptr, abs_fsize, flow);

  if (fexp >= abs_fsize)
    {
      /* radix point is to the right of the limbs, no denominator */
      mp_ptr  num_ptr;

      num_ptr = MPZ_NEWALLOC (mpq_numref (q), fexp);
      MPN_ZERO (num_ptr, fexp - abs_fsize);
      MPN_COPY (num_ptr + fexp - abs_fsize, fptr, abs_fsize);

      SIZ(NUM(q)) = fsize >= 0 ? fexp : -fexp;
      SIZ(DEN(q)) = 1;
      PTR(DEN(q))[0] = 1;
    }
  else
    {
      /* radix point is within or to the left of the limbs, use denominator */
      mp_ptr     num_ptr, den_ptr;
      mp_size_t  den_size;

      den_size = abs_fsize - fexp;
      num_ptr = MPZ_NEWALLOC (mpq_numref (q), abs_fsize);
      den_ptr = MPZ_NEWALLOC (mpq_denref (q), den_size+1);

      if (flow & 1)
        {
          /* no powers of two to strip from numerator */

          MPN_COPY (num_ptr, fptr, abs_fsize);
          MPN_ZERO (den_ptr, den_size);
          den_ptr[den_size] = 1;
        }
      else
        {
          /* right shift numerator, adjust denominator accordingly */
          int  shift;

          den_size--;
          count_trailing_zeros (shift, flow);

          mpn_rshift (num_ptr, fptr, abs_fsize, shift);
          abs_fsize -= (num_ptr[abs_fsize-1] == 0);

          MPN_ZERO (den_ptr, den_size);
          den_ptr[den_size] = GMP_LIMB_HIGHBIT >> (shift-1);
        }

      SIZ(NUM(q)) = fsize >= 0 ? abs_fsize : -abs_fsize;
      SIZ(DEN(q)) = den_size + 1;
    }
}
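
Like mpq_set_d, mpq_set_f is exact, and the power-of-two stripping above leaves the result canonical. A minimal sketch:

#include <stdio.h>
#include <gmp.h>

int main (void)
{
  mpf_t f;
  mpq_t q;

  mpf_init_set_d (f, 2.375);   /* exactly 19/8 */
  mpq_init (q);

  mpq_set_f (q, f);            /* exact conversion: prints 19/8 */
  gmp_printf ("%Qd\n", q);

  mpf_clear (f);
  mpq_clear (q);
  return 0;
}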
Example #12
/* rp[n-1..0] = bp[bn-1..0] ^ ep[en-1..0] mod mp[n-1..0]
   Requires that mp[n-1..0] is odd.
   Requires that ep[en-1..0] is > 1.
   Uses scratch space tp[3n..0], i.e., 3n+1 words.  */
void
mpn_powm_sec (mp_ptr rp, mp_srcptr bp, mp_size_t bn,
	      mp_srcptr ep, mp_size_t en,
	      mp_srcptr mp, mp_size_t n, mp_ptr tp)
{
  mp_limb_t mip[2];
  int cnt;
  long ebi;
  int windowsize, this_windowsize;
  mp_limb_t expbits;
  mp_ptr pp, this_pp, last_pp;
  long i;
  int redc_x;
  TMP_DECL;

  ASSERT (en > 1 || (en == 1 && ep[0] > 1));
  ASSERT (n >= 1 && ((mp[0] & 1) != 0));

  TMP_MARK;

  count_leading_zeros (cnt, ep[en - 1]);
  ebi = en * GMP_LIMB_BITS - cnt;

  windowsize = win_size (ebi);

  if (BELOW_THRESHOLD (n, REDC_2_THRESHOLD))
    {
      binvert_limb (mip[0], mp[0]);
      mip[0] = -mip[0];
      redc_x = 1;
    }
#if defined (HAVE_NATIVE_mpn_addmul_2)
  else
    {
      mpn_binvert (mip, mp, 2, tp);
      mip[0] = -mip[0]; mip[1] = ~mip[1];
      redc_x = 2;
    }
#endif
#if 0
  mpn_binvert (mip, mp, n, tp);
  redc_x = 0;
#endif

  pp = TMP_ALLOC_LIMBS (n << windowsize);

  this_pp = pp;
  this_pp[n] = 1;
  redcify (this_pp, this_pp + n, 1, mp, n);
  this_pp += n;
  redcify (this_pp, bp, bn, mp, n);

  /* Precompute powers of b and put them in the temporary area at pp.  */
  for (i = (1 << windowsize) - 2; i > 0; i--)
    {
      last_pp = this_pp;
      this_pp += n;
      mpn_mul_n (tp, last_pp, pp + n, n);
      MPN_REDC_X (this_pp, tp, mp, n, mip);
    }

  expbits = getbits (ep, ebi, windowsize);
  ebi -= windowsize;
  if (ebi < 0)
    ebi = 0;

  MPN_COPY (rp, pp + n * expbits, n);

  while (ebi != 0)
    {
      expbits = getbits (ep, ebi, windowsize);
      ebi -= windowsize;
      this_windowsize = windowsize;
      if (ebi < 0)
	{
	  this_windowsize += ebi;
	  ebi = 0;
	}

      do
	{
	  mpn_sqr_n (tp, rp, n);
	  MPN_REDC_X (rp, tp, mp, n, mip);
	  this_windowsize--;
	}
      while (this_windowsize != 0);

#if WANT_CACHE_SECURITY
      mpn_tabselect (tp + 2*n, pp, n, 1 << windowsize, expbits);
      mpn_mul_n (tp, rp, tp + 2*n, n);
#else
      mpn_mul_n (tp, rp, pp + n * expbits, n);
#endif
      MPN_REDC_X (rp, tp, mp, n, mip);
    }

  MPN_COPY (tp, rp, n);
  MPN_ZERO (tp + n, n);
  MPN_REDC_X (rp, tp, mp, n, mip);
  if (mpn_cmp (rp, mp, n) >= 0)
    mpn_sub_n (rp, rp, mp, n);
  TMP_FREE;
}
Example #13
void
mpn_toom22_mul (mp_ptr pp,
                mp_srcptr ap, mp_size_t an,
                mp_srcptr bp, mp_size_t bn,
                mp_ptr scratch)
{
    const int __gmpn_cpuvec_initialized = 1;
    mp_size_t n, s, t;
    int vm1_neg;
    mp_limb_t cy, cy2;
    mp_ptr asm1;
    mp_ptr bsm1;

#define a0  ap
#define a1  (ap + n)
#define b0  bp
#define b1  (bp + n)

    s = an >> 1;
    n = an - s;
    t = bn - n;

    ASSERT (an >= bn);

    ASSERT (0 < s && s <= n && s >= n - 1);
    ASSERT (0 < t && t <= s);

    asm1 = pp;
    bsm1 = pp + n;

    vm1_neg = 0;

    /* Compute asm1.  */
    if (s == n)
    {
        if (mpn_cmp (a0, a1, n) < 0)
        {
            mpn_sub_n (asm1, a1, a0, n);
            vm1_neg = 1;
        }
        else
        {
            mpn_sub_n (asm1, a0, a1, n);
        }
    }
    else /* n - s == 1 */
    {
        if (a0[s] == 0 && mpn_cmp (a0, a1, s) < 0)
        {
            mpn_sub_n (asm1, a1, a0, s);
            asm1[s] = 0;
            vm1_neg = 1;
        }
        else
        {
            asm1[s] = a0[s] - mpn_sub_n (asm1, a0, a1, s);
        }
    }

    /* Compute bsm1.  */
    if (t == n)
    {
        if (mpn_cmp (b0, b1, n) < 0)
        {
            mpn_sub_n (bsm1, b1, b0, n);
            vm1_neg ^= 1;
        }
        else
        {
            mpn_sub_n (bsm1, b0, b1, n);
        }
    }
    else
    {
        if (mpn_zero_p (b0 + t, n - t) && mpn_cmp (b0, b1, t) < 0)
        {
            mpn_sub_n (bsm1, b1, b0, t);
            MPN_ZERO (bsm1 + t, n - t);
            vm1_neg ^= 1;
        }
        else
        {
            mpn_sub (bsm1, b0, n, b1, t);
        }
    }

#define v0	pp				/* 2n */
#define vinf	(pp + 2 * n)			/* s+t */
#define vm1	scratch				/* 2n */
#define scratch_out	scratch + 2 * n

    /* vm1, 2n limbs */
    TOOM22_MUL_N_REC (vm1, asm1, bsm1, n, scratch_out);

    if (s > t)  TOOM22_MUL_REC (vinf, a1, s, b1, t, scratch_out);
    else        TOOM22_MUL_N_REC (vinf, a1, b1, s, scratch_out);

    /* v0, 2n limbs */
    TOOM22_MUL_N_REC (v0, ap, bp, n, scratch_out);

    /* H(v0) + L(vinf) */
    cy = mpn_add_n (pp + 2 * n, v0 + n, vinf, n);

    /* L(v0) + H(v0) */
    cy2 = cy + mpn_add_n (pp + n, pp + 2 * n, v0, n);

    /* L(vinf) + H(vinf) */
    cy += mpn_add (pp + 2 * n, pp + 2 * n, n, vinf + n, s + t - n);

    if (vm1_neg)
        cy += mpn_add_n (pp + n, pp + n, vm1, 2 * n);
    else
        cy -= mpn_sub_n (pp + n, pp + n, vm1, 2 * n);

    ASSERT (cy + 1  <= 3);
    ASSERT (cy2 <= 2);

    MPN_INCR_U (pp + 2 * n, s + t, cy2);
    if (LIKELY (cy <= 2))
        /* if s+t==n, cy is zero, but we should not access pp[3*n] at all. */
        MPN_INCR_U (pp + 3 * n, s + t - n, cy);
    else
        MPN_DECR_U (pp + 3 * n, s + t - n, 1);
}
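
mpn_toom22_mul is selected internally by GMP's multiplication dispatcher once operands exceed the Karatsuba threshold (MUL_TOOM22_THRESHOLD limbs); applications simply call mpz_mul. A minimal sketch with operands large enough to leave the basecase range on typical builds:

#include <stdio.h>
#include <gmp.h>

int main (void)
{
  mpz_t a, b, p;
  mpz_inits (a, b, p, NULL);

  mpz_ui_pow_ui (a, 3, 3000);  /* ~4755-bit operand */
  mpz_ui_pow_ui (b, 5, 2500);  /* ~5805-bit operand */

  mpz_mul (p, a, b);
  printf ("product has %lu bits\n",
          (unsigned long) mpz_sizeinbase (p, 2));

  mpz_clears (a, b, p, NULL);
  return 0;
}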
Example #14
void
_gst_mpz_gcd (gst_mpz *g, const gst_mpz *u, const gst_mpz *v)
{
    int g_zero_bits, u_zero_bits, v_zero_bits;
    mp_size_t g_zero_limbs, u_zero_limbs, v_zero_limbs;
    mp_ptr tp;
    mp_ptr up = u->d;
    mp_size_t usize = ABS (u->size);
    mp_ptr vp = v->d;
    mp_size_t vsize = ABS (v->size);
    mp_size_t gsize;

    /* GCD(0, V) == GCD (U, 1) == V.  */
    if (usize == 0 || (vsize == 1 && vp[0] == 1))
    {
        gst_mpz_copy_abs (g, v);
        return;
    }

    /* GCD(U, 0) == GCD (1, V) == U.  */
    if (vsize == 0 || (usize == 1 && up[0] == 1))
    {
        gst_mpz_copy_abs (g, u);
        return;
    }

    if (usize == 1)
    {
        gst_mpz_realloc (g, 1);
        g->size = 1;
        g->d[0] = mpn_gcd_1 (vp, vsize, up[0]);
        return;
    }

    if (vsize == 1)
    {
        gst_mpz_realloc (g, 1);
        g->size = 1;
        g->d[0] = mpn_gcd_1 (up, usize, vp[0]);
        return;
    }

    /*  Eliminate low zero bits from U and V and move to temporary storage.  */
    u_zero_bits = mpn_scan1 (up, 0);
    u_zero_limbs = u_zero_bits / BITS_PER_MP_LIMB;
    u_zero_bits &= BITS_PER_MP_LIMB - 1;
    up += u_zero_limbs;
    usize -= u_zero_limbs;

    /* Operands could be destroyed for big-endian case, but let's be tidy.  */
    tp = up;
    up = (mp_ptr) alloca (usize * SIZEOF_MP_LIMB_T);
    if (u_zero_bits != 0)
    {
        mpn_rshift (up, tp, usize, u_zero_bits);
        usize -= up[usize - 1] == 0;
    }
    else
        MPN_COPY (up, tp, usize);

    v_zero_bits = mpn_scan1 (vp, 0);
    v_zero_limbs = v_zero_bits / BITS_PER_MP_LIMB;
    v_zero_bits &= BITS_PER_MP_LIMB - 1;
    vp += v_zero_limbs;
    vsize -= v_zero_limbs;

    /* Operands could be destroyed for big-endian case, but let's be tidy.  */
    tp = vp;
    vp = (mp_ptr) alloca (vsize * SIZEOF_MP_LIMB_T);
    if (v_zero_bits != 0)
    {
        mpn_rshift (vp, tp, vsize, v_zero_bits);
        vsize -= vp[vsize - 1] == 0;
    }
    else
        MPN_COPY (vp, tp, vsize);

    if (u_zero_limbs > v_zero_limbs)
    {
        g_zero_limbs = v_zero_limbs;
        g_zero_bits = v_zero_bits;
    }
    else if (u_zero_limbs < v_zero_limbs)
    {
        g_zero_limbs = u_zero_limbs;
        g_zero_bits = u_zero_bits;
    }
    else  /*  Equal.  */
    {
        g_zero_limbs = u_zero_limbs;
        g_zero_bits = MIN (u_zero_bits, v_zero_bits);
    }

    /*  Call mpn_gcd.  The 2nd argument must not have more bits than the 1st.  */
    vsize = (usize < vsize || (usize == vsize && up[usize-1] < vp[vsize-1]))
            ? mpn_gcd (vp, vp, vsize, up, usize)
            : mpn_gcd (vp, up, usize, vp, vsize);

    /*  Here G <-- V << (g_zero_limbs*BITS_PER_MP_LIMB + g_zero_bits).  */
    gsize = vsize + g_zero_limbs;
    if (g_zero_bits != 0)
    {
        mp_limb_t cy_limb;
        gsize += (vp[vsize - 1] >> (BITS_PER_MP_LIMB - g_zero_bits)) != 0;
        if (g->alloc < gsize)
            gst_mpz_realloc (g, gsize);
        MPN_ZERO (g->d, g_zero_limbs);

        tp = g->d + g_zero_limbs;
        cy_limb = mpn_lshift (tp, vp, vsize, g_zero_bits);
        if (cy_limb != 0)
            tp[vsize] = cy_limb;
    }
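
(This listing is also truncated.) The usize == 1 / vsize == 1 shortcuts above rely on mpn_gcd_1, which is part of GMP's public mpn layer. A minimal sketch (the operand values are arbitrary):

#include <stdio.h>
#include <gmp.h>

int main (void)
{
  mp_limb_t x[2] = { 0, 8 };   /* x = 8 * 2^GMP_NUMB_BITS, a power of 2 */
  mp_limb_t g;

  /* x must be normalized (high limb non-zero) and the limb non-zero */
  g = mpn_gcd_1 (x, 2, 12);
  printf ("%lu\n", (unsigned long) g);   /* gcd(2^k, 12) = 4 */
  return 0;
}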
Example #15
static unsigned long int
lc (mp_ptr rp, gmp_randstate_t rstate)
{
  mp_ptr tp, seedp, ap;
  mp_size_t ta;
  mp_size_t tn, seedn, an;
  unsigned long int m2exp;
  unsigned long int bits;
  int cy;
  mp_size_t xn;
  gmp_rand_lc_struct *p;
  TMP_DECL;

  p = (gmp_rand_lc_struct *) RNG_STATE (rstate);

  m2exp = p->_mp_m2exp;

  seedp = PTR (p->_mp_seed);
  seedn = SIZ (p->_mp_seed);

  ap = PTR (p->_mp_a);
  an = SIZ (p->_mp_a);

  /* Allocate temporary storage.  Let there be room for calculation of
     (A * seed + C) % M, or M if bigger than that.  */

  TMP_MARK;

  ta = an + seedn + 1;
  tn = BITS_TO_LIMBS (m2exp);
  if (ta <= tn) /* that is, if (ta < tn + 1) */
    {
      mp_size_t tmp = an + seedn;
      ta = tn + 1;
      tp = (mp_ptr) TMP_ALLOC (ta * BYTES_PER_MP_LIMB);
      MPN_ZERO (&tp[tmp], ta - tmp); /* mpn_mul won't zero it out.  */
    }
  else
    tp = (mp_ptr) TMP_ALLOC (ta * BYTES_PER_MP_LIMB);

  /* t = a * seed.  NOTE: an is always > 0; see initialization.  */
  ASSERT (seedn >= an && an > 0);
  mpn_mul (tp, seedp, seedn, ap, an);

  /* t = t + c.  NOTE: tn is always >= p->_cn (precondition for __GMPN_ADD);
     see initialization.  */
  ASSERT (tn >= p->_cn);
  __GMPN_ADD (cy, tp, tp, tn, p->_cp, p->_cn);

  /* t = t % m */
  tp[m2exp / GMP_NUMB_BITS] &= (CNST_LIMB (1) << m2exp % GMP_NUMB_BITS) - 1;

  /* Save result as next seed.  */
  MPN_COPY (PTR (p->_mp_seed), tp, tn);

  /* Discard the lower m2exp/2 of the result.  */
  bits = m2exp / 2;
  xn = bits / GMP_NUMB_BITS;

  tn -= xn;
  if (tn > 0)
    {
      unsigned int cnt = bits % GMP_NUMB_BITS;
      if (cnt != 0)
	{
	  mpn_rshift (tp, tp + xn, tn, cnt);
	  MPN_COPY_INCR (rp, tp, xn + 1);
	}
      else			/* Even limb boundary.  */
	MPN_COPY_INCR (rp, tp + xn, tn);
    }

  TMP_FREE;

  /* Return number of valid bits in the result.  */
  return (m2exp + 1) / 2;
}
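
lc() is the iteration behind GMP's linear congruential generators, which are created at the public level with gmp_randinit_lc_2exp. A sketch using the classic drand48 parameters (a = 0x5DEECE66D, c = 11, m = 2^48) purely as example inputs:

#include <stdio.h>
#include <gmp.h>

int main (void)
{
  gmp_randstate_t rs;
  mpz_t a, r;

  mpz_init_set_str (a, "5DEECE66D", 16);
  mpz_init (r);

  /* X_{n+1} = (a*X_n + c) mod 2^48; only the top m2exp/2 = 24 bits
     of each iterate are kept, as computed by lc() above */
  gmp_randinit_lc_2exp (rs, a, 11, 48);
  gmp_randseed_ui (rs, 20240101);

  mpz_urandomb (r, rs, 64);    /* concatenates several lc() outputs */
  gmp_printf ("%Zx\n", r);

  mpz_clear (a);
  mpz_clear (r);
  gmp_randclear (rs);
  return 0;
}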
Example #16
void
mpz_combit (mpz_ptr d, mp_bitcnt_t bit_index)
{
  mp_size_t dsize = SIZ(d);
  mp_ptr dp = PTR(d);

  mp_size_t limb_index = bit_index / GMP_NUMB_BITS;
  mp_limb_t bit = (CNST_LIMB (1) << (bit_index % GMP_NUMB_BITS));

  /* Check for the most common case: Positive input, no realloc or
     normalization needed. */
  if (limb_index + 1 < dsize)
    dp[limb_index] ^= bit;

  /* Check for the hairy case. d < 0, and we have all zero bits to the
     right of the bit to toggle. */
  else if (limb_index < -dsize
	   && (limb_index == 0 || mpn_zero_p (dp, limb_index))
	   && (dp[limb_index] & (bit - 1)) == 0)
    {
      ASSERT (dsize < 0);
      dsize = -dsize;

      if (dp[limb_index] & bit)
	{
	  /* We toggle the least significant one bit. Corresponds to
	     an add, with potential carry propagation, on the absolute
	     value. */
	  dp = MPZ_REALLOC (d, 1 + dsize);
	  dp[dsize] = 0;
	  MPN_INCR_U (dp + limb_index, 1 + dsize - limb_index, bit);
	  SIZ(d) = - dsize - dp[dsize];
	}
      else
	{
	  /* We toggle a zero bit, subtract from the absolute value. */
	  MPN_DECR_U (dp + limb_index, dsize - limb_index, bit);
	  /* The absolute value shrank by at most one bit. */
	  dsize -= dp[dsize - 1] == 0;
	  ASSERT (dsize > 0 && dp[dsize - 1] != 0);
	  SIZ (d) = -dsize;
	}
    }
  else
    {
      /* Simple case: Toggle the bit in the absolute value. */
      dsize = ABS(dsize);
      if (limb_index < dsize)
	{
	  mp_limb_t	 dlimb;
	  dlimb = dp[limb_index] ^ bit;
	  dp[limb_index] = dlimb;

	  /* Can happen only when limb_index = dsize - 1. Avoid SIZ(d)
	     bookkeeping in the common case. */
	  if (UNLIKELY ((dlimb == 0) + limb_index == dsize)) /* dsize == limb_index + 1 */
	    {
	      /* high limb became zero, must normalize */
	      MPN_NORMALIZE (dp, limb_index);
	      SIZ (d) = SIZ (d) >= 0 ? limb_index : -limb_index;
	    }
	}
      else
	{
	  dp = MPZ_REALLOC (d, limb_index + 1);
	  MPN_ZERO(dp + dsize, limb_index - dsize);
	  dp[limb_index++] = bit;
	  SIZ(d) = SIZ(d) >= 0 ? limb_index : -limb_index;
	}
    }
}
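
mpz_combit is a public GMP function; it toggles (complements) one bit. A minimal sketch exercising the realloc path and the normalization path above:

#include <stdio.h>
#include <gmp.h>

int main (void)
{
  mpz_t d;
  mpz_init_set_ui (d, 0);

  mpz_combit (d, 100);   /* set bit 100: d = 2^100 */
  gmp_printf ("%Zd\n", d);

  mpz_combit (d, 100);   /* toggle it back: d = 0 */
  gmp_printf ("%Zd\n", d);

  mpz_clear (d);
  return 0;
}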
Example #17
int
mpfr_rint (mpfr_ptr r, mpfr_srcptr u, mpfr_rnd_t rnd_mode)
{
  int sign;
  int rnd_away;
  mp_exp_t exp;

  if (MPFR_UNLIKELY( MPFR_IS_SINGULAR(u) ))
    {
      if (MPFR_IS_NAN(u))
        {
          MPFR_SET_NAN(r);
          MPFR_RET_NAN;
        }
      MPFR_SET_SAME_SIGN(r, u);
      if (MPFR_IS_INF(u))
        {
          MPFR_SET_INF(r);
          MPFR_RET(0);  /* infinity is exact */
        }
      else /* now u is zero */
        {
          MPFR_ASSERTD(MPFR_IS_ZERO(u));
          MPFR_SET_ZERO(r);
          MPFR_RET(0);  /* zero is exact */
        }
    }
  MPFR_SET_SAME_SIGN (r, u); /* Does nothing if r==u */

  sign = MPFR_INT_SIGN (u);
  exp = MPFR_GET_EXP (u);

  rnd_away =
    rnd_mode == GMP_RNDD ? sign < 0 :
    rnd_mode == GMP_RNDU ? sign > 0 :
    rnd_mode == GMP_RNDZ ? 0 : -1;

  /* rnd_away:
     1 if round away from zero,
     0 if round to zero,
     -1 if not decided yet.
   */

  if (MPFR_UNLIKELY (exp <= 0))  /* 0 < |u| < 1 ==> round |u| to 0 or 1 */
    {
      /* Note: in the GMP_RNDN mode, 0.5 must be rounded to 0. */
      if (rnd_away != 0 &&
          (rnd_away > 0 ||
           (exp == 0 && (rnd_mode == GMP_RNDNA ||
                         !mpfr_powerof2_raw (u)))))
        {
          mp_limb_t *rp;
          mp_size_t rm;

          rp = MPFR_MANT(r);
          rm = (MPFR_PREC(r) - 1) / BITS_PER_MP_LIMB;
          rp[rm] = MPFR_LIMB_HIGHBIT;
          MPN_ZERO(rp, rm);
          MPFR_SET_EXP (r, 1);  /* |r| = 1 */
          MPFR_RET(sign > 0 ? 2 : -2);
        }
      else
        {
          MPFR_SET_ZERO(r);  /* r = 0 */
          MPFR_RET(sign > 0 ? -2 : 2);
        }
    }
  else  /* exp > 0, |u| >= 1 */
    {
      mp_limb_t *up, *rp;
      mp_size_t un, rn, ui;
      int sh, idiff;
      int uflags;

      /*
       * uflags will contain:
       *   _ 0 if u is an integer representable in r,
       *   _ 1 if u is an integer not representable in r,
       *   _ 2 if u is not an integer.
       */

      up = MPFR_MANT(u);
      rp = MPFR_MANT(r);

      un = MPFR_LIMB_SIZE(u);
      rn = MPFR_LIMB_SIZE(r);
      MPFR_UNSIGNED_MINUS_MODULO (sh, MPFR_PREC (r));

      MPFR_SET_EXP (r, exp); /* Does nothing if r==u */

      if ((exp - 1) / BITS_PER_MP_LIMB >= un)
        {
          ui = un;
          idiff = 0;
          uflags = 0;  /* u is an integer, representable or not in r */
        }
      else
        {
          mp_size_t uj;

          ui = (exp - 1) / BITS_PER_MP_LIMB + 1;  /* #limbs of the int part */
          MPFR_ASSERTD (un >= ui);
          uj = un - ui;  /* lowest limb of the integer part */
          idiff = exp % BITS_PER_MP_LIMB;  /* #int-part bits in up[uj] or 0 */

          uflags = idiff == 0 || (up[uj] << idiff) == 0 ? 0 : 2;
          if (uflags == 0)
            while (uj > 0)
              if (up[--uj] != 0)
                {
                  uflags = 2;
                  break;
                }
        }

      if (ui > rn)
        {
          /* More limbs in the integer part of u than in r.
             Just round u with the precision of r. */
          MPFR_ASSERTD (rp != up && un > rn);
          MPN_COPY (rp, up + (un - rn), rn); /* r != u */
          if (rnd_away < 0)
            {
              /* This is a rounding to nearest mode (GMP_RNDN or GMP_RNDNA).
                 Decide the rounding direction here. */
              if (rnd_mode == GMP_RNDN &&
                  (rp[0] & (MPFR_LIMB_ONE << sh)) == 0)
                { /* halfway cases rounded toward zero */
                  mp_limb_t a, b;
                  /* a: rounding bit and some of the following bits */
                  /* b: boundary for a (weight of the rounding bit in a) */
                  if (sh != 0)
                    {
                      a = rp[0] & ((MPFR_LIMB_ONE << sh) - 1);
                      b = MPFR_LIMB_ONE << (sh - 1);
                    }
                  else
                    {
                      a = up[un - rn - 1];
                      b = MPFR_LIMB_HIGHBIT;
                    }
                  rnd_away = a > b;
                  if (a == b)
                    {
                      mp_size_t i;
                      for (i = un - rn - 1 - (sh == 0); i >= 0; i--)
                        if (up[i] != 0)
                          {
                            rnd_away = 1;
                            break;
                          }
                    }
                }
              else  /* halfway cases rounded away from zero */
                rnd_away =  /* rounding bit */
                  ((sh != 0 && (rp[0] & (MPFR_LIMB_ONE << (sh - 1))) != 0) ||
                   (sh == 0 && (up[un - rn - 1] & MPFR_LIMB_HIGHBIT) != 0));
            }
          if (uflags == 0)
            { /* u is an integer; determine if it is representable in r */
              if (sh != 0 && rp[0] << (BITS_PER_MP_LIMB - sh) != 0)
                uflags = 1;  /* u is not representable in r */
              else
                {
                  mp_size_t i;
                  for (i = un - rn - 1; i >= 0; i--)
                    if (up[i] != 0)
                      {
                        uflags = 1;  /* u is not representable in r */
                        break;
                      }
                }
            }
        }
      else  /* ui <= rn */
        {
          mp_size_t uj, rj;
          int ush;

          uj = un - ui;  /* lowest limb of the integer part in u */
          rj = rn - ui;  /* lowest limb of the integer part in r */

          if (MPFR_LIKELY (rp != up))
            MPN_COPY(rp + rj, up + uj, ui);

          /* Ignore the lowest rj limbs, all equal to zero. */
          rp += rj;
          rn = ui;

          /* number of fractional bits in whole rp[0] */
          ush = idiff == 0 ? 0 : BITS_PER_MP_LIMB - idiff;

          if (rj == 0 && ush < sh)
            {
              /* If u is an integer (uflags == 0), we need to determine
                 if it is representable in r, i.e. if its sh - ush bits
                 in the non-significant part of r are all 0. */
              if (uflags == 0 && (rp[0] & ((MPFR_LIMB_ONE << sh) -
                                           (MPFR_LIMB_ONE << ush))) != 0)
                uflags = 1;  /* u is an integer not representable in r */
            }
          else  /* The integer part of u fits in r, we'll round to it. */
            sh = ush;

          if (rnd_away < 0)
            {
              /* This is a rounding to nearest mode.
                 Decide the rounding direction here. */
              if (uj == 0 && sh == 0)
                rnd_away = 0; /* rounding bit = 0 (not represented in u) */
              else if (rnd_mode == GMP_RNDN &&
                       (rp[0] & (MPFR_LIMB_ONE << sh)) == 0)
                { /* halfway cases rounded toward zero */
                  mp_limb_t a, b;
                  /* a: rounding bit and some of the following bits */
                  /* b: boundary for a (weight of the rounding bit in a) */
                  if (sh != 0)
                    {
                      a = rp[0] & ((MPFR_LIMB_ONE << sh) - 1);
                      b = MPFR_LIMB_ONE << (sh - 1);
                    }
                  else
                    {
                      MPFR_ASSERTD (uj >= 1);  /* see above */
                      a = up[uj - 1];
                      b = MPFR_LIMB_HIGHBIT;
                    }
                  rnd_away = a > b;
                  if (a == b)
                    {
                      mp_size_t i;
                      for (i = uj - 1 - (sh == 0); i >= 0; i--)
                        if (up[i] != 0)
                          {
                            rnd_away = 1;
                            break;
                          }
                    }
                }
              else  /* halfway cases rounded away from zero */
                rnd_away =  /* rounding bit */
                  ((sh != 0 && (rp[0] & (MPFR_LIMB_ONE << (sh - 1))) != 0) ||
                   (sh == 0 && (MPFR_ASSERTD (uj >= 1),
                                up[uj - 1] & MPFR_LIMB_HIGHBIT) != 0));
            }
          /* Now we can set the low rj limbs to 0 */
          MPN_ZERO (rp-rj, rj);
        }

      if (sh != 0)
        rp[0] &= MP_LIMB_T_MAX << sh;

      /* If u is a representable integer, there is no rounding. */
      if (uflags == 0)
        MPFR_RET(0);

      MPFR_ASSERTD (rnd_away >= 0);  /* rounding direction is defined */
      if (rnd_away && mpn_add_1(rp, rp, rn, MPFR_LIMB_ONE << sh))
        {
          if (exp == __gmpfr_emax)
            return mpfr_overflow(r, rnd_mode, MPFR_SIGN(r)) >= 0 ?
              uflags : -uflags;
          else
            {
              MPFR_SET_EXP(r, exp + 1);
              rp[rn-1] = MPFR_LIMB_HIGHBIT;
            }
        }

      MPFR_RET (rnd_away ^ (sign < 0) ? uflags : -uflags);
    }  /* exp > 0, |u| >= 1 */
}
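
A minimal sketch of mpfr_rint at the public level; with MPFR_RNDN halfway cases go to the even integer, as the comment above notes for 0.5:

#include <stdio.h>
#include <mpfr.h>

int main (void)
{
  mpfr_t r, u;
  int inex;

  mpfr_inits2 (53, r, u, (mpfr_ptr) 0);

  mpfr_set_d (u, 2.5, MPFR_RNDN);
  inex = mpfr_rint (r, u, MPFR_RNDN);   /* ties to even: 2 */
  mpfr_printf ("%Rg (ternary %d)\n", r, inex);

  mpfr_set_d (u, -0.5, MPFR_RNDN);
  inex = mpfr_rint (r, u, MPFR_RNDN);   /* halfway: rounds to (signed) zero */
  mpfr_printf ("%Rg (ternary %d)\n", r, inex);

  mpfr_clears (r, u, (mpfr_ptr) 0);
  return 0;
}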
Example #18
int
mpfr_round_raw_generic(
#if flag == 0
                       mp_limb_t *yp,
#endif
                       const mp_limb_t *xp, mpfr_prec_t xprec,
                       int neg, mpfr_prec_t yprec, mpfr_rnd_t rnd_mode
#if use_inexp != 0
                       , int *inexp
#endif
                       )
{
  mp_size_t xsize, nw;
  mp_limb_t himask, lomask, sb;
  int rw;
#if flag == 0
  int carry;
#endif
#if use_inexp == 0
  int *inexp;
#endif

  if (use_inexp)
    MPFR_ASSERTD(inexp != ((int*) 0));
  MPFR_ASSERTD(neg == 0 || neg == 1);

  if (flag && !use_inexp &&
      (xprec <= yprec || MPFR_IS_LIKE_RNDZ (rnd_mode, neg)))
    return 0;

  xsize = MPFR_PREC2LIMBS (xprec);
  nw = yprec / GMP_NUMB_BITS;
  rw = yprec & (GMP_NUMB_BITS - 1);

  if (MPFR_UNLIKELY(xprec <= yprec))
    { /* No rounding is necessary. */
      /* if yp=xp, maybe an overlap: MPN_COPY_DECR is OK when src <= dst */
      if (MPFR_LIKELY(rw))
        nw++;
      MPFR_ASSERTD(nw >= 1);
      MPFR_ASSERTD(nw >= xsize);
      if (use_inexp)
        *inexp = 0;
#if flag == 0
      MPN_COPY_DECR(yp + (nw - xsize), xp, xsize);
      MPN_ZERO(yp, nw - xsize);
#endif
      return 0;
    }

  if (use_inexp || !MPFR_IS_LIKE_RNDZ(rnd_mode, neg))
    {
      mp_size_t k = xsize - nw - 1;

      if (MPFR_LIKELY(rw))
        {
          nw++;
          lomask = MPFR_LIMB_MASK (GMP_NUMB_BITS - rw);
          himask = ~lomask;
        }
      else
        {
          lomask = MPFR_LIMB_MAX;
          himask = MPFR_LIMB_MAX;
        }
      MPFR_ASSERTD(k >= 0);
      sb = xp[k] & lomask;  /* First non-significant bits */
      /* Rounding to nearest? */
      if (MPFR_LIKELY (rnd_mode == MPFR_RNDN || rnd_mode == MPFR_RNDNA))
        {
          /* Rounding to nearest */
          mp_limb_t rbmask = MPFR_LIMB_ONE << (GMP_NUMB_BITS - 1 - rw);

          if ((sb & rbmask) == 0) /* rounding bit = 0 ? */
            goto rnd_RNDZ; /* yes, behave like rounding toward zero */
          /* Rounding to nearest with rounding bit = 1 */
          if (MPFR_UNLIKELY (rnd_mode == MPFR_RNDNA))
            /* FIXME: *inexp is not set. First, add a testcase that
               triggers the bug (at least with a sanitizer). */
            goto rnd_RNDN_add_one_ulp; /* like rounding away from zero */
          sb &= ~rbmask; /* first bits after the rounding bit */
          while (MPFR_UNLIKELY(sb == 0) && k > 0)
            sb = xp[--k];
          if (MPFR_UNLIKELY(sb == 0)) /* Even rounding. */
            {
              /* sb == 0 && rnd_mode == MPFR_RNDN */
              sb = xp[xsize - nw] & (himask ^ (himask << 1));
              if (sb == 0)
                {
                  if (use_inexp)
                    *inexp = 2*MPFR_EVEN_INEX*neg-MPFR_EVEN_INEX;
                  /* ((neg!=0)^(sb!=0)) ? MPFR_EVEN_INEX : -MPFR_EVEN_INEX */
                  /* since neg = 0 or 1 and sb = 0 */
#if flag == 0
                  MPN_COPY_INCR(yp, xp + xsize - nw, nw);
                  yp[0] &= himask;
#endif
                  return 0; /* sb != 0 && rnd_mode != MPFR_RNDZ */
                }
              else
                {
                  /* sb != 0 && rnd_mode == MPFR_RNDN */
                  if (use_inexp)
                    *inexp = MPFR_EVEN_INEX-2*MPFR_EVEN_INEX*neg;
                  /* ((neg!=0)^(sb!=0)) ? MPFR_EVEN_INEX : -MPFR_EVEN_INEX */
                  /* since neg = 0 or 1 and sb != 0 */
                  goto rnd_RNDN_add_one_ulp;
                }
            }
          else /* sb != 0 && rnd_mode == MPFR_RNDN */
            {
              if (use_inexp)
                *inexp = 1-2*neg; /* neg == 0 ? 1 : -1 */
            rnd_RNDN_add_one_ulp:
#if flag == 1
              return 1; /* sb != 0 && rnd_mode != MPFR_RNDZ */
#else
              carry = mpn_add_1 (yp, xp + xsize - nw, nw,
                                 rw ?
                                 MPFR_LIMB_ONE << (GMP_NUMB_BITS - rw)
                                 : MPFR_LIMB_ONE);
              yp[0] &= himask;
              return carry;
#endif
            }
        }
      /* Rounding toward zero? */
      else if (MPFR_IS_LIKE_RNDZ(rnd_mode, neg))
        {
          /* rnd_mode == MPFR_RNDZ */
        rnd_RNDZ:
          while (MPFR_UNLIKELY(sb == 0) && k > 0)
            sb = xp[--k];
          if (use_inexp)
            /* rnd_mode == MPFR_RNDZ and neg = 0 or 1 */
            /* ((neg != 0) ^ (rnd_mode != MPFR_RNDZ)) ? 1 : -1 */
            *inexp = MPFR_UNLIKELY(sb == 0) ? 0 : (2*neg-1);
#if flag == 0
          MPN_COPY_INCR(yp, xp + xsize - nw, nw);
          yp[0] &= himask;
#endif
          return 0; /* sb != 0 && rnd_mode != MPFR_RNDZ */
        }
      else
        {
          /* Rounding away from zero */
          while (MPFR_UNLIKELY(sb == 0) && k > 0)
            sb = xp[--k];
          if (MPFR_UNLIKELY(sb == 0))
            {
              /* sb = 0 && rnd_mode != MPFR_RNDZ */
              if (use_inexp)
                /* ((neg != 0) ^ (rnd_mode != MPFR_RNDZ)) ? 1 : -1 */
                *inexp = 0;
#if flag == 0
              MPN_COPY_INCR(yp, xp + xsize - nw, nw);
              yp[0] &= himask;
#endif
              return 0;
            }
          else
            {
              /* sb != 0 && rnd_mode != MPFR_RNDZ */
              if (use_inexp)
                *inexp = 1-2*neg; /* neg == 0 ? 1 : -1 */
#if flag == 1
              return 1;
#else
              carry = mpn_add_1(yp, xp + xsize - nw, nw,
                                rw ? MPFR_LIMB_ONE << (GMP_NUMB_BITS - rw)
                                : 1);
              yp[0] &= himask;
              return carry;
#endif
            }
        }
    }
  else
    {
      /* Rounding toward zero / no inexact flag */
#if flag == 0
      if (MPFR_LIKELY(rw))
        {
          nw++;
          himask = ~MPFR_LIMB_MASK (GMP_NUMB_BITS - rw);
        }
      else
        himask = MPFR_LIMB_MAX;
      MPN_COPY_INCR(yp, xp + xsize - nw, nw);
      yp[0] &= himask;
#endif
      return 0;
    }
}
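
The rounding-bit / sticky-bit logic above is easier to see at word scale. Below is a minimal, self-contained sketch using an 8-bit "limb" and precision 4; the helper name round_nearest_even and the exhaustive check are illustrative, not part of MPFR:

#include <assert.h>
#include <stdint.h>

/* Round the 8-bit value x to its p high bits, ties to even.
   *carry is set when the round-up overflows, like the carry above. */
static uint8_t round_nearest_even (uint8_t x, int p, int *carry)
{
  int rw = 8 - p;                          /* discarded low bits */
  uint8_t lomask = (uint8_t) ((1u << rw) - 1);
  uint8_t rb = (x >> (rw - 1)) & 1;        /* rounding bit */
  uint8_t sticky = x & (lomask >> 1);      /* bits below the rounding bit */
  uint8_t trunc = x & (uint8_t) ~lomask;
  unsigned r;

  *carry = 0;
  if (rb == 0)
    return trunc;                          /* round down */
  if (sticky == 0 && ((x >> rw) & 1) == 0)
    return trunc;                          /* tie, low kept bit even */
  r = trunc + (1u << rw);                  /* round up */
  *carry = r > 0xFF;
  return (uint8_t) r;
}

int main (void)
{
  unsigned x;
  for (x = 0; x < 256; x++)
    {
      int carry;
      unsigned got = round_nearest_even ((uint8_t) x, 4, &carry)
                     + (carry ? 256u : 0);
      unsigned q = x >> 4, r = x & 15;     /* reference answer */
      unsigned ref = (r > 8 || (r == 8 && (q & 1))) ? (q + 1) << 4 : q << 4;
      assert (got == ref);
    }
  return 0;
}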
Example #19
int
main (int argc, char **argv)
{
  gmp_randstate_ptr rands;
  unsigned long maxnbits, maxdbits, nbits, dbits;
  mpz_t n, d, tz;
  mp_size_t maxnn, maxdn, nn, dn, clearn, i;
  mp_ptr np, dp, qp, rp;
  mp_limb_t rh;
  mp_limb_t t;
  mp_limb_t dinv;
  int count = COUNT;
  long test;
  mp_ptr scratch;
  mp_limb_t ran;
  mp_size_t alloc, itch;
  mp_limb_t rran0, rran1, qran0, qran1;
  TMP_DECL;

  if (argc > 1)
    {
      char *end;
      count = strtol (argv[1], &end, 0);
      if (*end || count <= 0)
	{
	  fprintf (stderr, "Invalid test count: %s.\n", argv[1]);
	  return 1;
	}
    }


  maxdbits = MAX_DN;
  maxnbits = MAX_NN;

  tests_start ();
  rands = RANDS;

  mpz_init (n);
  mpz_init (d);
  mpz_init (tz);

  maxnn = maxnbits / GMP_NUMB_BITS + 1;
  maxdn = maxdbits / GMP_NUMB_BITS + 1;

  TMP_MARK;

  qp = TMP_ALLOC_LIMBS (maxnn + 2) + 1;
  rp = TMP_ALLOC_LIMBS (maxnn + 2) + 1;

  alloc = 1;
  scratch = __GMP_ALLOCATE_FUNC_LIMBS (alloc);

  for (test = 0; test < count;)
    {
      nbits = random_word (rands) % (maxnbits - GMP_NUMB_BITS) + 2 * GMP_NUMB_BITS;
      if (maxdbits > nbits)
	dbits = random_word (rands) % nbits + 1;
      else
	dbits = random_word (rands) % maxdbits + 1;

#if RAND_UNIFORM
#define RANDFUNC mpz_urandomb
#else
#define RANDFUNC mpz_rrandomb
#endif

      do
	{
	  RANDFUNC (n, rands, nbits);
	  do
	    {
	      RANDFUNC (d, rands, dbits);
	    }
	  while (mpz_sgn (d) == 0);

	  np = PTR (n);
	  dp = PTR (d);
	  nn = SIZ (n);
	  dn = SIZ (d);
	}
      while (nn < dn);

      dp[0] |= 1;

      mpz_urandomb (tz, rands, 32);
      t = mpz_get_ui (tz);

      if (t % 17 == 0)
	dp[0] = GMP_NUMB_MAX;

      switch ((int) t % 16)
	{
	case 0:
	  clearn = random_word (rands) % nn;
	  for (i = 0; i <= clearn; i++)
	    np[i] = 0;
	  break;
	case 1:
	  mpn_sub_1 (np + nn - dn, dp, dn, random_word (rands));
	  break;
	case 2:
	  mpn_add_1 (np + nn - dn, dp, dn, random_word (rands));
	  break;
	}

      test++;

      binvert_limb (dinv, dp[0]);

      rran0 = random_word (rands);
      rran1 = random_word (rands);
      qran0 = random_word (rands);
      qran1 = random_word (rands);

      qp[-1] = qran0;
      qp[nn - dn + 1] = qran1;
      rp[-1] = rran0;

      ran = random_word (rands);

      if ((double) (nn - dn) * dn < 1e5)
	{
	  if (nn > dn)
	    {
	      /* Test mpn_sbpi1_bdiv_qr */
	      MPN_ZERO (qp, nn - dn);
	      MPN_ZERO (rp, dn);
	      MPN_COPY (rp, np, nn);
	      rh = mpn_sbpi1_bdiv_qr (qp, rp, nn, dp, dn, -dinv);
	      ASSERT_ALWAYS (qp[-1] == qran0);  ASSERT_ALWAYS (qp[nn - dn + 1] == qran1);
	      ASSERT_ALWAYS (rp[-1] == rran0);
	      check_one (qp, rp + nn - dn, rh, np, nn, dp, dn, "mpn_sbpi1_bdiv_qr");
	    }

	  if (nn > dn)
	    {
	      /* Test mpn_sbpi1_bdiv_q */
	      MPN_COPY (rp, np, nn);
	      MPN_ZERO (qp, nn - dn);
	      mpn_sbpi1_bdiv_q (qp, rp, nn - dn, dp, MIN(dn,nn-dn), -dinv);
	      ASSERT_ALWAYS (qp[-1] == qran0);  ASSERT_ALWAYS (qp[nn - dn + 1] == qran1);
	      ASSERT_ALWAYS (rp[-1] == rran0);
	      check_one (qp, NULL, 0, np, nn, dp, dn, "mpn_sbpi1_bdiv_q");
	    }
	}

      if (dn >= 4 && nn - dn >= 2)
	{
	  /* Test mpn_dcpi1_bdiv_qr */
	  MPN_COPY (rp, np, nn);
	  MPN_ZERO (qp, nn - dn);
	  rh = mpn_dcpi1_bdiv_qr (qp, rp, nn, dp, dn, -dinv);
	  ASSERT_ALWAYS (qp[-1] == qran0);  ASSERT_ALWAYS (qp[nn - dn + 1] == qran1);
	  ASSERT_ALWAYS (rp[-1] == rran0);
	  check_one (qp, rp + nn - dn, rh, np, nn, dp, dn, "mpn_dcpi1_bdiv_qr");
	}

      if (dn >= 4 && nn - dn >= 2)
	{
	  /* Test mpn_dcpi1_bdiv_q */
	  MPN_COPY (rp, np, nn);
	  MPN_ZERO (qp, nn - dn);
	  mpn_dcpi1_bdiv_q (qp, rp, nn - dn, dp, MIN(dn,nn-dn), -dinv);
	  ASSERT_ALWAYS (qp[-1] == qran0);  ASSERT_ALWAYS (qp[nn - dn + 1] == qran1);
	  ASSERT_ALWAYS (rp[-1] == rran0);
	  check_one (qp, NULL, 0, np, nn, dp, dn, "mpn_dcpi1_bdiv_q");
	}

      if (nn > dn)
	{
	  /* Test mpn_bdiv_qr */
	  itch = mpn_bdiv_qr_itch (nn, dn);
	  if (itch + 1 > alloc)
	    {
	      scratch = __GMP_REALLOCATE_FUNC_LIMBS (scratch, alloc, itch + 1);
	      alloc = itch + 1;
	    }
	  scratch[itch] = ran;
	  MPN_ZERO (qp, nn - dn);
	  MPN_ZERO (rp, dn);
	  rp[dn] = rran1;
	  rh = mpn_bdiv_qr (qp, rp, np, nn, dp, dn, scratch);
	  ASSERT_ALWAYS (ran == scratch[itch]);
	  ASSERT_ALWAYS (qp[-1] == qran0);  ASSERT_ALWAYS (qp[nn - dn + 1] == qran1);
	  ASSERT_ALWAYS (rp[-1] == rran0);  ASSERT_ALWAYS (rp[dn] == rran1);

	  check_one (qp, rp, rh, np, nn, dp, dn, "mpn_bdiv_qr");
	}

      if (nn - dn < 2 || dn < 2)
	continue;

      /* Test mpn_mu_bdiv_qr */
      itch = mpn_mu_bdiv_qr_itch (nn, dn);
      if (itch + 1 > alloc)
	{
	  scratch = __GMP_REALLOCATE_FUNC_LIMBS (scratch, alloc, itch + 1);
	  alloc = itch + 1;
	}
      scratch[itch] = ran;
      MPN_ZERO (qp, nn - dn);
      MPN_ZERO (rp, dn);
      rp[dn] = rran1;
      rh = mpn_mu_bdiv_qr (qp, rp, np, nn, dp, dn, scratch);
      ASSERT_ALWAYS (ran == scratch[itch]);
      ASSERT_ALWAYS (qp[-1] == qran0);  ASSERT_ALWAYS (qp[nn - dn + 1] == qran1);
      ASSERT_ALWAYS (rp[-1] == rran0);  ASSERT_ALWAYS (rp[dn] == rran1);
      check_one (qp, rp, rh, np, nn, dp, dn, "mpn_mu_bdiv_qr");

      /* Test mpn_mu_bdiv_q */
      itch = mpn_mu_bdiv_q_itch (nn, dn);
      if (itch + 1 > alloc)
	{
	  scratch = __GMP_REALLOCATE_FUNC_LIMBS (scratch, alloc, itch + 1);
	  alloc = itch + 1;
	}
      scratch[itch] = ran;
      MPN_ZERO (qp, nn - dn + 1);
      mpn_mu_bdiv_q (qp, np, nn - dn, dp, dn, scratch);
      ASSERT_ALWAYS (ran == scratch[itch]);
      ASSERT_ALWAYS (qp[-1] == qran0);  ASSERT_ALWAYS (qp[nn - dn + 1] == qran1);
      check_one (qp, NULL, 0, np, nn, dp, dn, "mpn_mu_bdiv_q");
    }

  __GMP_FREE_FUNC_LIMBS (scratch, alloc);

  TMP_FREE;

  mpz_clear (n);
  mpz_clear (d);
  mpz_clear (tz);

  tests_end ();
  return 0;
}
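
The test feeds each bdiv function -dinv, where dinv is the inverse of the (odd) low divisor limb modulo B, as computed by binvert_limb. A minimal sketch of the underlying Newton iteration, assuming 64-bit limbs (binvert64 is an illustrative name):

#include <assert.h>
#include <stdint.h>

/* Newton iteration: if x == 1/d (mod 2^k) then x*(2 - d*x) == 1/d
   (mod 2^2k), so the number of correct low bits doubles each step. */
static uint64_t binvert64 (uint64_t d)
{
  uint64_t x = d;               /* d*d == 1 (mod 8) for any odd d */
  int i;
  for (i = 0; i < 5; i++)       /* 3 -> 6 -> 12 -> 24 -> 48 -> 96 bits */
    x *= 2 - d * x;
  return x;
}

int main (void)
{
  uint64_t d;
  for (d = 1; d < 10000; d += 2)
    assert (d * binvert64 (d) == 1);
  return 0;
}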
Example #20
void
mpn_toom4_mul_n (mp_ptr rp, mp_srcptr up,
		          mp_srcptr vp, mp_size_t n)
{
  mp_size_t ind;
  mp_limb_t cy, cy2, r30, r31;
  mp_ptr tp;
  mp_size_t sn, n1, n2, n3, n4, n5, n6, n7, n8, rpn, t4, h1;
  TMP_DECL;

  sn = (n + 3) / 4;

  h1 = n - 3*sn;
  
#define a0 (up)
#define a1 (up + sn)
#define a2 (up + 2*sn)
#define a3 (up + 3*sn)
#define b0 (vp)
#define b1 (vp + sn)
#define b2 (vp + 2*sn)
#define b3 (vp + 3*sn)

   t4 = 2*sn+2; // allows mult of 2 integers of sn + 1 limbs

   TMP_MARK;

   tp = TMP_ALLOC_LIMBS(4*t4 + 5*(sn + 1));

#define u2 (tp + 4*t4)
#define u3 (tp + 4*t4 + (sn+1))
#define u4 (tp + 4*t4 + 2*(sn+1))
#define u5 (tp + 4*t4 + 3*(sn+1))
#define u6 (tp + 4*t4 + 4*(sn+1))

   u6[sn] = mpn_add(u6, a1, sn, a3, h1);
   u5[sn] = mpn_add_n(u5, a2, a0, sn);
   mpn_add_n(u3, u5, u6, sn + 1);
   n4 = sn + 1;
   if (mpn_cmp(u5, u6, sn + 1) >= 0)
      mpn_sub_n(u4, u5, u6, sn + 1);
   else
   {  
      mpn_sub_n(u4, u6, u5, sn + 1);
      n4 = -n4;
   }

   u6[sn] = mpn_add(u6, b1, sn, b3, h1);
   u5[sn] = mpn_add_n(u5, b2, b0, sn);
   mpn_add_n(r2, u5, u6, sn + 1);
   n5 = sn + 1;
   if (mpn_cmp(u5, u6, sn + 1) >= 0)
      mpn_sub_n(u5, u5, u6, sn + 1);
   else
   {  
      mpn_sub_n(u5, u6, u5, sn + 1);
      n5 = -n5;
   }
 
   MUL_TC4_UNSIGNED(r3, n3, u3, sn + 1, r2, sn + 1); /* 1 */
   MUL_TC4(r4, n4, u4, n4, u5, n5); /* -1 */
   
#if HAVE_NATIVE_mpn_addlsh_n
   r1[sn] = mpn_addlsh_n(r1, a2, a0, sn, 2);
   mpn_lshift(r1, r1, sn + 1, 1);
   cy = mpn_addlsh_n(r2, a3, a1, h1, 2);
#else
   r1[sn] = mpn_lshift(r1, a2, sn, 1);
   MPN_COPY(r2, a3, h1);
   r1[sn] += mpn_addmul_1(r1, a0, sn, 8);
   cy = mpn_addmul_1(r2, a1, h1, 4);
#endif
   if (sn > h1) 
   {
      cy2 = mpn_lshift(r2 + h1, a1 + h1, sn - h1, 2);
      cy = cy2 + mpn_add_1(r2 + h1, r2 + h1, sn - h1, cy);
   }
   r2[sn] = cy;
   mpn_add_n(u5, r1, r2, sn + 1);
   n6 = sn + 1;
   if (mpn_cmp(r1, r2, sn + 1) >= 0)
      mpn_sub_n(u6, r1, r2, sn + 1);
   else
   {  
      mpn_sub_n(u6, r2, r1, sn + 1);
      n6 = -n6;
   }
 
#if HAVE_NATIVE_mpn_addlsh_n
   r1[sn] = mpn_addlsh_n(r1, b2, b0, sn, 2);
   mpn_lshift(r1, r1, sn + 1, 1);
   cy = mpn_addlsh_n(r2, b3, b1, h1, 2);
#else
   r1[sn] = mpn_lshift(r1, b2, sn, 1);
   MPN_COPY(r2, b3, h1);
   r1[sn] += mpn_addmul_1(r1, b0, sn, 8);
   cy = mpn_addmul_1(r2, b1, h1, 4);
#endif
   if (sn > h1) 
   {
      cy2 = mpn_lshift(r2 + h1, b1 + h1, sn - h1, 2);
      cy = cy2 + mpn_add_1(r2 + h1, r2 + h1, sn - h1, cy);
   }
   r2[sn] = cy;
   mpn_add_n(u2, r1, r2, sn + 1);
   n8 = sn + 1;
   if (mpn_cmp(r1, r2, sn + 1) >= 0)
      mpn_sub_n(r2, r1, r2, sn + 1);
   else
   {  
      mpn_sub_n(r2, r2, r1, sn + 1);
      n8 = -n8;
   }
    
   r30 = r3[0];
   r31 = r3[1];
   MUL_TC4_UNSIGNED(r5, n5, u5, sn + 1, u2, sn + 1); /* 1/2 */
   MUL_TC4(r6, n6, u6, n6, r2, n8); /* -1/2 */
   r3[1] = r31;

#if HAVE_NATIVE_mpn_addlsh1_n
   cy = mpn_addlsh1_n(u2, a2, a3, h1);
   if (sn > h1)
      cy = mpn_add_1(u2 + h1, a2 + h1, sn - h1, cy); 
   u2[sn] = cy;
   u2[sn] = 2*u2[sn] + mpn_addlsh1_n(u2, a1, u2, sn);     
   u2[sn] = 2*u2[sn] + mpn_addlsh1_n(u2, a0, u2, sn);     
#else
   MPN_COPY(u2, a0, sn);
   u2[sn] = mpn_addmul_1(u2, a1, sn, 2);
   u2[sn] += mpn_addmul_1(u2, a2, sn, 4);
   cy = mpn_addmul_1(u2, a3, h1, 8);
   if (sn > h1) cy = mpn_add_1(u2 + h1, u2 + h1, sn - h1, cy);
   u2[sn] += cy;
#endif

#if HAVE_NATIVE_mpn_addlsh1_n
   cy = mpn_addlsh1_n(r1, b2, b3, h1);
   if (sn > h1)
      cy = mpn_add_1(r1 + h1, b2 + h1, sn - h1, cy); 
   r1[sn] = cy;
   r1[sn] = 2*r1[sn] + mpn_addlsh1_n(r1, b1, r1, sn);     
   r1[sn] = 2*r1[sn] + mpn_addlsh1_n(r1, b0, r1, sn);     
#else
   MPN_COPY(r1, b0, sn);
   r1[sn] = mpn_addmul_1(r1, b1, sn, 2);
   r1[sn] += mpn_addmul_1(r1, b2, sn, 4);
   cy = mpn_addmul_1(r1, b3, h1, 8);
   if (sn > h1) cy = mpn_add_1(r1 + h1, r1 + h1, sn - h1, cy);
   r1[sn] += cy;
#endif
   
   MUL_TC4_UNSIGNED(r2, n2, u2, sn + 1, r1, sn + 1); /* 2 */
   
   MUL_TC4_UNSIGNED(r1, n1, a3, h1, b3, h1); /* oo */
   MUL_TC4_UNSIGNED(r7, n7, a0, sn, b0, sn); /* 0 */

   TC4_DENORM(r1, n1, t4 - 1);

/*	rp        rp1          rp2           rp3          rp4           rp5         rp6           rp7
<----------- r7-----------><------------r5-------------->            
                                                       <-------------r3------------->

              <-------------r6------------->                        < -----------r2------------>{           }
                                         <-------------r4-------------->         <--------------r1---->
*/

   mpn_toom4_interpolate(rp, &rpn, sn, tp, t4 - 1, n4, n6, r30);

   if (rpn != 2*n) 
   {
	  MPN_ZERO((rp + rpn), 2*n - rpn);
   }

   TMP_FREE;
}
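
The u5/u6 sums above evaluate each operand at 1 and -1 through its even- and odd-indexed parts, tracking the sign of the -1 evaluation separately (the sign of n4/n5). The same trick on word-sized digits, as a self-checking sketch with illustrative values:

#include <assert.h>
#include <stdint.h>

int main (void)
{
  /* digits of a cubic a(x) = a0 + a1*x + a2*x^2 + a3*x^3 */
  int64_t a0 = 0x56, a1 = 0x34, a2 = 0x12, a3 = 0x78;

  int64_t even = a0 + a2, odd = a1 + a3;   /* the u5 / u6 sums */
  int64_t at_1 = even + odd;               /* a(1) */
  int neg = even < odd;                    /* sign flag, like n4 < 0 */
  int64_t abs_at_m1 = neg ? odd - even : even - odd;  /* |a(-1)| */

  assert (at_1 == a0 + a1 + a2 + a3);
  assert ((neg ? -abs_at_m1 : abs_at_m1) == a0 - a1 + a2 - a3);
  return 0;
}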
Example #21
static mp_size_t
one_test (mpz_t a, mpz_t b, int i)
{
  struct hgcd_matrix hgcd;
  struct hgcd_ref ref;

  mpz_t ref_r0;
  mpz_t ref_r1;
  mpz_t hgcd_r0;
  mpz_t hgcd_r1;

  int res[2];
  mp_size_t asize;
  mp_size_t bsize;

  mp_size_t hgcd_init_scratch;
  mp_size_t hgcd_scratch;

  mp_ptr hgcd_init_tp;
  mp_ptr hgcd_tp;
  mp_limb_t marker[4];

  asize = a->_mp_size;
  bsize = b->_mp_size;

  ASSERT (asize >= bsize);

  hgcd_init_scratch = MPN_HGCD_MATRIX_INIT_ITCH (asize);
  hgcd_init_tp = refmpn_malloc_limbs (hgcd_init_scratch + 2) + 1;
  mpn_hgcd_matrix_init (&hgcd, asize, hgcd_init_tp);

  hgcd_scratch = mpn_hgcd_appr_itch (asize);
  hgcd_tp = refmpn_malloc_limbs (hgcd_scratch + 2) + 1;

  mpn_random (marker, 4);

  hgcd_init_tp[-1] = marker[0];
  hgcd_init_tp[hgcd_init_scratch] = marker[1];
  hgcd_tp[-1] = marker[2];
  hgcd_tp[hgcd_scratch] = marker[3];

#if 0
  fprintf (stderr,
	   "one_test: i = %d asize = %d, bsize = %d\n",
	   i, a->_mp_size, b->_mp_size);

  gmp_fprintf (stderr,
	       "one_test: i = %d\n"
	       "  a = %Zx\n"
	       "  b = %Zx\n",
	       i, a, b);
#endif
  hgcd_ref_init (&ref);

  mpz_init_set (ref_r0, a);
  mpz_init_set (ref_r1, b);
  res[0] = hgcd_ref (&ref, ref_r0, ref_r1);

  mpz_init_set (hgcd_r0, a);
  mpz_init_set (hgcd_r1, b);
  if (bsize < asize)
    {
      _mpz_realloc (hgcd_r1, asize);
      MPN_ZERO (hgcd_r1->_mp_d + bsize, asize - bsize);
    }
  res[1] = mpn_hgcd_appr (hgcd_r0->_mp_d,
			  hgcd_r1->_mp_d,
			  asize,
			  &hgcd, hgcd_tp);

  if (hgcd_init_tp[-1] != marker[0]
      || hgcd_init_tp[hgcd_init_scratch] != marker[1]
      || hgcd_tp[-1] != marker[2]
      || hgcd_tp[hgcd_scratch] != marker[3])
    {
      fprintf (stderr, "ERROR in test %d\n", i);
      fprintf (stderr, "scratch space overwritten!\n");

      if (hgcd_init_tp[-1] != marker[0])
	gmp_fprintf (stderr,
		     "before init_tp: %Mx\n"
		     "expected: %Mx\n",
		     hgcd_init_tp[-1], marker[0]);
      if (hgcd_init_tp[hgcd_init_scratch] != marker[1])
	gmp_fprintf (stderr,
		     "after init_tp: %Mx\n"
		     "expected: %Mx\n",
		     hgcd_init_tp[hgcd_init_scratch], marker[1]);
      if (hgcd_tp[-1] != marker[2])
	gmp_fprintf (stderr,
		     "before tp: %Mx\n"
		     "expected: %Mx\n",
		     hgcd_tp[-1], marker[2]);
      if (hgcd_tp[hgcd_scratch] != marker[3])
	gmp_fprintf (stderr,
		     "after tp: %Mx\n"
		     "expected: %Mx\n",
		     hgcd_tp[hgcd_scratch], marker[3]);

      abort ();
    }

  if (!hgcd_appr_valid_p (a, b, res[0], &ref, ref_r0, ref_r1,
			  res[1], &hgcd))
    {
      fprintf (stderr, "ERROR in test %d\n", i);
      fprintf (stderr, "Invalid results for hgcd and hgcd_ref\n");
      fprintf (stderr, "op1=");                 debug_mp (a, -16);
      fprintf (stderr, "op2=");                 debug_mp (b, -16);
      fprintf (stderr, "hgcd_ref: %ld\n", (long) res[0]);
      fprintf (stderr, "mpn_hgcd_appr: %ld\n", (long) res[1]);
      abort ();
    }

  refmpn_free_limbs (hgcd_init_tp - 1);
  refmpn_free_limbs (hgcd_tp - 1);
  hgcd_ref_clear (&ref);
  mpz_clear (ref_r0);
  mpz_clear (ref_r1);
  mpz_clear (hgcd_r0);
  mpz_clear (hgcd_r1);

  return res[0];
}
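
The markers written before and after each scratch block are guard limbs: any out-of-bounds write by the function under test changes them and is caught afterwards. The same pattern in miniature (guarded_alloc and guards_intact are illustrative names):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

/* Allocate n limbs with one guard limb on each side; the caller only
   sees the middle n limbs, like refmpn_malloc_limbs (n + 2) + 1 above. */
static uint64_t *guarded_alloc (size_t n, uint64_t canary)
{
  uint64_t *p = malloc ((n + 2) * sizeof *p);
  assert (p != NULL);
  p[0] = canary;
  p[n + 1] = canary;
  return p + 1;
}

static int guards_intact (const uint64_t *p, size_t n, uint64_t canary)
{
  return p[-1] == canary && p[n] == canary;
}

int main (void)
{
  const uint64_t canary = 0xDEADBEEFCAFEF00Dull;
  uint64_t *tp = guarded_alloc (8, canary);
  size_t i;
  for (i = 0; i < 8; i++)     /* in-bounds writes leave the guards alone */
    tp[i] = i;
  assert (guards_intact (tp, 8, canary));
  free (tp - 1);
  return 0;
}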
Example #22
void
mpz_setbit (mpz_ptr d, mp_bitcnt_t bit_index)
{
  mp_size_t dsize = d->_mp_size;
  mp_ptr dp = d->_mp_d;
  mp_size_t limb_index;

  limb_index = bit_index / GMP_NUMB_BITS;
  if (dsize >= 0)
    {
      if (limb_index < dsize)
	{
	  dp[limb_index] |= (mp_limb_t) 1 << (bit_index % GMP_NUMB_BITS);
	  d->_mp_size = dsize;
	}
      else
	{
	  /* Ugh.  The bit to be set lies beyond the end of the
	     number; we have to increase the size of the number.  */
	  if (UNLIKELY (d->_mp_alloc < limb_index + 1))
            dp = _mpz_realloc (d, limb_index + 1);
	  MPN_ZERO (dp + dsize, limb_index - dsize);
	  dp[limb_index] = (mp_limb_t) 1 << (bit_index % GMP_NUMB_BITS);
	  d->_mp_size = limb_index + 1;
	}
    }
  else
    {
      mp_size_t zero_bound;

      /* Simulate two's complement arithmetic, i.e. simulate
	 1. Set OP = ~(OP - 1) [with infinitely many leading ones].
	 2. Set the bit.
	 3. Set OP = ~OP + 1.  */

      dsize = -dsize;

      /* No upper bound on this loop, we're sure there's a non-zero limb
	 sooner or later.  */
      for (zero_bound = 0; ; zero_bound++)
	if (dp[zero_bound] != 0)
	  break;

      if (limb_index > zero_bound)
	{
	  if (limb_index < dsize)
            {
              mp_limb_t  dlimb;
              dlimb = dp[limb_index];
              dlimb &= ~((mp_limb_t) 1 << (bit_index % GMP_NUMB_BITS));
              dp[limb_index] = dlimb;

              if (UNLIKELY (dlimb == 0 && limb_index == dsize-1))
                {
                  /* high limb became zero, must normalize */
                  do {
                    dsize--;
                  } while (dsize > 0 && dp[dsize-1] == 0);
                  d->_mp_size = -dsize;
                }
            }
	}
      else if (limb_index == zero_bound)
	{
	  dp[limb_index] = ((dp[limb_index] - 1)
			    & ~((mp_limb_t) 1 << (bit_index % GMP_NUMB_BITS))) + 1;
	  if (dp[limb_index] == 0)
	    {
	      mp_size_t i;
	      for (i = limb_index + 1; i < dsize; i++)
		{
		  dp[i] += 1;
		  if (dp[i] != 0)
		    goto fin;
		}
	      /* We got carry all way out beyond the end of D.  Increase
		 its size (and allocation if necessary).  */
	      dsize++;
	      if (UNLIKELY (d->_mp_alloc < dsize))
                dp = _mpz_realloc (d, dsize);
	      dp[i] = 1;
	      d->_mp_size = -dsize;
	    fin:;
	    }
	}
      else
	{
	  mpn_decr_u (dp + limb_index,
		     (mp_limb_t) 1 << (bit_index % GMP_NUMB_BITS));
	  dsize -= dp[dsize - 1] == 0;
	  d->_mp_size = -dsize;
	}
    }
}
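
The three-step recipe in the comment can be checked on a single word, where unsigned wraparound gives exact two's complement. A sketch with an illustrative setbit_negative helper, verified against ordinary signed arithmetic:

#include <assert.h>
#include <stdint.h>

/* Set bit b of the negative value -m (m > 0), keeping the result as a
   magnitude, via the three-step recipe in the comment above. */
static uint64_t setbit_negative (uint64_t m, unsigned b)
{
  uint64_t tc = ~(m - 1);          /* 1. two's complement bits of -m */
  tc |= (uint64_t) 1 << b;         /* 2. set the bit */
  return ~tc + 1;                  /* 3. back to a magnitude */
}

int main (void)
{
  uint64_t m;
  unsigned b;
  for (m = 1; m < 500; m++)
    for (b = 0; b < 16; b++)
      {
        int64_t v = -(int64_t) m;
        int was_set = ((uint64_t) v >> b) & 1;
        int64_t want = was_set ? v : v + ((int64_t) 1 << b);
        assert (-(int64_t) setbit_negative (m, b) == want);
      }
  return 0;
}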
Example #23
static mp_size_t
one_test (mpz_t a, mpz_t b, int i)
{
  struct hgcd_matrix hgcd;
  struct hgcd_ref ref;

  mpz_t ref_r0;
  mpz_t ref_r1;
  mpz_t hgcd_r0;
  mpz_t hgcd_r1;

  mp_size_t res[2];
  mp_size_t asize;
  mp_size_t bsize;

  mp_size_t hgcd_init_scratch;
  mp_size_t hgcd_scratch;

  mp_ptr hgcd_init_tp;
  mp_ptr hgcd_tp;

  asize = a->_mp_size;
  bsize = b->_mp_size;

  ASSERT (asize >= bsize);

  hgcd_init_scratch = MPN_HGCD_MATRIX_INIT_ITCH (asize);
  hgcd_init_tp = refmpn_malloc_limbs (hgcd_init_scratch);
  mpn_hgcd_matrix_init (&hgcd, asize, hgcd_init_tp);

  hgcd_scratch = mpn_hgcd_itch (asize);
  hgcd_tp = refmpn_malloc_limbs (hgcd_scratch);

#if 0
  fprintf (stderr,
	   "one_test: i = %d asize = %d, bsize = %d\n",
	   i, a->_mp_size, b->_mp_size);

  gmp_fprintf (stderr,
	       "one_test: i = %d\n"
	       "  a = %Zx\n"
	       "  b = %Zx\n",
	       i, a, b);
#endif
  hgcd_ref_init (&ref);

  mpz_init_set (ref_r0, a);
  mpz_init_set (ref_r1, b);
  res[0] = hgcd_ref (&ref, ref_r0, ref_r1);

  mpz_init_set (hgcd_r0, a);
  mpz_init_set (hgcd_r1, b);
  if (bsize < asize)
    {
      _mpz_realloc (hgcd_r1, asize);
      MPN_ZERO (hgcd_r1->_mp_d + bsize, asize - bsize);
    }
  res[1] = mpn_hgcd (hgcd_r0->_mp_d,
		     hgcd_r1->_mp_d,
		     asize,
		     &hgcd, hgcd_tp);

  if (res[0] != res[1])
    {
      fprintf (stderr, "ERROR in test %d\n", i);
      fprintf (stderr, "Different return value from hgcd and hgcd_ref\n");
      fprintf (stderr, "op1=");                 debug_mp (a, -16);
      fprintf (stderr, "op2=");                 debug_mp (b, -16);
      fprintf (stderr, "hgcd_ref: %ld\n", (long) res[0]);
      fprintf (stderr, "mpn_hgcd: %ld\n", (long) res[1]);
      abort ();
    }
  if (res[0] > 0)
    {
      if (!hgcd_ref_equal (&hgcd, &ref)
	  || !mpz_mpn_equal (ref_r0, hgcd_r0->_mp_d, res[1])
	  || !mpz_mpn_equal (ref_r1, hgcd_r1->_mp_d, res[1]))
	{
	  fprintf (stderr, "ERROR in test %d\n", i);
	  fprintf (stderr, "mpn_hgcd and hgcd_ref returned different values\n");
	  fprintf (stderr, "op1=");                 debug_mp (a, -16);
	  fprintf (stderr, "op2=");                 debug_mp (b, -16);
	  abort ();
	}
    }

  refmpn_free_limbs (hgcd_init_tp);
  refmpn_free_limbs (hgcd_tp);
  hgcd_ref_clear (&ref);
  mpz_clear (ref_r0);
  mpz_clear (ref_r1);
  mpz_clear (hgcd_r0);
  mpz_clear (hgcd_r1);

  return res[0];
}
Example #24
void
mpn_toom22_mul (mp_ptr pp,
		mp_srcptr ap, mp_size_t an,
		mp_srcptr bp, mp_size_t bn,
		mp_ptr scratch)
{
  mp_size_t n, s, t;
  int vm1_neg;
  mp_limb_t cy, cy2;
  mp_ptr asm1;
  mp_ptr bsm1;

#define a0  ap
#define a1  (ap + n)
#define b0  bp
#define b1  (bp + n)

  s = an >> 1;
  n = an - s;
  t = bn - n;

  ASSERT (an >= bn);

  ASSERT (0 < s && s <= n);
  ASSERT (0 < t && t <= s);

  asm1 = pp;
  bsm1 = pp + n;

  vm1_neg = 0;

  /* Compute asm1.  */
  if (s == n)
    {
      if (mpn_cmp (a0, a1, n) < 0)
	{
	  mpn_sub_n (asm1, a1, a0, n);
	  vm1_neg = 1;
	}
      else
	{
	  mpn_sub_n (asm1, a0, a1, n);
	}
    }
  else
    {
      if (mpn_zero_p (a0 + s, n - s) && mpn_cmp (a0, a1, s) < 0)
	{
	  mpn_sub_n (asm1, a1, a0, s);
	  MPN_ZERO (asm1 + s, n - s);
	  vm1_neg = 1;
	}
      else
	{
	  mpn_sub (asm1, a0, n, a1, s);
	}
    }

  /* Compute bsm1.  */
  if (t == n)
    {
      if (mpn_cmp (b0, b1, n) < 0)
	{
	  mpn_sub_n (bsm1, b1, b0, n);
	  vm1_neg ^= 1;
	}
      else
	{
	  mpn_sub_n (bsm1, b0, b1, n);
	}
    }
  else
    {
      if (mpn_zero_p (b0 + t, n - t) && mpn_cmp (b0, b1, t) < 0)
	{
	  mpn_sub_n (bsm1, b1, b0, t);
	  MPN_ZERO (bsm1 + t, n - t);
	  vm1_neg ^= 1;
	}
      else
	{
	  mpn_sub (bsm1, b0, n, b1, t);
	}
    }

#define v0	pp				/* 2n */
#define vinf	(pp + 2 * n)			/* s+t */
#define vm1	scratch				/* 2n */
#define scratch_out	scratch + 2 * n

  /* vm1, 2n limbs */
  TOOM22_MUL_N_REC (vm1, asm1, bsm1, n, scratch_out);

  if (s > t)  TOOM22_MUL_REC (vinf, a1, s, b1, t, scratch_out);
  else        TOOM22_MUL_N_REC (vinf, a1, b1, s, scratch_out);

  /* v0, 2n limbs */
  TOOM22_MUL_N_REC (v0, ap, bp, n, scratch_out);

  /* H(v0) + L(vinf) */
  cy = mpn_add_n (pp + 2 * n, v0 + n, vinf, n);

  /* L(v0) + H(v0) */
  cy2 = cy + mpn_add_n (pp + n, pp + 2 * n, v0, n);

  /* L(vinf) + H(vinf) */
  cy += mpn_add (pp + 2 * n, pp + 2 * n, n, vinf + n, s + t - n);

  if (vm1_neg)
    cy += mpn_add_n (pp + n, pp + n, vm1, 2 * n);
  else
    cy -= mpn_sub_n (pp + n, pp + n, vm1, 2 * n);

  ASSERT (cy + 1  <= 3);
  ASSERT (cy2 <= 2);

  mpn_incr_u (pp + 2 * n, cy2);
  if (LIKELY (cy <= 2))
    mpn_incr_u (pp + 3 * n, cy);
  else
    mpn_decr_u (pp + 3 * n, 1);
}
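
mpn_toom22_mul is Karatsuba: three half-size products v0, vm1, vinf, with the sign of vm1 tracked separately so only absolute differences are ever formed. The same identity on a single word split into 32-bit halves, a sketch assuming a compiler with __int128:

#include <assert.h>
#include <stdint.h>

int main (void)
{
  uint64_t a = 0x123456789ABCDEF0ull, b = 0xFEDCBA9876543210ull;
  uint64_t a0 = (uint32_t) a, a1 = a >> 32;
  uint64_t b0 = (uint32_t) b, b1 = b >> 32;

  uint64_t v0 = a0 * b0;                  /* low product */
  uint64_t vinf = a1 * b1;                /* high product */

  int vm1_neg = 0;                        /* sign of (a1-a0)*(b1-b0) */
  uint64_t da, db;
  if (a1 >= a0) da = a1 - a0; else { da = a0 - a1; vm1_neg ^= 1; }
  if (b1 >= b0) db = b1 - b0; else { db = b0 - b1; vm1_neg ^= 1; }
  uint64_t vm1 = da * db;                 /* |a1 - a0| * |b1 - b0| */

  /* middle coefficient: a1*b0 + a0*b1 = v0 + vinf -/+ vm1 */
  __int128 mid = (__int128) v0 + vinf;
  mid = vm1_neg ? mid + vm1 : mid - vm1;

  unsigned __int128 prod = ((unsigned __int128) vinf << 64)
    + ((unsigned __int128) mid << 32) + v0;
  assert (prod == (unsigned __int128) a * b);
  return 0;
}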
Example #25
void
mpn_toom4_sqr_n (mp_ptr rp, mp_srcptr up, mp_size_t n)
{
  mp_size_t len1, ind;
  mp_limb_t cy, r30, r31;
  mp_ptr tp;
  mp_size_t a0n, a1n, a2n, a3n, sn, n1, n2, n3, n4, n5, n6, n7, n8, n9, rpn, t4;

  len1 = n;
  ASSERT (n >= 1);

  MPN_NORMALIZE(up, len1);
  
  sn = (n - 1) / 4 + 1;

  /* a0 - a3 are defined in mpn_toom4_mul_n above */
  
   TC4_NORM(a0, a0n, sn);
   TC4_NORM(a1, a1n, sn);
   TC4_NORM(a2, a2n, sn);
   TC4_NORM(a3, a3n, n - 3*sn);

   t4 = 2*sn+2; // allows mult of 2 integers of sn + 1 limbs

   tp = __GMP_ALLOCATE_FUNC_LIMBS(4*t4 + 4*(sn + 1));

   tc4_add_unsigned(u5, &n5, a3, a3n, a1, a1n);
   tc4_add_unsigned(u4, &n4, a2, a2n, a0, a0n);
   tc4_add_unsigned(u2, &n2, u4, n4, u5, n5);
   tc4_sub(u3, &n3, u4, n4, u5, n5);

   SQR_TC4(r4, n4, u3, n3);
   SQR_TC4_UNSIGNED(r3, n3, u2, n2);

   tc4_lshift(r1, &n1, a0, a0n, 3);
   tc4_addlsh1_unsigned(r1, &n1, a2, a2n);
   tc4_lshift(r2, &n8, a1, a1n, 2);
   tc4_add(r2, &n8, r2, n8, a3, a3n);
   tc4_add(u4, &n9, r1, n1, r2, n8);
   tc4_sub(u5, &n5, r1, n1, r2, n8);

   r30 = r3[0];
   if (!n3) r30 = CNST_LIMB(0);
   r31 = r3[1];
   SQR_TC4(r6, n6, u5, n5);
   SQR_TC4_UNSIGNED(r5, n5, u4, n9);
   r3[1] = r31;

   tc4_lshift(u2, &n8, a3, a3n, 3);
   tc4_addmul_1(u2, &n8, a2, a2n, 4);
   tc4_addlsh1_unsigned(u2, &n8, a1, a1n);
   tc4_add(u2, &n8, u2, n8, a0, a0n);

   SQR_TC4_UNSIGNED(r2, n2, u2, n8);
   SQR_TC4_UNSIGNED(r1, n1, a3, a3n);
   SQR_TC4_UNSIGNED(r7, n7, a0, a0n);

   TC4_DENORM(r1, n1,  t4 - 1);
   TC4_DENORM(r2, n2,  t4 - 1);
   if (n3)
     TC4_DENORM(r3, n3,  t4 - 1);
   else {
     /* MPN_ZERO defeats gcc 4.1.2 here, hence the explicit for loop */
     for (ind = 1 ; ind < t4 - 1; ind++)
        (r3)[ind] = CNST_LIMB(0);
   }
   TC4_DENORM(r4, n4,  t4 - 1);
   TC4_DENORM(r5, n5,  t4 - 1);
   TC4_DENORM(r6, n6,  t4 - 1);
   TC4_DENORM(r7, n7,  t4 - 2); // we treat r7 differently (it cannot exceed t4-2 in length)

/*	rp        rp1          rp2           rp3          rp4           rp5         rp6           rp7
<----------- r7-----------><------------r5-------------->            
                                                       <-------------r3------------->

              <-------------r6------------->                        < -----------r2------------>{           }
                                         <-------------r4-------------->         <--------------r1---->
*/

   mpn_toom4_interpolate(rp, &rpn, sn, tp, t4 - 1, n4, n6, r30);

   if (rpn != 2*n)
   {
      MPN_ZERO((rp + rpn), 2*n - rpn);
   }

   __GMP_FREE_FUNC_LIMBS (tp, 4*t4 + 4*(sn+1));
}
Example #26
/* 
   Computes the quotient and remainder of { np, 2*dn } by { dp, dn }.
   We require dp to be normalised and inv to be a precomputed inverse 
   of { dp, dn } given by mpn_invert.
*/
mp_limb_t 
mpn_inv_div_qr_n(mp_ptr qp, mp_ptr np, 
                           mp_srcptr dp, mp_size_t dn, mp_srcptr inv)
{
   mp_limb_t cy, lo, ret = 0, ret2 = 0;
   mp_size_t m, i;
   mp_ptr tp;
   TMP_DECL;

   TMP_MARK;

   ASSERT(mpn_is_invert(inv, dp, dn));

   if (mpn_cmp(np + dn, dp, dn) >= 0)
   {
      ret2 = 1;
      mpn_sub_n(np + dn, np + dn, dp, dn);
   }

   tp = TMP_ALLOC_LIMBS(2*dn + 1);
   mpn_mul(tp, np + dn - 1, dn + 1, inv, dn);
   add_ssaaaa(cy, lo, 0, np[dn - 1], 0, tp[dn]);
   ret += mpn_add_n(qp, tp + dn + 1, np + dn, dn);
   ret += mpn_add_1(qp, qp, dn, cy);

   /* 
      Let X = B^dn + inv, D = { dp, dn }, N = { np, 2*dn }, then
      DX < B^{2*dn} <= D(X+1), thus
      Let N' = { np + n - 1, n + 1 }
	  N'X/B^{dn+1} < B^{dn-1}N'/D <= N'X/B^{dn+1} + N'/B^{dn+1} < N'X/B^{dn+1} + 1
      N'X/B^{dn+1} < N/D <= N'X/B^{dn+1} + 1 + 2/B
      There is either one integer in this range, or two. However, in the latter case
	  the left hand bound is either an integer or < 2/B below one.
   */
     
   if (UNLIKELY(ret == 1))
   {
      ret -= mpn_sub_1(qp, qp, dn, 1);
      ASSERT(ret == 0);
   }

   ret -= mpn_sub_1(qp, qp, dn, 1); 
   if (UNLIKELY(ret == ~CNST_LIMB(0))) 
      ret += mpn_add_1(qp, qp, dn, 1);
   /* ret is now guaranteed to be 0 */
   ASSERT(ret == 0);

   m = dn + 1;
   if ((dn <= MPN_FFT_MUL_N_MINSIZE) || (ret))
   {
      mpn_mul_n(tp, qp, dp, dn);
   } else
   {
      mp_limb_t cy, cy2;
      
      if (m >= FFT_MULMOD_2EXPP1_CUTOFF)
         m = mpir_fft_adjust_limbs (m);
      cy = mpn_mulmod_Bexpp1_fft (tp, m, qp, dn, dp, dn);
      
      /* cy, {tp, m} = qp * dp mod (B^m+1) */ 
      cy2 = mpn_add_n(tp, tp, np + m, 2*dn - m);
      mpn_add_1(tp + 2*dn - m, tp + 2*dn - m, 2*m - 2*dn, cy2);
          
      /* Make correction */    
      mpn_sub_1(tp, tp, m, tp[0] - dp[0]*qp[0]);
   }
   
   mpn_sub_n(np, np, tp, m);
   MPN_ZERO(np + m, 2*dn - m);
   while (np[dn] || mpn_cmp(np, dp, dn) >= 0)
   {
	   ret += mpn_add_1(qp, qp, dn, 1);
	   np[dn] -= mpn_sub_n(np, np, dp, dn);
   }
  
   /* Not possible for ret == 2 as we have qp*dp <= np */
   ASSERT(ret + ret2 < 2);

   TMP_FREE;

   return ret + ret2;
}
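
The function above replaces a division by a multiplication with a precomputed inverse of the divisor, then corrects the quotient by at most a unit or two. The same principle at word scale, with illustrative constants (d = 7, shift k = 40) and a brute-force check of the whole range the sketch claims:

#include <assert.h>
#include <stdint.h>

int main (void)
{
  const uint32_t d = 7;                 /* illustrative divisor */
  const int k = 40;                     /* illustrative shift */
  /* ceil(2^k / d): multiply-and-shift then yields exact quotients
     on a bounded range of numerators */
  const uint64_t inv = (((uint64_t) 1 << k) / d) + 1;

  uint64_t n;
  for (n = 0; n < ((uint64_t) 1 << 20); n++)
    assert ((n * inv) >> k == n / d);
  return 0;
}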
Example #27
void
mpn_toom3_mul_n (mp_ptr c, mp_srcptr a, mp_srcptr b, mp_size_t n, mp_ptr t)
{
  mp_size_t k, k1, kk1, r, twok, twor;
  mp_limb_t cy, cc, saved, vinf0, cinf0;
  mp_ptr trec;
  int sa, sb;
  mp_ptr c1, c2, c3, c4, c5;

  ASSERT(GMP_NUMB_BITS >= 6);
  ASSERT(n >= 17); /* so that r <> 0 and 5k+3 <= 2n */

  /*
  The algorithm is the following:

  0. k = ceil(n/3), r = n - 2k, B = 2^(GMP_NUMB_BITS), t = B^k
  1. split a and b in three parts each a0, a1, a2 and b0, b1, b2
     with a0, a1, b0, b1 of k limbs, and a2, b2 of r limbs
  2. v0   <- a0*b0
     v1   <- (a0+a1+a2)*(b0+b1+b2)
     v2   <- (a0+2*a1+4*a2)*(b0+2*b1+4*b2)
     vm1  <- (a0-a1+a2)*(b0-b1+b2)
     vinf <- a2*b2
     t1   <- (3*v0+2*vm1+v2)/6-2*vinf
     t2   <- (v1+vm1)/2
  3. result is c0+c1*t+c2*t^2+c3*t^3+c4*t^4 where
     c0   <- v0
     c1   <- v1 - t1
     c2   <- t2 - v0 - vinf
     c3   <- t1 - t2
     c4   <- vinf
  */

  k = (n + 2) / 3; /* ceil(n/3) */
  twok = 2 * k;
  k1 = k + 1;
  kk1 = k + k1;
  r = n - twok;   /* last chunk */
  twor = 2 * r;

  c1 = c + k;
  c2 = c1 + k;
  c3 = c2 + k;
  c4 = c3 + k;
  c5 = c4 + k;

  trec = t + 4 * k + 3; /* trec = v2 + (2k+2) */

  /* put a0+a2 in {c, k+1}, and b0+b2 in {c+k+1, k+1};
     put a0+a1+a2 in {c+2k+2, k+1} and b0+b1+b2 in {c+3k+3,k+1}
     [requires 4k+4 <= 2n, ie. n >= 8] */
  cy = mpn_add_n (c, a, a + twok, r);
  cc = mpn_add_n (c1 + 1, b, b + twok, r);
  if (r < k)
    {
      __GMPN_ADD_1 (cy, c + r, a + r, k - r, cy);
      __GMPN_ADD_1 (cc, c1 + 1 + r, b + r, k - r, cc);
    }
  c3[2] = (c1[0] = cy) + mpn_add_n (c2 + 2, c, a + k, k);
  c4[3] = (c2[1] = cc) + mpn_add_n (c3 + 3, c1 + 1, b + k, k);

#define v2 (t+2*k+1)
#define vinf (t+4*k+2)

  /* compute v1 := (a0+a1+a2)*(b0+b1+b2) in {t, 2k+1};
     since v1 < 9*B^(2k), v1 uses only 2k+1 words if GMP_NUMB_BITS >= 4 */
  TOOM3_MUL_REC (t, c2 + 2, c3 + 3, k1, trec);

  /* {c,2k} {c+2k,2k+1} {c+4k+1,2r-1} {t,2k+1} {t+2k+1,2k+1} {t+4k+2,2r}
					v1
  */

  /* put |a0-a1+a2| in {c, k+1} and |b0-b1+b2| in {c+4k+2,k+1} */
  /* sa = sign(a0-a1+a2) */
  sa = (c[k] != 0) ? 1 : mpn_cmp (c, a + k, k);
  c[k] = (sa >= 0) ? c[k] - mpn_sub_n (c, c, a + k, k)
		   : mpn_sub_n (c, a + k, c, k);
  /* b0+b2 is in {c+k+1, k+1} now */
  sb = (c2[1] != 0) ? 1 : mpn_cmp (c1 + 1, b + k, k);
  c5[2] = (sb >= 0) ? c2[1] - mpn_sub_n (c4 + 2, c1 + 1, b + k, k)
		    : mpn_sub_n (c4 + 2, b + k, c1 + 1, k);
  sa *= sb; /* sign of vm1 */

  /* compute vm1 := (a0-a1+a2)*(b0-b1+b2) in {c+2k, 2k+1};
     since |vm1| < 4*B^(2k), vm1 uses only 2k+1 limbs */
  TOOM3_MUL_REC (c2, c, c4 + 2, k1, trec);

  /* {c,2k} {c+2k,2k+1} {c+4k+1,2r-1} {t,2k+1} {t+2k+1,2k+1} {t+4k+2,2r}
		vm1                      v1
  */

  /* compute a0+2a1+4a2 in {c, k+1} and b0+2b1+4b2 in {c+4k+2, k+1}
     [requires 5k+3 <= 2n, i.e. n >= 17] */
#ifdef HAVE_NATIVE_mpn_addlsh1_n
  c1[0] = mpn_addlsh1_n (c, a + k, a + twok, r);
  c5[2] = mpn_addlsh1_n (c4 + 2, b + k, b + twok, r);
  if (r < k)
    {
      __GMPN_ADD_1 (c1[0], c + r, a + k + r, k - r, c1[0]);
      __GMPN_ADD_1 (c5[2], c4 + 2 + r, b + k + r, k - r, c5[2]);
    }
  c1[0] = 2 * c1[0] + mpn_addlsh1_n (c, a, c, k);
  c5[2] = 2 * c5[2] + mpn_addlsh1_n (c4 + 2, b, c4 + 2, k);
#else
  c[r] = mpn_lshift (c, a + twok, r, 1);
  c4[r + 2] = mpn_lshift (c4 + 2, b + twok, r, 1);
  if (r < k)
    {
      MPN_ZERO(c + r + 1, k - r);
      MPN_ZERO(c4 + r + 3, k - r);
    }
  c1[0] += mpn_add_n (c, c, a + k, k);
  c5[2] += mpn_add_n (c4 + 2, c4 + 2, b + k, k);
  mpn_lshift (c, c, k1, 1);
  mpn_lshift (c4 + 2, c4 + 2, k1, 1);
  c1[0] += mpn_add_n (c, c, a, k);
  c5[2] += mpn_add_n (c4 + 2, c4 + 2, b, k);
#endif

  /* compute v2 := (a0+2a1+4a2)*(b0+2b1+4b2) in {t+2k+1, 2k+1}
     v2 < 49*B^k so v2 uses at most 2k+1 limbs if GMP_NUMB_BITS >= 6 */
  TOOM3_MUL_REC (v2, c, c4 + 2, k1, trec);

  /* {c,2k} {c+2k,2k+1} {c+4k+1,2r-1} {t,2k+1} {t+2k+1,2k+1} {t+4k+2,2r}
		vm1                      v1         v2
  */

  /* compute v0 := a0*b0 in {c, 2k} */
  TOOM3_MUL_REC (c, a, b, k, trec);

  /* {c,2k} {c+2k,2k+1} {c+4k+1,2r-1} {t,2k+1} {t+2k+1,2k+1} {t+4k+2,2r}
       v0       vm1                      v1         v2
  */

  /* now compute (3v0+2vm1+v2)/6 = [v0 + (2vm1+v2)/3]/2
     v2 <- v2+2vm1 = 3*(a0*b0+2*a0*b2+2*a1*b1+2*a1*b2+2*a2*b0+2*a2*b1+6*a2*b2),
     thus 0 <= v2 < 51*B^(2k) < 2^6*B^(2k)
     Uses temporary space {t+4k+2,2k+1}, requires T(n) >= 6k+3.
  */
  if (sa >= 0)
    {
#ifdef HAVE_NATIVE_mpn_addlsh1_n
      mpn_addlsh1_n (v2, v2, c2, kk1);
#else
      /* we can use vinf=t+4k+2 as workspace since it is not full yet */
      mpn_lshift (vinf, c2, kk1, 1);
      mpn_add_n (v2, v2, vinf, kk1);
#endif
    }
  else
    {
#ifdef HAVE_NATIVE_mpn_sublsh1_n
      mpn_sublsh1_n (v2, v2, c2, kk1);
#else
      /* we can use vinf=t+4k+2 as workspace since it is not full yet */
      mpn_lshift (vinf, c2, kk1, 1);
      mpn_sub_n (v2, v2, vinf, kk1);
#endif
    }

  /* {c,2k} {c+2k,2k+1} {c+4k+1,2r-1} {t,2k+1} {t+2k+1,2k+1} {t+4k+2,2r}
       v0       vm1                      v1       v2+2vm1             */

  /* compute vinf := a2*b2 in {t+4k+2, 2r}: first put it in {c4, 2r},
     then copy it in {t+4k+2,2r} */
  saved = c4[0];
  TOOM3_MUL_REC (c4, a + twok, b + twok, r, trec);
  cinf0 = mpn_add_n (vinf, c4, c, twor); /* {v0,2r} + {vinf,2r} */
  vinf0 = c4[0];
  c4[0] = saved;

  toom3_interpolate (c, t, v2, c2, vinf, k, r, sa, vinf0, cinf0, vinf + twor);

#undef v2
#undef vinf
}
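
The interpolation formulas from the comment at the top of this function can be verified directly on word-sized digits. A self-checking sketch that splits two small operands into base-2^8 digits, evaluates at the five points, and rebuilds the product (both divisions are exact by construction):

#include <assert.h>
#include <stdint.h>

int main (void)
{
  const int64_t B = 1 << 8;             /* digit base */
  int64_t a = 0x123456, b = 0xABCDEF;
  int64_t a0 = a & 0xFF, a1 = (a >> 8) & 0xFF, a2 = a >> 16;
  int64_t b0 = b & 0xFF, b1 = (b >> 8) & 0xFF, b2 = b >> 16;

  /* the five evaluations from step 2 of the comment */
  int64_t v0   = a0 * b0;
  int64_t v1   = (a0 + a1 + a2) * (b0 + b1 + b2);
  int64_t v2   = (a0 + 2*a1 + 4*a2) * (b0 + 2*b1 + 4*b2);
  int64_t vm1  = (a0 - a1 + a2) * (b0 - b1 + b2);
  int64_t vinf = a2 * b2;

  /* the interpolation formulas from steps 2 and 3 */
  int64_t t1 = (3*v0 + 2*vm1 + v2) / 6 - 2*vinf;
  int64_t t2 = (v1 + vm1) / 2;
  int64_t c0 = v0, c1 = v1 - t1, c2 = t2 - v0 - vinf;
  int64_t c3 = t1 - t2, c4 = vinf;

  assert (c0 + c1*B + c2*B*B + c3*B*B*B + c4*B*B*B*B == a * b);
  return 0;
}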
Example #28
File: gcdext.c  Project: qsnake/mpir
mp_size_t
mpn_gcdext (mp_ptr gp, mp_ptr up, mp_size_t *usizep,
	    mp_ptr ap, mp_size_t an, mp_ptr bp, mp_size_t n)
{
  mp_size_t talloc;
  mp_size_t scratch;
  mp_size_t matrix_scratch;
  mp_size_t ualloc = n + 1;

  mp_size_t un;
  mp_ptr u0;
  mp_ptr u1;

  mp_ptr tp;

  TMP_DECL;

  ASSERT (an >= n);
  ASSERT (n > 0);

  TMP_MARK;

  /* FIXME: Check for small sizes first, before setting up temporary
     storage etc. */
  talloc = MPN_GCDEXT_LEHMER_N_ITCH(n);

  /* For initial division */
  scratch = an - n + 1;
  if (scratch > talloc)
    talloc = scratch;

  if (ABOVE_THRESHOLD (n, GCDEXT_DC_THRESHOLD))
    {
      /* For hgcd loop. */
      mp_size_t hgcd_scratch;
      mp_size_t update_scratch;
      mp_size_t p1 = CHOOSE_P_1 (n);
      mp_size_t p2 = CHOOSE_P_2 (n);
      mp_size_t min_p = MIN(p1, p2);
      mp_size_t max_p = MAX(p1, p2);
      matrix_scratch = MPN_HGCD_MATRIX_INIT_ITCH (n - min_p);
      hgcd_scratch = mpn_hgcd_itch (n - min_p);
      update_scratch = max_p + n - 1;

      scratch = matrix_scratch + MAX(hgcd_scratch, update_scratch);
      if (scratch > talloc)
	talloc = scratch;

      /* Final mpn_gcdext_lehmer_n call. Need space for u and for
	 copies of a and b. */
      scratch = MPN_GCDEXT_LEHMER_N_ITCH (GCDEXT_DC_THRESHOLD)
	+ 3*GCDEXT_DC_THRESHOLD;

      if (scratch > talloc)
	talloc = scratch;

      /* Cofactors u0 and u1 */
      talloc += 2*(n+1);
    }

  tp = TMP_ALLOC_LIMBS(talloc);

  if (an > n)
    {
      mpn_tdiv_qr (tp, ap, 0, ap, an, bp, n);

      if (mpn_zero_p (ap, n))
	{
	  MPN_COPY (gp, bp, n);
	  *usizep = 0;
	  TMP_FREE;
	  return n;
	}
    }

  if (BELOW_THRESHOLD (n, GCDEXT_DC_THRESHOLD))
    {
      mp_size_t gn = mpn_gcdext_lehmer_n(gp, up, usizep, ap, bp, n, tp);

      TMP_FREE;
      return gn;
    }

  MPN_ZERO (tp, 2*ualloc);
  u0 = tp; tp += ualloc;
  u1 = tp; tp += ualloc;

  {
    /* For the first hgcd call, there are no u updates, and it makes
       some sense to use a different choice for p. */

    /* FIXME: We could trim use of temporary storage, since u0 and u1
       are not used yet. For the hgcd call, we could swap in the u0
       and u1 pointers for the relevant matrix elements. */

    struct hgcd_matrix M;
    mp_size_t p = CHOOSE_P_1 (n);
    mp_size_t nn;

    mpn_hgcd_matrix_init (&M, n - p, tp);
    nn = mpn_hgcd (ap + p, bp + p, n - p, &M, tp + matrix_scratch);
    if (nn > 0)
      {
	ASSERT (M.n <= (n - p - 1)/2);
	ASSERT (M.n + p <= (p + n - 1) / 2);

	/* Temporary storage 2 (p + M->n) <= p + n - 1 */
	n = mpn_hgcd_matrix_adjust (&M, p + nn, ap, bp, p, tp + matrix_scratch);

	MPN_COPY (u0, M.p[1][0], M.n);
	MPN_COPY (u1, M.p[1][1], M.n);
	un = M.n;
	while ( (u0[un-1] | u1[un-1] ) == 0)
	  un--;
      }
    else
      {
	/* mpn_hgcd has failed. Then either one of a or b is very
	   small, or the difference is very small. Perform one
	   subtraction followed by one division. */
	mp_size_t gn;
	mp_size_t updated_un = 1;

	u1[0] = 1;

	/* Temporary storage 2n + 1 */
	n = mpn_gcdext_subdiv_step (gp, &gn, up, usizep, ap, bp, n,
				    u0, u1, &updated_un, tp, tp + n);
	if (n == 0)
	  {
	    TMP_FREE;
	    return gn;
	  }

	un = updated_un;
	ASSERT (un < ualloc);
      }
  }

  while (ABOVE_THRESHOLD (n, GCDEXT_DC_THRESHOLD))
    {
      struct hgcd_matrix M;
      mp_size_t p = CHOOSE_P_2 (n);
      mp_size_t nn;

      mpn_hgcd_matrix_init (&M, n - p, tp);
      nn = mpn_hgcd (ap + p, bp + p, n - p, &M, tp + matrix_scratch);
      if (nn > 0)
	{
	  mp_ptr t0;

	  t0 = tp + matrix_scratch;
	  ASSERT (M.n <= (n - p - 1)/2);
	  ASSERT (M.n + p <= (p + n - 1) / 2);

	  /* Temporary storage 2 (p + M->n) <= p + n - 1 */
	  n = mpn_hgcd_matrix_adjust (&M, p + nn, ap, bp, p, t0);

	  /* By the same analysis as for mpn_hgcd_matrix_mul */
	  ASSERT (M.n + un <= ualloc);

	  /* FIXME: This copying could be avoided by some swapping of
	   * pointers. May need more temporary storage, though. */
	  MPN_COPY (t0, u0, un);

	  /* Temporary storage ualloc */
	  un = hgcd_mul_matrix_vector (&M, u0, t0, u1, un, t0 + un);

	  ASSERT (un < ualloc);
	  ASSERT ( (u0[un-1] | u1[un-1]) > 0);
	}
      else
	{
	  /* mpn_hgcd has failed. Then either one of a or b is very
	     small, or the difference is very small. Perform one
	     subtraction followed by one division. */
	  mp_size_t gn;
	  mp_size_t updated_un = un;

	  /* Temporary storage 2n + 1 */
	  n = mpn_gcdext_subdiv_step (gp, &gn, up, usizep, ap, bp, n,
				      u0, u1, &updated_un, tp, tp + n);
	  if (n == 0)
	    {
	      TMP_FREE;
	      return gn;
	    }

	  un = updated_un;
	  ASSERT (un < ualloc);
	}
    }

  if (UNLIKELY (mpn_cmp (ap, bp, n) == 0))
    {
      /* Must return the smallest cofactor, +u1 or -u0 */
      int c;

      MPN_COPY (gp, ap, n);

      MPN_CMP (c, u0, u1, un);
      ASSERT (c != 0);
      if (c < 0)
	{
	  MPN_NORMALIZE (u0, un);
	  MPN_COPY (up, u0, un);
	  *usizep = -un;
	}
      else
	{
	  MPN_NORMALIZE_NOT_ZERO (u1, un);
	  MPN_COPY (up, u1, un);
	  *usizep = un;
	}

      TMP_FREE;
      return n;
    }
  else if (mpn_zero_p (u0, un))
    {
      mp_size_t gn;
      ASSERT (un == 1);
      ASSERT (u1[0] == 1);

      /* g = u a + v b = (u u1 - v u0) A + (...) B = u A + (...) B */
      gn = mpn_gcdext_lehmer_n (gp, up, usizep, ap, bp, n, tp);

      TMP_FREE;
      return gn;
    }
  else
    {
      /* We have A = ... a + ... b
		 B =  u0 a +  u1 b

		 a = u1  A + ... B
		 b = -u0 A + ... B

	 with bounds

	   |u0|, |u1| <= B / min(a, b)

	 Compute g = u a + v b = (u u1 - v u0) A + (...) B
	 Here, u, v are bounded by

	 |u| <= b,
	 |v| <= a
      */

      mp_size_t u0n;
      mp_size_t u1n;
      mp_size_t lehmer_un;
      mp_size_t lehmer_vn;
      mp_size_t gn;

      mp_ptr lehmer_up;
      mp_ptr lehmer_vp;
      int negate;

      lehmer_up = tp; tp += n;

      /* Call mpn_gcdext_lehmer_n with copies of a and b. */
      MPN_COPY (tp, ap, n);
      MPN_COPY (tp + n, bp, n);
      gn = mpn_gcdext_lehmer_n (gp, lehmer_up, &lehmer_un, tp, tp + n, n, tp + 2*n);

      u0n = un;
      MPN_NORMALIZE (u0, u0n);
      if (lehmer_un == 0)
	{
	  /* u == 0  ==>  v = g / b == 1  ==> g = - u0 A + (...) B */
	  MPN_COPY (up, u0, u0n);
	  *usizep = -u0n;

	  TMP_FREE;
	  return gn;
	}

      lehmer_vp = tp;
      /* Compute v = (g - u a) / b */
      lehmer_vn = compute_v (lehmer_vp,
			     ap, bp, n, gp, gn, lehmer_up, lehmer_un, tp + n + 1);

      if (lehmer_un > 0)
	negate = 0;
      else
	{
	  lehmer_un = -lehmer_un;
	  negate = 1;
	}

      u1n = un;
      MPN_NORMALIZE (u1, u1n);

      /* It's possible that u0 = 1, u1 = 0 */
      if (u1n == 0)
	{
	  ASSERT (un == 1);
	  ASSERT (u0[0] == 1);

	  /* u1 == 0 ==> u u1 + v u0 = v */
	  MPN_COPY (up, lehmer_vp, lehmer_vn);
	  *usizep = negate ? lehmer_vn : - lehmer_vn;

	  TMP_FREE;
	  return gn;
	}

      ASSERT (lehmer_un + u1n <= ualloc);
      ASSERT (lehmer_vn + u0n <= ualloc);

      /* Now u0, u1, u are non-zero. We may still have v == 0 */

      /* Compute u u0 */
      if (lehmer_un <= u1n)
	/* Should be the common case */
	mpn_mul (up, u1, u1n, lehmer_up, lehmer_un);
      else
	mpn_mul (up, lehmer_up, lehmer_un, u1, u1n);

      un = u1n + lehmer_un;
      un -= (up[un - 1] == 0);

      if (lehmer_vn > 0)
	{
	  mp_limb_t cy;

	  /* Overwrites old u1 value */
	  if (lehmer_vn <= u0n)
	    /* Should be the common case */
	    mpn_mul (u1, u0, u0n, lehmer_vp, lehmer_vn);
	  else
	    mpn_mul (u1, lehmer_vp, lehmer_vn, u0, u0n);

	  u1n = u0n + lehmer_vn;
	  u1n -= (u1[u1n - 1] == 0);

	  if (u1n <= un)
	    {
	      cy = mpn_add (up, up, un, u1, u1n);
	    }
	  else
	    {
	      cy = mpn_add (up, u1, u1n, up, un);
	      un = u1n;
	    }
	  up[un] = cy;
	  un += (cy != 0);

	  ASSERT (un < ualloc);
	}
      *usizep = negate ? -un : un;

      TMP_FREE;
      return gn;
    }
}
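
Throughout, u0 and u1 are the cofactors with B = u0*a + u1*b, the invariant of the classical extended Euclidean algorithm. The invariant sketched on single words (gcdext_small is an illustrative name):

#include <assert.h>
#include <stdint.h>

/* Classical extended Euclid: maintains u0*a + v0*b = r through the
   division steps, returning g and cofactors with u*a + v*b == g. */
static int64_t gcdext_small (int64_t a, int64_t b, int64_t *u, int64_t *v)
{
  int64_t u0 = 1, v0 = 0, u1 = 0, v1 = 1;
  while (b != 0)
    {
      int64_t q = a / b, t;
      t = a - q * b;   a = b;   b = t;
      t = u0 - q * u1; u0 = u1; u1 = t;
      t = v0 - q * v1; v0 = v1; v1 = t;
    }
  *u = u0;
  *v = v0;
  return a;
}

int main (void)
{
  int64_t u, v;
  int64_t g = gcdext_small (240, 46, &u, &v);
  assert (g == 2 && 240 * u + 46 * v == g);
  return 0;
}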
Example #29
int
mpfr_sub1sp (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
{
  mpfr_exp_t bx,cx;
  mpfr_uexp_t d;
  mpfr_prec_t p, sh, cnt;
  mp_size_t n;
  mp_limb_t *ap, *bp, *cp;
  mp_limb_t limb;
  int inexact;
  mp_limb_t bcp,bcp1; /* Cp and C'p+1 */
  mp_limb_t bbcp = (mp_limb_t) -1, bbcp1 = (mp_limb_t) -1; /* Cp+1 and C'p+2,
    gcc claims that they might be used uninitialized. We fill them with invalid
    values, which should produce a failure if so. See README.dev file. */

  MPFR_TMP_DECL(marker);

  MPFR_TMP_MARK(marker);

  MPFR_ASSERTD(MPFR_PREC(a) == MPFR_PREC(b) && MPFR_PREC(b) == MPFR_PREC(c));
  MPFR_ASSERTD(MPFR_IS_PURE_FP(b));
  MPFR_ASSERTD(MPFR_IS_PURE_FP(c));

  /* Read prec and num of limbs */
  p = MPFR_PREC (b);
  n = MPFR_PREC2LIMBS (p);

  /* Fast cmp of |b| and |c|*/
  bx = MPFR_GET_EXP (b);
  cx = MPFR_GET_EXP (c);
  if (MPFR_UNLIKELY(bx == cx))
    {
      mp_size_t k = n - 1;
      /* Check mantissas since the exponents are equal */
      bp = MPFR_MANT(b);
      cp = MPFR_MANT(c);
      while (k>=0 && MPFR_UNLIKELY(bp[k] == cp[k]))
        k--;
      if (MPFR_UNLIKELY(k < 0))
        /* b == c ! */
        {
          /* Return exact number 0 */
          if (rnd_mode == MPFR_RNDD)
            MPFR_SET_NEG(a);
          else
            MPFR_SET_POS(a);
          MPFR_SET_ZERO(a);
          MPFR_RET(0);
        }
      else if (bp[k] > cp[k])
        goto BGreater;
      else
        {
          MPFR_ASSERTD(bp[k]<cp[k]);
          goto CGreater;
        }
    }
  else if (MPFR_UNLIKELY(bx < cx))
    {
      /* Swap b and c and set sign */
      mpfr_srcptr t;
      mpfr_exp_t tx;
    CGreater:
      MPFR_SET_OPPOSITE_SIGN(a,b);
      t  = b;  b  = c;  c  = t;
      tx = bx; bx = cx; cx = tx;
    }
  else
    {
      /* b > c */
    BGreater:
      MPFR_SET_SAME_SIGN(a,b);
    }

  /* Now b > c */
  MPFR_ASSERTD(bx >= cx);
  d = (mpfr_uexp_t) bx - cx;
  DEBUG (printf ("New with diff=%lu\n", (unsigned long) d));

  if (MPFR_UNLIKELY(d <= 1))
    {
      if (MPFR_LIKELY(d < 1))
        {
          /* <-- b -->
             <-- c --> : exact sub */
          ap = MPFR_MANT(a);
          mpn_sub_n (ap, MPFR_MANT(b), MPFR_MANT(c), n);
          /* Normalize */
        ExactNormalize:
          limb = ap[n-1];
          if (MPFR_LIKELY(limb))
            {
              /* First limb is not zero. */
              count_leading_zeros(cnt, limb);
              /* cnt could be == 0 (see SubD1Lose) */
              if (MPFR_LIKELY(cnt))
                {
                  mpn_lshift(ap, ap, n, cnt); /* Normalize number */
                  bx -= cnt; /* Update final expo */
                }
              /* Last limb should be ok */
              MPFR_ASSERTD(!(ap[0] & MPFR_LIMB_MASK((unsigned int) (-p)
                                                    % GMP_NUMB_BITS)));
            }
          else
            {
              /* First limb is zero */
              mp_size_t k = n-1, len;
              /* Find the first limb not equal to zero.
                 FIXME: it is assumed to exist (since |b| > |c| and same prec) */
              do
                {
                  MPFR_ASSERTD( k > 0 );
                  limb = ap[--k];
                }
              while (limb == 0);
              MPFR_ASSERTD(limb != 0);
              count_leading_zeros(cnt, limb);
              k++;
              len = n - k; /* number of high zero limbs */
              MPFR_ASSERTD(k >= 0);
              if (MPFR_LIKELY(cnt))
                mpn_lshift(ap+len, ap, k, cnt); /* Normalize the High Limb*/
              else
                {
                  /* Must use DECR since src and dest may overlap & dest>=src*/
                  MPN_COPY_DECR(ap+len, ap, k);
                }
              MPN_ZERO(ap, len); /* Zeroing the last limbs */
              bx -= cnt + len*GMP_NUMB_BITS; /* Update Expo */
              /* Last limb should be ok */
              MPFR_ASSERTD(!(ap[len]&MPFR_LIMB_MASK((unsigned int) (-p)
                                                    % GMP_NUMB_BITS)));
            }
          /* Check expo underflow */
          if (MPFR_UNLIKELY(bx < __gmpfr_emin))
            {
              MPFR_TMP_FREE(marker);
              /* inexact=0 */
              DEBUG( printf("(D==0 Underflow)\n") );
              if (rnd_mode == MPFR_RNDN &&
                  (bx < __gmpfr_emin - 1 ||
                   (/*inexact >= 0 &&*/ mpfr_powerof2_raw (a))))
                rnd_mode = MPFR_RNDZ;
              return mpfr_underflow (a, rnd_mode, MPFR_SIGN(a));
            }
          MPFR_SET_EXP (a, bx);
          /* No rounding is necessary since the result is exact */
          MPFR_ASSERTD(ap[n-1] > ~ap[n-1]);
          MPFR_TMP_FREE(marker);
          return 0;
        }
      else /* if (d == 1) */
        {
          /* | <-- b -->
             |  <-- c --> */
          mp_limb_t c0, mask;
          mp_size_t k;
          MPFR_UNSIGNED_MINUS_MODULO(sh, p);
          /* If we lose at least one bit, compute 2*b-c (Exact)
           * else compute b-c/2 */
          bp = MPFR_MANT(b);
          cp = MPFR_MANT(c);
          k = n-1;
          limb = bp[k] - cp[k]/2;
          if (limb > MPFR_LIMB_HIGHBIT)
            {
              /* We can't lose precision: compute b-c/2 */
              /* Shift c in the allocated temporary block */
            SubD1NoLose:
              c0 = cp[0] & (MPFR_LIMB_ONE<<sh);
              cp = MPFR_TMP_LIMBS_ALLOC (n);
              mpn_rshift(cp, MPFR_MANT(c), n, 1);
              if (MPFR_LIKELY(c0 == 0))
                {
                  /* Result is exact: no need of rounding! */
                  ap = MPFR_MANT(a);
                  mpn_sub_n (ap, bp, cp, n);
                  MPFR_SET_EXP(a, bx); /* No expo overflow! */
                  /* No truncate or normalize is needed */
                  MPFR_ASSERTD(ap[n-1] > ~ap[n-1]);
                  /* No rounding is necessary since the result is exact */
                  MPFR_TMP_FREE(marker);
                  return 0;
                }
              ap = MPFR_MANT(a);
              mask = ~MPFR_LIMB_MASK(sh);
              cp[0] &= mask; /* Delete last bit of c */
              mpn_sub_n (ap, bp, cp, n);
              MPFR_SET_EXP(a, bx);                 /* No expo overflow! */
              MPFR_ASSERTD( !(ap[0] & ~mask) );    /* Check last bits */
              /* No normalize is needed */
              MPFR_ASSERTD(ap[n-1] > ~ap[n-1]);
              /* Rounding is necessary since c0 = 1*/
              /* Cp =-1 and C'p+1=0 */
              bcp = 1; bcp1 = 0;
              if (MPFR_LIKELY(rnd_mode == MPFR_RNDN))
                {
                  /* Even Rule apply: Check Ap-1 */
                  if (MPFR_LIKELY( (ap[0] & (MPFR_LIMB_ONE<<sh)) == 0) )
                    goto truncate;
                  else
                    goto sub_one_ulp;
                }
              MPFR_UPDATE_RND_MODE(rnd_mode, MPFR_IS_NEG(a));
              if (rnd_mode == MPFR_RNDZ)
                goto sub_one_ulp;
              else
                goto truncate;
            }
          else if (MPFR_LIKELY(limb < MPFR_LIMB_HIGHBIT))
            {
              /* We lose at least one bit of prec */
              /* Computation of 2*b-c (exact) */
              /* Shift b in the allocated temporary block */
            SubD1Lose:
              bp = MPFR_TMP_LIMBS_ALLOC (n);
              mpn_lshift (bp, MPFR_MANT(b), n, 1);
              ap = MPFR_MANT(a);
              mpn_sub_n (ap, bp, cp, n);
              bx--;
              goto ExactNormalize;
            }
          else
            {
              /* Case: limb = 100000000000 */
              /* Check while b[k] == c'[k] (C' is C shifted by 1) */
              /* If b[k]<c'[k] => We lose at least one bit*/
              /* If b[k]>c'[k] => We don't lose any bit */
              /* If k==-1 => We don't lose any bit
                 AND the result is 100000000000 0000000000 00000000000 */
              mp_limb_t carry;
              do {
                carry = cp[k]&MPFR_LIMB_ONE;
                k--;
              } while (k>=0 &&
                       bp[k]==(carry=cp[k]/2+(carry<<(GMP_NUMB_BITS-1))));
              if (MPFR_UNLIKELY(k<0))
                {
                  /*If carry then (sh==0 and Virtual c'[-1] > Virtual b[-1]) */
                  if (MPFR_UNLIKELY(carry)) /* carry = cp[0]&MPFR_LIMB_ONE */
                    {
                      /* FIXME: Can be faster? */
                      MPFR_ASSERTD(sh == 0);
                      goto SubD1Lose;
                    }
                  /* Result is a power of 2 */
                  ap = MPFR_MANT (a);
                  MPN_ZERO (ap, n);
                  ap[n-1] = MPFR_LIMB_HIGHBIT;
                  MPFR_SET_EXP (a, bx); /* No expo overflow! */
                  /* No Normalize is needed*/
                  /* No Rounding is needed */
                  MPFR_TMP_FREE (marker);
                  return 0;
                }
              /* carry = cp[k]/2+(cp[k-1]&1)<<(GMP_NUMB_BITS-1) = c'[k]*/
              else if (bp[k] > carry)
                goto SubD1NoLose;
              else
                {
                  MPFR_ASSERTD(bp[k]<carry);
                  goto SubD1Lose;
                }
            }
        }
    }
  else if (MPFR_UNLIKELY(d >= p))
    {
      ap = MPFR_MANT(a);
      MPFR_UNSIGNED_MINUS_MODULO(sh, p);
      /* We can't set A before since we use cp for rounding... */
      /* Perform rounding: check if a=b or a=b-ulp(b) */
      if (MPFR_UNLIKELY(d == p))
        {
          /* cp == -1 and c'p+1 = ? */
          bcp  = 1;
          /* We need Cp+1 later for a very improbable case. */
          bbcp = (MPFR_MANT(c)[n-1] & (MPFR_LIMB_ONE<<(GMP_NUMB_BITS-2)));
          /* We also need C'p+1 for an even more improbable case... */
          if (MPFR_LIKELY( bbcp ))
            bcp1 = 1;
          else
            {
              cp = MPFR_MANT(c);
              if (MPFR_UNLIKELY(cp[n-1] == MPFR_LIMB_HIGHBIT))
                {
                  mp_size_t k = n-1;
                  do {
                    k--;
                  } while (k>=0 && cp[k]==0);
                  bcp1 = (k>=0);
                }
              else
                bcp1 = 1;
            }
          DEBUG( printf("(D=P) Cp=-1 Cp+1=%d C'p+1=%d \n", bbcp!=0, bcp1!=0) );
          bp = MPFR_MANT (b);

          /* Even if src and dest overlap, it is OK to use MPN_COPY */
          if (MPFR_LIKELY(rnd_mode == MPFR_RNDN))
            {
              if (MPFR_UNLIKELY( bcp && bcp1==0 ))
                /* Cp=-1 and C'p+1=0: Even rule Apply! */
                /* Check Ap-1 = Bp-1 */
                if ((bp[0] & (MPFR_LIMB_ONE<<sh)) == 0)
                  {
                    MPN_COPY(ap, bp, n);
                    goto truncate;
                  }
              MPN_COPY(ap, bp, n);
              goto sub_one_ulp;
            }
          MPFR_UPDATE_RND_MODE(rnd_mode, MPFR_IS_NEG(a));
          if (rnd_mode == MPFR_RNDZ)
            {
              MPN_COPY(ap, bp, n);
              goto sub_one_ulp;
            }
          else
            {
              MPN_COPY(ap, bp, n);
              goto truncate;
            }
        }
      else
        {
          /* Cp=0, Cp+1=-1 if d==p+1, C'p+1=-1 */
          bcp = 0; bbcp = (d==p+1); bcp1 = 1;
          DEBUG( printf("(D>P) Cp=%d Cp+1=%d C'p+1=%d\n", bcp!=0,bbcp!=0,bcp1!=0) );
          /* Need to compute C'p+2 if d==p+1 and if rnd_mode=NEAREST
             (Because of a very improbable case) */
          if (MPFR_UNLIKELY(d==p+1 && rnd_mode==MPFR_RNDN))
            {
              cp = MPFR_MANT(c);
              if (MPFR_UNLIKELY(cp[n-1] == MPFR_LIMB_HIGHBIT))
                {
                  mp_size_t k = n-1;
                  do {
                    k--;
                  } while (k>=0 && cp[k]==0);
                  bbcp1 = (k>=0);
                }
              else
                bbcp1 = 1;
              DEBUG( printf("(D>P) C'p+2=%d\n", bbcp1!=0) );
            }
          /* Copy mantissa B in A */
          MPN_COPY(ap, MPFR_MANT(b), n);
          /* Round */
          if (MPFR_LIKELY(rnd_mode == MPFR_RNDN))
            goto truncate;
          MPFR_UPDATE_RND_MODE(rnd_mode, MPFR_IS_NEG(a));
          if (rnd_mode == MPFR_RNDZ)
            goto sub_one_ulp;
          else /* rnd_mode = AWAY */
            goto truncate;
        }
    }
  else
    {
      mpfr_uexp_t dm;
      mp_size_t m;
      mp_limb_t mask;

      /* General case: 2 <= d < p */
      MPFR_UNSIGNED_MINUS_MODULO(sh, p);
      cp = MPFR_TMP_LIMBS_ALLOC (n);

      /* Shift c in temporary allocated place */
      dm = d % GMP_NUMB_BITS;
      m = d / GMP_NUMB_BITS;
      if (MPFR_UNLIKELY(dm == 0))
        {
          /* dm = 0 and m > 0: Just copy */
          MPFR_ASSERTD(m!=0);
          MPN_COPY(cp, MPFR_MANT(c)+m, n-m);
          MPN_ZERO(cp+n-m, m);
        }
      else if (MPFR_LIKELY(m == 0))
        {
          /* dm >=2 and m == 0: just shift */
          MPFR_ASSERTD(dm >= 2);
          mpn_rshift(cp, MPFR_MANT(c), n, dm);
        }
      else
        {
          /* dm > 0 and m > 0: shift and zero  */
          mpn_rshift(cp, MPFR_MANT(c)+m, n-m, dm);
          MPN_ZERO(cp+n-m, m);
        }

      DEBUG( mpfr_print_mant_binary("Before", MPFR_MANT(c), p) );
      DEBUG( mpfr_print_mant_binary("B=    ", MPFR_MANT(b), p) );
      DEBUG( mpfr_print_mant_binary("After ", cp, p) );

      /* Compute bcp=Cp and bcp1=C'p+1 */
      if (MPFR_LIKELY(sh))
        {
          /* Try to compute them from C' rather than C (FIXME: Faster?) */
          bcp = (cp[0] & (MPFR_LIMB_ONE<<(sh-1))) ;
          if (MPFR_LIKELY( cp[0] & MPFR_LIMB_MASK(sh-1) ))
            bcp1 = 1;
          else
            {
              /* We can't compute C'p+1 from C'. Compute it from C */
              /* Start from bit x=p-d+sh in mantissa C
                 (+sh since we have already looked at sh bits in C'!) */
              mpfr_prec_t x = p-d+sh-1;
              if (MPFR_LIKELY(x>p))
                /* We have already looked at all the bits of c, so C'p+1 = 0 */
                bcp1 = 0;
              else
                {
                  mp_limb_t *tp = MPFR_MANT(c);
                  mp_size_t kx = n-1 - (x / GMP_NUMB_BITS);
                  mpfr_prec_t sx = GMP_NUMB_BITS-1-(x%GMP_NUMB_BITS);
                  DEBUG (printf ("(First) x=%lu Kx=%ld Sx=%lu\n",
                                 (unsigned long) x, (long) kx,
                                 (unsigned long) sx));
                  /* Looks at the last bits of limb kx (if sx=0 does nothing)*/
                  if (tp[kx] & MPFR_LIMB_MASK(sx))
                    bcp1 = 1;
                  else
                    {
                      /*kx += (sx==0);*/
                      /*If sx==0, tp[kx] hasn't been checked*/
                      do {
                        kx--;
                      } while (kx>=0 && tp[kx]==0);
                      bcp1 = (kx >= 0);
                    }
                }
            }
        }
      else
        {
          /* Compute Cp and C'p+1 from C with sh=0 */
          mp_limb_t *tp = MPFR_MANT(c);
          /* Start from bit x=p-d in mantissa C */
          mpfr_prec_t  x = p-d;
          mp_size_t   kx = n-1 - (x / GMP_NUMB_BITS);
          mpfr_prec_t sx = GMP_NUMB_BITS-1-(x%GMP_NUMB_BITS);
          MPFR_ASSERTD(p >= d);
          bcp = (tp[kx] & (MPFR_LIMB_ONE<<sx));
          /* Looks at the last bits of limb kx (If sx=0, does nothing)*/
          if (tp[kx] & MPFR_LIMB_MASK(sx))
            bcp1 = 1;
          else
            {
              /*kx += (sx==0);*/ /*If sx==0, tp[kx] hasn't been checked*/
              do {
                kx--;
              } while (kx>=0 && tp[kx]==0);
              bcp1 = (kx>=0);
            }
        }
      DEBUG( printf("sh=%lu Cp=%d C'p+1=%d\n", sh, bcp!=0, bcp1!=0) );

      /* Check if we can lose a bit, and if so compute Cp+1 and C'p+2 */
      bp = MPFR_MANT(b);
      if (MPFR_UNLIKELY((bp[n-1]-cp[n-1]) <= MPFR_LIMB_HIGHBIT))
        {
          /* We can lose a bit so we precompute Cp+1 and C'p+2 */
          /* Test for trivial case: since C'p+1=0, Cp+1=0 and C'p+2 =0 */
          if (MPFR_LIKELY(bcp1 == 0))
            {
              bbcp = 0;
              bbcp1 = 0;
            }
          else /* bcp1 != 0 */
            {
              /* We can lose a bit:
                 compute Cp+1 and C'p+2 from mantissa C */
              mp_limb_t *tp = MPFR_MANT(c);
              /* Start from bit x=(p+1)-d in mantissa C */
              mpfr_prec_t x  = p+1-d;
              mp_size_t kx = n-1 - (x/GMP_NUMB_BITS);
              mpfr_prec_t sx = GMP_NUMB_BITS-1-(x%GMP_NUMB_BITS);
              MPFR_ASSERTD(p > d);
              DEBUG (printf ("(pre) x=%lu Kx=%ld Sx=%lu\n",
                             (unsigned long) x, (long) kx,
                             (unsigned long) sx));
              bbcp = (tp[kx] & (MPFR_LIMB_ONE<<sx)) ;
              /* Look at the low bits of limb kx (if sx==0, the mask is zero and nothing is checked) */
              /* If Cp+1=0, since C'p+1!=0, C'p+2=1 ! */
              if (MPFR_LIKELY(bbcp==0 || (tp[kx]&MPFR_LIMB_MASK(sx))))
                bbcp1 = 1;
              else
                {
                  /*kx += (sx==0);*/ /*If sx==0, tp[kx] hasn't been checked*/
                  do {
                    kx--;
                  } while (kx>=0 && tp[kx]==0);
                  bbcp1 = (kx>=0);
                  DEBUG (printf ("(Pre) Scan done for %ld\n", (long) kx));
                }
            } /*End of Bcp1 != 0*/
          DEBUG( printf("(Pre) Cp+1=%d C'p+2=%d\n", bbcp!=0, bbcp1!=0) );
        } /* End of "can lose a bit" */

      /* Clean shifted C' */
      mask = ~MPFR_LIMB_MASK (sh);
      cp[0] &= mask;

      /* Subtract the mantissa c from b in a */
      ap = MPFR_MANT(a);
      mpn_sub_n (ap, bp, cp, n);
      DEBUG( mpfr_print_mant_binary("Sub=  ", ap, p) );

      /* Normalize: we lose at most one bit */
      if (MPFR_UNLIKELY(MPFR_LIMB_MSB(ap[n-1]) == 0))
        {
          /* High bit is not set and we have to fix it! */
          /* Ap >= 010000xxx001 */
          mpn_lshift(ap, ap, n, 1);
          /* Ap >= 100000xxx010 */
          if (MPFR_UNLIKELY(bcp!=0)) /* Check if Cp = -1 */
            /* Since Cp == -1, we have to subtract one more */
            {
              mpn_sub_1(ap, ap, n, MPFR_LIMB_ONE<<sh);
              MPFR_ASSERTD(MPFR_LIMB_MSB(ap[n-1]) != 0);
            }
          /* Ap >= 10000xxx001 */
          /* Final exponent -1 since we have shifted the mantissa */
          bx--;
          /* Update bcp and bcp1 */
          MPFR_ASSERTN(bbcp != (mp_limb_t) -1);
          MPFR_ASSERTN(bbcp1 != (mp_limb_t) -1);
          bcp  = bbcp;
          bcp1 = bbcp1;
          /* We no longer have a valid Cp+1!
             But since Ap >= 100000xxx001, the final sub can't unnormalize! */
        }
      MPFR_ASSERTD( !(ap[0] & ~mask) );

      /* Rounding */
      if (MPFR_LIKELY(rnd_mode == MPFR_RNDN))
        {
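          /* Cp (bcp) is the first neglected bit of C and C'p+1 (bcp1)
             is the sticky bit.  The neglected part of C still has to
             come off the truncated difference, so we go one ulp down
             when it exceeds half an ulp, or when it equals half an ulp
             and the last kept bit of A is 1 (ties to even). */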
          if (MPFR_LIKELY(bcp==0))
            goto truncate;
          else if ((bcp1) || ((ap[0] & (MPFR_LIMB_ONE<<sh)) != 0))
            goto sub_one_ulp;
          else
            goto truncate;
        }

      /* Update rounding mode */
      MPFR_UPDATE_RND_MODE(rnd_mode, MPFR_IS_NEG(a));
      if (rnd_mode == MPFR_RNDZ && (MPFR_LIKELY(bcp || bcp1)))
        goto sub_one_ulp;
      goto truncate;
    }
  MPFR_RET_NEVER_GO_HERE ();

  /* Subtract one ulp from the result */
 sub_one_ulp:
  mpn_sub_1 (ap, ap, n, MPFR_LIMB_ONE << sh);
  /* Result should be smaller than exact value: inexact=-1 */
  inexact = -1;
  /* Check normalisation */
  if (MPFR_UNLIKELY(MPFR_LIMB_MSB(ap[n-1]) == 0))
    {
      /* ap was a power of 2, and we lose a bit */
      /* Now it is 0111111111111111111[00000 */
      mpn_lshift(ap, ap, n, 1);
      bx--;
      /* And the lost bit x depends on Cp+1, and Cp */
      /* Compute Cp+1 if it isn't already computed (i.e. d==1) */
      /* FIXME: Is this case possible? */
      if (MPFR_UNLIKELY(d == 1))
        bbcp = 0;
      DEBUG( printf("(SubOneUlp)Cp=%d, Cp+1=%d C'p+1=%d\n", bcp!=0,bbcp!=0,bcp1!=0));
      /* Compute the last bit (since we have shifted the mantissa,
         we need one more bit!) */
      MPFR_ASSERTN(bbcp != (mp_limb_t) -1);
      if ( (rnd_mode == MPFR_RNDZ && bcp==0)
           || (rnd_mode==MPFR_RNDN && bbcp==0)
           || (bcp && bcp1==0) ) /*Exact result*/
        {
          ap[0] |= MPFR_LIMB_ONE<<sh;
          if (rnd_mode == MPFR_RNDN)
            inexact = 1;
          DEBUG( printf("(SubOneUlp) Last bit set\n") );
        }
      /* The result could be exact if C'p+1 = 0 and rnd == Zero,
         since we have added one more bit to the result */
      /* Fixme: rnd_mode == MPFR_RNDZ needed ? */
      if (bcp1==0 && rnd_mode==MPFR_RNDZ)
        {
          DEBUG( printf("(SubOneUlp) Exact result\n") );
          inexact = 0;
        }
    }

  goto end_of_sub;

 truncate:
  /* Check if the result is an exact power of 2: 100000000000,
     in which case we may have to do sub_one_ulp for some subtle reasons:
     If the result is a power of 2:
      + If rnd = AWAY,
         If Cp=-1 and C'p+1 = 0, SubOneUlp and the result is EXACT.
         If Cp=-1 and C'p+1 =-1, SubOneUlp and the result is above.
         Otherwise truncate.
      + If rnd = NEAREST,
         If Cp= 0 and Cp+1 =-1 and C'p+2=-1, SubOneUlp and the result is above.
         If Cp=-1 and C'p+1 = 0, SubOneUlp and the result is exact.
         Otherwise truncate.
      The X bit should always be set if SubOneUlp is done. */
  if (MPFR_UNLIKELY(ap[n-1] == MPFR_LIMB_HIGHBIT))
    {
      mp_size_t k = n-1;
      do {
        k--;
      } while (k>=0 && ap[k]==0);
      if (MPFR_UNLIKELY(k<0))
        {
          /* It is a power of 2! */
          /* Compute Cp+1 if it isn't already computed (i.e. d==1) */
          /* FIXME: Is this case possible? */
          if (d == 1)
            bbcp=0;
          DEBUG( printf("(Truncate) Cp=%d, Cp+1=%d C'p+1=%d C'p+2=%d\n", \
                 bcp!=0, bbcp!=0, bcp1!=0, bbcp1!=0) );
          MPFR_ASSERTN(bbcp != (mp_limb_t) -1);
          MPFR_ASSERTN((rnd_mode != MPFR_RNDN) || (bcp != 0) || (bbcp == 0) || (bbcp1 != (mp_limb_t) -1));
          if (((rnd_mode != MPFR_RNDZ) && bcp)
              ||
              ((rnd_mode == MPFR_RNDN) && (bcp == 0) && (bbcp) && (bbcp1)))
            {
              DEBUG( printf("(Truncate) Do sub\n") );
              mpn_sub_1 (ap, ap, n, MPFR_LIMB_ONE << sh);
              mpn_lshift(ap, ap, n, 1);
              ap[0] |= MPFR_LIMB_ONE<<sh;
              bx--;
              /* FIXME: Explain why it works (or why not)... */
              inexact = (bcp1 == 0) ? 0 : (rnd_mode==MPFR_RNDN) ? -1 : 1;
              goto end_of_sub;
            }
        }
    }

  /* Computation of the inexact flag. */
  inexact = MPFR_LIKELY(bcp || bcp1) ? 1 : 0;

 end_of_sub:
  /* Update Expo */
  /* FIXME: Is this test really useful?
      If d==0      : Exact case. This is never called.
      if 1 < d < p : bx=MPFR_EXP(b) or MPFR_EXP(b)-1 > MPFR_EXP(c) > emin
      if d == 1    : bx=MPFR_EXP(b). If we could lose any bits, the exact
                     normalisation is called.
      if d >=  p   : bx=MPFR_EXP(b) >= MPFR_EXP(c) + p > emin
     After SubOneUlp, we could have one bit less.
      if 1 < d < p : bx >= MPFR_EXP(b)-2 >= MPFR_EXP(c) > emin
      if d == 1    : bx >= MPFR_EXP(b)-1 = MPFR_EXP(c) > emin.
      if d >=  p   : bx >= MPFR_EXP(b)-1 > emin since p>=2.
  */
  MPFR_ASSERTD( bx >= __gmpfr_emin);
  /*
    if (MPFR_UNLIKELY(bx < __gmpfr_emin))
    {
      DEBUG( printf("(Final Underflow)\n") );
      if (rnd_mode == MPFR_RNDN &&
          (bx < __gmpfr_emin - 1 ||
           (inexact >= 0 && mpfr_powerof2_raw (a))))
        rnd_mode = MPFR_RNDZ;
      MPFR_TMP_FREE(marker);
      return mpfr_underflow (a, rnd_mode, MPFR_SIGN(a));
    }
  */
  MPFR_SET_EXP (a, bx);

  MPFR_TMP_FREE(marker);
  MPFR_RET (inexact * MPFR_INT_SIGN (a));
}
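
The subtraction routine above returns a ternary inexact value: negative when
the stored result lies below the exact difference, positive when above, and
zero when the subtraction is exact. A minimal sketch of how that ternary
value surfaces through the public MPFR API (illustrative only, assuming a
standard MPFR installation; this program is not part of the library source):

#include <stdio.h>
#include <mpfr.h>

int main (void)
{
  mpfr_t a, b, c;
  int inexact;

  mpfr_inits2 (53, a, b, c, (mpfr_ptr) 0);
  mpfr_set_ui_2exp (b, 1, 100, MPFR_RNDN);  /* b = 2^100, exact */
  mpfr_set_ui (c, 1, MPFR_RNDN);            /* c = 1, exact */

  /* b - c = 2^100 - 1 needs 100 bits, so at 53 bits it is rounded;
     round-to-nearest rounds it up, back to 2^100. */
  inexact = mpfr_sub (a, b, c, MPFR_RNDN);
  printf ("ternary value = %d\n", inexact); /* positive: rounded above */

  mpfr_clears (a, b, c, (mpfr_ptr) 0);
  return 0;
}
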
Exemplo n.º 30
0
int
mpfr_set_uj_2exp (mpfr_t x, uintmax_t j, intmax_t e, mp_rnd_t rnd)
{
  unsigned int cnt, i;
  mp_size_t k, len;
  mp_limb_t limb;
  mp_limb_t yp[sizeof(uintmax_t) / sizeof(mp_limb_t)];
  mpfr_t y;
  unsigned long uintmax_bit_size = sizeof(uintmax_t) * CHAR_BIT;
  unsigned long bpml = BITS_PER_MP_LIMB % uintmax_bit_size;

  /* Special case */
  if (j == 0)
    {
      MPFR_SET_POS(x);
      MPFR_SET_ZERO(x);
      MPFR_RET(0);
    }

  MPFR_ASSERTN (sizeof(uintmax_t) % sizeof(mp_limb_t) == 0);

  /* Create an auxiliary variable */
  MPFR_TMP_INIT1 (yp, y, uintmax_bit_size);
  k = numberof (yp);
  if (k == 1)
    limb = yp[0] = j;
  else
    {
      /* Note: either BITS_PER_MP_LIMB = uintmax_bit_size, in which case
         k = 1 and the shift j >>= bpml is never done, or
         BITS_PER_MP_LIMB < uintmax_bit_size and bpml = BITS_PER_MP_LIMB. */
      for (i = 0; i < k; i++, j >>= bpml)
        yp[i] = j; /* Only the low bits are copied */

      /* Find the first limb not equal to zero. */
      do
        {
          MPFR_ASSERTD (k > 0);
          limb = yp[--k];
        }
      while (limb == 0);
      k++;
    }
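  /* limb now holds the most significant non-zero limb of yp; cnt gets
     its number of leading zero bits, used below to left-normalize the
     mantissa. */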
  count_leading_zeros(cnt, limb);
  len = numberof (yp) - k;

  /* Normalize it: len = number of low zero limbs, k = number of significant limbs */
  if (MPFR_LIKELY(cnt))
    mpn_lshift (yp+len, yp, k, cnt);  /* Normalize the High Limb*/
  else if (len != 0)
    MPN_COPY_DECR (yp+len, yp, k);    /* Must use DECR */
  if (len != 0)
    /* Note: when numberof(yp)==1, len is constant and zero, so the compiler
       can optimize out this code. */
    {
      if (len == 1)
        yp[0] = (mp_limb_t) 0;
      else
        MPN_ZERO (yp, len);   /* Zero the low limbs */
    }
  e += k * BITS_PER_MP_LIMB - cnt;    /* Update Expo */
  MPFR_ASSERTD (MPFR_LIMB_MSB(yp[numberof (yp) - 1]) != 0);

  /* Check expo underflow / overflow (can't use mpfr_check_range) */
  if (MPFR_UNLIKELY(e < __gmpfr_emin))
    {
      /* The following test is necessary because in the rounding to the
       * nearest mode, mpfr_underflow always rounds away from 0. In
       * this rounding mode, we need to round to 0 if:
       *   _ |x| < 2^(emin-2), or
       *   _ |x| = 2^(emin-2) and the absolute value of the exact
       *     result is <= 2^(emin-2). */
      if (rnd == GMP_RNDN && (e+1 < __gmpfr_emin || mpfr_powerof2_raw(y)))
        rnd = GMP_RNDZ;
      return mpfr_underflow (x, rnd, MPFR_SIGN_POS);
    }
  if (MPFR_UNLIKELY(e > __gmpfr_emax))
    return mpfr_overflow (x, rnd, MPFR_SIGN_POS);
  MPFR_SET_EXP (y, e);

  /* Final: set x to y (rounding if necessary) */
  return mpfr_set (x, y, rnd);
}
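
A short usage sketch for mpfr_set_uj_2exp (illustrative only, not part of
the library source; per the MPFR manual, the uintmax_t interfaces require
<stdint.h> to be included and MPFR_USE_INTMAX_T to be defined before
including mpfr.h):

#include <stdio.h>
#include <stdint.h>
#define MPFR_USE_INTMAX_T   /* expose the uintmax_t/intmax_t interfaces */
#include <mpfr.h>

int main (void)
{
  mpfr_t x;
  int inexact;

  mpfr_init2 (x, 24);
  /* x = 12345 * 2^(-3) = 1543.125: 14 significant bits, so the value
     fits exactly in 24 bits and the ternary value is 0. */
  inexact = mpfr_set_uj_2exp (x, (uintmax_t) 12345, -3, MPFR_RNDN);
  mpfr_printf ("x = %Rg, ternary value = %d\n", x, inexact);
  mpfr_clear (x);
  return 0;
}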