Example #1
static int
sum_tab (mpfr_ptr ret, mpfr_t *tab, unsigned long n, mpfr_rnd_t rnd)
{
  mpfr_ptr *tabtmp;
  unsigned long i;
  int inexact;
  MPFR_TMP_DECL(marker);

  MPFR_TMP_MARK(marker);
  /* mpfr_sum() expects an array of pointers, so build one in temporarily
     allocated space that points to the entries of tab */
  tabtmp = (mpfr_ptr *) MPFR_TMP_ALLOC(n * sizeof(mpfr_srcptr));
  for (i = 0; i < n; i++)
    tabtmp[i] = tab[i];

  inexact = mpfr_sum (ret, tabtmp, n, rnd);
  MPFR_TMP_FREE(marker);
  return inexact;
}
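For comparison, here is a minimal sketch of the same wrapper written against the public MPFR API only, with plain malloc instead of the internal MPFR_TMP_* macros; the function name sum_of_array and the omission of error checking are choices made for this illustration:

#include <stdlib.h>
#include <mpfr.h>

/* Sum an array of mpfr_t through the public API: mpfr_sum() wants an
   array of pointers, so build one, sum, and free it again. */
int
sum_of_array (mpfr_ptr ret, mpfr_t *tab, unsigned long n, mpfr_rnd_t rnd)
{
  mpfr_ptr *p;
  unsigned long i;
  int inexact;

  p = (mpfr_ptr *) malloc (n * sizeof (mpfr_ptr));  /* error checking omitted */
  for (i = 0; i < n; i++)
    p[i] = tab[i];
  inexact = mpfr_sum (ret, p, n, rnd);
  free (p);
  return inexact;
}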
Example #2
/* Don't need to save / restore exponent range: the cache does it */
int
mpfr_const_log2_internal (mpfr_ptr x, mpfr_rnd_t rnd_mode)
{
  unsigned long n = MPFR_PREC (x);
  mpfr_prec_t w; /* working precision */
  unsigned long N;
  mpz_t *T, *P, *Q;
  mpfr_t t, q;
  int inexact;
  int ok = 1; /* ensures that the 1st try will give correct rounding */
  unsigned long lgN, i;
  MPFR_GROUP_DECL(group);
  MPFR_TMP_DECL(marker);
  MPFR_ZIV_DECL(loop);

  MPFR_LOG_FUNC (
    ("rnd_mode=%d", rnd_mode),
    ("x[%Pu]=%.*Rg inex=%d", mpfr_get_prec(x), mpfr_log_prec, x, inexact));

  if (n < 1253)
    w = n + 10; /* ensures correct rounding for the four rounding modes,
                   together with N = w / 3 + 1 (see below). */
  else if (n < 2571)
    w = n + 11; /* idem */
  else if (n < 3983)
    w = n + 12;
  else if (n < 4854)
    w = n + 13;
  else if (n < 26248)
    w = n + 14;
  else
    {
      w = n + 15;
      ok = 0;
    }

  MPFR_TMP_MARK(marker);
  MPFR_GROUP_INIT_2(group, w, t, q);

  MPFR_ZIV_INIT (loop, w);
  for (;;)
    {
      N = w / 3 + 1; /* Warning: do not change that (even increasing N!)
                        without checking correct rounding in the above
                        ranges for n. */

      /* the following are needed for error analysis (see algorithms.tex) */
      MPFR_ASSERTD(w >= 3 && N >= 2);

      lgN = MPFR_INT_CEIL_LOG2 (N) + 1;
      T  = (mpz_t *) MPFR_TMP_ALLOC (3 * lgN * sizeof (mpz_t));
      P  = T + lgN;
      Q  = T + 2*lgN;
      for (i = 0; i < lgN; i++)
        {
          mpz_init (T[i]);
          mpz_init (P[i]);
          mpz_init (Q[i]);
        }

      S (T, P, Q, 0, N, 0);

      mpfr_set_z (t, T[0], MPFR_RNDN);
      mpfr_set_z (q, Q[0], MPFR_RNDN);
      mpfr_div (t, t, q, MPFR_RNDN);

      for (i = 0; i < lgN; i++)
        {
          mpz_clear (T[i]);
          mpz_clear (P[i]);
          mpz_clear (Q[i]);
        }

      if (MPFR_LIKELY (ok != 0
                       || mpfr_can_round (t, w - 2, MPFR_RNDN, rnd_mode, n)))
        break;

      MPFR_ZIV_NEXT (loop, w);
      MPFR_GROUP_REPREC_2(group, w, t, q);
    }
  MPFR_ZIV_FREE (loop);

  inexact = mpfr_set (x, t, rnd_mode);

  MPFR_GROUP_CLEAR(group);
  MPFR_TMP_FREE(marker);

  return inexact;
}
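The internal routine above sits behind MPFR's cached constant interface; a minimal sketch of obtaining log(2) through the public API (precision and output format chosen only for illustration):

#include <stdio.h>
#include <mpfr.h>

int
main (void)
{
  mpfr_t l;

  mpfr_init2 (l, 200);                 /* 200-bit precision */
  mpfr_const_log2 (l, MPFR_RNDN);      /* result is cached by MPFR */
  mpfr_printf ("log(2) = %.40Rg\n", l);
  mpfr_clear (l);
  mpfr_free_cache ();                  /* release the internal cache */
  return 0;
}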
Example #3
int
mpfr_sqr (mpfr_ptr a, mpfr_srcptr b, mpfr_rnd_t rnd_mode)
{
  int cc, inexact;
  mpfr_exp_t ax;
  mp_limb_t *tmp;
  mp_limb_t b1;
  mpfr_prec_t bq;
  mp_size_t bn, tn;
  MPFR_TMP_DECL(marker);

  MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", b, b, rnd_mode),
                 ("y[%#R]=%R inexact=%d", a, a, inexact));

  /* deal with special cases */
  if (MPFR_UNLIKELY(MPFR_IS_SINGULAR(b)))
    {
      if (MPFR_IS_NAN(b))
        {
          MPFR_SET_NAN(a);
          MPFR_RET_NAN;
        }
      MPFR_SET_POS (a);
      if (MPFR_IS_INF(b))
        MPFR_SET_INF(a);
      else
        ( MPFR_ASSERTD(MPFR_IS_ZERO(b)), MPFR_SET_ZERO(a) );
      MPFR_RET(0);
    }
  ax = 2 * MPFR_GET_EXP (b);
  bq = MPFR_PREC(b);

  MPFR_ASSERTD (2 * bq > bq); /* PREC_MAX is /2 so no integer overflow */

  bn = MPFR_LIMB_SIZE(b); /* number of limbs of b */
  tn = 1 + (2 * bq - 1) / GMP_NUMB_BITS; /* number of limbs of the square,
                                            2*bn or 2*bn-1 */

  MPFR_TMP_MARK(marker);
  tmp = (mp_limb_t *) MPFR_TMP_ALLOC((size_t) 2 * bn * BYTES_PER_MP_LIMB);

  /* Square the mantissa in temporarily allocated space */
  mpn_sqr_n (tmp, MPFR_MANT(b), bn);
  b1 = tmp[2 * bn - 1];

  /* now tmp[0]..tmp[2*bn-1] contains the square of the mantissa,
     with tmp[2*bn-1] >= 2^(GMP_NUMB_BITS-2) */
  b1 >>= GMP_NUMB_BITS - 1; /* msb from the product */

  /* if the mantissa of b is uniformly distributed in ]1/2, 1],
     then its square is in ]1/4, 1/2] with probability sqrt(2)-1 ~ 0.414
     and in [1/2, 1] with probability 2-sqrt(2) ~ 0.586 */
  tmp += 2 * bn - tn; /* +0 or +1 */
  if (MPFR_UNLIKELY(b1 == 0))
    mpn_lshift (tmp, tmp, tn, 1); /* tn <= 2*bn, so no stack corruption */

  cc = mpfr_round_raw (MPFR_MANT (a), tmp, 2 * bq, 0,
                       MPFR_PREC (a), rnd_mode, &inexact);
  /* cc = 1 ==> result is a power of two */
  if (MPFR_UNLIKELY(cc))
    MPFR_MANT(a)[MPFR_LIMB_SIZE(a)-1] = MPFR_LIMB_HIGHBIT;

  MPFR_TMP_FREE(marker);
  {
    mpfr_exp_t ax2 = ax + (mpfr_exp_t) (b1 - 1 + cc);
    if (MPFR_UNLIKELY( ax2 > __gmpfr_emax))
      return mpfr_overflow (a, rnd_mode, MPFR_SIGN_POS);
    if (MPFR_UNLIKELY( ax2 < __gmpfr_emin))
      {
        /* In the rounding to the nearest mode, if the exponent of the exact
           result (i.e. before rounding, i.e. without taking cc into account)
           is < __gmpfr_emin - 1 or the exact result is a power of 2 (i.e. if
           both arguments are powers of 2), then round to zero. */
        if (rnd_mode == MPFR_RNDN &&
            (ax + (mpfr_exp_t) b1 < __gmpfr_emin || mpfr_powerof2_raw (b)))
          rnd_mode = MPFR_RNDZ;
        return mpfr_underflow (a, rnd_mode, MPFR_SIGN_POS);
      }
    MPFR_SET_EXP (a, ax2);
    MPFR_SET_POS (a);
  }
  MPFR_RET (inexact);
}
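A minimal sketch of calling the public mpfr_sqr entry point; the operands and precision below are arbitrary choices for illustration:

#include <stdio.h>
#include <mpfr.h>

int
main (void)
{
  mpfr_t a, b;
  int inexact;

  mpfr_init2 (b, 113);
  mpfr_init2 (a, 113);
  mpfr_set_str (b, "1.5", 10, MPFR_RNDN);
  inexact = mpfr_sqr (a, b, MPFR_RNDN);      /* a = b^2, correctly rounded */
  mpfr_printf ("b^2 = %Rg, ternary = %d\n", a, inexact);
  mpfr_clear (a);
  mpfr_clear (b);
  return 0;
}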
Example #4
int
mpfr_mul_ui (mpfr_ptr y, mpfr_srcptr x, unsigned long int u, mpfr_rnd_t rnd_mode)
{
  mp_limb_t *yp;
  mp_size_t xn, yn;
  int cnt, inexact;
  MPFR_TMP_DECL (marker);

  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
    {
      if (MPFR_IS_NAN (x))
        {
          MPFR_SET_NAN (y);
          MPFR_RET_NAN;
        }
      else if (MPFR_IS_INF (x))
        {
          if (u != 0)
            {
              MPFR_SET_INF (y);
              MPFR_SET_SAME_SIGN (y, x);
              MPFR_RET (0); /* infinity is exact */
            }
          else /* 0 * infinity */
            {
              MPFR_SET_NAN (y);
              MPFR_RET_NAN;
            }
        }
      else /* x is zero */
        {
          MPFR_ASSERTD (MPFR_IS_ZERO (x));
          MPFR_SET_ZERO (y);
          MPFR_SET_SAME_SIGN (y, x);
          MPFR_RET (0); /* zero is exact */
        }
    }
  else if (MPFR_UNLIKELY (u <= 1))
    {
      if (u < 1)
        {
          MPFR_SET_ZERO (y);
          MPFR_SET_SAME_SIGN (y, x);
          MPFR_RET (0); /* zero is exact */
        }
      else
        return mpfr_set (y, x, rnd_mode);
    }
  else if (MPFR_UNLIKELY (IS_POW2 (u)))
    return mpfr_mul_2si (y, x, MPFR_INT_CEIL_LOG2 (u), rnd_mode);

  yp = MPFR_MANT (y);
  yn = MPFR_LIMB_SIZE (y);
  xn = MPFR_LIMB_SIZE (x);

  MPFR_ASSERTD (xn < MP_SIZE_T_MAX);
  MPFR_TMP_MARK(marker);
  yp = (mp_ptr) MPFR_TMP_ALLOC ((size_t) (xn + 1) * BYTES_PER_MP_LIMB);

  MPFR_ASSERTN (u == (mp_limb_t) u);
  yp[xn] = mpn_mul_1 (yp, MPFR_MANT (x), xn, u);

  /* x * u is stored in yp[xn], ..., yp[0] */

  /* since the case u=1 was treated above, we have u >= 2, thus
     yp[xn] >= 1 since x was msb-normalized */
  MPFR_ASSERTD (yp[xn] != 0);
  if (MPFR_LIKELY (MPFR_LIMB_MSB (yp[xn]) == 0))
    {
      count_leading_zeros (cnt, yp[xn]);
      mpn_lshift (yp, yp, xn + 1, cnt);
    }
  else
    {
      cnt = 0;
    }

  /* now yp[xn], ..., yp[0] is msb-normalized too, and has at most
     PREC(x) + (BITS_PER_MP_LIMB - cnt) non-zero bits */
  MPFR_RNDRAW (inexact, y, yp, (mp_prec_t) (xn + 1) * BITS_PER_MP_LIMB,
               rnd_mode, MPFR_SIGN (x), cnt -- );

  MPFR_TMP_FREE (marker);

  cnt = BITS_PER_MP_LIMB - cnt;
  if (MPFR_UNLIKELY (__gmpfr_emax < MPFR_EMAX_MIN + cnt
                     || MPFR_GET_EXP (x) > __gmpfr_emax - cnt))
    return mpfr_overflow (y, rnd_mode, MPFR_SIGN(x));

  MPFR_SET_EXP (y, MPFR_GET_EXP (x) + cnt);
  MPFR_SET_SAME_SIGN (y, x);

  return inexact;
}
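For reference, a minimal sketch of calling mpfr_mul_ui from user code (the concrete operands are arbitrary):

#include <stdio.h>
#include <mpfr.h>

int
main (void)
{
  mpfr_t x, y;
  int inexact;

  mpfr_init2 (x, 64);
  mpfr_init2 (y, 64);
  mpfr_set_d (x, 0.1, MPFR_RNDN);
  inexact = mpfr_mul_ui (y, x, 10UL, MPFR_RNDN);   /* y = x * 10 */
  mpfr_printf ("y = %.20Rg (ternary = %d)\n", y, inexact);
  mpfr_clear (x);
  mpfr_clear (y);
  return 0;
}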
Example #5
File: mul.c Project: gnooth/xcl
static int
mpfr_mul3 (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
{
    /* Old implementation */
    int sign_product, cc, inexact;
    mpfr_exp_t ax;
    mp_limb_t *tmp;
    mp_limb_t b1;
    mpfr_prec_t bq, cq;
    mp_size_t bn, cn, tn, k;
    MPFR_TMP_DECL(marker);

    /* deal with special cases */
    if (MPFR_ARE_SINGULAR(b,c))
    {
        if (MPFR_IS_NAN(b) || MPFR_IS_NAN(c))
        {
            MPFR_SET_NAN(a);
            MPFR_RET_NAN;
        }
        sign_product = MPFR_MULT_SIGN( MPFR_SIGN(b) , MPFR_SIGN(c) );
        if (MPFR_IS_INF(b))
        {
            if (MPFR_IS_INF(c) || MPFR_NOTZERO(c))
            {
                MPFR_SET_SIGN(a,sign_product);
                MPFR_SET_INF(a);
                MPFR_RET(0); /* exact */
            }
            else
            {
                MPFR_SET_NAN(a);
                MPFR_RET_NAN;
            }
        }
        else if (MPFR_IS_INF(c))
        {
            if (MPFR_NOTZERO(b))
            {
                MPFR_SET_SIGN(a, sign_product);
                MPFR_SET_INF(a);
                MPFR_RET(0); /* exact */
            }
            else
            {
                MPFR_SET_NAN(a);
                MPFR_RET_NAN;
            }
        }
        else
        {
            MPFR_ASSERTD(MPFR_IS_ZERO(b) || MPFR_IS_ZERO(c));
            MPFR_SET_SIGN(a, sign_product);
            MPFR_SET_ZERO(a);
            MPFR_RET(0); /* multiplication by zero is exact */
        }
    }
    sign_product = MPFR_MULT_SIGN( MPFR_SIGN(b) , MPFR_SIGN(c) );

    ax = MPFR_GET_EXP (b) + MPFR_GET_EXP (c);

    bq = MPFR_PREC(b);
    cq = MPFR_PREC(c);

    MPFR_ASSERTD(bq+cq > bq); /* PREC_MAX is /2 so no integer overflow */

    bn = (bq+GMP_NUMB_BITS-1)/GMP_NUMB_BITS; /* number of limbs of b */
    cn = (cq+GMP_NUMB_BITS-1)/GMP_NUMB_BITS; /* number of limbs of c */
    k = bn + cn; /* effective nb of limbs used by b*c (= tn or tn+1) below */
    tn = (bq + cq + GMP_NUMB_BITS - 1) / GMP_NUMB_BITS;
    /* <= k, thus no int overflow */
    MPFR_ASSERTD(tn <= k);

    /* check that there is no size_t overflow */
    MPFR_ASSERTD((size_t) k <= ((size_t) -1) / BYTES_PER_MP_LIMB);
    MPFR_TMP_MARK(marker);
    tmp = (mp_limb_t *) MPFR_TMP_ALLOC((size_t) k * BYTES_PER_MP_LIMB);

    /* multiply the two mantissas in temporarily allocated space */
    b1 = (MPFR_LIKELY(bn >= cn)) ?
         mpn_mul (tmp, MPFR_MANT(b), bn, MPFR_MANT(c), cn)
         : mpn_mul (tmp, MPFR_MANT(c), cn, MPFR_MANT(b), bn);

    /* now tmp[0]..tmp[k-1] contains the product of the two mantissas,
       with tmp[k-1] >= 2^(GMP_NUMB_BITS-2) */
    b1 >>= GMP_NUMB_BITS - 1; /* msb from the product */

    /* if the mantissas of b and c are uniformly distributed in ]1/2, 1],
       then their product is in ]1/4, 1/2] with probability 2*ln(2)-1 ~ 0.386
       and in [1/2, 1] with probability 2-2*ln(2) ~ 0.614 */
    tmp += k - tn;
    if (MPFR_UNLIKELY(b1 == 0))
        mpn_lshift (tmp, tmp, tn, 1); /* tn <= k, so no stack corruption */
    cc = mpfr_round_raw (MPFR_MANT (a), tmp, bq + cq,
                         MPFR_IS_NEG_SIGN(sign_product),
                         MPFR_PREC (a), rnd_mode, &inexact);

    /* cc = 1 ==> result is a power of two */
    if (MPFR_UNLIKELY(cc))
        MPFR_MANT(a)[MPFR_LIMB_SIZE(a)-1] = MPFR_LIMB_HIGHBIT;

    MPFR_TMP_FREE(marker);

    {
        mpfr_exp_t ax2 = ax + (mpfr_exp_t) (b1 - 1 + cc);
        if (MPFR_UNLIKELY( ax2 > __gmpfr_emax))
            return mpfr_overflow (a, rnd_mode, sign_product);
        if (MPFR_UNLIKELY( ax2 < __gmpfr_emin))
        {
            /* In the rounding to the nearest mode, if the exponent of the exact
               result (i.e. before rounding, i.e. without taking cc into account)
               is < __gmpfr_emin - 1 or the exact result is a power of 2 (i.e. if
               both arguments are powers of 2), then round to zero. */
            if (rnd_mode == MPFR_RNDN &&
                    (ax + (mpfr_exp_t) b1 < __gmpfr_emin ||
                     (mpfr_powerof2_raw (b) && mpfr_powerof2_raw (c))))
                rnd_mode = MPFR_RNDZ;
            return mpfr_underflow (a, rnd_mode, sign_product);
        }
        MPFR_SET_EXP (a, ax2);
        MPFR_SET_SIGN(a, sign_product);
    }
    MPFR_RET (inexact);
}
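mpfr_mul3 is an internal variant; the usual entry point is the public mpfr_mul, sketched below with arbitrary operands:

#include <stdio.h>
#include <mpfr.h>

int
main (void)
{
  mpfr_t a, b, c;
  int inexact;

  mpfr_inits2 (128, a, b, c, (mpfr_ptr) 0);
  mpfr_set_ui (b, 3, MPFR_RNDN);
  mpfr_set_d (c, 0.25, MPFR_RNDN);
  inexact = mpfr_mul (a, b, c, MPFR_RNDN);   /* a = b * c, correctly rounded */
  mpfr_printf ("a = %Rg, ternary = %d\n", a, inexact);
  mpfr_clears (a, b, c, (mpfr_ptr) 0);
  return 0;
}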
Example #6
File: mul.c Project: gnooth/xcl
int
mpfr_mul (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
{
    int sign, inexact;
    mpfr_exp_t ax, ax2;
    mp_limb_t *tmp;
    mp_limb_t b1;
    mpfr_prec_t bq, cq;
    mp_size_t bn, cn, tn, k;
    MPFR_TMP_DECL (marker);

    MPFR_LOG_FUNC (("b[%#R]=%R c[%#R]=%R rnd=%d", b, b, c, c, rnd_mode),
                   ("a[%#R]=%R inexact=%d", a, a, inexact));

    /* deal with special cases */
    if (MPFR_ARE_SINGULAR (b, c))
    {
        if (MPFR_IS_NAN (b) || MPFR_IS_NAN (c))
        {
            MPFR_SET_NAN (a);
            MPFR_RET_NAN;
        }
        sign = MPFR_MULT_SIGN (MPFR_SIGN (b), MPFR_SIGN (c));
        if (MPFR_IS_INF (b))
        {
            if (!MPFR_IS_ZERO (c))
            {
                MPFR_SET_SIGN (a, sign);
                MPFR_SET_INF (a);
                MPFR_RET (0);
            }
            else
            {
                MPFR_SET_NAN (a);
                MPFR_RET_NAN;
            }
        }
        else if (MPFR_IS_INF (c))
        {
            if (!MPFR_IS_ZERO (b))
            {
                MPFR_SET_SIGN (a, sign);
                MPFR_SET_INF (a);
                MPFR_RET(0);
            }
            else
            {
                MPFR_SET_NAN (a);
                MPFR_RET_NAN;
            }
        }
        else
        {
            MPFR_ASSERTD (MPFR_IS_ZERO(b) || MPFR_IS_ZERO(c));
            MPFR_SET_SIGN (a, sign);
            MPFR_SET_ZERO (a);
            MPFR_RET (0);
        }
    }
    sign = MPFR_MULT_SIGN (MPFR_SIGN (b), MPFR_SIGN (c));

    ax = MPFR_GET_EXP (b) + MPFR_GET_EXP (c);
    /* Note: the exponent of the exact result will be e = bx + cx + ec with
       ec in {-1,0,1} and the following assumes that e is representable. */

    /* FIXME: Useful since we do an exponent check after ?
     * It is useful iff the precision is big, there is an overflow
     * and we are doing further mults...*/
#ifdef HUGE
    if (MPFR_UNLIKELY (ax > __gmpfr_emax + 1))
        return mpfr_overflow (a, rnd_mode, sign);
    if (MPFR_UNLIKELY (ax < __gmpfr_emin - 2))
        return mpfr_underflow (a, rnd_mode == MPFR_RNDN ? MPFR_RNDZ : rnd_mode,
                               sign);
#endif

    bq = MPFR_PREC (b);
    cq = MPFR_PREC (c);

    MPFR_ASSERTD (bq+cq > bq); /* PREC_MAX is /2 so no integer overflow */

    bn = (bq+GMP_NUMB_BITS-1)/GMP_NUMB_BITS; /* number of limbs of b */
    cn = (cq+GMP_NUMB_BITS-1)/GMP_NUMB_BITS; /* number of limbs of c */
    k = bn + cn; /* effective nb of limbs used by b*c (= tn or tn+1) below */
    tn = (bq + cq + GMP_NUMB_BITS - 1) / GMP_NUMB_BITS;
    MPFR_ASSERTD (tn <= k); /* tn <= k, thus no int overflow */

    /* check that there is no size_t overflow */
    MPFR_ASSERTD ((size_t) k <= ((size_t) -1) / BYTES_PER_MP_LIMB);
    MPFR_TMP_MARK (marker);
    tmp = (mp_limb_t *) MPFR_TMP_ALLOC ((size_t) k * BYTES_PER_MP_LIMB);

    /* multiply the two mantissas in temporarily allocated space */
    if (MPFR_UNLIKELY (bn < cn))
    {
        mpfr_srcptr z = b;
        mp_size_t zn  = bn;
        b = c;
        bn = cn;
        c = z;
        cn = zn;
    }
    MPFR_ASSERTD (bn >= cn);
    if (MPFR_LIKELY (bn <= 2))
    {
        if (bn == 1)
        {
            /* 1 limb * 1 limb */
            umul_ppmm (tmp[1], tmp[0], MPFR_MANT (b)[0], MPFR_MANT (c)[0]);
            b1 = tmp[1];
        }
        else if (MPFR_UNLIKELY (cn == 1))
        {
            /* 2 limbs * 1 limb */
            mp_limb_t t;
            umul_ppmm (tmp[1], tmp[0], MPFR_MANT (b)[0], MPFR_MANT (c)[0]);
            umul_ppmm (tmp[2], t, MPFR_MANT (b)[1], MPFR_MANT (c)[0]);
            add_ssaaaa (tmp[2], tmp[1], tmp[2], tmp[1], 0, t);
            b1 = tmp[2];
        }
        else
        {
            /* 2 limbs * 2 limbs */
            mp_limb_t t1, t2, t3;
            /* First 2 limbs * 1 limb */
            umul_ppmm (tmp[1], tmp[0], MPFR_MANT (b)[0], MPFR_MANT (c)[0]);
            umul_ppmm (tmp[2], t1, MPFR_MANT (b)[1], MPFR_MANT (c)[0]);
            add_ssaaaa (tmp[2], tmp[1], tmp[2], tmp[1], 0, t1);
            /* Second, the other 2 limbs * 1 limb product */
            umul_ppmm (t1, t2, MPFR_MANT (b)[0], MPFR_MANT (c)[1]);
            umul_ppmm (tmp[3], t3, MPFR_MANT (b)[1], MPFR_MANT (c)[1]);
            add_ssaaaa (tmp[3], t1, tmp[3], t1, 0, t3);
            /* Sum those two partial products */
            add_ssaaaa (tmp[2], tmp[1], tmp[2], tmp[1], t1, t2);
            tmp[3] += (tmp[2] < t1);
            b1 = tmp[3];
        }
        b1 >>= (GMP_NUMB_BITS - 1);
        tmp += k - tn;
        if (MPFR_UNLIKELY (b1 == 0))
            mpn_lshift (tmp, tmp, tn, 1); /* tn <= k, so no stack corruption */
    }
    else
        /* Mulders' mulhigh. Disable if squaring, since it is not tuned for
           such a case */
        if (MPFR_UNLIKELY (bn > MPFR_MUL_THRESHOLD && b != c))
Example #7
/* returns 0 if result exact, non-zero otherwise */
int
mpfr_div_ui (mpfr_ptr y, mpfr_srcptr x, unsigned long int u, mpfr_rnd_t rnd_mode)
{
  long i;
  int sh;
  mp_size_t xn, yn, dif;
  mp_limb_t *xp, *yp, *tmp, c, d;
  mpfr_exp_t exp;
  int inexact, middle = 1, nexttoinf;
  MPFR_TMP_DECL(marker);

  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
    {
      if (MPFR_IS_NAN (x))
        {
          MPFR_SET_NAN (y);
          MPFR_RET_NAN;
        }
      else if (MPFR_IS_INF (x))
        {
          MPFR_SET_INF (y);
          MPFR_SET_SAME_SIGN (y, x);
          MPFR_RET (0);
        }
      else
        {
          MPFR_ASSERTD (MPFR_IS_ZERO(x));
          if (u == 0) /* 0/0 is NaN */
            {
              MPFR_SET_NAN(y);
              MPFR_RET_NAN;
            }
          else
            {
              MPFR_SET_ZERO(y);
              MPFR_SET_SAME_SIGN (y, x);
              MPFR_RET(0);
            }
        }
    }
  else if (MPFR_UNLIKELY (u <= 1))
    {
      if (u < 1)
        {
          /* x/0 is Inf since x != 0*/
          MPFR_SET_INF (y);
          MPFR_SET_SAME_SIGN (y, x);
          MPFR_RET (0);
        }
      else /* y = x/1 = x */
        return mpfr_set (y, x, rnd_mode);
    }
  else if (MPFR_UNLIKELY (IS_POW2 (u)))
    return mpfr_div_2si (y, x, MPFR_INT_CEIL_LOG2 (u), rnd_mode);

  MPFR_SET_SAME_SIGN (y, x);

  MPFR_TMP_MARK (marker);
  xn = MPFR_LIMB_SIZE (x);
  yn = MPFR_LIMB_SIZE (y);

  xp = MPFR_MANT (x);
  yp = MPFR_MANT (y);
  exp = MPFR_GET_EXP (x);

  dif = yn + 1 - xn;

  /* we need to store yn+1 = xn + dif limbs of the quotient */
  /* don't use tmp=yp since the mpn_lshift call below requires yp >= tmp+1 */
  tmp = (mp_limb_t*) MPFR_TMP_ALLOC ((yn + 1) * BYTES_PER_MP_LIMB);

  c = (mp_limb_t) u;
  MPFR_ASSERTN (u == c);
  if (dif >= 0)
    c = mpn_divrem_1 (tmp, dif, xp, xn, c); /* used all the dividend */
  else /* dif < 0 i.e. xn > yn, don't use the (-dif) low limbs from x */
    c = mpn_divrem_1 (tmp, 0, xp - dif, yn + 1, c);

  inexact = (c != 0);

  /* First pass in estimating next bit of the quotient, in case of RNDN    *
   * In case we just have the right number of bits (postpone this ?),      *
   * we need to check whether the remainder is more or less than half      *
   * the divisor. The test must be performed with a subtraction, so as     *
   * to prevent carries.                                                   */

  if (MPFR_LIKELY (rnd_mode == MPFR_RNDN))
    {
      if (c < (mp_limb_t) u - c) /* We have u > c */
        middle = -1;
      else if (c > (mp_limb_t) u - c)
        middle = 1;
      else
        middle = 0; /* exactly in the middle */
    }

  /* If we believe that we are right in the middle or exact, we should check
     that we did not neglect any word of x (division large / 1 -> small). */

  for (i=0; ((inexact == 0) || (middle == 0)) && (i < -dif); i++)
    if (xp[i])
      inexact = middle = 1; /* larger than middle */

  /*
     If the high limb of the result is 0 (xp[xn-1] < u), remove it.
     Otherwise, compute the left shift to be performed to normalize.
     In the latter case, we discard some low bits computed. They
     contain information useful for the rounding, hence the updating
     of middle and inexact.
  */

  if (tmp[yn] == 0)
    {
      MPN_COPY(yp, tmp, yn);
      exp -= GMP_NUMB_BITS;
    }
  else
    {
      int shlz;

      count_leading_zeros (shlz, tmp[yn]);

      /* shift left to normalize */
      if (MPFR_LIKELY (shlz != 0))
        {
          mp_limb_t w = tmp[0] << shlz;

          mpn_lshift (yp, tmp + 1, yn, shlz);
          yp[0] += tmp[0] >> (GMP_NUMB_BITS - shlz);

          if (w > (MPFR_LIMB_ONE << (GMP_NUMB_BITS - 1)))
            { middle = 1; }
          else if (w < (MPFR_LIMB_ONE << (GMP_NUMB_BITS - 1)))
            { middle = -1; }
          else
            { middle = (c != 0); }

          inexact = inexact || (w != 0);
          exp -= shlz;
        }
      else
        { /* this happens only if u == 1 and xp[xn-1] >=
Example #8
/* Compute the first 2^m terms from the hypergeometric series
   with x = p / 2^r */
static int
GENERIC (mpfr_ptr y, mpz_srcptr p, long r, int m)
{
  unsigned long n,i,k,j,l;
  int is_p_one;
  mpz_t* P,*S;
#ifdef A
  mpz_t *T;
#endif
  mpz_t* ptoj;
#ifdef R_IS_RATIONAL
  mpz_t* qtoj;
  mpfr_t tmp;
#endif
  mp_exp_t diff, expo;
  mp_prec_t precy = MPFR_PREC(y);
  MPFR_TMP_DECL(marker);

  MPFR_TMP_MARK(marker);
  MPFR_CLEAR_FLAGS(y);
  n = 1UL << m;
  P = (mpz_t*) MPFR_TMP_ALLOC ((m+1) * sizeof(mpz_t));
  S = (mpz_t*) MPFR_TMP_ALLOC ((m+1) * sizeof(mpz_t));
  ptoj = (mpz_t*) MPFR_TMP_ALLOC ((m+1) * sizeof(mpz_t)); /* ptoj[i] = mantissa^(2^i) */
#ifdef A
  T = (mpz_t*) MPFR_TMP_ALLOC ((m+1) * sizeof(mpz_t));
#endif
#ifdef R_IS_RATIONAL
  qtoj = (mpz_t*) MPFR_TMP_ALLOC ((m+1) * sizeof(mpz_t));
#endif
  for (i = 0 ; i <= m ; i++)
    {
      mpz_init (P[i]);
      mpz_init (S[i]);
      mpz_init (ptoj[i]);
#ifdef R_IS_RATIONAL
      mpz_init (qtoj[i]);
#endif
#ifdef A
      mpz_init (T[i]);
#endif
    }
  mpz_set (ptoj[0], p);
#ifdef C
#  if C2 != 1
  mpz_mul_ui (ptoj[0], ptoj[0], C2);
#  endif
#endif
  is_p_one = mpz_cmp_ui(ptoj[0], 1) == 0;
#ifdef A
#  ifdef B
  mpz_set_ui (T[0], A1 * B1);
#  else
  mpz_set_ui (T[0], A1);
#  endif
#endif
  if (!is_p_one)
    for (i = 1 ; i < m ; i++)
      mpz_mul (ptoj[i], ptoj[i-1], ptoj[i-1]);
#ifdef R_IS_RATIONAL
  mpz_set_si (qtoj[0], r);
  for (i = 1 ; i <= m ; i++)
    mpz_mul(qtoj[i], qtoj[i-1], qtoj[i-1]);
#endif
  mpz_set_ui (P[0], 1);
  mpz_set_ui (S[0], 1);

  k = 0;
  for (i = 1 ; i < n ; i++) {
    k++;

#ifdef A
#  ifdef B
    mpz_set_ui (T[k], (A1 + A2*i)*(B1+B2*i));
#  else
    mpz_set_ui (T[k], A1 + A2*i);
#  endif
#endif

#ifdef C
#  ifdef NO_FACTORIAL
    mpz_set_ui (P[k], (C1 + C2 * (i-1)));
    mpz_set_ui (S[k], 1);
#  else
    mpz_set_ui (P[k], (i+1) * (C1 + C2 * (i-1)));
    mpz_set_ui (S[k], i+1);
#  endif
#else
#  ifdef NO_FACTORIAL
    mpz_set_ui (P[k], 1);
#  else
    mpz_set_ui (P[k], i+1);
#  endif
    mpz_set (S[k], P[k]);
#endif

    for (j = i+1, l = 0 ; (j & 1) == 0 ; l++, j>>=1, k--) {
      if (!is_p_one)
        mpz_mul (S[k], S[k], ptoj[l]);
#ifdef A
#  ifdef B
#    if (A2*B2) != 1
      mpz_mul_ui (P[k], P[k], A2*B2);
#    endif
#  else
#    if A2 != 1
      mpz_mul_ui (P[k], P[k], A2);
#    endif
#  endif
      mpz_mul (S[k], S[k], T[k-1]);
#endif
      mpz_mul (S[k-1], S[k-1], P[k]);
#ifdef R_IS_RATIONAL
      mpz_mul (S[k-1], S[k-1], qtoj[l]);
#else
      mpz_mul_2exp (S[k-1], S[k-1], r*(1<<l));
#endif
      mpz_add (S[k-1], S[k-1], S[k]);
      mpz_mul (P[k-1], P[k-1], P[k]);
#ifdef A
      mpz_mul (T[k-1], T[k-1], T[k]);
#endif
    }
  }

  diff = mpz_sizeinbase(S[0],2) - 2*precy;
  expo = diff;
  if (diff >= 0)
    mpz_div_2exp(S[0],S[0],diff);
  else
    mpz_mul_2exp(S[0],S[0],-diff);
  diff = mpz_sizeinbase(P[0],2) - precy;
  expo -= diff;
  if (diff >=0)
    mpz_div_2exp(P[0],P[0],diff);
  else
    mpz_mul_2exp(P[0],P[0],-diff);

  mpz_tdiv_q(S[0], S[0], P[0]);
  mpfr_set_z(y, S[0], GMP_RNDD);
  MPFR_SET_EXP (y, MPFR_GET_EXP (y) + expo);

#ifdef R_IS_RATIONAL
  /* exact division */
  mpz_div_ui (qtoj[m], qtoj[m], r);
  mpfr_init2 (tmp, MPFR_PREC(y));
  mpfr_set_z (tmp, qtoj[m] , GMP_RNDD);
  mpfr_div (y, y, tmp, GMP_RNDD);
  mpfr_clear (tmp);
#else
  mpfr_div_2ui(y, y, r*(i-1), GMP_RNDN);
#endif
  for (i = 0 ; i <= m ; i++)
    {
      mpz_clear (P[i]);
      mpz_clear (S[i]);
      mpz_clear (ptoj[i]);
#ifdef R_IS_RATIONAL
      mpz_clear (qtoj[i]);
#endif
#ifdef A
      mpz_clear (T[i]);
#endif
    }
  MPFR_TMP_FREE (marker);
  return 0;
}
/* compute sign(b) * (|b| + |c|)
   Returns 0 iff result is exact,
   a negative value when the result is less than the exact value,
   a positive value otherwise. */
int
mpfr_add1sp (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
{
  mpfr_uexp_t d;
  mpfr_prec_t p;
  unsigned int sh;
  mp_size_t n;
  mp_limb_t *ap, *cp;
  mpfr_exp_t bx;
  mp_limb_t limb;
  int inexact;
  MPFR_TMP_DECL(marker);

  MPFR_TMP_MARK(marker);

  MPFR_ASSERTD(MPFR_PREC(a) == MPFR_PREC(b) && MPFR_PREC(b) == MPFR_PREC(c));
  MPFR_ASSERTD(MPFR_IS_PURE_FP(b) && MPFR_IS_PURE_FP(c));
  MPFR_ASSERTD(MPFR_GET_EXP(b) >= MPFR_GET_EXP(c));

  /* Read prec and num of limbs */
  p = MPFR_PREC(b);
  n = (p+GMP_NUMB_BITS-1)/GMP_NUMB_BITS;
  MPFR_UNSIGNED_MINUS_MODULO(sh, p);
  bx = MPFR_GET_EXP(b);
  d = (mpfr_uexp_t) (bx - MPFR_GET_EXP(c));

  DEBUG (printf ("New add1sp with diff=%lu\n", (unsigned long) d));

  if (MPFR_UNLIKELY(d == 0))
    {
      /* d==0 */
      DEBUG( mpfr_print_mant_binary("C= ", MPFR_MANT(c), p) );
      DEBUG( mpfr_print_mant_binary("B= ", MPFR_MANT(b), p) );
      bx++;                                /* exp + 1 */
      ap = MPFR_MANT(a);
      limb = mpn_add_n(ap, MPFR_MANT(b), MPFR_MANT(c), n);
      DEBUG( mpfr_print_mant_binary("A= ", ap, p) );
      MPFR_ASSERTD(limb != 0);             /* There must be a carry */
      limb = ap[0];                        /* Get LSB (In fact, LSW) */
      mpn_rshift(ap, ap, n, 1);            /* Shift mantissa A */
      ap[n-1] |= MPFR_LIMB_HIGHBIT;        /* Set MSB */
      ap[0]   &= ~MPFR_LIMB_MASK(sh);      /* Clear LSB bit */
      if (MPFR_LIKELY((limb&(MPFR_LIMB_ONE<<sh)) == 0)) /* Check exact case */
        { inexact = 0; goto set_exponent; }
      /* Zero: Truncate
         Nearest: Even Rule => truncate or add 1
         Away: Add 1 */
      if (MPFR_LIKELY(rnd_mode==MPFR_RNDN))
        {
          if (MPFR_LIKELY((ap[0]&(MPFR_LIMB_ONE<<sh))==0))
            { inexact = -1; goto set_exponent; }
          else
            goto add_one_ulp;
        }
      MPFR_UPDATE_RND_MODE(rnd_mode, MPFR_IS_NEG(b));
      if (rnd_mode==MPFR_RNDZ)
        { inexact = -1; goto set_exponent; }
      else
        goto add_one_ulp;
    }
  else if (MPFR_UNLIKELY (d >= p))
    {
      if (MPFR_LIKELY (d > p))
        {
          /* d > p : Copy B in A */
          /* Away:    Add 1
             Nearest: Trunc
             Zero:    Trunc */
          if (MPFR_LIKELY (rnd_mode==MPFR_RNDN
                           || MPFR_IS_LIKE_RNDZ (rnd_mode, MPFR_IS_NEG (b))))
            {
            copy_set_exponent:
              ap = MPFR_MANT (a);
              MPN_COPY (ap, MPFR_MANT(b), n);
              inexact = -1;
              goto set_exponent;
            }
          else
            {
            copy_add_one_ulp:
              ap = MPFR_MANT(a);
              MPN_COPY (ap, MPFR_MANT(b), n);
              goto add_one_ulp;
            }
        }
      else
        {
          /* d==p : Copy B in A */
          /* Away:    Add 1
             Nearest: Even Rule if C is a power of 2, else Add 1
             Zero:    Trunc */
          if (MPFR_LIKELY(rnd_mode==MPFR_RNDN))
            {
              /* Check if C was a power of 2 */
              cp = MPFR_MANT(c);
              if (MPFR_UNLIKELY(cp[n-1] == MPFR_LIMB_HIGHBIT))
                {
                  mp_size_t k = n-1;
                  do {
                    k--;
                  } while (k>=0 && cp[k]==0);
                  if (MPFR_UNLIKELY(k<0))
                    /* Power of 2: Even rule */
                    if ((MPFR_MANT (b)[0]&(MPFR_LIMB_ONE<<sh))==0)
                      goto copy_set_exponent;
                }
              /* Not a Power of 2 */
              goto copy_add_one_ulp;
            }
          else if (MPFR_IS_LIKE_RNDZ (rnd_mode, MPFR_IS_NEG (b)))
            goto copy_set_exponent;
          else
            goto copy_add_one_ulp;
        }
    }
  else
    {
      mp_limb_t mask;
      mp_limb_t bcp, bcp1; /* Cp and C'p+1 */

      /* General case: 1 <= d < p */
      cp = (mp_limb_t*) MPFR_TMP_ALLOC(n * BYTES_PER_MP_LIMB);

      /* Shift c in temporary allocated place */
      {
        mpfr_uexp_t dm;
        mp_size_t m;

        dm = d % GMP_NUMB_BITS;
        m = d / GMP_NUMB_BITS;
        if (MPFR_UNLIKELY(dm == 0))
          {
            /* dm = 0 and m > 0: Just copy */
            MPFR_ASSERTD(m!=0);
            MPN_COPY(cp, MPFR_MANT(c)+m, n-m);
            MPN_ZERO(cp+n-m, m);
          }
        else if (MPFR_LIKELY(m == 0))
          {
            /* dm >=1 and m == 0: just shift */
            MPFR_ASSERTD(dm >= 1);
            mpn_rshift(cp, MPFR_MANT(c), n, dm);
          }
        else
          {
            /* dm > 0 and m > 0: shift and zero  */
            mpn_rshift(cp, MPFR_MANT(c)+m, n-m, dm);
            MPN_ZERO(cp+n-m, m);
          }
      }

      DEBUG( mpfr_print_mant_binary("Before", MPFR_MANT(c), p) );
      DEBUG( mpfr_print_mant_binary("B=    ", MPFR_MANT(b), p) );
      DEBUG( mpfr_print_mant_binary("After ", cp, p) );

      /* Compute bcp=Cp and bcp1=C'p+1 */
      if (MPFR_LIKELY (sh > 0))
        {
          /* Try to compute them from C' rather than C */
          bcp = (cp[0] & (MPFR_LIMB_ONE<<(sh-1))) ;
          if (MPFR_LIKELY(cp[0]&MPFR_LIMB_MASK(sh-1)))
            bcp1 = 1;
          else
            {
              /* We can't compute C'p+1 from C'. Compute it from C */
              /* Start from bit x=p-d+sh in mantissa C
                 (+sh since we have already looked sh bits in C'!) */
              mpfr_prec_t x = p-d+sh-1;
              if (MPFR_LIKELY(x>p))
                /* We have already looked at all the bits of c, so C'p+1 = 0 */
                bcp1 = 0;
              else
                {
                  mp_limb_t *tp = MPFR_MANT(c);
                  mp_size_t kx = n-1 - (x / GMP_NUMB_BITS);
                  mpfr_prec_t sx = GMP_NUMB_BITS-1-(x%GMP_NUMB_BITS);
                  DEBUG (printf ("(First) x=%lu Kx=%ld Sx=%lu\n",
                                 (unsigned long) x, (long) kx,
                                 (unsigned long) sx));
                  /* Looks at the last bits of limb kx (if sx=0 does nothing)*/
                  if (tp[kx] & MPFR_LIMB_MASK(sx))
                    bcp1 = 1;
                  else
                    {
                      /*kx += (sx==0);*/
                      /*If sx==0, tp[kx] hasn't been checked*/
                      do {
                        kx--;
                      } while (kx>=0 && tp[kx]==0);
                      bcp1 = (kx >= 0);
                    }
                }
            }
        }
      else /* sh == 0 */
        {
          /* Compute Cp and C'p+1 from C with sh=0 */
          mp_limb_t *tp = MPFR_MANT(c);
          /* Start from bit x=p-d in mantissa C */
          mpfr_prec_t  x = p-d;
          mp_size_t   kx = n-1 - (x / GMP_NUMB_BITS);
          mpfr_prec_t sx = GMP_NUMB_BITS-1-(x%GMP_NUMB_BITS);
          MPFR_ASSERTD(p >= d);
          bcp = tp[kx] & (MPFR_LIMB_ONE<<sx);
          /* Looks at the last bits of limb kx (If sx=0, does nothing)*/
          if (tp[kx]&MPFR_LIMB_MASK(sx))
            bcp1 = 1;
          else
            {
              do {
                kx--;
              } while (kx>=0 && tp[kx]==0);
              bcp1 = (kx>=0);
            }
        }
      DEBUG (printf("sh=%u Cp=%lu C'p+1=%lu\n", sh,
                    (unsigned long) bcp, (unsigned long) bcp1));

      /* Clean shifted C' */
      mask = ~MPFR_LIMB_MASK(sh);
      cp[0] &= mask;

      /* Add the mantissa c from b in a */
      ap = MPFR_MANT(a);
      limb = mpn_add_n (ap, MPFR_MANT(b), cp, n);
      DEBUG( mpfr_print_mant_binary("Add=  ", ap, p) );

      /* Check for overflow */
      if (MPFR_UNLIKELY (limb))
        {
          limb = ap[0] & (MPFR_LIMB_ONE<<sh); /* Get LSB */
          mpn_rshift (ap, ap, n, 1);          /* Shift mantissa*/
          bx++;                               /* Fix exponent */
          ap[n-1] |= MPFR_LIMB_HIGHBIT;       /* Set MSB */
          ap[0]   &= mask;                    /* Clear LSB bit */
          bcp1    |= bcp;                     /* Recompute C'p+1 */
          bcp      = limb;                    /* Recompute Cp */
          DEBUG (printf ("(Overflow) Cp=%lu C'p+1=%lu\n",
                         (unsigned long) bcp, (unsigned long) bcp1));
          DEBUG (mpfr_print_mant_binary ("Add=  ", ap, p));
        }

      /* Round:
          Zero: Truncate but could be exact.
          Away: Add 1 if Cp or C'p+1 !=0
          Nearest: Truncate but could be exact if Cp==0
                   Add 1 if C'p+1 !=0,
                   Even rule else */
      if (MPFR_LIKELY(rnd_mode == MPFR_RNDN))
        {
          if (MPFR_LIKELY(bcp == 0))
            { inexact = MPFR_LIKELY(bcp1) ? -1 : 0; goto set_exponent; }
          else if (MPFR_UNLIKELY(bcp1==0) && (ap[0]&(MPFR_LIMB_ONE<<sh))==0)
            { inexact = -1; goto set_exponent; }
          else
            goto add_one_ulp;
        }
      MPFR_UPDATE_RND_MODE(rnd_mode, MPFR_IS_NEG(b));
      if (rnd_mode == MPFR_RNDZ)
        {
          inexact = MPFR_LIKELY(bcp || bcp1) ? -1 : 0;
          goto set_exponent;
        }
      else
        {
          if (MPFR_UNLIKELY(bcp==0 && bcp1==0))
            { inexact = 0; goto set_exponent; }
          else
            goto add_one_ulp;
        }
    }
  MPFR_ASSERTN(0);

 add_one_ulp:
  /* add one unit in last place to a */
  DEBUG( printf("AddOneUlp\n") );
  if (MPFR_UNLIKELY( mpn_add_1(ap, ap, n, MPFR_LIMB_ONE<<sh) ))
    {
      /* Case 100000x0 = 0x1111x1 + 1*/
      DEBUG( printf("Pow of 2\n") );
      bx++;
      ap[n-1] = MPFR_LIMB_HIGHBIT;
    }
  inexact = 1;

 set_exponent:
  if (MPFR_UNLIKELY(bx > __gmpfr_emax)) /* Check for overflow */
    {
      DEBUG( printf("Overflow\n") );
      MPFR_TMP_FREE(marker);
      MPFR_SET_SAME_SIGN(a,b);
      return mpfr_overflow(a, rnd_mode, MPFR_SIGN(a));
    }
  MPFR_SET_EXP (a, bx);
  MPFR_SET_SAME_SIGN(a,b);

  MPFR_TMP_FREE(marker);
  MPFR_RET (inexact * MPFR_INT_SIGN (a));
}
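mpfr_add1sp handles the same-precision, same-sign case reached through the public mpfr_add; a minimal sketch of that public call (all concrete values are illustrative):

#include <stdio.h>
#include <mpfr.h>

int
main (void)
{
  mpfr_t a, b, c;
  int inexact;

  mpfr_inits2 (96, a, b, c, (mpfr_ptr) 0);   /* same precision for all three */
  mpfr_set_str (b, "2.75", 10, MPFR_RNDN);
  mpfr_set_str (c, "0.125", 10, MPFR_RNDN);
  inexact = mpfr_add (a, b, c, MPFR_RNDN);   /* a = b + c, correctly rounded */
  mpfr_printf ("a = %Rg, ternary = %d\n", a, inexact);
  mpfr_clears (a, b, c, (mpfr_ptr) 0);
  return 0;
}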