Example No. 1
/* the rounding mode is mpfr_rnd_t here since we return an mpfr number */
int
mpc_norm (mpfr_ptr a, mpc_srcptr b, mpfr_rnd_t rnd)
{
    mpfr_t u, v;
    mp_prec_t prec;
    int inexact, overflow;

    prec = MPFR_PREC(a);

    /* handling of special values; consistent with abs in that
       norm = abs^2; so norm (+-inf, nan) = norm (nan, +-inf) = +inf */
    if (   (mpfr_nan_p (MPC_RE (b)) || mpfr_nan_p (MPC_IM (b)))
            || (mpfr_inf_p (MPC_RE (b)) || mpfr_inf_p (MPC_IM (b))))
        return mpc_abs (a, b, rnd);

    mpfr_init (u);
    mpfr_init (v);

    if (!mpfr_zero_p(MPC_RE(b)) && !mpfr_zero_p(MPC_IM(b)) &&
            2 * SAFE_ABS (mp_exp_t, MPFR_EXP (MPC_RE (b)) - MPFR_EXP (MPC_IM (b)))
            > (mp_exp_t)prec)
        /* If real and imaginary part have very different magnitudes, then the */
        /* generic code increases the precision too much. Instead, compute the */
        /* squarings _exactly_.                                                */
    {
        mpfr_set_prec (u, 2 * MPFR_PREC (MPC_RE (b)));
        mpfr_set_prec (v, 2 * MPFR_PREC (MPC_IM (b)));
        mpfr_sqr (u, MPC_RE (b), GMP_RNDN);
        mpfr_sqr (v, MPC_IM (b), GMP_RNDN);
        inexact = mpfr_add (a, u, v, rnd);
    }
    else
    {
        do
        {
            prec += mpc_ceil_log2 (prec) + 3;

            mpfr_set_prec (u, prec);
            mpfr_set_prec (v, prec);

            inexact = mpfr_sqr (u, MPC_RE(b), GMP_RNDN);  /* err <= 1/2 ulp */
            inexact |= mpfr_sqr (v, MPC_IM(b), GMP_RNDN); /* err <= 1/2 ulp */
            inexact |= mpfr_add (u, u, v, GMP_RNDN);      /* err <= 3/2 ulps */

            overflow = mpfr_inf_p (u);
        }
        while (!overflow && inexact &&
                mpfr_can_round (u, prec - 2, GMP_RNDN, rnd, MPFR_PREC(a)) == 0);

        inexact |= mpfr_set (a, u, rnd);
    }
    mpfr_clear (u);
    mpfr_clear (v);

    return inexact;
}
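
A minimal caller sketch for mpc_norm, assuming a standard MPC installation; the precision and the value 3 + 4i are illustrative, not taken from the source above:

#include <stdio.h>
#include <mpc.h>

int main (void)
{
    mpc_t z;
    mpfr_t n;

    mpc_init2 (z, 128);                    /* complex operand, 128-bit parts */
    mpfr_init2 (n, 128);                   /* real destination for the norm  */

    mpc_set_ui_ui (z, 3, 4, MPC_RNDNN);    /* z = 3 + 4i */
    mpc_norm (n, z, MPFR_RNDN);            /* n = |z|^2 = 25, exact here */

    mpfr_printf ("norm = %.10Rf\n", n);

    mpfr_clear (n);
    mpc_clear (z);
    return 0;
}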
Example No. 2
/* We can't use fits_s.h because it relies on mpfr_cmp_ui */
int
mpfr_fits_intmax_p (mpfr_srcptr f, mpfr_rnd_t rnd)
{
  mpfr_exp_t e;
  int prec;
  mpfr_t x, y;
  int neg;
  int res;

  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (f)))
    /* Zero always fits */
    return MPFR_IS_ZERO (f) ? 1 : 0;

  /* now it fits if either
     (a) MINIMUM <= f <= MAXIMUM
     (b) or MINIMUM <= round(f, prec(intmax_t), rnd) <= MAXIMUM */

  e = MPFR_EXP (f);
  if (e < 1)
    return 1; /* |f| < 1: always fits */

  neg = MPFR_IS_NEG (f);

  /* let EXTREMUM be MAXIMUM if f > 0, and MINIMUM if f < 0 */

  /* first compute prec(EXTREMUM), this could be done at configure time,
     but the result can depend on neg (the loop is moved inside the "if"
     to give the compiler a better chance to compute prec statically) */
  if (neg)
    {
      uintmax_t s;
      /* In C89, the division on negative integers isn't well-defined. */
      s = SAFE_ABS (uintmax_t, MPFR_INTMAX_MIN);
      for (prec = 0; s != 0; s /= 2, prec ++);
    }
  else
    {
      intmax_t s;
      s = MPFR_INTMAX_MAX;
      for (prec = 0; s != 0; s /= 2, prec ++);
    }

  /* EXTREMUM needs prec bits, i.e. 2^(prec-1) <= |EXTREMUM| < 2^prec */

   /* if e <= prec - 1, then f < 2^(prec-1) <= |EXTREMUM| */
  if (e <= prec - 1)
    return 1;

  /* if e >= prec + 1, then f >= 2^prec > |EXTREMUM| */
  if (e >= prec + 1)
    return 0;

  MPFR_ASSERTD (e == prec);

  /* hard case: first round to prec bits, then check */
  mpfr_init2 (x, prec);
  mpfr_set (x, f, rnd);

  if (neg)
    {
      mpfr_init2 (y, prec);
      mpfr_set_sj (y, MPFR_INTMAX_MIN, MPFR_RNDN);
      res = mpfr_cmp (x, y) >= 0;
      mpfr_clear (y);
    }
  else
    {
      res = MPFR_GET_EXP (x) == e;
    }

  mpfr_clear (x);
  return res;
}
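
A short sketch of how mpfr_fits_intmax_p is typically paired with mpfr_get_sj; note that <stdint.h> (or <inttypes.h>) must be included before mpfr.h to expose the intmax_t interface. The value below is illustrative and assumes a 64-bit intmax_t:

#include <stdio.h>
#include <stdint.h>   /* must come before mpfr.h for the intmax_t functions */
#include <mpfr.h>

int main (void)
{
  mpfr_t x;

  mpfr_init2 (x, 200);
  mpfr_set_str (x, "9223372036854775807.25", 10, MPFR_RNDN);

  if (mpfr_fits_intmax_p (x, MPFR_RNDD))
    {
      intmax_t v = mpfr_get_sj (x, MPFR_RNDD);   /* rounds down to INTMAX_MAX */
      printf ("fits after rounding down: %jd\n", v);
    }
  else
    printf ("does not fit in intmax_t\n");

  mpfr_clear (x);
  return 0;
}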
Example No. 3
int
mpc_sqr (mpc_ptr rop, mpc_srcptr op, mpc_rnd_t rnd)
{
   int ok;
   mpfr_t u, v;
   mpfr_t x;
      /* temporary variable to hold the real part of op,
         needed in the case rop==op */
   mpfr_prec_t prec;
   int inex_re, inex_im, inexact;
   mpfr_exp_t emin;
   int saved_underflow;

   /* special values: NaN and infinities */
   if (!mpc_fin_p (op)) {
      if (mpfr_nan_p (mpc_realref (op)) || mpfr_nan_p (mpc_imagref (op))) {
         mpfr_set_nan (mpc_realref (rop));
         mpfr_set_nan (mpc_imagref (rop));
      }
      else if (mpfr_inf_p (mpc_realref (op))) {
         if (mpfr_inf_p (mpc_imagref (op))) {
            mpfr_set_inf (mpc_imagref (rop),
                          MPFR_SIGN (mpc_realref (op)) * MPFR_SIGN (mpc_imagref (op)));
            mpfr_set_nan (mpc_realref (rop));
         }
         else {
            if (mpfr_zero_p (mpc_imagref (op)))
               mpfr_set_nan (mpc_imagref (rop));
            else
               mpfr_set_inf (mpc_imagref (rop),
                             MPFR_SIGN (mpc_realref (op)) * MPFR_SIGN (mpc_imagref (op)));
            mpfr_set_inf (mpc_realref (rop), +1);
         }
      }
      else /* IM(op) is infinity, RE(op) is not */ {
         if (mpfr_zero_p (mpc_realref (op)))
            mpfr_set_nan (mpc_imagref (rop));
         else
            mpfr_set_inf (mpc_imagref (rop),
                          MPFR_SIGN (mpc_realref (op)) * MPFR_SIGN (mpc_imagref (op)));
         mpfr_set_inf (mpc_realref (rop), -1);
      }
      return MPC_INEX (0, 0); /* exact */
   }

   prec = MPC_MAX_PREC(rop);

   /* Check for real resp. purely imaginary number */
   if (mpfr_zero_p (mpc_imagref(op))) {
      int same_sign = mpfr_signbit (mpc_realref (op)) == mpfr_signbit (mpc_imagref (op));
      inex_re = mpfr_sqr (mpc_realref(rop), mpc_realref(op), MPC_RND_RE(rnd));
      inex_im = mpfr_set_ui (mpc_imagref(rop), 0ul, MPFR_RNDN);
      if (!same_sign)
        mpc_conj (rop, rop, MPC_RNDNN);
      return MPC_INEX(inex_re, inex_im);
   }
   if (mpfr_zero_p (mpc_realref(op))) {
      int same_sign = mpfr_signbit (mpc_realref (op)) == mpfr_signbit (mpc_imagref (op));
      inex_re = -mpfr_sqr (mpc_realref(rop), mpc_imagref(op), INV_RND (MPC_RND_RE(rnd)));
      mpfr_neg (mpc_realref(rop), mpc_realref(rop), MPFR_RNDN);
      inex_im = mpfr_set_ui (mpc_imagref(rop), 0ul, MPFR_RNDN);
      if (!same_sign)
        mpc_conj (rop, rop, MPC_RNDNN);
      return MPC_INEX(inex_re, inex_im);
   }

   if (rop == op)
   {
      mpfr_init2 (x, MPC_PREC_RE (op));
      mpfr_set (x, op->re, MPFR_RNDN);
   }
   else
      x [0] = op->re [0];
   /* From here on, use x instead of op->re and safely overwrite rop->re. */

   /* Compute real part of result. */
   if (SAFE_ABS (mpfr_exp_t,
                 mpfr_get_exp (mpc_realref (op)) - mpfr_get_exp (mpc_imagref (op)))
       > (mpfr_exp_t) MPC_MAX_PREC (op) / 2) {
      /* If the real and imaginary parts of the argument have very different
         exponents, it is not reasonable to use Karatsuba squaring; compute
         exactly with the standard formulae instead, even if this means an
         additional multiplication. Using the approach copied from mul, over-
         and underflows are also handled correctly. */

      inex_re = mpfr_fsss (rop->re, x, op->im, MPC_RND_RE (rnd));
   }
   else {
      /* Karatsuba squaring: we compute the real part as (x+y)*(x-y) and the
         imaginary part as 2*x*y, with a total of 2M instead of 2S+1M for the
         naive algorithm, which computes x^2-y^2 and 2*x*y */
      mpfr_init (u);
      mpfr_init (v);

      emin = mpfr_get_emin ();

      do
      {
         prec += mpc_ceil_log2 (prec) + 5;

         mpfr_set_prec (u, prec);
         mpfr_set_prec (v, prec);

         /* Let op = x + iy. We need u = x+y and v = x-y, rounded away.      */
         /* The error is bounded above by 1 ulp.                             */
         /* We first let inexact be 1 if the real part is not computed       */
         /* exactly and determine the sign later.                            */
         inexact =   mpfr_add (u, x, mpc_imagref (op), MPFR_RNDA)
                   | mpfr_sub (v, x, mpc_imagref (op), MPFR_RNDA);

         /* compute the real part as u*v, rounded away                    */
         /* determine also the sign of inex_re                            */

         if (mpfr_sgn (u) == 0 || mpfr_sgn (v) == 0) {
            /* as we have rounded away, the result is exact */
            mpfr_set_ui (mpc_realref (rop), 0, MPFR_RNDN);
            inex_re = 0;
            ok = 1;
         }
         else {
            inexact |= mpfr_mul (u, u, v, MPFR_RNDA); /* error 5 */
            if (mpfr_get_exp (u) == emin || mpfr_inf_p (u)) {
               /* under- or overflow */
               inex_re = mpfr_fsss (rop->re, x, op->im, MPC_RND_RE (rnd));
               ok = 1;
            }
            else {
               ok = (!inexact) | mpfr_can_round (u, prec - 3,
                     MPFR_RNDA, MPFR_RNDZ,
                     MPC_PREC_RE (rop) + (MPC_RND_RE (rnd) == MPFR_RNDN));
               if (ok) {
                  inex_re = mpfr_set (mpc_realref (rop), u, MPC_RND_RE (rnd));
                  if (inex_re == 0)
                     /* remember that u was already rounded */
                     inex_re = inexact;
               }
            }
         }
      }
      while (!ok);

      mpfr_clear (u);
      mpfr_clear (v);
   }

   saved_underflow = mpfr_underflow_p ();
   mpfr_clear_underflow ();
   inex_im = mpfr_mul (rop->im, x, op->im, MPC_RND_IM (rnd));
   if (!mpfr_underflow_p ())
      inex_im |= mpfr_mul_2ui (rop->im, rop->im, 1, MPC_RND_IM (rnd));
      /* We must not multiply by 2 if rop->im has been set to the smallest
         representable number. */
   if (saved_underflow)
      mpfr_set_underflow ();

   if (rop == op)
      mpfr_clear (x);

   return MPC_INEX (inex_re, inex_im);
}
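
A brief caller sketch for mpc_sqr with illustrative precision and operand; squaring in place also exercises the rop == op path handled above:

#include <stdio.h>
#include <mpc.h>

int main (void)
{
   mpc_t z;

   mpc_init2 (z, 113);
   mpc_set_d_d (z, 1.5, -2.0, MPC_RNDNN);   /* z = 1.5 - 2i */

   mpc_sqr (z, z, MPC_RNDNN);               /* z <- z^2 = -1.75 - 6i */

   mpc_out_str (stdout, 10, 0, z, MPC_RNDNN);
   putchar ('\n');

   mpc_clear (z);
   return 0;
}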
Example No. 4
int
mpc_atan (mpc_ptr rop, mpc_srcptr op, mpc_rnd_t rnd)
{
    int s_re;
    int s_im;
    int inex_re;
    int inex_im;
    int inex;

    inex_re = 0;
    inex_im = 0;
    s_re = mpfr_signbit (mpc_realref (op));
    s_im = mpfr_signbit (mpc_imagref (op));

    /* special values */
    if (mpfr_nan_p (mpc_realref (op)) || mpfr_nan_p (mpc_imagref (op)))
    {
        if (mpfr_nan_p (mpc_realref (op)))
        {
            mpfr_set_nan (mpc_realref (rop));
            if (mpfr_zero_p (mpc_imagref (op)) || mpfr_inf_p (mpc_imagref (op)))
            {
                mpfr_set_ui (mpc_imagref (rop), 0, GMP_RNDN);
                if (s_im)
                    mpc_conj (rop, rop, MPC_RNDNN);
            }
            else
                mpfr_set_nan (mpc_imagref (rop));
        }
        else
        {
            if (mpfr_inf_p (mpc_realref (op)))
            {
                inex_re = set_pi_over_2 (mpc_realref (rop), -s_re, MPC_RND_RE (rnd));
                mpfr_set_ui (mpc_imagref (rop), 0, GMP_RNDN);
            }
            else
            {
                mpfr_set_nan (mpc_realref (rop));
                mpfr_set_nan (mpc_imagref (rop));
            }
        }
        return MPC_INEX (inex_re, 0);
    }

    if (mpfr_inf_p (mpc_realref (op)) || mpfr_inf_p (mpc_imagref (op)))
    {
        inex_re = set_pi_over_2 (mpc_realref (rop), -s_re, MPC_RND_RE (rnd));

        mpfr_set_ui (mpc_imagref (rop), 0, GMP_RNDN);
        if (s_im)
            mpc_conj (rop, rop, GMP_RNDN);

        return MPC_INEX (inex_re, 0);
    }

    /* pure real argument */
    if (mpfr_zero_p (mpc_imagref (op)))
    {
        inex_re = mpfr_atan (mpc_realref (rop), mpc_realref (op), MPC_RND_RE (rnd));

        mpfr_set_ui (mpc_imagref (rop), 0, GMP_RNDN);
        if (s_im)
            mpc_conj (rop, rop, GMP_RNDN);

        return MPC_INEX (inex_re, 0);
    }

    /* pure imaginary argument */
    if (mpfr_zero_p (mpc_realref (op)))
    {
        int cmp_1;

        if (s_im)
            cmp_1 = -mpfr_cmp_si (mpc_imagref (op), -1);
        else
            cmp_1 = mpfr_cmp_ui (mpc_imagref (op), +1);

        if (cmp_1 < 0)
        {
            /* atan(+0+iy) = +0 +i*atanh(y), if |y| < 1
               atan(-0+iy) = -0 +i*atanh(y), if |y| < 1 */

            mpfr_set_ui (mpc_realref (rop), 0, GMP_RNDN);
            if (s_re)
                mpfr_neg (mpc_realref (rop), mpc_realref (rop), GMP_RNDN);

            inex_im = mpfr_atanh (mpc_imagref (rop), mpc_imagref (op), MPC_RND_IM (rnd));
        }
        else if (cmp_1 == 0)
        {
            /* atan(+/-0+i) = NaN +i*inf
               atan(+/-0-i) = NaN -i*inf */
            mpfr_set_nan (mpc_realref (rop));
            mpfr_set_inf (mpc_imagref (rop), s_im ? -1 : +1);
        }
        else
        {
            /* atan(+0+iy) = +pi/2 +i*atanh(1/y), if |y| > 1
               atan(-0+iy) = -pi/2 +i*atanh(1/y), if |y| > 1 */
            mpfr_rnd_t rnd_im, rnd_away;
            mpfr_t y;
            mpfr_prec_t p, p_im;
            int ok;

            rnd_im = MPC_RND_IM (rnd);
            mpfr_init (y);
            p_im = mpfr_get_prec (mpc_imagref (rop));
            p = p_im;

            /* a = o(1/y)      with error(a) < 1 ulp(a)
               b = o(atanh(a)) with error(b) < (1+2^{1+Exp(a)-Exp(b)}) ulp(b)

               As |atanh (1/y)| > |1/y| we have Exp(a)-Exp(b) <=0 so, at most,
               2 bits of precision are lost.

               We round atanh(1/y) away from 0.
            */
            do
            {
                p += mpc_ceil_log2 (p) + 2;
                mpfr_set_prec (y, p);
                rnd_away = s_im == 0 ? GMP_RNDU : GMP_RNDD;
                inex_im = mpfr_ui_div (y, 1, mpc_imagref (op), rnd_away);
                /* FIXME: should we consider the case with unreasonably huge
                   precision prec(y)>3*exp_min, where atanh(1/Im(op)) could be
                   representable while 1/Im(op) underflows ?
                   This corresponds to |y| = 0.5*2^emin, in which case the
                   result may be wrong. */

                /* atanh cannot underflow: |atanh(x)| > |x| for |x| < 1 */
                inex_im |= mpfr_atanh (y, y, rnd_away);

                ok = inex_im == 0
                     || mpfr_can_round (y, p - 2, rnd_away, GMP_RNDZ,
                                        p_im + (rnd_im == GMP_RNDN));
            } while (ok == 0);

            inex_re = set_pi_over_2 (mpc_realref (rop), -s_re, MPC_RND_RE (rnd));
            inex_im = mpfr_set (mpc_imagref (rop), y, rnd_im);
            mpfr_clear (y);
        }
        return MPC_INEX (inex_re, inex_im);
    }

    /* regular number argument */
    {
        mpfr_t a, b, x, y;
        mpfr_prec_t prec, p;
        mpfr_exp_t err, expo;
        int ok = 0;
        mpfr_t minus_op_re;
        mpfr_exp_t op_re_exp, op_im_exp;
        mpfr_rnd_t rnd1, rnd2;

        mpfr_inits2 (MPFR_PREC_MIN, a, b, x, y, (mpfr_ptr) 0);

        /* real part: Re(arctan(x+i*y)) = [arctan2(x,1-y) - arctan2(-x,1+y)]/2 */
        minus_op_re[0] = mpc_realref (op)[0];
        MPFR_CHANGE_SIGN (minus_op_re);
        op_re_exp = mpfr_get_exp (mpc_realref (op));
        op_im_exp = mpfr_get_exp (mpc_imagref (op));

        prec = mpfr_get_prec (mpc_realref (rop)); /* result precision */

        /* a = o(1-y)         error(a) < 1 ulp(a)
           b = o(atan2(x,a))  error(b) < [1+2^{3+Exp(x)-Exp(a)-Exp(b)}] ulp(b)
                                         = kb ulp(b)
           c = o(1+y)         error(c) < 1 ulp(c)
           d = o(atan2(-x,c)) error(d) < [1+2^{3+Exp(x)-Exp(c)-Exp(d)}] ulp(d)
                                         = kd ulp(d)
           e = o(b - d)       error(e) < [1 + kb*2^{Exp(b)-Exp(e)}
                                            + kd*2^{Exp(d)-Exp(e)}] ulp(e)
                              error(e) < [1 + 2^{4+Exp(x)-Exp(a)-Exp(e)}
                                            + 2^{4+Exp(x)-Exp(c)-Exp(e)}] ulp(e)
                              because |atan(u)| < |u|
                                       < [1 + 2^{5+Exp(x)-min(Exp(a),Exp(c))
                                                 -Exp(e)}] ulp(e)
           f = e/2            exact
        */

        /* p: working precision */
        p = (op_im_exp > 0 || prec > SAFE_ABS (mpfr_prec_t, op_im_exp)) ? prec
            : (prec - op_im_exp);
        rnd1 = mpfr_sgn (mpc_realref (op)) > 0 ? GMP_RNDD : GMP_RNDU;
        rnd2 = mpfr_sgn (mpc_realref (op)) < 0 ? GMP_RNDU : GMP_RNDD;

        do
        {
            p += mpc_ceil_log2 (p) + 2;
            mpfr_set_prec (a, p);
            mpfr_set_prec (b, p);
            mpfr_set_prec (x, p);

            /* x = upper bound for atan (x/(1-y)). Since atan is increasing, we
               need an upper bound on x/(1-y), i.e., a lower bound on 1-y for
               x positive, and an upper bound on 1-y for x negative */
            mpfr_ui_sub (a, 1, mpc_imagref (op), rnd1);
            if (mpfr_sgn (a) == 0) /* y is near 1, thus 1+y is near 2, and
                                  expo will be 1 or 2 below */
            {
                MPC_ASSERT (mpfr_cmp_ui (mpc_imagref(op), 1) == 0);
                /* check for intermediate underflow */
                err = 2; /* ensures err will be expo below */
            }
            else
                err = mpfr_get_exp (a); /* err = Exp(a) with the notations above */
            mpfr_atan2 (x, mpc_realref (op), a, GMP_RNDU);

            /* b = lower bound for atan (-x/(1+y)): for x negative, we need a
               lower bound on -x/(1+y), i.e., an upper bound on 1+y */
            mpfr_add_ui (a, mpc_imagref(op), 1, rnd2);
            /* if a is exactly zero, i.e., Im(op) = -1, then the error on a is 0,
               and we can simply ignore the terms involving Exp(a) in the error */
            if (mpfr_sgn (a) == 0)
            {
                MPC_ASSERT (mpfr_cmp_si (mpc_imagref(op), -1) == 0);
                /* check for intermediate underflow */
                expo = err; /* will leave err unchanged below */
            }
            else
                expo = mpfr_get_exp (a); /* expo = Exp(c) with the notations above */
            mpfr_atan2 (b, minus_op_re, a, GMP_RNDD);

            err = err < expo ? err : expo; /* err = min(Exp(a),Exp(c)) */
            mpfr_sub (x, x, b, GMP_RNDU);

            err = 5 + op_re_exp - err - mpfr_get_exp (x);
            /* error is bounded by [1 + 2^err] ulp(e) */
            err = err < 0 ? 1 : err + 1;

            mpfr_div_2ui (x, x, 1, GMP_RNDU);

            /* Note: using RND2=RNDD guarantees that if x is exactly representable
               on prec + ... bits, mpfr_can_round will return 0 */
            ok = mpfr_can_round (x, p - err, GMP_RNDU, GMP_RNDD,
                                 prec + (MPC_RND_RE (rnd) == GMP_RNDN));
        } while (ok == 0);

        /* Imaginary part
           Im(atan(x+I*y)) = 1/4 * [log(x^2+(1+y)^2) - log (x^2 +(1-y)^2)] */
        prec = mpfr_get_prec (mpc_imagref (rop)); /* result precision */

        /* a = o(1+y)    error(a) < 1 ulp(a)
           b = o(a^2)    error(b) < 5 ulp(b)
           c = o(x^2)    error(c) < 1 ulp(c)
           d = o(b+c)    error(d) < 7 ulp(d)
           e = o(log(d)) error(e) < [1 + 7*2^{2-Exp(e)}] ulp(e) = ke ulp(e)
           f = o(1-y)    error(f) < 1 ulp(f)
           g = o(f^2)    error(g) < 5 ulp(g)
           h = o(c+g)    error(h) < 7 ulp(h)
           i = o(log(h)) error(i) < [1 + 7*2^{2-Exp(i)}] ulp(i) = ki ulp(i)
           j = o(e-i)    error(j) < [1 + ke*2^{Exp(e)-Exp(j)}
                                       + ki*2^{Exp(i)-Exp(j)}] ulp(j)
                         error(j) < [1 + 2^{Exp(e)-Exp(j)} + 2^{Exp(i)-Exp(j)}
                                       + 7*2^{3-Exp(j)}] ulp(j)
                                  < [1 + 2^{max(Exp(e),Exp(i))-Exp(j)+1}
                                       + 7*2^{3-Exp(j)}] ulp(j)
           k = j/4       exact
        */
        err = 2;
        p = prec; /* working precision */

        do
        {
            p += mpc_ceil_log2 (p) + err;
            mpfr_set_prec (a, p);
            mpfr_set_prec (b, p);
            mpfr_set_prec (y, p);

            /* a = upper bound for log(x^2 + (1+y)^2) */
            ROUND_AWAY (mpfr_add_ui (a, mpc_imagref (op), 1, MPFR_RNDA), a);
            mpfr_sqr (a, a, GMP_RNDU);
            mpfr_sqr (y, mpc_realref (op), GMP_RNDU);
            mpfr_add (a, a, y, GMP_RNDU);
            mpfr_log (a, a, GMP_RNDU);

            /* b = lower bound for log(x^2 + (1-y)^2) */
            mpfr_ui_sub (b, 1, mpc_imagref (op), GMP_RNDZ); /* round to zero */
            mpfr_sqr (b, b, GMP_RNDZ);
            /* we could write mpfr_sqr (y, mpc_realref (op), GMP_RNDZ) but it is
               more efficient to reuse the value of y (x^2) above and subtract
               one ulp */
            mpfr_nextbelow (y);
            mpfr_add (b, b, y, GMP_RNDZ);
            mpfr_log (b, b, GMP_RNDZ);

            mpfr_sub (y, a, b, GMP_RNDU);

            if (mpfr_zero_p (y))
                /* FIXME: happens when x and y have very different magnitudes;
                   could be handled more efficiently                           */
                ok = 0;
            else
            {
                expo = MPC_MAX (mpfr_get_exp (a), mpfr_get_exp (b));
                expo = expo - mpfr_get_exp (y) + 1;
                err = 3 - mpfr_get_exp (y);
                /* error(j) <= [1 + 2^expo + 7*2^err] ulp(j) */
                if (expo <= err) /* error(j) <= [1 + 2^{err+1}] ulp(j) */
                    err = (err < 0) ? 1 : err + 2;
                else
                    err = (expo < 0) ? 1 : expo + 2;

                mpfr_div_2ui (y, y, 2, GMP_RNDN);
                MPC_ASSERT (!mpfr_zero_p (y));
                /* FIXME: underflow. Since the main term of the Taylor series
                   in y=0 is 1/(x^2+1) * y, this means that y is very small
                   and/or x very large; but then the mpfr_zero_p (y) above
                   should be true. This needs a proof, or better yet,
                   special code.                                              */

                ok = mpfr_can_round (y, p - err, GMP_RNDU, GMP_RNDD,
                                     prec + (MPC_RND_IM (rnd) == GMP_RNDN));
            }
        } while (ok == 0);

        inex = mpc_set_fr_fr (rop, x, y, rnd);

        mpfr_clears (a, b, x, y, (mpfr_ptr) 0);
        return inex;
    }
}
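
A quick caller sketch for mpc_atan; the precision, the operand 1 + 2i and the number of printed digits are illustrative:

#include <stdio.h>
#include <mpc.h>

int main (void)
{
    mpc_t z, r;

    mpc_init2 (z, 128);
    mpc_init2 (r, 128);

    mpc_set_d_d (z, 1.0, 2.0, MPC_RNDNN);   /* z = 1 + 2i, a regular argument */
    mpc_atan (r, z, MPC_RNDNN);             /* r = arctan(z) */

    mpc_out_str (stdout, 10, 30, r, MPC_RNDNN);
    putchar ('\n');

    mpc_clear (z);
    mpc_clear (r);
    return 0;
}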
Example No. 5
/* We can't use fits_s.h as it uses mpfr_cmp_si */
int
mpfr_fits_intmax_p (mpfr_srcptr f, mpfr_rnd_t rnd)
{
  mpfr_flags_t saved_flags;
  mpfr_exp_t e;
  int prec;
  mpfr_t x, y;
  int neg;
  int res;

  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (f)))
    /* Zero always fits */
    return MPFR_IS_ZERO (f) ? 1 : 0;

  /* now it fits if either
     (a) MINIMUM <= f <= MAXIMUM
     (b) or MINIMUM <= round(f, prec(intmax_t), rnd) <= MAXIMUM */

  e = MPFR_EXP (f);
  if (e < 1)
    return 1; /* |f| < 1: always fits */

  neg = MPFR_IS_NEG (f);

  /* let EXTREMUM be MAXIMUM if f > 0, and MINIMUM if f < 0 */

  /* first compute prec(EXTREMUM), this could be done at configure time,
     but the result can depend on neg (the loop is moved inside the "if"
     to give the compiler a better chance to compute prec statically) */
  if (neg)
    {
      uintmax_t s;
      /* In C89, the division on negative integers isn't well-defined. */
      s = SAFE_ABS (uintmax_t, MPFR_INTMAX_MIN);
      for (prec = 0; s != 0; s /= 2, prec ++);
    }
  else
    {
      intmax_t s;
      s = MPFR_INTMAX_MAX;
      for (prec = 0; s != 0; s /= 2, prec ++);
    }

  /* EXTREMUM needs prec bits, i.e. 2^(prec-1) <= |EXTREMUM| < 2^prec */

   /* if e <= prec - 1, then f < 2^(prec-1) <= |EXTREMUM| */
  if (e <= prec - 1)
    return 1;

  /* if e >= prec + 1, then f >= 2^prec > |EXTREMUM| */
  if (e >= prec + 1)
    return 0;

  MPFR_ASSERTD (e == prec);

  /* hard case: first round to prec bits, then check */
  saved_flags = __gmpfr_flags;
  mpfr_init2 (x, prec);
  /* for RNDF, it is necessary and sufficient to check it fits when rounding
     away from zero */
  mpfr_set (x, f, (rnd == MPFR_RNDF) ? MPFR_RNDA : rnd);

  if (neg)
    {
      mpfr_init2 (y, prec);
      mpfr_set_sj (y, MPFR_INTMAX_MIN, MPFR_RNDN);
      res = mpfr_cmp (x, y) >= 0;
      mpfr_clear (y);
    }
  else
    {
      /* Warning! Due to the rounding, x can be an infinity. Here we use
         the fact that singular numbers have a special exponent field,
         thus well-defined and different from e, in which case this means
         that the number does not fit. That's why we use MPFR_EXP, not
         MPFR_GET_EXP. */
      res = MPFR_EXP (x) == e;
    }

  mpfr_clear (x);
  __gmpfr_flags = saved_flags;
  return res;
}
Example No. 6
/* TODO
 *   exponents that use more than 16 bytes are not managed
 */
static unsigned char*
mpfr_fpif_store_exponent (unsigned char *buffer, size_t *buffer_size, mpfr_t x)
{
  unsigned char *result;
  mpfr_exp_t exponent;
  mpfr_uexp_t uexp;
  size_t exponent_size;

  exponent = mpfr_get_exp (x);
  exponent_size = 0;

  if (mpfr_regular_p (x))
    {
      if (exponent > MPFR_MAX_EMBEDDED_EXPONENT ||
          exponent < -MPFR_MAX_EMBEDDED_EXPONENT)
        {
          mpfr_exp_t copy_exponent;

          uexp = SAFE_ABS (mpfr_uexp_t, exponent)
            - MPFR_MAX_EMBEDDED_EXPONENT;

          copy_exponent = uexp << 1;
          COUNT_NB_BYTE(copy_exponent, exponent_size);

          if (exponent < 0)
            uexp |= (mpfr_uexp_t) 1 << (8 * exponent_size - 1);
        }
      else
        uexp = exponent + MPFR_MAX_EMBEDDED_EXPONENT;
    }

  result = buffer;
  ALLOC_RESULT(result, buffer_size, exponent_size + 1);

  if (mpfr_regular_p (x))
    {
      if (exponent_size == 0)
        result[0] = uexp;
      else
        {
          result[0] = MPFR_EXTERNAL_EXPONENT + exponent_size;

          putLittleEndianData (result + 1, (unsigned char *) &uexp,
                               sizeof(mpfr_exp_t), exponent_size);
        }
    }
  else if (mpfr_zero_p (x))
    result[0] = MPFR_KIND_ZERO;
  else if (mpfr_inf_p (x))
    result[0] = MPFR_KIND_INF;
  else
    {
      MPFR_ASSERTD (mpfr_nan_p (x));
      result[0] = MPFR_KIND_NAN;
    }

  if (MPFR_IS_NEG (x))
    result[0] |= 0x80;

  return result;
}
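
mpfr_fpif_store_exponent is an internal helper of MPFR's floating-point interchange format; user code goes through mpfr_fpif_export and mpfr_fpif_import (available since MPFR 4.0). A minimal round-trip sketch, where the file name x.fpif and the stored value are purely illustrative:

#include <stdio.h>
#include <mpfr.h>

int main (void)
{
  mpfr_t x, y;
  FILE *fh;

  mpfr_init2 (x, 130);
  mpfr_init2 (y, 2);        /* the import resets the precision from the file */
  mpfr_set_str (x, "3.14159265358979323846264338328", 10, MPFR_RNDN);

  fh = fopen ("x.fpif", "wb");                 /* illustrative file name */
  if (fh == NULL || mpfr_fpif_export (fh, x) != 0)
    {
      fprintf (stderr, "export failed\n");
      return 1;
    }
  fclose (fh);

  fh = fopen ("x.fpif", "rb");
  if (fh == NULL || mpfr_fpif_import (y, fh) != 0)
    {
      fprintf (stderr, "import failed\n");
      return 1;
    }
  fclose (fh);

  mpfr_printf ("round trip: %.30Rg\n", y);     /* y now holds the exported value */

  mpfr_clear (x);
  mpfr_clear (y);
  return 0;
}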