Example #1
int
main (int argc, char *argv[])
{
  mpfr_exp_t emin;

  tests_start_mpfr ();

  /* mpfr_round_nearest_away requires that emin not be the smallest possible value */
  if ((emin = mpfr_get_emin ()) == mpfr_get_emin_min ())
    mpfr_set_emin (mpfr_get_emin_min () + 1);

  test_special ();

  test_nonspecial ();

  mpfr_set_emin (emin);

  tests_end_mpfr ();
  return 0;
}
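
The test above bumps emin away from its absolute minimum and restores the caller's value at the end. Below is a minimal standalone sketch of that save/adjust/restore pattern; it is not part of the MPFR test suite and only uses documented MPFR calls such as mpfr_get_emin_min and mpfr_get_emax_max.

#include <stdio.h>
#include <mpfr.h>

/* Sketch: widen the exponent range to the extreme limits supported by
   this MPFR build for an intermediate computation, then restore the
   caller's range afterwards. */
int
main (void)
{
  mpfr_exp_t old_emin = mpfr_get_emin ();
  mpfr_exp_t old_emax = mpfr_get_emax ();

  /* temporarily allow the full range */
  mpfr_set_emin (mpfr_get_emin_min ());
  mpfr_set_emax (mpfr_get_emax_max ());

  printf ("emin range: [%ld, %ld]\n",
          (long) mpfr_get_emin_min (), (long) mpfr_get_emin_max ());
  printf ("emax range: [%ld, %ld]\n",
          (long) mpfr_get_emax_min (), (long) mpfr_get_emax_max ());

  /* restore the previous exponent range */
  mpfr_set_emin (old_emin);
  mpfr_set_emax (old_emax);
  return 0;
}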
Example #2
SEXP R_mpfr_get_erange(SEXP kind_) {
    erange_kind kind = asInteger(kind_);
/* MUST be sync'ed with  ../R/mpfr.R
 *                       ~~~~~~~~~~~ where  .Summary.codes <-
 */
    mpfr_exp_t r;
    switch(kind) {
    case E_min:    r = mpfr_get_emin();     break;
    case E_max:    r = mpfr_get_emax();     break;
    case min_emin: r = mpfr_get_emin_min(); break;
    case max_emin: r = mpfr_get_emin_max(); break;
    case min_emax: r = mpfr_get_emax_min(); break;
    case max_emax: r = mpfr_get_emax_max(); break;
    default:
	error("invalid kind (code = %d) in R_mpfr_get_erange()", kind);
    }
    R_mpfr_dbg_printf(1,"R_mpfr_get_erange(%d): %ld\n", kind, (long)r);
    return (kind <= E_max && INT_MIN <= r && r <= INT_MAX) ? ScalarInteger((int) r)
	: ScalarReal((double) r);
}
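
R_mpfr_get_erange dispatches on an erange_kind code defined in the Rmpfr headers and kept in sync with ../R/mpfr.R. The enum below is only a hypothetical illustration of the shape that definition must have for the switch and the `kind <= E_max` test to make sense; the actual codes and values live in the package sources.

/* Hypothetical illustration; the real definition and its numeric values
   come from the Rmpfr sources (synced with ../R/mpfr.R). */
typedef enum {
    E_min = 1,   /* current emin */
    E_max,       /* current emax */
    min_emin,    /* smallest value emin can be set to */
    max_emin,    /* largest  value emin can be set to */
    min_emax,    /* smallest value emax can be set to */
    max_emax     /* largest  value emax can be set to */
} erange_kind;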
Example #3
int
mpc_log (mpc_ptr rop, mpc_srcptr op, mpc_rnd_t rnd)
{
   int ok, underflow = 0;
   mpfr_srcptr x, y;
   mpfr_t v, w;
   mpfr_prec_t prec;
   int loops;
   int re_cmp, im_cmp;
   int inex_re, inex_im;
   int err;
   mpfr_exp_t expw;
   int sgnw;

   /* special values: NaN and infinities */
   if (!mpc_fin_p (op)) {
      if (mpfr_nan_p (mpc_realref (op))) {
         if (mpfr_inf_p (mpc_imagref (op)))
            mpfr_set_inf (mpc_realref (rop), +1);
         else
            mpfr_set_nan (mpc_realref (rop));
         mpfr_set_nan (mpc_imagref (rop));
         inex_im = 0; /* Inf/NaN is exact */
      }
      else if (mpfr_nan_p (mpc_imagref (op))) {
         if (mpfr_inf_p (mpc_realref (op)))
            mpfr_set_inf (mpc_realref (rop), +1);
         else
            mpfr_set_nan (mpc_realref (rop));
         mpfr_set_nan (mpc_imagref (rop));
         inex_im = 0; /* Inf/NaN is exact */
      }
      else /* We have an infinity in at least one part. */ {
         inex_im = mpfr_atan2 (mpc_imagref (rop), mpc_imagref (op), mpc_realref (op),
                               MPC_RND_IM (rnd));
         mpfr_set_inf (mpc_realref (rop), +1);
      }
      return MPC_INEX(0, inex_im);
   }

   /* special cases: real and purely imaginary numbers */
   re_cmp = mpfr_cmp_ui (mpc_realref (op), 0);
   im_cmp = mpfr_cmp_ui (mpc_imagref (op), 0);
   if (im_cmp == 0) {
      if (re_cmp == 0) {
         inex_im = mpfr_atan2 (mpc_imagref (rop), mpc_imagref (op), mpc_realref (op),
                               MPC_RND_IM (rnd));
         mpfr_set_inf (mpc_realref (rop), -1);
         inex_re = 0; /* -Inf is exact */
      }
      else if (re_cmp > 0) {
         inex_re = mpfr_log (mpc_realref (rop), mpc_realref (op), MPC_RND_RE (rnd));
         inex_im = mpfr_set (mpc_imagref (rop), mpc_imagref (op), MPC_RND_IM (rnd));
      }
      else {
         /* op = x + 0*i with x < 0; let w = -x = |x| */
         int negative_zero;
         mpfr_rnd_t rnd_im;

         negative_zero = mpfr_signbit (mpc_imagref (op));
         if (negative_zero)
            rnd_im = INV_RND (MPC_RND_IM (rnd));
         else
            rnd_im = MPC_RND_IM (rnd);
         w [0] = *mpc_realref (op);
         MPFR_CHANGE_SIGN (w);
         inex_re = mpfr_log (mpc_realref (rop), w, MPC_RND_RE (rnd));
         inex_im = mpfr_const_pi (mpc_imagref (rop), rnd_im);
         if (negative_zero) {
            mpc_conj (rop, rop, MPC_RNDNN);
            inex_im = -inex_im;
         }
      }
      return MPC_INEX(inex_re, inex_im);
   }
   else if (re_cmp == 0) {
      if (im_cmp > 0) {
         inex_re = mpfr_log (mpc_realref (rop), mpc_imagref (op), MPC_RND_RE (rnd));
         inex_im = mpfr_const_pi (mpc_imagref (rop), MPC_RND_IM (rnd));
         /* division by 2 does not change the ternary flag */
         mpfr_div_2ui (mpc_imagref (rop), mpc_imagref (rop), 1, GMP_RNDN);
      }
      else {
         w [0] = *mpc_imagref (op);
         MPFR_CHANGE_SIGN (w);
         inex_re = mpfr_log (mpc_realref (rop), w, MPC_RND_RE (rnd));
         inex_im = mpfr_const_pi (mpc_imagref (rop), INV_RND (MPC_RND_IM (rnd)));
         /* division by 2 does not change the ternary flag */
         mpfr_div_2ui (mpc_imagref (rop), mpc_imagref (rop), 1, GMP_RNDN);
         mpfr_neg (mpc_imagref (rop), mpc_imagref (rop), GMP_RNDN);
         inex_im = -inex_im; /* negate the ternary flag */
      }
      return MPC_INEX(inex_re, inex_im);
   }

   prec = MPC_PREC_RE(rop);
   mpfr_init2 (w, 2);
   /* let op = x + iy; log = 1/2 log (x^2 + y^2) + i atan2 (y, x)   */
   /* loop for the real part: 1/2 log (x^2 + y^2), fast, but unsafe */
   /* implementation                                                */
   ok = 0;
   for (loops = 1; !ok && loops <= 2; loops++) {
      prec += mpc_ceil_log2 (prec) + 4;
      mpfr_set_prec (w, prec);

      mpc_abs (w, op, GMP_RNDN);
         /* error 0.5 ulp */
      if (mpfr_inf_p (w))
         /* intermediate overflow; the logarithm may be representable.
            Intermediate underflow is impossible.                      */
         break;

      mpfr_log (w, w, GMP_RNDN);
         /* generic error of log: (2^(- exp(w)) + 0.5) ulp */

      if (mpfr_zero_p (w))
         /* impossible to round, switch to second algorithm */
         break;

      err = MPC_MAX (-mpfr_get_exp (w), 0) + 1;
         /* number of lost digits */
      ok = mpfr_can_round (w, prec - err, GMP_RNDN, GMP_RNDZ,
         mpfr_get_prec (mpc_realref (rop)) + (MPC_RND_RE (rnd) == GMP_RNDN));
   }

   if (!ok) {
      prec = MPC_PREC_RE(rop);
      mpfr_init2 (v, 2);
      /* compute 1/2 log (x^2 + y^2) = log |x| + 1/2 * log (1 + (y/x)^2)
            if |x| >= |y|; otherwise, exchange x and y                   */
      if (mpfr_cmpabs (mpc_realref (op), mpc_imagref (op)) >= 0) {
         x = mpc_realref (op);
         y = mpc_imagref (op);
      }
      else {
         x = mpc_imagref (op);
         y = mpc_realref (op);
      }

      do {
         prec += mpc_ceil_log2 (prec) + 4;
         mpfr_set_prec (v, prec);
         mpfr_set_prec (w, prec);

         mpfr_div (v, y, x, GMP_RNDD); /* error 1 ulp */
         mpfr_sqr (v, v, GMP_RNDD);
            /* generic error of multiplication:
               1 + 2*1*(2+1*2^(1-prec)) <= 5.0625 since prec >= 6 */
         mpfr_log1p (v, v, GMP_RNDD);
            /* error 1 + 4*5.0625 = 21.25 , see algorithms.tex */
         mpfr_div_2ui (v, v, 1, GMP_RNDD);
            /* If the result is 0, then there has been an underflow somewhere. */

         mpfr_abs (w, x, GMP_RNDN); /* exact */
         mpfr_log (w, w, GMP_RNDN); /* error 0.5 ulp */
         expw = mpfr_get_exp (w);
         sgnw = mpfr_signbit (w);

         mpfr_add (w, w, v, GMP_RNDN);
         if (!sgnw) /* v is positive, so no cancellation;
                       error 22.25 ulp; error counts lost bits */
            err = 5;
         else
            err =   MPC_MAX (5 + mpfr_get_exp (v),
                  /* 21.25 ulp (v) rewritten in ulp (result, now in w) */
                           -1 + expw             - mpfr_get_exp (w)
                  /* 0.5 ulp (previous w), rewritten in ulp (result) */
                  ) + 2;

         /* handle one special case: |x|=1, and (y/x)^2 underflows;
            then 1/2*log(x^2+y^2) \approx 1/2*y^2 also underflows.  */
         if (   (mpfr_cmp_si (x, -1) == 0 || mpfr_cmp_ui (x, 1) == 0)
             && mpfr_zero_p (w))
            underflow = 1;

      } while (!underflow &&
               !mpfr_can_round (w, prec - err, GMP_RNDN, GMP_RNDZ,
               mpfr_get_prec (mpc_realref (rop)) + (MPC_RND_RE (rnd) == GMP_RNDN)));
      mpfr_clear (v);
   }

   /* imaginary part */
   inex_im = mpfr_atan2 (mpc_imagref (rop), mpc_imagref (op), mpc_realref (op),
                         MPC_RND_IM (rnd));

   /* set the real part; cannot be done before if rop==op */
   if (underflow)
      /* create underflow in result */
      inex_re = mpfr_set_ui_2exp (mpc_realref (rop), 1,
                                  mpfr_get_emin_min () - 2, MPC_RND_RE (rnd));
   else
      inex_re = mpfr_set (mpc_realref (rop), w, MPC_RND_RE (rnd));
   mpfr_clear (w);
   return MPC_INEX(inex_re, inex_im);
}
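
A small usage sketch (not part of mpc_log itself) exercising the negative-real special case handled above: log(-1 + 0i) should yield 0 + i*pi. It only uses standard MPC calls (mpc_init2, mpc_set_si_si, mpc_log, mpc_out_str).

#include <stdio.h>
#include <mpc.h>

int
main (void)
{
  mpc_t z;
  mpc_init2 (z, 53);
  mpc_set_si_si (z, -1, 0, MPC_RNDNN);   /* z = -1 + 0*i */
  mpc_log (z, z, MPC_RNDNN);             /* expect 0 + i*pi */
  mpc_out_str (stdout, 10, 0, z, MPC_RNDNN);
  putchar ('\n');
  mpc_clear (z);
  return 0;
}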
Example #4
/* If x^y is exactly representable (with maybe a larger precision than z),
   round it in z and return the (mpc) inexact flag in [0, 10].

   If x^y is not exactly representable, return -1.

   If intermediate computations lead to numbers of more than maxprec bits,
   then abort and return -2 (in that case, to avoid loops, mpc_pow_exact
   should be called again with a larger value of maxprec).

   Assume one of Re(x) or Im(x) is non-zero, and y is non-zero (y is real).
*/
static int
mpc_pow_exact (mpc_ptr z, mpc_srcptr x, mpfr_srcptr y, mpc_rnd_t rnd,
               mp_prec_t maxprec)
{
  mp_exp_t ec, ed, ey, emin, emax;
  mpz_t my, a, b, c, d, u;
  unsigned long int t;
  int ret = -2;

  mpz_init (my);
  mpz_init (a);
  mpz_init (b);
  mpz_init (c);
  mpz_init (d);
  mpz_init (u);

  ey = mpfr_get_z_exp (my, y);
  /* normalize so that my is odd */
  t = mpz_scan1 (my, 0);
  ey += t;
  mpz_tdiv_q_2exp (my, my, t);

  if (mpfr_zero_p (MPC_RE(x)))
    {
      mpz_set_ui (c, 0);
      ec = 0;
    }
  else
    ec = mpfr_get_z_exp (c, MPC_RE(x));
  if (mpfr_zero_p (MPC_IM(x)))
    {
      mpz_set_ui (d, 0);
      ed = ec;
    }
  else
    {
      ed = mpfr_get_z_exp (d, MPC_IM(x));
      if (mpfr_zero_p (MPC_RE(x)))
        ec = ed;
    }
  /* x = c*2^ec + I * d*2^ed */
  /* equalize the exponents of x */
  if (ec < ed)
    {
      mpz_mul_2exp (d, d, ed - ec);
      if (mpz_sizeinbase (d, 2) > maxprec)
        goto end;
      ed = ec;
    }
  else if (ed < ec)
    {
      mpz_mul_2exp (c, c, ec - ed);
      if (mpz_sizeinbase (c, 2) > maxprec)
        goto end;
      ec = ed;
    }
  /* now ec=ed and x = (c + I * d) * 2^ec */

  /* divide by two if possible */
  if (mpz_cmp_ui (c, 0) == 0)
    {
      t = mpz_scan1 (d, 0);
      mpz_tdiv_q_2exp (d, d, t);
      ec += t;
    }
  else if (mpz_cmp_ui (d, 0) == 0)
    {
      t = mpz_scan1 (c, 0);
      mpz_tdiv_q_2exp (c, c, t);
      ec += t;
    }
  else /* neither c nor d is zero */
    {
      unsigned long v;
      t = mpz_scan1 (c, 0);
      v = mpz_scan1 (d, 0);
      if (v < t)
        t = v;
      mpz_tdiv_q_2exp (c, c, t);
      mpz_tdiv_q_2exp (d, d, t);
      ec += t;
    }

  /* now either one of c, d is odd */

  while (ey < 0)
    {
      /* check if x is a square */
      if (ec & 1)
        {
          mpz_mul_2exp (c, c, 1);
          mpz_mul_2exp (d, d, 1);
          ec --;
        }
      /* now ec is even */
      if (mpc_perfect_square_p (a, b, c, d) == 0)
        break;
      mpz_swap (a, c);
      mpz_swap (b, d);
      ec /= 2;
      ey ++;
    }

  if (ey < 0)
    {
      ret = -1; /* not representable */
      goto end;
    }

  /* Now ey >= 0, it thus suffices to check that x^my is representable.
     If my > 0, this is always true. If my < 0, we first try to invert
     (c+I*d)*2^ec.
  */
  if (mpz_cmp_ui (my, 0) < 0)
    {
      /* If my < 0, 1 / (c + I*d) = (c - I*d)/(c^2 + d^2), thus a sufficient
         condition is that c^2 + d^2 is a power of two, assuming |c| <> |d|.
         Assume a prime p <> 2 divides c^2 + d^2,
         then if p does not divide c or d, 1 / (c + I*d) cannot be exact.
         If p divides both c and d, then we can write c = p*c', d = p*d',
         and 1 / (c + I*d) = 1/p * 1/(c' + I*d'). This shows that if 1/(c+I*d)
         is exact, then 1/(c' + I*d') is exact too, and we are back to the
         previous case. In conclusion, a necessary and sufficient condition
         is that c^2 + d^2 is a power of two.
      */
      /* FIXME: we could first compute c^2+d^2 mod a limb for example */
      mpz_mul (a, c, c);
      mpz_addmul (a, d, d);
      t = mpz_scan1 (a, 0);
      if (mpz_sizeinbase (a, 2) != 1 + t) /* a is not a power of two */
        {
          ret = -1; /* not representable */
          goto end;
        }
      /* replace (c,d) by (c/(c^2+d^2), -d/(c^2+d^2)) */
      mpz_neg (d, d);
      ec = -ec - t;
      mpz_neg (my, my);
    }

  /* now ey >= 0 and my >= 0, and we want to compute
     [(c + I * d) * 2^ec] ^ (my * 2^ey).

     We first compute [(c + I * d) * 2^ec]^my, then square ey times. */
  t = mpz_sizeinbase (my, 2) - 1;
  mpz_set (a, c);
  mpz_set (b, d);
  ed = ec;
  /* invariant: (a + I*b) * 2^ed = ((c + I*d) * 2^ec)^trunc(my/2^t) */
  while (t-- > 0)
    {
      unsigned long v, w;
      /* square a + I*b */
      mpz_mul (u, a, b);
      mpz_mul (a, a, a);
      mpz_submul (a, b, b);
      mpz_mul_2exp (b, u, 1);
      ed *= 2;
      if (mpz_tstbit (my, t)) /* multiply by c + I*d */
        {
          mpz_mul (u, a, c);
          mpz_submul (u, b, d); /* ac-bd */
          mpz_mul (b, b, c);
          mpz_addmul (b, a, d); /* bc+ad */
          mpz_swap (a, u);
          ed += ec;
        }
      /* remove powers of two in (a,b) */
      if (mpz_cmp_ui (a, 0) == 0)
        {
          w = mpz_scan1 (b, 0);
          mpz_tdiv_q_2exp (b, b, w);
          ed += w;
        }
      else if (mpz_cmp_ui (b, 0) == 0)
        {
          w = mpz_scan1 (a, 0);
          mpz_tdiv_q_2exp (a, a, w);
          ed += w;
        }
      else
        {
          w = mpz_scan1 (a, 0);
          v = mpz_scan1 (b, 0);
          if (v < w)
            w = v;
          mpz_tdiv_q_2exp (a, a, w);
          mpz_tdiv_q_2exp (b, b, w);
          ed += w;
        }
      if (mpz_sizeinbase (a, 2) > maxprec || mpz_sizeinbase (b, 2) > maxprec)
        goto end;
    }
  /* now a+I*b = (c+I*d)^my */

  while (ey-- > 0)
    {
      unsigned long sa, sb;

      /* square a + I*b */
      mpz_mul (u, a, b);
      mpz_mul (a, a, a);
      mpz_submul (a, b, b);
      mpz_mul_2exp (b, u, 1);
      ed *= 2;

      /* divide by largest 2^n possible, to avoid many loops for e.g.,
         (2+2*I)^16777216 */
      sa = mpz_scan1 (a, 0);
      sb = mpz_scan1 (b, 0);
      sa = (sa <= sb) ? sa : sb;
      mpz_tdiv_q_2exp (a, a, sa);
      mpz_tdiv_q_2exp (b, b, sa);
      ed += sa;

      if (mpz_sizeinbase (a, 2) > maxprec || mpz_sizeinbase (b, 2) > maxprec)
        goto end;
    }

  /* save emin, emax */
  emin = mpfr_get_emin ();
  emax = mpfr_get_emax ();
  mpfr_set_emin (mpfr_get_emin_min ());
  mpfr_set_emax (mpfr_get_emax_max ());
  ret = mpfr_set_z (MPC_RE(z), a, MPC_RND_RE(rnd));
  ret = MPC_INEX(ret, mpfr_set_z (MPC_IM(z), b, MPC_RND_IM(rnd)));
  mpfr_mul_2si (MPC_RE(z), MPC_RE(z), ed, MPC_RND_RE(rnd));
  mpfr_mul_2si (MPC_IM(z), MPC_IM(z), ed, MPC_RND_IM(rnd));
  /* restore emin, emax */
  mpfr_set_emin (emin);
  mpfr_set_emax (emax);

 end:
  mpz_clear (my);
  mpz_clear (a);
  mpz_clear (b);
  mpz_clear (c);
  mpz_clear (d);
  mpz_clear (u);

  return ret;
}
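
The comment block inside mpc_pow_exact argues that 1/(c + I*d), with integers c and d not both even (and not both zero), is exactly representable iff c^2 + d^2 is a power of two. Below is a standalone GMP sketch of that check, for illustration only; the helper name is made up and not part of MPC.

#include <stdio.h>
#include <gmp.h>

/* Return nonzero iff c^2 + d^2 is a power of two (c, d not both zero):
   the bit length of the sum must be one more than the index of its
   lowest set bit. */
static int
sum_of_squares_is_power_of_two (const mpz_t c, const mpz_t d)
{
  mpz_t a;
  int ret;
  mpz_init (a);
  mpz_mul (a, c, c);
  mpz_addmul (a, d, d);          /* a = c^2 + d^2 */
  ret = mpz_sizeinbase (a, 2) == 1 + mpz_scan1 (a, 0);
  mpz_clear (a);
  return ret;
}

int
main (void)
{
  mpz_t c, d;
  mpz_init_set_ui (c, 1);
  mpz_init_set_ui (d, 1);        /* 1 + I: 1^2 + 1^2 = 2, a power of two */
  printf ("%d\n", sum_of_squares_is_power_of_two (c, d));
  mpz_clear (c);
  mpz_clear (d);
  return 0;
}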