static void reuse_cc_c (mpc_function* function, mpc_srcptr z, mpc_ptr got1, mpc_ptr got2, mpc_ptr exp1, mpc_ptr exp2) { known_signs_t ks = {1, 1}; function->pointer.CC_C (exp1, exp2, z, MPC_RNDNN, MPC_RNDNN); mpc_set (got1, z, MPC_RNDNN); /* exact */ function->pointer.CC_C (got1, got2, got1, MPC_RNDNN, MPC_RNDNN); if ( !same_mpc_value (got1, exp1, ks) || !same_mpc_value (got2, exp2, ks)) { printf ("Reuse error in first result of %s for\n", function->name); MPC_OUT (z); MPC_OUT (exp1); MPC_OUT (got1); MPC_OUT (exp2); MPC_OUT (got2); exit (1); } mpc_set (got2, z, MPC_RNDNN); /* exact */ function->pointer.CC_C (got1, got2, got2, MPC_RNDNN, MPC_RNDNN); if ( !same_mpc_value (got1, exp1, ks) || !same_mpc_value (got2, exp2, ks)) { printf ("Reuse error in second result of %s for\n", function->name); MPC_OUT (z); MPC_OUT (exp1); MPC_OUT (got1); MPC_OUT (exp2); MPC_OUT (got2); exit (1); } }
static void reuse_fc (mpc_function* function, mpc_ptr z, mpc_ptr x, mpfr_ptr expected) { mpc_set (x, z, MPC_RNDNN); /* exact */ function->pointer.FC (expected, z, MPFR_RNDN); function->pointer.FC (mpc_realref (x), x, MPFR_RNDN); if (!same_mpfr_value (mpc_realref (x), expected, 1)) { mpfr_t got; got[0] = mpc_realref(x)[0]; /* display sensible name */ printf ("Reuse error for %s(mpc_realref(z), z) for\n", function->name); MPC_OUT (z); MPFR_OUT (expected); MPFR_OUT (got); exit (1); } mpc_set (x, z, MPC_RNDNN); /* exact */ function->pointer.FC (mpc_imagref (x), x, MPFR_RNDN); if (!same_mpfr_value (mpc_imagref (x), expected, 1)) { mpfr_t got; got[0] = mpc_imagref(x)[0]; /* display sensible name */ printf ("Reuse error for %s(mpc_imagref(z), z) for \n", function->name); MPC_OUT (z); MPFR_OUT (expected); MPFR_OUT (got); exit (1); } }
static void check_different_precisions(void) { /* check reuse when real and imaginary part have different precisions. */ mpc_t z, expected, got; int res; mpc_init2(z, 128); mpc_init2(expected, 128); mpc_init2(got, 128); /* change precision of one part */ mpfr_set_prec (mpc_imagref (z), 32); mpfr_set_prec (mpc_imagref (expected), 32); mpfr_set_prec (mpc_imagref (got), 32); mpfr_set_str (mpc_realref (z), "0x100000000fp-32", 16, GMP_RNDN); mpfr_set_str (mpc_imagref (z), "-1", 2, GMP_RNDN); mpfr_set_str (mpc_realref (expected), "+1", 2, GMP_RNDN); mpfr_set_str (mpc_imagref (expected), "0x100000000fp-32", 16, GMP_RNDN); mpc_set (got, z, MPC_RNDNN); res = mpc_mul_i (got, got, +1, MPC_RNDNN); if (MPC_INEX_RE(res) != 0 || MPC_INEX_IM(res) >=0) { printf("Wrong inexact flag for mpc_mul_i(z, z, n)\n" " got (re=%2d, im=%2d)\nexpected (re= 0, im=-1)\n", MPC_INEX_RE(res), MPC_INEX_IM(res)); exit(1); } if (mpc_cmp(got, expected) != 0) { printf ("Error for mpc_mul_i(z, z, n) for\n"); MPC_OUT (z); printf ("n=+1\n"); MPC_OUT (expected); MPC_OUT (got); exit (1); } mpc_neg (expected, expected, MPC_RNDNN); mpc_set (got, z, MPC_RNDNN); mpc_mul_i (got, got, -1, MPC_RNDNN); if (mpc_cmp(got, expected) != 0) { printf ("Error for mpc_mul_i(z, z, n) for\n"); MPC_OUT (z); printf ("n=-1\n"); MPC_OUT (expected); MPC_OUT (got); exit (1); } mpc_clear (z); mpc_clear (expected); mpc_clear (got); }
static void tgeneric_cc_c (mpc_function *function, mpc_ptr op, mpc_ptr rop1, mpc_ptr rop2, mpc_ptr rop14, mpc_ptr rop24, mpc_ptr rop14rnd, mpc_ptr rop24rnd, mpc_rnd_t rnd1, mpc_rnd_t rnd2) { /* same as the previous function, but for mpc functions computing two results from one argument */ known_signs_t ks = {1, 1}; function->pointer.CC_C (rop14, rop24, op, rnd1, rnd2); function->pointer.CC_C (rop1, rop2, op, rnd1, rnd2); if ( MPFR_CAN_ROUND (mpc_realref (rop14), 1, MPC_PREC_RE (rop1), MPC_RND_RE (rnd1)) && MPFR_CAN_ROUND (mpc_imagref (rop14), 1, MPC_PREC_IM (rop1), MPC_RND_IM (rnd1)) && MPFR_CAN_ROUND (mpc_realref (rop24), 1, MPC_PREC_RE (rop2), MPC_RND_RE (rnd2)) && MPFR_CAN_ROUND (mpc_imagref (rop24), 1, MPC_PREC_IM (rop2), MPC_RND_IM (rnd2))) { mpc_set (rop14rnd, rop14, rnd1); mpc_set (rop24rnd, rop24, rnd2); } else return; if (!same_mpc_value (rop1, rop14rnd, ks)) { /* rounding failed for first result */ printf ("Rounding might be incorrect for the first result of %s at\n", function->name); MPC_OUT (op); printf ("with rounding mode (%s, %s)", mpfr_print_rnd_mode (MPC_RND_RE (rnd1)), mpfr_print_rnd_mode (MPC_RND_IM (rnd1))); printf ("\n%s gives ", function->name); MPC_OUT (rop1); printf ("%s quadruple precision gives ", function->name); MPC_OUT (rop14); printf ("and is rounded to "); MPC_OUT (rop14rnd); exit (1); } else if (!same_mpc_value (rop2, rop24rnd, ks)) { /* rounding failed for second result */ printf ("Rounding might be incorrect for the second result of %s at\n", function->name); MPC_OUT (op); printf ("with rounding mode (%s, %s)", mpfr_print_rnd_mode (MPC_RND_RE (rnd2)), mpfr_print_rnd_mode (MPC_RND_IM (rnd2))); printf ("\n%s gives ", function->name); MPC_OUT (rop2); printf ("%s quadruple precision gives ", function->name); MPC_OUT (rop24); printf ("and is rounded to "); MPC_OUT (rop24rnd); exit (1); } }
/** * @brief Get the roots computed as multiprecision complex numbers. * * @param roots A pointer to an array of mpc_t variables. If *roots == NULL, * MPSolve will take care of allocating and initializing them for you. You are in charge of freeing * and clearing them when you no longer need them. * * @param radius A pointer to an array of rdpe_t where MPSolve should store the * inclusion radii. If *radius == NULL MPSolve will allocate those radii for you. * If radius == NULL no radii will be returned. */ int mps_context_get_roots_m (mps_context * s, mpc_t ** roots, rdpe_t ** radius) { int i; if (!*roots) { *roots = mpc_valloc (s->n); mpc_vinit2 (*roots, s->n, 0); } if (radius && !*radius) { *radius = rdpe_valloc (s->n); } { mpc_t * local_roots = *roots; rdpe_t * local_radius = radius ? *radius : NULL; for (i = 0; i < s->n; i++) { mpc_set_prec (local_roots[i], mpc_get_prec (s->root[i]->mvalue)); mpc_set (local_roots[i], s->root[i]->mvalue); if (radius) rdpe_set (local_radius[i], s->root[i]->drad); } } return 0; }
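A minimal calling sketch for the accessor above, assuming an mps_context *s whose n roots have already been approximated; passing NULL pointers lets MPSolve allocate the arrays, and, as the comment states, the caller must clear and free them afterwards.

/* hedged usage sketch: `s` is assumed to be an already-solved context */
mpc_t *roots = NULL;     /* NULL: MPSolve allocates and inits the array */
rdpe_t *radii = NULL;    /* NULL: MPSolve allocates the inclusion radii */
mps_context_get_roots_m (s, &roots, &radii);
/* ... read roots[i] and radii[i] for i < n ...
   afterwards the caller clears every mpc_t and frees both arrays */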
int mpc_atanh (mpc_ptr rop, mpc_srcptr op, mpc_rnd_t rnd) { /* atanh(op) = -i*atan(i*op) */ int inex; mpfr_t tmp; mpc_t z, a; MPC_RE (z)[0] = MPC_IM (op)[0]; MPC_IM (z)[0] = MPC_RE (op)[0]; MPFR_CHANGE_SIGN (MPC_RE (z)); /* Note reversal of precisions due to later multiplication by -i */ mpc_init3 (a, MPC_PREC_IM(rop), MPC_PREC_RE(rop)); inex = mpc_atan (a, z, RNDC (INV_RND (MPC_RND_IM (rnd)), MPC_RND_RE (rnd))); /* change a to -i*a, i.e., x+i*y to y-i*x */ tmp[0] = MPC_RE (a)[0]; MPC_RE (a)[0] = MPC_IM (a)[0]; MPC_IM (a)[0] = tmp[0]; MPFR_CHANGE_SIGN (MPC_IM (a)); mpc_set (rop, a, rnd); mpc_clear (a); return MPC_INEX (MPC_INEX_IM (inex), -MPC_INEX_RE (inex)); }
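The identity used above can be checked from tan(iw) = i tanh(w): writing w = atanh(z),

\[ z = \tanh w = -i\,\tan(i w) \;\Longrightarrow\; i z = \tan(i w) \;\Longrightarrow\; \operatorname{atanh} z = w = -i\,\operatorname{atan}(i z). \]

The final multiplication by -i exchanges real and imaginary parts, which is why a is allocated with the precisions of rop reversed and why the inexact flags are swapped (with the sign of the real one flipped) in the return value.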
static void tgeneric_cci (mpc_function *function, mpc_ptr op1, int op2, mpc_ptr rop, mpc_ptr rop4, mpc_ptr rop4rnd, mpc_rnd_t rnd) { known_signs_t ks = {1, 1}; function->pointer.CCI (rop4, op1, op2, rnd); function->pointer.CCI (rop, op1, op2, rnd); if (MPFR_CAN_ROUND (mpc_realref (rop4), 1, MPC_PREC_RE (rop), MPC_RND_RE (rnd)) && MPFR_CAN_ROUND (mpc_imagref (rop4), 1, MPC_PREC_IM (rop), MPC_RND_IM (rnd))) mpc_set (rop4rnd, rop4, rnd); else return; if (same_mpc_value (rop, rop4rnd, ks)) return; printf ("Rounding in %s might be incorrect for\n", function->name); MPC_OUT (op1); printf ("op2=%d\n", op2); printf ("with rounding mode (%s, %s)", mpfr_print_rnd_mode (MPC_RND_RE (rnd)), mpfr_print_rnd_mode (MPC_RND_IM (rnd))); printf ("\n%s gives ", function->name); MPC_OUT (rop); printf ("%s quadruple precision gives ", function->name); MPC_OUT (rop4); printf ("and is rounded to "); MPC_OUT (rop4rnd); exit (1); }
int mpc_asinh (mpc_ptr rop, mpc_srcptr op, mpc_rnd_t rnd) { /* asinh(op) = -i*asin(i*op) */ int inex; mpc_t z, a; mpfr_t tmp; /* z = i*op */ MPC_RE (z)[0] = MPC_IM (op)[0]; MPC_IM (z)[0] = MPC_RE (op)[0]; MPFR_CHANGE_SIGN (MPC_RE (z)); /* Note reversal of precisions due to later multiplication by -i */ mpc_init3 (a, MPC_PREC_IM(rop), MPC_PREC_RE(rop)); inex = mpc_asin (a, z, RNDC (INV_RND (MPC_RND_IM (rnd)), MPC_RND_RE (rnd))); /* if a = asin(i*op) = x+i*y, and we want y-i*x */ /* change a to -i*a */ tmp[0] = MPC_RE (a)[0]; MPC_RE (a)[0] = MPC_IM (a)[0]; MPC_IM (a)[0] = tmp[0]; MPFR_CHANGE_SIGN (MPC_IM (a)); mpc_set (rop, a, MPC_RNDNN); /* exact */ mpc_clear (a); return MPC_INEX (MPC_INEX_IM (inex), -MPC_INEX_RE (inex)); }
/* for functions with one mpc_t output, two mpc_t inputs */ static void reuse_c_cc (mpc_function* function, mpc_srcptr z, mpc_srcptr x, mpc_ptr got, mpc_ptr expected) { known_signs_t ks = {1, 1}; mpc_set (got, z, MPC_RNDNN); /* exact */ function->pointer.C_CC (expected, z, x, MPC_RNDNN); function->pointer.C_CC (got, got, x, MPC_RNDNN); if (!same_mpc_value (got, expected, ks)) { printf ("Reuse error for %s(z, z, x) for\n", function->name); MPC_OUT (z); MPC_OUT (x); MPC_OUT (expected); MPC_OUT (got); exit (1); } mpc_set (got, x, MPC_RNDNN); /* exact */ function->pointer.C_CC (expected, z, x, MPC_RNDNN); function->pointer.C_CC (got, z, got, MPC_RNDNN); if (!same_mpc_value (got, expected, ks)) { printf ("Reuse error for %s(x, z, x) for\n", function->name); MPC_OUT (z); MPC_OUT (x); MPC_OUT (expected); MPC_OUT (got); exit (1); } mpc_set (got, x, MPC_RNDNN); /* exact */ function->pointer.C_CC (expected, x, x, MPC_RNDNN); function->pointer.C_CC (got, got, got, MPC_RNDNN); if (!same_mpc_value (got, expected, ks)) { printf ("Reuse error for %s(x, x, x) for\n", function->name); MPC_OUT (x); MPC_OUT (expected); MPC_OUT (got); exit (1); } }
int mpc_acosh (mpc_ptr rop, mpc_srcptr op, mpc_rnd_t rnd) { /* acosh(z) = NaN + i*NaN, if z=0+i*NaN -i*acos(z), if sign(Im(z)) = - i*acos(z), if sign(Im(z)) = + http://functions.wolfram.com/ElementaryFunctions/ArcCosh/27/02/03/01/01/ */ mpc_t a; mpfr_t tmp; int inex; if (mpfr_zero_p (MPC_RE (op)) && mpfr_nan_p (MPC_IM (op))) { mpfr_set_nan (MPC_RE (rop)); mpfr_set_nan (MPC_IM (rop)); return 0; } /* Note reversal of precisions due to later multiplication by i or -i */ mpc_init3 (a, MPC_PREC_IM(rop), MPC_PREC_RE(rop)); if (mpfr_signbit (MPC_IM (op))) { inex = mpc_acos (a, op, RNDC (INV_RND (MPC_RND_IM (rnd)), MPC_RND_RE (rnd))); /* change a to -i*a, i.e., -y+i*x to x+i*y */ tmp[0] = MPC_RE (a)[0]; MPC_RE (a)[0] = MPC_IM (a)[0]; MPC_IM (a)[0] = tmp[0]; MPFR_CHANGE_SIGN (MPC_IM (a)); inex = MPC_INEX (MPC_INEX_IM (inex), -MPC_INEX_RE (inex)); } else { inex = mpc_acos (a, op, RNDC (MPC_RND_IM (rnd), INV_RND(MPC_RND_RE (rnd)))); /* change a to i*a, i.e., y-i*x to x+i*y */ tmp[0] = MPC_RE (a)[0]; MPC_RE (a)[0] = MPC_IM (a)[0]; MPC_IM (a)[0] = tmp[0]; MPFR_CHANGE_SIGN (MPC_RE (a)); inex = MPC_INEX (-MPC_INEX_IM (inex), MPC_INEX_RE (inex)); } mpc_set (rop, a, rnd); mpc_clear (a); return inex; }
void add_diff(Sequence seq, size_t N) { Sequence hseq = seq + N / 2, end = seq + N; while (hseq < end) { mpc_set(temp, seq, RND); //temp = *seq mpc_add(seq, seq, hseq, RND); //*seq += *hseq mpc_sub(hseq, temp, hseq, RND); //*hseq = temp - *hseq seq++, hseq++; } }
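A self-contained sketch of the same butterfly step written directly against GNU MPC (Sequence, temp and RND above are assumed to be file-scope definitions, roughly an mpc_t pointer type, an mpc_t scratch variable and a rounding mode). The temporary is what makes the in-place update safe: mpc_add overwrites *seq before its old value is needed for the difference.

/* butterfly on two values: (a, b) <- (a + b, a - b); sketch only */
mpc_t a, b, temp;
mpc_init2 (a, 128); mpc_init2 (b, 128); mpc_init2 (temp, 128);
mpc_set_ui_ui (a, 3, 1, MPC_RNDNN);   /* a = 3 + 1*i */
mpc_set_ui_ui (b, 1, 2, MPC_RNDNN);   /* b = 1 + 2*i */
mpc_set (temp, a, MPC_RNDNN);         /* keep the old a */
mpc_add (a, a, b, MPC_RNDNN);         /* a = 4 + 3*i */
mpc_sub (b, temp, b, MPC_RNDNN);      /* b = old_a - b = 2 - 1*i */
mpc_clear (a); mpc_clear (b); mpc_clear (temp);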
static MPC_Object * GMPy_MPC_From_MPC(MPC_Object *obj, mpfr_prec_t rprec, mpfr_prec_t iprec, CTXT_Object *context) { MPC_Object *result = NULL; assert(MPC_Check(obj)); /* Optimize the critical case when prec==1 or obj is NaN or Inf. */ if ((rprec == 1 && iprec == 1) || (!mpfr_number_p(mpc_realref(obj->c)) && !mpfr_number_p(mpc_imagref(obj->c)))) { Py_INCREF((PyObject*)obj); return obj; } CHECK_CONTEXT(context); if (rprec == 0) rprec = GET_REAL_PREC(context); else if (rprec == 1) rprec = mpfr_get_prec(mpc_realref(obj->c)); if (iprec == 0) iprec = GET_IMAG_PREC(context); else if (iprec == 1) iprec = mpfr_get_prec(mpc_imagref(obj->c)); /* Try to identify when an additional reference to existing instance can * be returned. It is possible when (1) the precision matches, (2) the * exponent is valid and not in the range that might require subnormal- * ization, and (3) subnormalize is not enabled. */ if ((rprec == mpfr_get_prec(mpc_realref(obj->c))) && (iprec == mpfr_get_prec(mpc_imagref(obj->c))) && (!context->ctx.subnormalize) && (mpc_realref(obj->c)->_mpfr_exp >= (context->ctx.emin + mpfr_get_prec(mpc_realref(obj->c)) - 1)) && (mpc_realref(obj->c)->_mpfr_exp <= context->ctx.emax) && (mpc_imagref(obj->c)->_mpfr_exp >= (context->ctx.emin + mpfr_get_prec(mpc_imagref(obj->c)) - 1)) && (mpc_imagref(obj->c)->_mpfr_exp <= context->ctx.emax) ) { Py_INCREF((PyObject*)obj); return obj; } if ((result = GMPy_MPC_New(rprec, iprec, context))) { result->rc = mpc_set(result->c, obj->c, GET_MPC_ROUND(context)); _GMPy_MPC_Cleanup(&result, context); } return result; }
static void eval_fetch(oobject_t *p) { oregister_t *r; oobject_t v; r = &thread_main->r0; switch (r->t) { case t_void: *p = null; break; case t_word: oget_word(p); v = *p; *(oword_t *)v = r->v.w; break; case t_float: oget_float(p); v = *p; *(ofloat_t *)v = r->v.d; break; case t_rat: oget_rat(p); v = *p; *(orat_t )v = r->v.r; break; case t_mpz: oget_mpz(p); v = *p; mpz_set((ompz_t)v, ozr(r)); break; case t_mpq: oget_mpq(p); v = *p; mpq_set((ompq_t)v, oqr(r)); break; case t_mpr: oget_mpr(p); v = *p; mpfr_set((ompr_t)v, orr(r), thr_rnd); break; case t_cdd: oget_cdd(p); v = *p; *(ocdd_t *)v = r->v.dd; break; case t_cqq: oget_cqq(p); v = *p; cqq_set((ocqq_t)v, oqq(r)); break; default: assert(r->t == t_mpc); oget_mpc(p); v = *p; mpc_set((ompc_t)v, occ(r), thr_rndc); break; } }
// Computes q = exp(2 pi i tau). static void compute_q(mpc_t q, mpc_t tau) { mpc_t z0; mpf_t f0, f1; mpf_ptr fp0; pbc_mpui pwr; mpc_init(z0); mpf_init(f0); mpf_init(f1); //compute z0 = 2 pi i tau mpc_set(z0, tau); //first remove integral part of Re(tau) //since exp(2 pi i) = 1 //it seems |Re(tau)| < 1 anyway? fp0 = mpc_re(z0); mpf_trunc(f1, fp0); mpf_sub(fp0, fp0, f1); mpc_mul_mpf(z0, z0, pi); mpc_mul_ui(z0, z0, 2); mpc_muli(z0, z0); //compute q = exp(z0); //first write z0 = A + a + b i //where A is a (negative) integer //and a, b are in [-1, 1] //compute e^A separately fp0 = mpc_re(z0); pwr = mpf_get_ui(fp0); mpf_pow_ui(f0, recipeulere, pwr); mpf_add_ui(fp0, fp0, pwr); mpf_exp(f1, mpc_re(z0)); mpf_mul(f0, f1, f0); mpc_cis(q, mpc_im(z0)); /* old_mpc_exp(q, z0); */ mpc_mul_mpf(q, q, f0); mpc_clear(z0); mpf_clear(f0); mpf_clear(f1); }
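The splitting performed above can be summarised as follows (a sketch of the intent only, assuming the globals recipeulere and pi hold 1/e and pi at working precision):

\[ z_0 = 2\pi i\,\tau = A + a + b i, \quad A \in \mathbb{Z}_{\le 0},\ a, b \in [-1, 1], \qquad q = e^{z_0} = e^{A}\, e^{a}\, (\cos b + i \sin b), \]

where e^A is obtained as (1/e)^{|A|} with mpf_pow_ui, e^a with mpf_exp, and cos b + i sin b with mpc_cis.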
static int mpc_div_imag (mpc_ptr rop, mpc_srcptr z, mpc_srcptr w, mpc_rnd_t rnd) /* Assumes z finite and w finite and non-zero, with real part of w a signed zero. */ { int inex_re, inex_im; int overlap = (rop == z) || (rop == w); int imag_z = mpfr_zero_p (mpc_realref (z)); mpfr_t wloc; mpc_t tmprop; mpc_ptr dest = (overlap) ? tmprop : rop; /* save signs of operands in case there are overlaps */ int zrs = MPFR_SIGNBIT (mpc_realref (z)); int zis = MPFR_SIGNBIT (mpc_imagref (z)); int wrs = MPFR_SIGNBIT (mpc_realref (w)); int wis = MPFR_SIGNBIT (mpc_imagref (w)); if (overlap) mpc_init3 (tmprop, MPC_PREC_RE (rop), MPC_PREC_IM (rop)); wloc[0] = mpc_imagref(w)[0]; /* copies mpfr struct IM(w) into wloc */ inex_re = mpfr_div (mpc_realref(dest), mpc_imagref(z), wloc, MPC_RND_RE(rnd)); mpfr_neg (wloc, wloc, MPFR_RNDN); /* changes the sign only in wloc, not in w; no need to correct later */ inex_im = mpfr_div (mpc_imagref(dest), mpc_realref(z), wloc, MPC_RND_IM(rnd)); if (overlap) { /* Note: we could use mpc_swap here, but this might cause problems if rop and tmprop have been allocated using different methods, since it will swap the significands of rop and tmprop. See http://lists.gforge.inria.fr/pipermail/mpc-discuss/2009-August/000504.html */ mpc_set (rop, tmprop, MPC_RNDNN); /* exact */ mpc_clear (tmprop); } /* correct signs of zeroes if necessary, which does not affect the inexact flags */ if (mpfr_zero_p (mpc_realref (rop))) mpfr_setsign (mpc_realref (rop), mpc_realref (rop), (zrs != wrs && zis != wis), MPFR_RNDN); /* exact */ if (imag_z) mpfr_setsign (mpc_imagref (rop), mpc_imagref (rop), (zis != wrs && zrs == wis), MPFR_RNDN); return MPC_INEX(inex_re, inex_im); }
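The two real divisions above implement the identity for a purely imaginary divisor w = i v:

\[ \frac{x + i y}{i v} = \frac{(x + i y)(-i)}{v} = \frac{y}{v} + i\,\frac{x}{-v}, \]

so the real part of the quotient is Im(z)/Im(w) and the imaginary part is Re(z) divided by the negated local copy wloc; negating only the copy leaves w itself untouched, so no sign needs to be restored afterwards.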
/* functions with one input, one output */ static void tgeneric_cc (mpc_function *function, mpc_ptr op, mpc_ptr rop, mpc_ptr rop4, mpc_ptr rop4rnd, mpc_rnd_t rnd) { known_signs_t ks = {1, 1}; /* We compute the result with four times the precision and check whether the rounding is correct. Error reports in this part of the algorithm might still be wrong, though, since there are two consecutive roundings (but we try to avoid them). */ function->pointer.CC (rop4, op, rnd); function->pointer.CC (rop, op, rnd); /* can't use the mpfr_can_round function when argument is singular, use a custom macro instead. */ if (MPFR_CAN_ROUND (mpc_realref (rop4), 1, MPC_PREC_RE (rop), MPC_RND_RE (rnd)) && MPFR_CAN_ROUND (mpc_imagref (rop4), 1, MPC_PREC_IM (rop), MPC_RND_IM (rnd))) mpc_set (rop4rnd, rop4, rnd); else /* avoid double rounding error */ return; if (same_mpc_value (rop, rop4rnd, ks)) return; /* rounding failed */ printf ("Rounding in %s might be incorrect for\n", function->name); MPC_OUT (op); printf ("with rounding mode (%s, %s)", mpfr_print_rnd_mode (MPC_RND_RE (rnd)), mpfr_print_rnd_mode (MPC_RND_IM (rnd))); printf ("\n%s gives ", function->name); MPC_OUT (rop); printf ("%s quadruple precision gives ", function->name); MPC_OUT (rop4); printf ("and is rounded to "); MPC_OUT (rop4rnd); exit (1); }
static int mpc_sin_cos_imag (mpc_ptr rop_sin, mpc_ptr rop_cos, mpc_srcptr op, mpc_rnd_t rnd_sin, mpc_rnd_t rnd_cos) /* assumes that op is purely imaginary */ { int inex_sin_im = 0, inex_cos_re = 0; /* assume exact if not computed */ int overlap; mpc_t op_loc; overlap = (rop_sin == op || rop_cos == op); if (overlap) { mpc_init3 (op_loc, MPC_PREC_RE (op), MPC_PREC_IM (op)); mpc_set (op_loc, op, MPC_RNDNN); } else op_loc [0] = op [0]; if (rop_sin != NULL) { /* sin(+-0 +i*y) = +-0 +i*sinh(y) */ mpfr_set (MPC_RE(rop_sin), MPC_RE(op_loc), GMP_RNDN); inex_sin_im = mpfr_sinh (MPC_IM(rop_sin), MPC_IM(op_loc), MPC_RND_IM(rnd_sin)); } if (rop_cos != NULL) { /* cos(-0 - i * y) = cos(+0 + i * y) = cosh(y) - i * 0, cos(-0 + i * y) = cos(+0 - i * y) = cosh(y) + i * 0, where y >= 0 */ if (mpfr_zero_p (MPC_IM (op_loc))) inex_cos_re = mpfr_set_ui (MPC_RE (rop_cos), 1ul, MPC_RND_RE (rnd_cos)); else inex_cos_re = mpfr_cosh (MPC_RE (rop_cos), MPC_IM (op_loc), MPC_RND_RE (rnd_cos)); mpfr_set_ui (MPC_IM (rop_cos), 0ul, MPC_RND_IM (rnd_cos)); if (mpfr_signbit (MPC_RE (op_loc)) == mpfr_signbit (MPC_IM (op_loc))) MPFR_CHANGE_SIGN (MPC_IM (rop_cos)); } if (overlap) mpc_clear (op_loc); return MPC_INEX12 (MPC_INEX (0, inex_sin_im), MPC_INEX (inex_cos_re, 0)); }
void ocopy(oobject_t *pointer, oobject_t value) { switch (otype(value)) { case t_word: oget_word(pointer); pointer = *pointer; *(oword_t *)pointer = *(oword_t *)value; break; case t_float: oget_float(pointer); pointer = *pointer; *(ofloat_t *)pointer = *(ofloat_t *)value; break; case t_rat: oget_rat(pointer); pointer = *pointer; *(orat_t)pointer = *(orat_t)value; break; case t_mpz: oget_mpz(pointer); pointer = *pointer; mpz_set((ompz_t)pointer, (ompz_t)value); break; case t_mpq: oget_mpq(pointer); pointer = *pointer; mpq_set((ompq_t)pointer, (ompq_t)value); break; case t_mpr: oget_mpr(pointer); pointer = *pointer; mpfr_set((ompr_t)pointer, (ompr_t)value, thr_rnd); break; case t_cdd: oget_cdd(pointer); pointer = *pointer; *(ocdd_t *)pointer = *(ocdd_t *)value; break; case t_cqq: oget_cqq(pointer); pointer = *pointer; cqq_set((ocqq_t)pointer, (ocqq_t)value); break; default: assert(otype(value) == t_mpc); oget_mpc(pointer); pointer = *pointer; mpc_set((ompc_t)pointer, (ompc_t)value, thr_rndc); break; } }
static void reuse_cuc (mpc_function* function, unsigned long ul, mpc_srcptr z, mpc_ptr got, mpc_ptr expected) { known_signs_t ks = {1, 1}; mpc_set (got, z, MPC_RNDNN); /* exact */ function->pointer.CUC (expected, ul, z, MPC_RNDNN); function->pointer.CUC (got, ul, got, MPC_RNDNN); if (!same_mpc_value (got, expected, ks)) { printf ("Reuse error for %s(z, n, z) for\n", function->name); printf ("n=%lu\n", ul); MPC_OUT (z); MPC_OUT (expected); MPC_OUT (got); exit (1); } }
static void reuse_cci (mpc_function* function, mpc_srcptr z, int i, mpc_ptr got, mpc_ptr expected) { known_signs_t ks = {1, 1}; mpc_set (got, z, MPC_RNDNN); /* exact */ function->pointer.CCI (expected, z, i, MPC_RNDNN); function->pointer.CCI (got, got, i, MPC_RNDNN); if (!same_mpc_value (got, expected, ks)) { printf ("Reuse error for %s(z, z, n) for\n", function->name); MPC_OUT (z); printf ("n=%d\n", i); MPC_OUT (expected); MPC_OUT (got); exit (1); } }
static void eval_setup(oregister_t *r, oobject_t v) { if (v == null) { r->t = t_void; r->v.o = null; } else { switch (r->t = otype(v)) { case t_word: r->v.w = *(oword_t *)v; break; case t_float: r->v.d = *(ofloat_t *)v; break; case t_rat: r->v.r = *(orat_t)v; break; case t_mpz: mpz_set(ozr(r), (ompz_t)v); break; case t_mpq: mpq_set(oqr(r), (ompq_t)v); break; case t_mpr: mpfr_set(orr(r), (ompr_t)v, thr_rnd); break; case t_cdd: r->v.dd = *(ocdd_t *)v; break; case t_cqq: cqq_set(oqq(r), (ocqq_t)v); break; default: assert(r->t == t_mpc); mpc_set(occ(r), (ompc_t)v, thr_rndc); break; } } }
void mps_maberth_s_wl (mps_context * s, int j, mps_cluster * cluster, mpc_t abcorr, pthread_mutex_t * aberth_mutexes) { int k; mps_root * root; cdpe_t z, temp; mpc_t diff, mroot; mpc_init2 (mroot, s->mpwp); mpc_init2 (diff, s->mpwp); pthread_mutex_lock (&aberth_mutexes[j]); mpc_set (mroot, s->root[j]->mvalue); pthread_mutex_unlock (&aberth_mutexes[j]); cdpe_set (temp, cdpe_zero); for (root = cluster->first; root != NULL; root = root->next) { k = root->k; if (k == j) continue; pthread_mutex_lock (&aberth_mutexes[k]); mpc_sub (diff, mroot, s->root[k]->mvalue); pthread_mutex_unlock (&aberth_mutexes[k]); mpc_get_cdpe (z, diff); if (cdpe_eq_zero(z)) continue; cdpe_inv_eq (z); cdpe_add_eq (temp, z); } mpc_set_cdpe (abcorr, temp); mpc_clear (mroot); mpc_clear (diff); }
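The loop above accumulates the usual Aberth correction denominator in cdpe arithmetic: with z_j the current approximation of root j, it computes

\[ \sum_{\substack{k \in \text{cluster} \\ k \neq j}} \frac{1}{z_j - z_k}, \]

locking the per-root mutexes only around the reads of shared approximations and skipping exactly coincident roots so that no division by zero occurs.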
/****************************************************** * SUBROUTINE MNEWTON_USR * ******************************************************* multiprecision computation ******************************************************/ void mnewton_usr (mpc_t x, rdpe_t rad, mpc_t corr, mps_boolean * again) { int i, m; rdpe_t ap, ax, eps, temp, apeps, atmp; cdpe_t ctmp; tmpc_t p, pp, pt, tmp; tmpc_init2 (p, mpwp); tmpc_init2 (pp, mpwp); tmpc_init2 (pt, mpwp); tmpc_init2 (tmp, mpwp); m = (int) (log (n + 1.0) / LOG2); if ((1 << m) <= n) m++; rdpe_set (eps, mp_epsilon); rdpe_mul_eq_d (eps, (double) 4 * n); mpc_get_cdpe (ctmp, x); cdpe_mod (ax, ctmp); mpc_set_ui (p, 1, 0); mpc_set_ui (pp, 0, 0); rdpe_set (ap, rdpe_one); for (i = 1; i <= m; i++) { mpc_sqr (tmp, p); mpc_mul (pt, x, tmp); mpc_add_eq_ui (pt, 1, 0); mpc_mul_eq (pp, x); mpc_mul_eq (pp, p); mpc_mul_eq_ui (pp, 2); mpc_add_eq (pp, tmp); mpc_set (p, pt); rdpe_mul_eq (ap, ax); mpc_get_cdpe (ctmp, p); cdpe_mod (atmp, ctmp); rdpe_add_eq (ap, atmp); } rdpe_mul_eq (ap, ax); mpc_div (corr, p, pp); mpc_get_cdpe (ctmp, p); cdpe_mod (temp, ctmp); rdpe_mul (apeps, ap, eps); rdpe_mul_eq_d (apeps, 3.0); *again = rdpe_gt (temp, apeps); rdpe_add (rad, temp, apeps); rdpe_mul_eq_d (rad, (double) n); mpc_get_cdpe (ctmp, pp); cdpe_mod (temp, ctmp); rdpe_div_eq (rad, temp); if (rdpe_eq (rad, rdpe_zero)) rdpe_mul (rad, ax, eps); tmpc_clear (tmp); tmpc_clear (pt); tmpc_clear (pp); tmpc_clear (p); }
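The main loop above evaluates what appears to be the Mandelbrot-type recurrence used in MPSolve's user polynomials together with its derivative, and the Newton correction is their quotient:

\[ p_{i+1}(x) = x\,p_i(x)^2 + 1, \qquad p_{i+1}'(x) = 2 x\, p_i(x)\, p_i'(x) + p_i(x)^2, \qquad \mathrm{corr} = \frac{p_m(x)}{p_m'(x)}, \]

while ap accumulates a running bound on the evaluation error that drives both the again flag and the inclusion radius rad.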
mps_boolean mps_chebyshev_poly_meval (mps_context * ctx, mps_polynomial * poly, mpc_t x, mpc_t value, rdpe_t error) { long int wp = mpc_get_prec (x); /* Lower the working precision in case of limited precision coefficients * in the input polynomial. */ if (poly->prec > 0 && poly->prec < wp) wp = poly->prec; mps_chebyshev_poly * cpoly = MPS_CHEBYSHEV_POLY (poly); int i; mpc_t t0, t1, ctmp, ctmp2; rdpe_t ax, rtmp, rtmp2; mpc_rmod (ax, x); rdpe_set (error, rdpe_zero); /* Make sure that we have sufficient precision to perform the computation */ mps_polynomial_raise_data (ctx, poly, wp); mpc_init2 (t0, wp); mpc_init2 (t1, wp); mpc_init2 (ctmp, wp); mpc_init2 (ctmp2, wp); mpc_set (value, cpoly->mfpc[0]); mpc_set_ui (t0, 1U, 0U); if (poly->degree == 0) { /* clear the temporaries initialized above before the early return */ mpc_clear (t0); mpc_clear (t1); mpc_clear (ctmp); mpc_clear (ctmp2); return true; } mpc_set (t1, x); mpc_mul (ctmp, cpoly->mfpc[1], x); mpc_add_eq (value, ctmp); mpc_rmod (rtmp, ctmp); rdpe_add_eq (error, rtmp); for (i = 2; i <= poly->degree; i++) { mpc_mul (ctmp, x, t1); mpc_mul_eq_ui (ctmp, 2U); mpc_rmod (rtmp, ctmp); mpc_sub_eq (ctmp, t0); mpc_rmod (rtmp2, t0); rdpe_add_eq (rtmp, rtmp2); mpc_mul (ctmp2, ctmp, cpoly->mfpc[i]); mpc_add_eq (value, ctmp2); rdpe_mul_eq (rtmp, ax); rdpe_add_eq (error, rtmp); mpc_set (t0, t1); mpc_set (t1, ctmp); } mpc_clear (t0); mpc_clear (t1); mpc_clear (ctmp); mpc_clear (ctmp2); rdpe_set_2dl (rtmp, 2.0, -wp); rdpe_mul_eq (error, rtmp); return true; }
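The evaluation above is the forward three-term Chebyshev recurrence with a running error bound; in exact arithmetic it computes

\[ T_0(x) = 1,\quad T_1(x) = x,\quad T_i(x) = 2 x\, T_{i-1}(x) - T_{i-2}(x), \qquad p(x) = \sum_{i=0}^{n} c_i\, T_i(x), \]

with c_i = cpoly->mfpc[i]; the accumulated rdpe term is finally scaled by 2^{1-wp} to turn it into an error estimate at the working precision wp.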
/* Put in z the value of x^y, rounded according to 'rnd'. Return the inexact flag in [0, 10]. */ int mpc_pow (mpc_ptr z, mpc_srcptr x, mpc_srcptr y, mpc_rnd_t rnd) { int ret = -2, loop, x_real, x_imag, y_real, z_real = 0, z_imag = 0; mpc_t t, u; mpfr_prec_t p, pr, pi, maxprec; int saved_underflow, saved_overflow; /* save the underflow or overflow flags from MPFR */ saved_underflow = mpfr_underflow_p (); saved_overflow = mpfr_overflow_p (); x_real = mpfr_zero_p (mpc_imagref(x)); y_real = mpfr_zero_p (mpc_imagref(y)); if (y_real && mpfr_zero_p (mpc_realref(y))) /* case y zero */ { if (x_real && mpfr_zero_p (mpc_realref(x))) { /* we define 0^0 to be (1, +0) since the real part is coherent with MPFR where 0^0 gives 1, and the sign of the imaginary part cannot be determined */ mpc_set_ui_ui (z, 1, 0, MPC_RNDNN); return 0; } else /* x^0 = 1 +/- i*0 even for x=NaN see algorithms.tex for the sign of zero */ { mpfr_t n; int inex, cx1; int sign_zi; /* cx1 < 0 if |x| < 1 cx1 = 0 if |x| = 1 cx1 > 0 if |x| > 1 */ mpfr_init (n); inex = mpc_norm (n, x, MPFR_RNDN); cx1 = mpfr_cmp_ui (n, 1); if (cx1 == 0 && inex != 0) cx1 = -inex; sign_zi = (cx1 < 0 && mpfr_signbit (mpc_imagref (y)) == 0) || (cx1 == 0 && mpfr_signbit (mpc_imagref (x)) != mpfr_signbit (mpc_realref (y))) || (cx1 > 0 && mpfr_signbit (mpc_imagref (y))); /* warning: mpc_set_ui_ui does not set Im(z) to -0 if Im(rnd)=RNDD */ ret = mpc_set_ui_ui (z, 1, 0, rnd); if (MPC_RND_IM (rnd) == MPFR_RNDD || sign_zi) mpc_conj (z, z, MPC_RNDNN); mpfr_clear (n); return ret; } } if (!mpc_fin_p (x) || !mpc_fin_p (y)) { /* special values: exp(y*log(x)) */ mpc_init2 (u, 2); mpc_log (u, x, MPC_RNDNN); mpc_mul (u, u, y, MPC_RNDNN); ret = mpc_exp (z, u, rnd); mpc_clear (u); goto end; } if (x_real) /* case x real */ { if (mpfr_zero_p (mpc_realref(x))) /* x is zero */ { /* special values: exp(y*log(x)) */ mpc_init2 (u, 2); mpc_log (u, x, MPC_RNDNN); mpc_mul (u, u, y, MPC_RNDNN); ret = mpc_exp (z, u, rnd); mpc_clear (u); goto end; } /* Special case 1^y = 1 */ if (mpfr_cmp_ui (mpc_realref(x), 1) == 0) { int s1, s2; s1 = mpfr_signbit (mpc_realref (y)); s2 = mpfr_signbit (mpc_imagref (x)); ret = mpc_set_ui (z, +1, rnd); /* the sign of the zero imaginary part is known in some cases (see algorithm.tex). In such cases we have (x +s*0i)^(y+/-0i) = x^y + s*sign(y)*0i where s = +/-1. We extend here this rule to fix the sign of the zero part. Note that the sign must also be set explicitly when rnd=RNDD because mpfr_set_ui(z_i, 0, rnd) always sets z_i to +0. */ if (MPC_RND_IM (rnd) == MPFR_RNDD || s1 != s2) mpc_conj (z, z, MPC_RNDNN); goto end; } /* x^y is real when: (a) x is real and y is integer (b) x is real non-negative and y is real */ if (y_real && (mpfr_integer_p (mpc_realref(y)) || mpfr_cmp_ui (mpc_realref(x), 0) >= 0)) { int s1, s2; s1 = mpfr_signbit (mpc_realref (y)); s2 = mpfr_signbit (mpc_imagref (x)); ret = mpfr_pow (mpc_realref(z), mpc_realref(x), mpc_realref(y), MPC_RND_RE(rnd)); ret = MPC_INEX(ret, mpfr_set_ui (mpc_imagref(z), 0, MPC_RND_IM(rnd))); /* the sign of the zero imaginary part is known in some cases (see algorithm.tex). In such cases we have (x +s*0i)^(y+/-0i) = x^y + s*sign(y)*0i where s = +/-1. We extend here this rule to fix the sign of the zero part. Note that the sign must also be set explicitly when rnd=RNDD because mpfr_set_ui(z_i, 0, rnd) always sets z_i to +0. 
*/ if (MPC_RND_IM(rnd) == MPFR_RNDD || s1 != s2) mpfr_neg (mpc_imagref(z), mpc_imagref(z), MPC_RND_IM(rnd)); goto end; } /* (-1)^(n+I*t) is real for n integer and t real */ if (mpfr_cmp_si (mpc_realref(x), -1) == 0 && mpfr_integer_p (mpc_realref(y))) z_real = 1; /* for x real, x^y is imaginary when: (a) x is negative and y is half-an-integer (b) x = -1 and Re(y) is half-an-integer */ if ((mpfr_cmp_ui (mpc_realref(x), 0) < 0) && is_odd (mpc_realref(y), 1) && (y_real || mpfr_cmp_si (mpc_realref(x), -1) == 0)) z_imag = 1; } else /* x non real */ /* I^(t*I) and (-I)^(t*I) are real for t real, I^(n+t*I) and (-I)^(n+t*I) are real for n even and t real, and I^(n+t*I) and (-I)^(n+t*I) are imaginary for n odd and t real (s*I)^n is real for n even and imaginary for n odd */ if ((mpc_cmp_si_si (x, 0, 1) == 0 || mpc_cmp_si_si (x, 0, -1) == 0 || (mpfr_cmp_ui (mpc_realref(x), 0) == 0 && y_real)) && mpfr_integer_p (mpc_realref(y))) { /* x is I or -I, and Re(y) is an integer */ if (is_odd (mpc_realref(y), 0)) z_imag = 1; /* Re(y) odd: z is imaginary */ else z_real = 1; /* Re(y) even: z is real */ } else /* (t+/-t*I)^(2n) is imaginary for n odd and real for n even */ if (mpfr_cmpabs (mpc_realref(x), mpc_imagref(x)) == 0 && y_real && mpfr_integer_p (mpc_realref(y)) && is_odd (mpc_realref(y), 0) == 0) { if (is_odd (mpc_realref(y), -1)) /* y/2 is odd */ z_imag = 1; else z_real = 1; } pr = mpfr_get_prec (mpc_realref(z)); pi = mpfr_get_prec (mpc_imagref(z)); p = (pr > pi) ? pr : pi; p += 12; /* experimentally, seems to give less than 10% of failures in Ziv's strategy; probably wrong now since q is not computed */ if (p < 64) p = 64; mpc_init2 (u, p); mpc_init2 (t, p); pr += MPC_RND_RE(rnd) == MPFR_RNDN; pi += MPC_RND_IM(rnd) == MPFR_RNDN; maxprec = MPC_MAX_PREC (z); x_imag = mpfr_zero_p (mpc_realref(x)); for (loop = 0;; loop++) { int ret_exp; mpfr_exp_t dr, di; mpfr_prec_t q; mpc_log (t, x, MPC_RNDNN); mpc_mul (t, t, y, MPC_RNDNN); /* Compute q such that |Re (y log x)|, |Im (y log x)| < 2^q. We recompute it at each loop since we might get different bounds if the precision is not enough. */ q = mpfr_get_exp (mpc_realref(t)) > 0 ? mpfr_get_exp (mpc_realref(t)) : 0; if (mpfr_get_exp (mpc_imagref(t)) > (mpfr_exp_t) q) q = mpfr_get_exp (mpc_imagref(t)); mpfr_clear_overflow (); mpfr_clear_underflow (); ret_exp = mpc_exp (u, t, MPC_RNDNN); if (mpfr_underflow_p () || mpfr_overflow_p ()) { /* under- and overflow flags are set by mpc_exp */ mpc_set (z, u, MPC_RNDNN); ret = ret_exp; goto exact; } /* Since the error bound is global, we have to take into account the exponent difference between the real and imaginary parts. We assume either the real or the imaginary part of u is not zero. */ dr = mpfr_zero_p (mpc_realref(u)) ? mpfr_get_exp (mpc_imagref(u)) : mpfr_get_exp (mpc_realref(u)); di = mpfr_zero_p (mpc_imagref(u)) ? 
dr : mpfr_get_exp (mpc_imagref(u)); if (dr > di) { di = dr - di; dr = 0; } else { dr = di - dr; di = 0; } /* the term -3 takes into account the factor 4 in the complex error (see algorithms.tex) plus one due to the exponent difference: if z = a + I*b, where the relative error on z is at most 2^(-p), and EXP(a) = EXP(b) + k, the relative error on b is at most 2^(k-p) */ if ((z_imag || (p > q + 3 + dr && mpfr_can_round (mpc_realref(u), p - q - 3 - dr, MPFR_RNDN, MPFR_RNDZ, pr))) && (z_real || (p > q + 3 + di && mpfr_can_round (mpc_imagref(u), p - q - 3 - di, MPFR_RNDN, MPFR_RNDZ, pi)))) break; /* if Re(u) is not known to be zero, assume it is a normal number, i.e., neither zero, Inf or NaN, otherwise we might enter an infinite loop */ MPC_ASSERT (z_imag || mpfr_number_p (mpc_realref(u))); /* idem for Im(u) */ MPC_ASSERT (z_real || mpfr_number_p (mpc_imagref(u))); if (ret == -2) /* we did not yet call mpc_pow_exact, or it aborted because intermediate computations had > maxprec bits */ { /* check exact cases (see algorithms.tex) */ if (y_real) { maxprec *= 2; ret = mpc_pow_exact (z, x, mpc_realref(y), rnd, maxprec); if (ret != -1 && ret != -2) goto exact; } p += dr + di + 64; } else p += p / 2; mpc_set_prec (t, p); mpc_set_prec (u, p); } if (z_real) { /* When the result is real (see algorithm.tex for details), Im(x^y) = + sign(imag(y))*0i, if |x| > 1 + sign(imag(x))*sign(real(y))*0i, if |x| = 1 - sign(imag(y))*0i, if |x| < 1 */ mpfr_t n; int inex, cx1; int sign_zi, sign_rex, sign_imx; /* cx1 < 0 if |x| < 1 cx1 = 0 if |x| = 1 cx1 > 0 if |x| > 1 */ sign_rex = mpfr_signbit (mpc_realref (x)); sign_imx = mpfr_signbit (mpc_imagref (x)); mpfr_init (n); inex = mpc_norm (n, x, MPFR_RNDN); cx1 = mpfr_cmp_ui (n, 1); if (cx1 == 0 && inex != 0) cx1 = -inex; sign_zi = (cx1 < 0 && mpfr_signbit (mpc_imagref (y)) == 0) || (cx1 == 0 && sign_imx != mpfr_signbit (mpc_realref (y))) || (cx1 > 0 && mpfr_signbit (mpc_imagref (y))); /* copy RE(y) to n since if z==y we will destroy Re(y) below */ mpfr_set_prec (n, mpfr_get_prec (mpc_realref (y))); mpfr_set (n, mpc_realref (y), MPFR_RNDN); ret = mpfr_set (mpc_realref(z), mpc_realref(u), MPC_RND_RE(rnd)); if (y_real && (x_real || x_imag)) { /* FIXME: with y_real we assume Im(y) is really 0, which is the case for example when y comes from pow_fr, but in case Im(y) is +0 or -0, we might get different results */ mpfr_set_ui (mpc_imagref (z), 0, MPC_RND_IM (rnd)); fix_sign (z, sign_rex, sign_imx, n); ret = MPC_INEX(ret, 0); /* imaginary part is exact */ } else { ret = MPC_INEX (ret, mpfr_set_ui (mpc_imagref (z), 0, MPC_RND_IM (rnd))); /* warning: mpfr_set_ui does not set Im(z) to -0 if Im(rnd) = RNDD */ if (MPC_RND_IM (rnd) == MPFR_RNDD || sign_zi) mpc_conj (z, z, MPC_RNDNN); } mpfr_clear (n); } else if (z_imag) { ret = mpfr_set (mpc_imagref(z), mpc_imagref(u), MPC_RND_IM(rnd)); /* if z is imaginary and y real, then x cannot be real */ if (y_real && x_imag) { int sign_rex = mpfr_signbit (mpc_realref (x)); /* If z overlaps with y we set Re(z) before checking Re(y) below, but in that case y=0, which was dealt with above. */ mpfr_set_ui (mpc_realref (z), 0, MPC_RND_RE (rnd)); /* Note: fix_sign only does something when y is an integer, then necessarily y = 1 or 3 (mod 4), and in that case the sign of Im(x) is irrelevant. 
*/ fix_sign (z, sign_rex, 0, mpc_realref (y)); ret = MPC_INEX(0, ret); } else ret = MPC_INEX(mpfr_set_ui (mpc_realref(z), 0, MPC_RND_RE(rnd)), ret); } else ret = mpc_set (z, u, rnd); exact: mpc_clear (t); mpc_clear (u); /* restore underflow and overflow flags from MPFR */ if (saved_underflow) mpfr_set_underflow (); if (saved_overflow) mpfr_set_overflow (); end: return ret; }
int mpc_sqrt (mpc_ptr a, mpc_srcptr b, mpc_rnd_t rnd) { int ok_w, ok_t = 0; mpfr_t w, t; mp_rnd_t rnd_w, rnd_t; mp_prec_t prec_w, prec_t; /* the rounding mode and the precision required for w and t, which can */ /* be either the real or the imaginary part of a */ mp_prec_t prec; int inex_w, inex_t = 1, inex, loops = 0; /* comparison of the real/imaginary part of b with 0 */ const int re_cmp = mpfr_cmp_ui (MPC_RE (b), 0); const int im_cmp = mpfr_cmp_ui (MPC_IM (b), 0); /* we need to know the sign of Im(b) when it is +/-0 */ const int im_sgn = mpfr_signbit (MPC_IM (b)) == 0? 0 : -1; /* special values */ /* sqrt(x +i*Inf) = +Inf +I*Inf, even if x = NaN */ /* sqrt(x -i*Inf) = +Inf -I*Inf, even if x = NaN */ if (mpfr_inf_p (MPC_IM (b))) { mpfr_set_inf (MPC_RE (a), +1); mpfr_set_inf (MPC_IM (a), im_sgn); return MPC_INEX (0, 0); } if (mpfr_inf_p (MPC_RE (b))) { if (mpfr_signbit (MPC_RE (b))) { if (mpfr_number_p (MPC_IM (b))) { /* sqrt(-Inf +i*y) = +0 +i*Inf, when y positive */ /* sqrt(-Inf +i*y) = +0 -i*Inf, when y positive */ mpfr_set_ui (MPC_RE (a), 0, GMP_RNDN); mpfr_set_inf (MPC_IM (a), im_sgn); return MPC_INEX (0, 0); } else { /* sqrt(-Inf +i*NaN) = NaN +/-i*Inf */ mpfr_set_nan (MPC_RE (a)); mpfr_set_inf (MPC_IM (a), im_sgn); return MPC_INEX (0, 0); } } else { if (mpfr_number_p (MPC_IM (b))) { /* sqrt(+Inf +i*y) = +Inf +i*0, when y positive */ /* sqrt(+Inf +i*y) = +Inf -i*0, when y positive */ mpfr_set_inf (MPC_RE (a), +1); mpfr_set_ui (MPC_IM (a), 0, GMP_RNDN); if (im_sgn) mpc_conj (a, a, MPC_RNDNN); return MPC_INEX (0, 0); } else { /* sqrt(+Inf -i*Inf) = +Inf -i*Inf */ /* sqrt(+Inf +i*Inf) = +Inf +i*Inf */ /* sqrt(+Inf +i*NaN) = +Inf +i*NaN */ return mpc_set (a, b, rnd); } } } /* sqrt(x +i*NaN) = NaN +i*NaN, if x is not infinite */ /* sqrt(NaN +i*y) = NaN +i*NaN, if y is not infinite */ if (mpfr_nan_p (MPC_RE (b)) || mpfr_nan_p (MPC_IM (b))) { mpfr_set_nan (MPC_RE (a)); mpfr_set_nan (MPC_IM (a)); return MPC_INEX (0, 0); } /* purely real */ if (im_cmp == 0) { if (re_cmp == 0) { mpc_set_ui_ui (a, 0, 0, MPC_RNDNN); if (im_sgn) mpc_conj (a, a, MPC_RNDNN); return MPC_INEX (0, 0); } else if (re_cmp > 0) { inex_w = mpfr_sqrt (MPC_RE (a), MPC_RE (b), MPC_RND_RE (rnd)); mpfr_set_ui (MPC_IM (a), 0, GMP_RNDN); if (im_sgn) mpc_conj (a, a, MPC_RNDNN); return MPC_INEX (inex_w, 0); } else { mpfr_init2 (w, MPFR_PREC (MPC_RE (b))); mpfr_neg (w, MPC_RE (b), GMP_RNDN); if (im_sgn) { inex_w = -mpfr_sqrt (MPC_IM (a), w, INV_RND (MPC_RND_IM (rnd))); mpfr_neg (MPC_IM (a), MPC_IM (a), GMP_RNDN); } else inex_w = mpfr_sqrt (MPC_IM (a), w, MPC_RND_IM (rnd)); mpfr_set_ui (MPC_RE (a), 0, GMP_RNDN); mpfr_clear (w); return MPC_INEX (0, inex_w); } } /* purely imaginary */ if (re_cmp == 0) { mpfr_t y; y[0] = MPC_IM (b)[0]; /* If y/2 underflows, so does sqrt(y/2) */ mpfr_div_2ui (y, y, 1, GMP_RNDN); if (im_cmp > 0) { inex_w = mpfr_sqrt (MPC_RE (a), y, MPC_RND_RE (rnd)); inex_t = mpfr_sqrt (MPC_IM (a), y, MPC_RND_IM (rnd)); } else { mpfr_neg (y, y, GMP_RNDN); inex_w = mpfr_sqrt (MPC_RE (a), y, MPC_RND_RE (rnd)); inex_t = -mpfr_sqrt (MPC_IM (a), y, INV_RND (MPC_RND_IM (rnd))); mpfr_neg (MPC_IM (a), MPC_IM (a), GMP_RNDN); } return MPC_INEX (inex_w, inex_t); } prec = MPC_MAX_PREC(a); mpfr_init (w); mpfr_init (t); if (re_cmp >= 0) { rnd_w = MPC_RND_RE (rnd); prec_w = MPFR_PREC (MPC_RE (a)); rnd_t = MPC_RND_IM(rnd); prec_t = MPFR_PREC (MPC_IM (a)); } else { rnd_w = MPC_RND_IM(rnd); prec_w = MPFR_PREC (MPC_IM (a)); rnd_t = MPC_RND_RE(rnd); prec_t = MPFR_PREC (MPC_RE (a)); if (im_cmp < 0) { rnd_w = INV_RND(rnd_w); rnd_t = 
INV_RND(rnd_t); } } do { loops ++; prec += (loops <= 2) ? mpc_ceil_log2 (prec) + 4 : prec / 2; mpfr_set_prec (w, prec); mpfr_set_prec (t, prec); /* let b = x + iy */ /* w = sqrt ((|x| + sqrt (x^2 + y^2)) / 2), rounded down */ /* total error bounded by 3 ulps */ inex_w = mpc_abs (w, b, GMP_RNDD); if (re_cmp < 0) inex_w |= mpfr_sub (w, w, MPC_RE (b), GMP_RNDD); else inex_w |= mpfr_add (w, w, MPC_RE (b), GMP_RNDD); inex_w |= mpfr_div_2ui (w, w, 1, GMP_RNDD); inex_w |= mpfr_sqrt (w, w, GMP_RNDD); ok_w = mpfr_can_round (w, (mp_exp_t) prec - 2, GMP_RNDD, GMP_RNDU, prec_w + (rnd_w == GMP_RNDN)); if (!inex_w || ok_w) { /* t = y / 2w, rounded away */ /* total error bounded by 7 ulps */ const mp_rnd_t r = im_sgn ? GMP_RNDD : GMP_RNDU; inex_t = mpfr_div (t, MPC_IM (b), w, r); inex_t |= mpfr_div_2ui (t, t, 1, r); ok_t = mpfr_can_round (t, (mp_exp_t) prec - 3, r, GMP_RNDZ, prec_t + (rnd_t == GMP_RNDN)); /* As for w; since t was rounded away, we check whether rounding to 0 is possible. */ } } while ((inex_w && !ok_w) || (inex_t && !ok_t)); if (re_cmp > 0) inex = MPC_INEX (mpfr_set (MPC_RE (a), w, MPC_RND_RE(rnd)), mpfr_set (MPC_IM (a), t, MPC_RND_IM(rnd))); else if (im_cmp > 0) inex = MPC_INEX (mpfr_set (MPC_RE(a), t, MPC_RND_RE(rnd)), mpfr_set (MPC_IM(a), w, MPC_RND_IM(rnd))); else inex = MPC_INEX (mpfr_neg (MPC_RE (a), t, MPC_RND_RE(rnd)), mpfr_neg (MPC_IM (a), w, MPC_RND_IM(rnd))); mpfr_clear (w); mpfr_clear (t); return inex; }
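The loop above relies on the classical formula for the principal square root; for b = x + i y with x >= 0 it computes

\[ w = \sqrt{\frac{|x| + \sqrt{x^2 + y^2}}{2}}, \qquad t = \frac{y}{2 w}, \qquad \sqrt{x + i y} = w + i t, \]

and for x < 0 the roles of w and t are exchanged (with signs taken from y), which is why rnd_w, prec_w and rnd_t, prec_t are attached to opposite parts of a before the loop.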
int mpc_div (mpc_ptr a, mpc_srcptr b, mpc_srcptr c, mpc_rnd_t rnd) { int ok_re = 0, ok_im = 0; mpc_t res, c_conj; mpfr_t q; mpfr_prec_t prec; int inex, inexact_prod, inexact_norm, inexact_re, inexact_im, loops = 0; int underflow_norm, overflow_norm, underflow_prod, overflow_prod; int underflow_re = 0, overflow_re = 0, underflow_im = 0, overflow_im = 0; mpfr_rnd_t rnd_re = MPC_RND_RE (rnd), rnd_im = MPC_RND_IM (rnd); int saved_underflow, saved_overflow; int tmpsgn; mpfr_exp_t e, emin, emax, emid; /* for scaling of exponents */ mpc_t b_scaled, c_scaled; mpfr_t b_re, b_im, c_re, c_im; /* According to the C standard G.3, there are three types of numbers: */ /* finite (both parts are usual real numbers; contains 0), infinite */ /* (at least one part is a real infinity) and all others; the latter */ /* are numbers containing a nan, but no infinity, and could reasonably */ /* be called nan. */ /* By G.5.1.4, infinite/finite=infinite; finite/infinite=0; */ /* all other divisions that are not finite/finite return nan+i*nan. */ /* Division by 0 could be handled by the following case of division by */ /* a real; we handle it separately instead. */ if (mpc_zero_p (c)) /* both Re(c) and Im(c) are zero */ return mpc_div_zero (a, b, c, rnd); else if (mpc_inf_p (b) && mpc_fin_p (c)) /* either Re(b) or Im(b) is infinite and both Re(c) and Im(c) are ordinary */ return mpc_div_inf_fin (a, b, c); else if (mpc_fin_p (b) && mpc_inf_p (c)) return mpc_div_fin_inf (a, b, c); else if (!mpc_fin_p (b) || !mpc_fin_p (c)) { mpc_set_nan (a); return MPC_INEX (0, 0); } else if (mpfr_zero_p(mpc_imagref(c))) return mpc_div_real (a, b, c, rnd); else if (mpfr_zero_p(mpc_realref(c))) return mpc_div_imag (a, b, c, rnd); prec = MPC_MAX_PREC(a); mpc_init2 (res, 2); mpfr_init (q); /* compute scaling of exponents: none of Re(c) and Im(c) can be zero, but one of Re(b) or Im(b) could be zero */ e = mpfr_get_exp (mpc_realref (c)); emin = emax = e; e = mpfr_get_exp (mpc_imagref (c)); if (e > emax) emax = e; else if (e < emin) emin = e; if (!mpfr_zero_p (mpc_realref (b))) { e = mpfr_get_exp (mpc_realref (b)); if (e > emax) emax = e; else if (e < emin) emin = e; } if (!mpfr_zero_p (mpc_imagref (b))) { e = mpfr_get_exp (mpc_imagref (b)); if (e > emax) emax = e; else if (e < emin) emin = e; } /* all input exponents are in [emin, emax] */ emid = emin / 2 + emax / 2; /* scale the inputs */ b_re[0] = mpc_realref (b)[0]; if (!mpfr_zero_p (mpc_realref (b))) MPFR_EXP(b_re) = MPFR_EXP(mpc_realref (b)) - emid; b_im[0] = mpc_imagref (b)[0]; if (!mpfr_zero_p (mpc_imagref (b))) MPFR_EXP(b_im) = MPFR_EXP(mpc_imagref (b)) - emid; c_re[0] = mpc_realref (c)[0]; MPFR_EXP(c_re) = MPFR_EXP(mpc_realref (c)) - emid; c_im[0] = mpc_imagref (c)[0]; MPFR_EXP(c_im) = MPFR_EXP(mpc_imagref (c)) - emid; /* create the scaled inputs without allocating new memory */ mpc_realref (b_scaled)[0] = b_re[0]; mpc_imagref (b_scaled)[0] = b_im[0]; mpc_realref (c_scaled)[0] = c_re[0]; mpc_imagref (c_scaled)[0] = c_im[0]; /* create the conjugate of c in c_conj without allocating new memory */ mpc_realref (c_conj)[0] = mpc_realref (c_scaled)[0]; mpc_imagref (c_conj)[0] = mpc_imagref (c_scaled)[0]; MPFR_CHANGE_SIGN (mpc_imagref (c_conj)); /* save the underflow or overflow flags from MPFR */ saved_underflow = mpfr_underflow_p (); saved_overflow = mpfr_overflow_p (); do { loops ++; prec += loops <= 2 ? 
mpc_ceil_log2 (prec) + 5 : prec / 2; mpc_set_prec (res, prec); mpfr_set_prec (q, prec); /* first compute norm(c_scaled) */ mpfr_clear_underflow (); mpfr_clear_overflow (); inexact_norm = mpc_norm (q, c_scaled, MPFR_RNDU); underflow_norm = mpfr_underflow_p (); overflow_norm = mpfr_overflow_p (); if (underflow_norm) mpfr_set_ui (q, 0ul, MPFR_RNDN); /* to obtain divisions by 0 later on */ /* now compute b_scaled*conjugate(c_scaled) */ mpfr_clear_underflow (); mpfr_clear_overflow (); inexact_prod = mpc_mul (res, b_scaled, c_conj, MPC_RNDZZ); inexact_re = MPC_INEX_RE (inexact_prod); inexact_im = MPC_INEX_IM (inexact_prod); underflow_prod = mpfr_underflow_p (); overflow_prod = mpfr_overflow_p (); /* unfortunately, does not distinguish between under-/overflow in real or imaginary parts hopefully, the side-effects of mpc_mul do indeed raise the mpfr exceptions */ if (overflow_prod) { /* FIXME: in case overflow_norm is also true, the code below is wrong, since the after division by the norm, we might end up with finite real and/or imaginary parts. A workaround would be to scale the inputs (in case the exponents are within the same range). */ int isinf = 0; /* determine if the real part of res is the maximum or the minimum representable number */ tmpsgn = mpfr_sgn (mpc_realref(res)); if (tmpsgn > 0) { mpfr_nextabove (mpc_realref(res)); isinf = mpfr_inf_p (mpc_realref(res)); mpfr_nextbelow (mpc_realref(res)); } else if (tmpsgn < 0) { mpfr_nextbelow (mpc_realref(res)); isinf = mpfr_inf_p (mpc_realref(res)); mpfr_nextabove (mpc_realref(res)); } if (isinf) { mpfr_set_inf (mpc_realref(res), tmpsgn); overflow_re = 1; } /* same for the imaginary part */ tmpsgn = mpfr_sgn (mpc_imagref(res)); isinf = 0; if (tmpsgn > 0) { mpfr_nextabove (mpc_imagref(res)); isinf = mpfr_inf_p (mpc_imagref(res)); mpfr_nextbelow (mpc_imagref(res)); } else if (tmpsgn < 0) { mpfr_nextbelow (mpc_imagref(res)); isinf = mpfr_inf_p (mpc_imagref(res)); mpfr_nextabove (mpc_imagref(res)); } if (isinf) { mpfr_set_inf (mpc_imagref(res), tmpsgn); overflow_im = 1; } mpc_set (a, res, rnd); goto end; } /* divide the product by the norm */ if (inexact_norm == 0 && (inexact_re == 0 || inexact_im == 0)) { /* The division has good chances to be exact in at least one part. */ /* Since this can cause problems when not rounding to the nearest, */ /* we use the division code of mpfr, which handles the situation. */ mpfr_clear_underflow (); mpfr_clear_overflow (); inexact_re |= mpfr_div (mpc_realref (res), mpc_realref (res), q, MPFR_RNDZ); underflow_re = mpfr_underflow_p (); overflow_re = mpfr_overflow_p (); ok_re = !inexact_re || underflow_re || overflow_re || mpfr_can_round (mpc_realref (res), prec - 4, MPFR_RNDN, MPFR_RNDZ, MPC_PREC_RE(a) + (rnd_re == MPFR_RNDN)); if (ok_re) /* compute imaginary part */ { mpfr_clear_underflow (); mpfr_clear_overflow (); inexact_im |= mpfr_div (mpc_imagref (res), mpc_imagref (res), q, MPFR_RNDZ); underflow_im = mpfr_underflow_p (); overflow_im = mpfr_overflow_p (); ok_im = !inexact_im || underflow_im || overflow_im || mpfr_can_round (mpc_imagref (res), prec - 4, MPFR_RNDN, MPFR_RNDZ, MPC_PREC_IM(a) + (rnd_im == MPFR_RNDN)); } } else { /* The division is inexact, so for efficiency reasons we invert q */ /* only once and multiply by the inverse. 
*/ if (mpfr_ui_div (q, 1ul, q, MPFR_RNDZ) || inexact_norm) { /* if 1/q is inexact, the approximations of the real and imaginary part below will be inexact, unless RE(res) or IM(res) is zero */ inexact_re |= !mpfr_zero_p (mpc_realref (res)); inexact_im |= !mpfr_zero_p (mpc_imagref (res)); } mpfr_clear_underflow (); mpfr_clear_overflow (); inexact_re |= mpfr_mul (mpc_realref (res), mpc_realref (res), q, MPFR_RNDZ); underflow_re = mpfr_underflow_p (); overflow_re = mpfr_overflow_p (); ok_re = !inexact_re || underflow_re || overflow_re || mpfr_can_round (mpc_realref (res), prec - 4, MPFR_RNDN, MPFR_RNDZ, MPC_PREC_RE(a) + (rnd_re == MPFR_RNDN)); if (ok_re) /* compute imaginary part */ { mpfr_clear_underflow (); mpfr_clear_overflow (); inexact_im |= mpfr_mul (mpc_imagref (res), mpc_imagref (res), q, MPFR_RNDZ); underflow_im = mpfr_underflow_p (); overflow_im = mpfr_overflow_p (); ok_im = !inexact_im || underflow_im || overflow_im || mpfr_can_round (mpc_imagref (res), prec - 4, MPFR_RNDN, MPFR_RNDZ, MPC_PREC_IM(a) + (rnd_im == MPFR_RNDN)); } } } while ((!ok_re || !ok_im) && !underflow_norm && !overflow_norm && !underflow_prod && !overflow_prod); inex = mpc_set (a, res, rnd); inexact_re = MPC_INEX_RE (inex); inexact_im = MPC_INEX_IM (inex); end: /* fix values and inexact flags in case of overflow/underflow */ /* FIXME: heuristic, certainly does not cover all cases */ if (overflow_re || (underflow_norm && !underflow_prod)) { mpfr_set_inf (mpc_realref (a), mpfr_sgn (mpc_realref (res))); inexact_re = mpfr_sgn (mpc_realref (res)); } else if (underflow_re || (overflow_norm && !overflow_prod)) { inexact_re = mpfr_signbit (mpc_realref (res)) ? 1 : -1; mpfr_set_zero (mpc_realref (a), -inexact_re); } if (overflow_im || (underflow_norm && !underflow_prod)) { mpfr_set_inf (mpc_imagref (a), mpfr_sgn (mpc_imagref (res))); inexact_im = mpfr_sgn (mpc_imagref (res)); } else if (underflow_im || (overflow_norm && !overflow_prod)) { inexact_im = mpfr_signbit (mpc_imagref (res)) ? 1 : -1; mpfr_set_zero (mpc_imagref (a), -inexact_im); } mpc_clear (res); mpfr_clear (q); /* restore underflow and overflow flags from MPFR */ if (saved_underflow) mpfr_set_underflow (); if (saved_overflow) mpfr_set_overflow (); return MPC_INEX (inexact_re, inexact_im); }
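The main loop above implements complex division through the conjugate, with an exponent shift to keep intermediate values in range:

\[ \frac{b}{c} = \frac{b\,\overline{c}}{|c|^2} = \frac{b\,\overline{c}}{\operatorname{norm}(c)}, \]

where norm(c) is computed once with mpc_norm and b times conj(c) with mpc_mul; both operands are first rescaled by 2^{-emid}, a common factor that cancels in the quotient, so the scaling only recentres the exponents and does not change the result.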
/* Put in z the value of x^y, rounded according to 'rnd'. Return the inexact flag in [0, 10]. */ int mpc_pow (mpc_ptr z, mpc_srcptr x, mpc_srcptr y, mpc_rnd_t rnd) { int ret = -2, loop, x_real, y_real, z_real = 0, z_imag = 0; mpc_t t, u; mp_prec_t p, q, pr, pi, maxprec; long Q; x_real = mpfr_zero_p (MPC_IM(x)); y_real = mpfr_zero_p (MPC_IM(y)); if (y_real && mpfr_zero_p (MPC_RE(y))) /* case y zero */ { if (x_real && mpfr_zero_p (MPC_RE(x))) /* 0^0 = NaN +i*NaN */ { mpfr_set_nan (MPC_RE(z)); mpfr_set_nan (MPC_IM(z)); return 0; } else /* x^0 = 1 +/- i*0 even for x=NaN see algorithms.tex for the sign of zero */ { mpfr_t n; int inex, cx1; int sign_zi; /* cx1 < 0 if |x| < 1 cx1 = 0 if |x| = 1 cx1 > 0 if |x| > 1 */ mpfr_init (n); inex = mpc_norm (n, x, GMP_RNDN); cx1 = mpfr_cmp_ui (n, 1); if (cx1 == 0 && inex != 0) cx1 = -inex; sign_zi = (cx1 < 0 && mpfr_signbit (MPC_IM (y)) == 0) || (cx1 == 0 && mpfr_signbit (MPC_IM (x)) != mpfr_signbit (MPC_RE (y))) || (cx1 > 0 && mpfr_signbit (MPC_IM (y))); /* warning: mpc_set_ui_ui does not set Im(z) to -0 if Im(rnd)=RNDD */ ret = mpc_set_ui_ui (z, 1, 0, rnd); if (MPC_RND_IM (rnd) == GMP_RNDD || sign_zi) mpc_conj (z, z, MPC_RNDNN); mpfr_clear (n); return ret; } } if (mpfr_nan_p (MPC_RE(x)) || mpfr_nan_p (MPC_IM(x)) || mpfr_nan_p (MPC_RE(y)) || mpfr_nan_p (MPC_IM(y)) || mpfr_inf_p (MPC_RE(x)) || mpfr_inf_p (MPC_IM(x)) || mpfr_inf_p (MPC_RE(y)) || mpfr_inf_p (MPC_IM(y))) { /* special values: exp(y*log(x)) */ mpc_init2 (u, 2); mpc_log (u, x, MPC_RNDNN); mpc_mul (u, u, y, MPC_RNDNN); ret = mpc_exp (z, u, rnd); mpc_clear (u); goto end; } if (x_real) /* case x real */ { if (mpfr_zero_p (MPC_RE(x))) /* x is zero */ { /* special values: exp(y*log(x)) */ mpc_init2 (u, 2); mpc_log (u, x, MPC_RNDNN); mpc_mul (u, u, y, MPC_RNDNN); ret = mpc_exp (z, u, rnd); mpc_clear (u); goto end; } /* Special case 1^y = 1 */ if (mpfr_cmp_ui (MPC_RE(x), 1) == 0) { int s1, s2; s1 = mpfr_signbit (MPC_RE (y)); s2 = mpfr_signbit (MPC_IM (x)); ret = mpc_set_ui (z, +1, rnd); /* the sign of the zero imaginary part is known in some cases (see algorithm.tex). In such cases we have (x +s*0i)^(y+/-0i) = x^y + s*sign(y)*0i where s = +/-1. We extend here this rule to fix the sign of the zero part. Note that the sign must also be set explicitly when rnd=RNDD because mpfr_set_ui(z_i, 0, rnd) always sets z_i to +0. */ if (MPC_RND_IM (rnd) == GMP_RNDD || s1 != s2) mpc_conj (z, z, MPC_RNDNN); goto end; } /* x^y is real when: (a) x is real and y is integer (b) x is real non-negative and y is real */ if (y_real && (mpfr_integer_p (MPC_RE(y)) || mpfr_cmp_ui (MPC_RE(x), 0) >= 0)) { int s1, s2; s1 = mpfr_signbit (MPC_RE (y)); s2 = mpfr_signbit (MPC_IM (x)); ret = mpfr_pow (MPC_RE(z), MPC_RE(x), MPC_RE(y), MPC_RND_RE(rnd)); ret = MPC_INEX(ret, mpfr_set_ui (MPC_IM(z), 0, MPC_RND_IM(rnd))); /* the sign of the zero imaginary part is known in some cases (see algorithm.tex). In such cases we have (x +s*0i)^(y+/-0i) = x^y + s*sign(y)*0i where s = +/-1. We extend here this rule to fix the sign of the zero part. Note that the sign must also be set explicitly when rnd=RNDD because mpfr_set_ui(z_i, 0, rnd) always sets z_i to +0. 
*/ if (MPC_RND_IM(rnd) == GMP_RNDD || s1 != s2) mpfr_neg (MPC_IM(z), MPC_IM(z), MPC_RND_IM(rnd)); goto end; } /* (-1)^(n+I*t) is real for n integer and t real */ if (mpfr_cmp_si (MPC_RE(x), -1) == 0 && mpfr_integer_p (MPC_RE(y))) z_real = 1; /* for x real, x^y is imaginary when: (a) x is negative and y is half-an-integer (b) x = -1 and Re(y) is half-an-integer */ if (mpfr_cmp_ui (MPC_RE(x), 0) < 0 && is_odd (MPC_RE(y), 1) && (y_real || mpfr_cmp_si (MPC_RE(x), -1) == 0)) z_imag = 1; } else /* x non real */ /* I^(t*I) and (-I)^(t*I) are real for t real, I^(n+t*I) and (-I)^(n+t*I) are real for n even and t real, and I^(n+t*I) and (-I)^(n+t*I) are imaginary for n odd and t real (s*I)^n is real for n even and imaginary for n odd */ if ((mpc_cmp_si_si (x, 0, 1) == 0 || mpc_cmp_si_si (x, 0, -1) == 0 || (mpfr_cmp_ui (MPC_RE(x), 0) == 0 && y_real)) && mpfr_integer_p (MPC_RE(y))) { /* x is I or -I, and Re(y) is an integer */ if (is_odd (MPC_RE(y), 0)) z_imag = 1; /* Re(y) odd: z is imaginary */ else z_real = 1; /* Re(y) even: z is real */ } else /* (t+/-t*I)^(2n) is imaginary for n odd and real for n even */ if (mpfr_cmpabs (MPC_RE(x), MPC_IM(x)) == 0 && y_real && mpfr_integer_p (MPC_RE(y)) && is_odd (MPC_RE(y), 0) == 0) { if (is_odd (MPC_RE(y), -1)) /* y/2 is odd */ z_imag = 1; else z_real = 1; } /* first bound |Re(y log(x))|, |Im(y log(x)| < 2^q */ mpc_init2 (t, 64); mpc_log (t, x, MPC_RNDNN); mpc_mul (t, t, y, MPC_RNDNN); /* the default maximum exponent for MPFR is emax=2^30-1, thus if t > log(2^emax) = emax*log(2), then exp(t) will overflow */ if (mpfr_cmp_ui_2exp (MPC_RE(t), 372130558, 1) > 0) goto overflow; /* the default minimum exponent for MPFR is emin=-2^30+1, thus the smallest representable value is 2^(emin-1), and if t < log(2^(emin-1)) = (emin-1)*log(2), then exp(t) will underflow */ if (mpfr_cmp_si_2exp (MPC_RE(t), -372130558, 1) < 0) goto underflow; q = mpfr_get_exp (MPC_RE(t)) > 0 ? mpfr_get_exp (MPC_RE(t)) : 0; if (mpfr_get_exp (MPC_IM(t)) > (mp_exp_t) q) q = mpfr_get_exp (MPC_IM(t)); pr = mpfr_get_prec (MPC_RE(z)); pi = mpfr_get_prec (MPC_IM(z)); p = (pr > pi) ? pr : pi; p += 11; /* experimentally, seems to give less than 10% of failures in Ziv's strategy */ mpc_init2 (u, p); pr += MPC_RND_RE(rnd) == GMP_RNDN; pi += MPC_RND_IM(rnd) == GMP_RNDN; maxprec = MPFR_PREC(MPC_RE(z)); if (MPFR_PREC(MPC_IM(z)) > maxprec) maxprec = MPFR_PREC(MPC_IM(z)); for (loop = 0;; loop++) { mp_exp_t dr, di; if (p + q > 64) /* otherwise we reuse the initial approximation t of y*log(x), avoiding two computations */ { mpc_set_prec (t, p + q); mpc_log (t, x, MPC_RNDNN); mpc_mul (t, t, y, MPC_RNDNN); } mpc_exp (u, t, MPC_RNDNN); /* Since the error bound is global, we have to take into account the exponent difference between the real and imaginary parts. We assume either the real or the imaginary part of u is not zero. */ dr = mpfr_zero_p (MPC_RE(u)) ? mpfr_get_exp (MPC_IM(u)) : mpfr_get_exp (MPC_RE(u)); di = mpfr_zero_p (MPC_IM(u)) ? 
dr : mpfr_get_exp (MPC_IM(u)); if (dr > di) { di = dr - di; dr = 0; } else { dr = di - dr; di = 0; } /* the term -3 takes into account the factor 4 in the complex error (see algorithms.tex) plus one due to the exponent difference: if z = a + I*b, where the relative error on z is at most 2^(-p), and EXP(a) = EXP(b) + k, the relative error on b is at most 2^(k-p) */ if ((z_imag || mpfr_can_round (MPC_RE(u), p - 3 - dr, GMP_RNDN, GMP_RNDZ, pr)) && (z_real || mpfr_can_round (MPC_IM(u), p - 3 - di, GMP_RNDN, GMP_RNDZ, pi))) break; /* if Re(u) is not known to be zero, assume it is a normal number, i.e., neither zero, Inf or NaN, otherwise we might enter an infinite loop */ MPC_ASSERT (z_imag || mpfr_number_p (MPC_RE(u))); /* idem for Im(u) */ MPC_ASSERT (z_real || mpfr_number_p (MPC_IM(u))); if (ret == -2) /* we did not yet call mpc_pow_exact, or it aborted because intermediate computations had > maxprec bits */ { /* check exact cases (see algorithms.tex) */ if (y_real) { maxprec *= 2; ret = mpc_pow_exact (z, x, MPC_RE(y), rnd, maxprec); if (ret != -1 && ret != -2) goto exact; } p += dr + di + 64; } else p += p / 2; mpc_set_prec (t, p + q); mpc_set_prec (u, p); } if (z_real) { /* When the result is real (see algorithm.tex for details), Im(x^y) = + sign(imag(y))*0i, if |x| > 1 + sign(imag(x))*sign(real(y))*0i, if |x| = 1 - sign(imag(y))*0i, if |x| < 1 */ mpfr_t n; int inex, cx1; int sign_zi; /* cx1 < 0 if |x| < 1 cx1 = 0 if |x| = 1 cx1 > 0 if |x| > 1 */ mpfr_init (n); inex = mpc_norm (n, x, GMP_RNDN); cx1 = mpfr_cmp_ui (n, 1); if (cx1 == 0 && inex != 0) cx1 = -inex; sign_zi = (cx1 < 0 && mpfr_signbit (MPC_IM (y)) == 0) || (cx1 == 0 && mpfr_signbit (MPC_IM (x)) != mpfr_signbit (MPC_RE (y))) || (cx1 > 0 && mpfr_signbit (MPC_IM (y))); ret = mpfr_set (MPC_RE(z), MPC_RE(u), MPC_RND_RE(rnd)); /* warning: mpfr_set_ui does not set Im(z) to -0 if Im(rnd) = RNDD */ ret = MPC_INEX (ret, mpfr_set_ui (MPC_IM (z), 0, MPC_RND_IM (rnd))); if (MPC_RND_IM (rnd) == GMP_RNDD || sign_zi) mpc_conj (z, z, MPC_RNDNN); mpfr_clear (n); } else if (z_imag) { ret = mpfr_set (MPC_IM(z), MPC_IM(u), MPC_RND_IM(rnd)); ret = MPC_INEX(mpfr_set_ui (MPC_RE(z), 0, MPC_RND_RE(rnd)), ret); } else ret = mpc_set (z, u, rnd); exact: mpc_clear (t); mpc_clear (u); end: return ret; underflow: /* If we have an underflow, we know that |z| is too small to be represented, but depending on arg(z), we should return +/-0 +/- I*0. We assume t is the approximation of y*log(x), thus we want exp(t) = exp(Re(t))+exp(I*Im(t)). FIXME: this part of code is not 100% rigorous, since we don't consider rounding errors. 
*/ mpc_init2 (u, 64); mpfr_const_pi (MPC_RE(u), GMP_RNDN); mpfr_div_2exp (MPC_RE(u), MPC_RE(u), 1, GMP_RNDN); /* Pi/2 */ mpfr_remquo (MPC_RE(u), &Q, MPC_IM(t), MPC_RE(u), GMP_RNDN); if (mpfr_sgn (MPC_RE(u)) < 0) Q--; /* corresponds to positive remainder */ mpfr_set_ui (MPC_RE(z), 0, GMP_RNDN); mpfr_set_ui (MPC_IM(z), 0, GMP_RNDN); switch (Q & 3) { case 0: /* first quadrant: round to (+0 +0) */ ret = MPC_INEX(-1, -1); break; case 1: /* second quadrant: round to (-0 +0) */ mpfr_neg (MPC_RE(z), MPC_RE(z), GMP_RNDN); ret = MPC_INEX(1, -1); break; case 2: /* third quadrant: round to (-0 -0) */ mpfr_neg (MPC_RE(z), MPC_RE(z), GMP_RNDN); mpfr_neg (MPC_IM(z), MPC_IM(z), GMP_RNDN); ret = MPC_INEX(1, 1); break; case 3: /* fourth quadrant: round to (+0 -0) */ mpfr_neg (MPC_IM(z), MPC_IM(z), GMP_RNDN); ret = MPC_INEX(-1, 1); break; } goto clear_t_and_u; overflow: /* If we have an overflow, we know that |z| is too large to be represented, but depending on arg(z), we should return +/-Inf +/- I*Inf. We assume t is the approximation of y*log(x), thus we want exp(t) = exp(Re(t))*exp(I*Im(t)). FIXME: this part of code is not 100% rigorous, since we don't consider rounding errors. */ mpc_init2 (u, 64); mpfr_const_pi (MPC_RE(u), GMP_RNDN); mpfr_div_2exp (MPC_RE(u), MPC_RE(u), 1, GMP_RNDN); /* Pi/2 */ /* the quotient is rounded to the nearest integer in mpfr_remquo */ mpfr_remquo (MPC_RE(u), &Q, MPC_IM(t), MPC_RE(u), GMP_RNDN); if (mpfr_sgn (MPC_RE(u)) < 0) Q--; /* corresponds to positive remainder */ switch (Q & 3) { case 0: /* first quadrant */ mpfr_set_inf (MPC_RE(z), 1); mpfr_set_inf (MPC_IM(z), 1); ret = MPC_INEX(1, 1); break; case 1: /* second quadrant */ mpfr_set_inf (MPC_RE(z), -1); mpfr_set_inf (MPC_IM(z), 1); ret = MPC_INEX(-1, 1); break; case 2: /* third quadrant */ mpfr_set_inf (MPC_RE(z), -1); mpfr_set_inf (MPC_IM(z), -1); ret = MPC_INEX(-1, -1); break; case 3: /* fourth quadrant */ mpfr_set_inf (MPC_RE(z), 1); mpfr_set_inf (MPC_IM(z), -1); ret = MPC_INEX(1, -1); break; } clear_t_and_u: mpc_clear (t); mpc_clear (u); return ret; }
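/* Illustrative sketch, not part of the library: the hypothetical helper
   below mirrors, under simplified assumptions, the quadrant reduction used
   in the underflow/overflow paths above.  Given an angle (standing for
   Im(t), the imaginary part of y*log(x)), it reduces the angle modulo Pi/2
   with mpfr_remquo, shifts the quotient so that the remainder is
   non-negative, and returns the quadrant 0..3; the function name and the
   fixed 64-bit working precision are arbitrary choices for the example. */
#include "mpfr.h"

static int
quadrant_of_angle (mpfr_srcptr angle)
{
   mpfr_t rem, half_pi;
   long q;

   mpfr_init2 (rem, 64);
   mpfr_init2 (half_pi, 64);

   mpfr_const_pi (half_pi, GMP_RNDN);
   mpfr_div_2exp (half_pi, half_pi, 1, GMP_RNDN);     /* Pi/2 */
   /* angle = q*(Pi/2) + rem, with the quotient rounded to nearest */
   mpfr_remquo (rem, &q, angle, half_pi, GMP_RNDN);
   if (mpfr_sgn (rem) < 0)
      q--;   /* corresponds to a non-negative remainder */

   mpfr_clear (rem);
   mpfr_clear (half_pi);
   return (int) (q & 3);   /* quadrant of the angle modulo 2*Pi */
}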
int mpc_asin (mpc_ptr rop, mpc_srcptr op, mpc_rnd_t rnd) { mpfr_prec_t p, p_re, p_im, incr_p = 0; mpfr_rnd_t rnd_re, rnd_im; mpc_t z1; int inex; /* special values */ if (mpfr_nan_p (mpc_realref (op)) || mpfr_nan_p (mpc_imagref (op))) { if (mpfr_inf_p (mpc_realref (op)) || mpfr_inf_p (mpc_imagref (op))) { mpfr_set_nan (mpc_realref (rop)); mpfr_set_inf (mpc_imagref (rop), mpfr_signbit (mpc_imagref (op)) ? -1 : +1); } else if (mpfr_zero_p (mpc_realref (op))) { mpfr_set (mpc_realref (rop), mpc_realref (op), GMP_RNDN); mpfr_set_nan (mpc_imagref (rop)); } else { mpfr_set_nan (mpc_realref (rop)); mpfr_set_nan (mpc_imagref (rop)); } return 0; } if (mpfr_inf_p (mpc_realref (op)) || mpfr_inf_p (mpc_imagref (op))) { int inex_re; if (mpfr_inf_p (mpc_realref (op))) { int inf_im = mpfr_inf_p (mpc_imagref (op)); inex_re = set_pi_over_2 (mpc_realref (rop), (mpfr_signbit (mpc_realref (op)) ? -1 : 1), MPC_RND_RE (rnd)); mpfr_set_inf (mpc_imagref (rop), (mpfr_signbit (mpc_imagref (op)) ? -1 : 1)); if (inf_im) mpfr_div_2ui (mpc_realref (rop), mpc_realref (rop), 1, GMP_RNDN); } else { mpfr_set_zero (mpc_realref (rop), (mpfr_signbit (mpc_realref (op)) ? -1 : 1)); inex_re = 0; mpfr_set_inf (mpc_imagref (rop), (mpfr_signbit (mpc_imagref (op)) ? -1 : 1)); } return MPC_INEX (inex_re, 0); } /* pure real argument */ if (mpfr_zero_p (mpc_imagref (op))) { int inex_re; int inex_im; int s_im; s_im = mpfr_signbit (mpc_imagref (op)); if (mpfr_cmp_ui (mpc_realref (op), 1) > 0) { if (s_im) inex_im = -mpfr_acosh (mpc_imagref (rop), mpc_realref (op), INV_RND (MPC_RND_IM (rnd))); else inex_im = mpfr_acosh (mpc_imagref (rop), mpc_realref (op), MPC_RND_IM (rnd)); inex_re = set_pi_over_2 (mpc_realref (rop), (mpfr_signbit (mpc_realref (op)) ? -1 : 1), MPC_RND_RE (rnd)); if (s_im) mpc_conj (rop, rop, MPC_RNDNN); } else if (mpfr_cmp_si (mpc_realref (op), -1) < 0) { mpfr_t minus_op_re; minus_op_re[0] = mpc_realref (op)[0]; MPFR_CHANGE_SIGN (minus_op_re); if (s_im) inex_im = -mpfr_acosh (mpc_imagref (rop), minus_op_re, INV_RND (MPC_RND_IM (rnd))); else inex_im = mpfr_acosh (mpc_imagref (rop), minus_op_re, MPC_RND_IM (rnd)); inex_re = set_pi_over_2 (mpc_realref (rop), (mpfr_signbit (mpc_realref (op)) ? -1 : 1), MPC_RND_RE (rnd)); if (s_im) mpc_conj (rop, rop, MPC_RNDNN); } else { inex_im = mpfr_set_ui (mpc_imagref (rop), 0, MPC_RND_IM (rnd)); if (s_im) mpfr_neg (mpc_imagref (rop), mpc_imagref (rop), GMP_RNDN); inex_re = mpfr_asin (mpc_realref (rop), mpc_realref (op), MPC_RND_RE (rnd)); } return MPC_INEX (inex_re, inex_im); } /* pure imaginary argument */ if (mpfr_zero_p (mpc_realref (op))) { int inex_im; int s; s = mpfr_signbit (mpc_realref (op)); mpfr_set_ui (mpc_realref (rop), 0, GMP_RNDN); if (s) mpfr_neg (mpc_realref (rop), mpc_realref (rop), GMP_RNDN); inex_im = mpfr_asinh (mpc_imagref (rop), mpc_imagref (op), MPC_RND_IM (rnd)); return MPC_INEX (0, inex_im); } /* regular complex: asin(z) = -i*log(i*z+sqrt(1-z^2)) */ p_re = mpfr_get_prec (mpc_realref(rop)); p_im = mpfr_get_prec (mpc_imagref(rop)); rnd_re = MPC_RND_RE(rnd); rnd_im = MPC_RND_IM(rnd); p = p_re >= p_im ? 
p_re : p_im; mpc_init2 (z1, p); while (1) { mpfr_exp_t ex, ey, err; p += mpc_ceil_log2 (p) + 3 + incr_p; /* incr_p is zero initially */ incr_p = p / 2; mpfr_set_prec (mpc_realref(z1), p); mpfr_set_prec (mpc_imagref(z1), p); /* z1 <- z^2 */ mpc_sqr (z1, op, MPC_RNDNN); /* err(x) <= 1/2 ulp(x), err(y) <= 1/2 ulp(y) */ /* z1 <- 1-z1 */ ex = mpfr_get_exp (mpc_realref(z1)); mpfr_ui_sub (mpc_realref(z1), 1, mpc_realref(z1), GMP_RNDN); mpfr_neg (mpc_imagref(z1), mpc_imagref(z1), GMP_RNDN); ex = ex - mpfr_get_exp (mpc_realref(z1)); ex = (ex <= 0) ? 0 : ex; /* err(x) <= 2^ex * ulp(x) */ ex = ex + mpfr_get_exp (mpc_realref(z1)) - p; /* err(x) <= 2^ex */ ey = mpfr_get_exp (mpc_imagref(z1)) - p - 1; /* err(y) <= 2^ey */ ex = (ex >= ey) ? ex : ey; /* err(x), err(y) <= 2^ex, i.e., the norm of the error is bounded by |h|<=2^(ex+1/2) */ /* z1 <- sqrt(z1): if z1 = z + h, then sqrt(z1) = sqrt(z) + h/2/sqrt(t) */ ey = mpfr_get_exp (mpc_realref(z1)) >= mpfr_get_exp (mpc_imagref(z1)) ? mpfr_get_exp (mpc_realref(z1)) : mpfr_get_exp (mpc_imagref(z1)); /* we have |z1| >= 2^(ey-1) thus 1/|z1| <= 2^(1-ey) */ mpc_sqrt (z1, z1, MPC_RNDNN); ex = (2 * ex + 1) - 2 - (ey - 1); /* |h^2/4/|t| <= 2^ex */ ex = (ex + 1) / 2; /* ceil(ex/2) */ /* express ex in terms of ulp(z1) */ ey = mpfr_get_exp (mpc_realref(z1)) <= mpfr_get_exp (mpc_imagref(z1)) ? mpfr_get_exp (mpc_realref(z1)) : mpfr_get_exp (mpc_imagref(z1)); ex = ex - ey + p; /* take into account the rounding error in the mpc_sqrt call */ err = (ex <= 0) ? 1 : ex + 1; /* err(x) <= 2^err * ulp(x), err(y) <= 2^err * ulp(y) */ /* z1 <- i*z + z1 */ ex = mpfr_get_exp (mpc_realref(z1)); ey = mpfr_get_exp (mpc_imagref(z1)); mpfr_sub (mpc_realref(z1), mpc_realref(z1), mpc_imagref(op), GMP_RNDN); mpfr_add (mpc_imagref(z1), mpc_imagref(z1), mpc_realref(op), GMP_RNDN); if (mpfr_cmp_ui (mpc_realref(z1), 0) == 0 || mpfr_cmp_ui (mpc_imagref(z1), 0) == 0) continue; ex -= mpfr_get_exp (mpc_realref(z1)); /* cancellation in x */ ey -= mpfr_get_exp (mpc_imagref(z1)); /* cancellation in y */ ex = (ex >= ey) ? ex : ey; /* maximum cancellation */ err += ex; err = (err <= 0) ? 1 : err + 1; /* rounding error in sub/add */ /* z1 <- log(z1): if z1 = z + h, then log(z1) = log(z) + h/t with |t| >= min(|z1|,|z|) */ ex = mpfr_get_exp (mpc_realref(z1)); ey = mpfr_get_exp (mpc_imagref(z1)); ex = (ex >= ey) ? ex : ey; err += ex - p; /* revert to absolute error <= 2^err */ mpc_log (z1, z1, GMP_RNDN); err -= ex - 1; /* 1/|t| <= 1/|z| <= 2^(1-ex) */ /* express err in terms of ulp(z1) */ ey = mpfr_get_exp (mpc_realref(z1)) <= mpfr_get_exp (mpc_imagref(z1)) ? mpfr_get_exp (mpc_realref(z1)) : mpfr_get_exp (mpc_imagref(z1)); err = err - ey + p; /* take into account the rounding error in the mpc_log call */ err = (err <= 0) ? 1 : err + 1; /* z1 <- -i*z1 */ mpfr_swap (mpc_realref(z1), mpc_imagref(z1)); mpfr_neg (mpc_imagref(z1), mpc_imagref(z1), GMP_RNDN); if (mpfr_can_round (mpc_realref(z1), p - err, GMP_RNDN, GMP_RNDZ, p_re + (rnd_re == GMP_RNDN)) && mpfr_can_round (mpc_imagref(z1), p - err, GMP_RNDN, GMP_RNDZ, p_im + (rnd_im == GMP_RNDN))) break; } inex = mpc_set (rop, z1, rnd); mpc_clear (z1); return inex; }
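/* Illustrative sketch, not part of the library: a naive one-shot evaluation
   of the formula asin(z) = -i*log(i*z + sqrt(1-z^2)) used above, at a single
   fixed working precision and without the error analysis or the
   precision-increasing loop, so the last bits of the result may be wrong.
   The function name is hypothetical and rop is assumed to be initialized by
   the caller. */
#include "mpc.h"

static void
naive_asin (mpc_ptr rop, mpc_srcptr z, mpfr_prec_t prec)
{
   mpc_t t, iz;

   mpc_init2 (t, prec);
   mpc_init2 (iz, prec);

   mpc_mul_i (iz, z, +1, MPC_RNDNN);    /* iz = i*z                  */
   mpc_sqr (t, z, MPC_RNDNN);           /* t  = z^2                  */
   mpc_neg (t, t, MPC_RNDNN);           /* t  = -z^2                 */
   mpc_add_ui (t, t, 1, MPC_RNDNN);     /* t  = 1 - z^2              */
   mpc_sqrt (t, t, MPC_RNDNN);          /* t  = sqrt(1 - z^2)        */
   mpc_add (t, t, iz, MPC_RNDNN);       /* t  = i*z + sqrt(1 - z^2)  */
   mpc_log (t, t, MPC_RNDNN);           /* t  = log(...)             */
   mpc_mul_i (rop, t, -1, MPC_RNDNN);   /* rop = -i * log(...)       */

   mpc_clear (t);
   mpc_clear (iz);
}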
int mpc_tan (mpc_ptr rop, mpc_srcptr op, mpc_rnd_t rnd) { mpc_t x, y; mpfr_prec_t prec; mpfr_exp_t err; int ok = 0; int inex; /* special values */ if (!mpc_fin_p (op)) { if (mpfr_nan_p (mpc_realref (op))) { if (mpfr_inf_p (mpc_imagref (op))) /* tan(NaN -i*Inf) = +/-0 -i */ /* tan(NaN +i*Inf) = +/-0 +i */ { /* exact unless 1 is not in exponent range */ inex = mpc_set_si_si (rop, 0, (MPFR_SIGN (mpc_imagref (op)) < 0) ? -1 : +1, rnd); } else /* tan(NaN +i*y) = NaN +i*NaN, when y is finite */ /* tan(NaN +i*NaN) = NaN +i*NaN */ { mpfr_set_nan (mpc_realref (rop)); mpfr_set_nan (mpc_imagref (rop)); inex = MPC_INEX (0, 0); /* always exact */ } } else if (mpfr_nan_p (mpc_imagref (op))) { if (mpfr_cmp_ui (mpc_realref (op), 0) == 0) /* tan(-0 +i*NaN) = -0 +i*NaN */ /* tan(+0 +i*NaN) = +0 +i*NaN */ { mpc_set (rop, op, rnd); inex = MPC_INEX (0, 0); /* always exact */ } else /* tan(x +i*NaN) = NaN +i*NaN, when x != 0 */ { mpfr_set_nan (mpc_realref (rop)); mpfr_set_nan (mpc_imagref (rop)); inex = MPC_INEX (0, 0); /* always exact */ } } else if (mpfr_inf_p (mpc_realref (op))) { if (mpfr_inf_p (mpc_imagref (op))) /* tan(-Inf -i*Inf) = -/+0 -i */ /* tan(-Inf +i*Inf) = -/+0 +i */ /* tan(+Inf -i*Inf) = +/-0 -i */ /* tan(+Inf +i*Inf) = +/-0 +i */ { const int sign_re = mpfr_signbit (mpc_realref (op)); int inex_im; mpfr_set_ui (mpc_realref (rop), 0, MPC_RND_RE (rnd)); mpfr_setsign (mpc_realref (rop), mpc_realref (rop), sign_re, MPFR_RNDN); /* exact, unless 1 is not in exponent range */ inex_im = mpfr_set_si (mpc_imagref (rop), mpfr_signbit (mpc_imagref (op)) ? -1 : +1, MPC_RND_IM (rnd)); inex = MPC_INEX (0, inex_im); } else /* tan(-Inf +i*y) = tan(+Inf +i*y) = NaN +i*NaN, when y is finite */ { mpfr_set_nan (mpc_realref (rop)); mpfr_set_nan (mpc_imagref (rop)); inex = MPC_INEX (0, 0); /* always exact */ } } else /* tan(x -i*Inf) = +0*sin(x)*cos(x) -i, when x is finite */ /* tan(x +i*Inf) = +0*sin(x)*cos(x) +i, when x is finite */ { mpfr_t c; mpfr_t s; int inex_im; mpfr_init (c); mpfr_init (s); mpfr_sin_cos (s, c, mpc_realref (op), MPFR_RNDN); mpfr_set_ui (mpc_realref (rop), 0, MPC_RND_RE (rnd)); mpfr_setsign (mpc_realref (rop), mpc_realref (rop), mpfr_signbit (c) != mpfr_signbit (s), MPFR_RNDN); /* exact, unless 1 is not in exponent range */ inex_im = mpfr_set_si (mpc_imagref (rop), (mpfr_signbit (mpc_imagref (op)) ? -1 : +1), MPC_RND_IM (rnd)); inex = MPC_INEX (0, inex_im); mpfr_clear (s); mpfr_clear (c); } return inex; } if (mpfr_zero_p (mpc_realref (op))) /* tan(-0 -i*y) = -0 +i*tanh(y), when y is finite. */ /* tan(+0 +i*y) = +0 +i*tanh(y), when y is finite. */ { int inex_im; mpfr_set (mpc_realref (rop), mpc_realref (op), MPC_RND_RE (rnd)); inex_im = mpfr_tanh (mpc_imagref (rop), mpc_imagref (op), MPC_RND_IM (rnd)); return MPC_INEX (0, inex_im); } if (mpfr_zero_p (mpc_imagref (op))) /* tan(x -i*0) = tan(x) -i*0, when x is finite. */ /* tan(x +i*0) = tan(x) +i*0, when x is finite. */ { int inex_re; inex_re = mpfr_tan (mpc_realref (rop), mpc_realref (op), MPC_RND_RE (rnd)); mpfr_set (mpc_imagref (rop), mpc_imagref (op), MPC_RND_IM (rnd)); return MPC_INEX (inex_re, 0); } /* ordinary (non-zero) numbers */ /* tan(op) = sin(op) / cos(op). We use the following algorithm with rounding away from 0 for all operations, and working precision w: (1) x = A(sin(op)) (2) y = A(cos(op)) (3) z = A(x/y) the error on Im(z) is at most 81 ulp, the error on Re(z) is at most 7 ulp if k < 2, 8 ulp if k = 2, else 5+k ulp, where k = Exp(Re(x))+Exp(Re(y))-2min{Exp(Re(y)), Exp(Im(y))}-Exp(Re(x/y)) see proof in algorithms.tex. 
*/ prec = MPC_MAX_PREC(rop); mpc_init2 (x, 2); mpc_init2 (y, 2); err = 7; do { mpfr_exp_t k, exr, eyr, eyi, ezr; ok = 0; /* FIXME: prevent addition overflow */ prec += mpc_ceil_log2 (prec) + err; mpc_set_prec (x, prec); mpc_set_prec (y, prec); /* rounding away from zero: except in the cases x=0 or y=0 (processed above), sin x and cos y are never exact, so rounding away from 0 is rounding towards 0 and adding one ulp to the absolute value */ mpc_sin_cos (x, y, op, MPC_RNDZZ, MPC_RNDZZ); MPFR_ADD_ONE_ULP (mpc_realref (x)); MPFR_ADD_ONE_ULP (mpc_imagref (x)); MPFR_ADD_ONE_ULP (mpc_realref (y)); MPFR_ADD_ONE_ULP (mpc_imagref (y)); MPC_ASSERT (mpfr_zero_p (mpc_realref (x)) == 0); if ( mpfr_inf_p (mpc_realref (x)) || mpfr_inf_p (mpc_imagref (x)) || mpfr_inf_p (mpc_realref (y)) || mpfr_inf_p (mpc_imagref (y))) { /* If the real or imaginary part of x is infinite, it means that Im(op) was large, in which case the result is sign(tan(Re(op)))*0 + sign(Im(op))*I, where sign(tan(Re(op))) = sign(Re(x))*sign(Re(y)). */ int inex_re, inex_im; mpfr_set_ui (mpc_realref (rop), 0, MPFR_RNDN); if (mpfr_sgn (mpc_realref (x)) * mpfr_sgn (mpc_realref (y)) < 0) { mpfr_neg (mpc_realref (rop), mpc_realref (rop), MPFR_RNDN); inex_re = 1; } else inex_re = -1; /* +0 is rounded down */ if (mpfr_sgn (mpc_imagref (op)) > 0) { mpfr_set_ui (mpc_imagref (rop), 1, MPFR_RNDN); inex_im = 1; } else { mpfr_set_si (mpc_imagref (rop), -1, MPFR_RNDN); inex_im = -1; } inex = MPC_INEX(inex_re, inex_im); goto end; } exr = mpfr_get_exp (mpc_realref (x)); eyr = mpfr_get_exp (mpc_realref (y)); eyi = mpfr_get_exp (mpc_imagref (y)); /* some parts of the quotient may be exact */ inex = mpc_div (x, x, y, MPC_RNDZZ); /* OP is neither purely real nor purely imaginary, so in theory the real and imaginary parts of its tangent cannot be null. However, due to rounding errors this might happen. Consider for example tan(1+14*I) = 1.26e-12 + 1.00*I. For small precision sin(op) and cos(op) differ only by a factor I, thus after mpc_div x = I and its real part is zero. */ if (mpfr_zero_p (mpc_realref (x)) || mpfr_zero_p (mpc_imagref (x))) { err = prec; /* double the working precision at the next iteration */ continue; } if (MPC_INEX_RE (inex)) MPFR_ADD_ONE_ULP (mpc_realref (x)); if (MPC_INEX_IM (inex)) MPFR_ADD_ONE_ULP (mpc_imagref (x)); MPC_ASSERT (mpfr_zero_p (mpc_realref (x)) == 0); ezr = mpfr_get_exp (mpc_realref (x)); /* FIXME: compute k = Exp(Re(x))+Exp(Re(y))-2min{Exp(Re(y)), Exp(Im(y))}-Exp(Re(x/y)) avoiding overflow */ k = exr - ezr + MPC_MAX(-eyr, eyr - 2 * eyi); err = k < 2 ? 7 : (k == 2 ? 8 : (5 + k)); /* Can the real part be rounded? */ ok = (!mpfr_number_p (mpc_realref (x))) || mpfr_can_round (mpc_realref(x), prec - err, MPFR_RNDN, MPFR_RNDZ, MPC_PREC_RE(rop) + (MPC_RND_RE(rnd) == MPFR_RNDN)); if (ok) { /* Can the imaginary part be rounded? */ ok = (!mpfr_number_p (mpc_imagref (x))) || mpfr_can_round (mpc_imagref(x), prec - 6, MPFR_RNDN, MPFR_RNDZ, MPC_PREC_IM(rop) + (MPC_RND_IM(rnd) == MPFR_RNDN)); } } while (ok == 0); inex = mpc_set (rop, x, rnd); end: mpc_clear (x); mpc_clear (y); return inex; }
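/* Illustrative sketch, not part of the library: tan(op) computed directly
   as sin(op)/cos(op) with mpc_sin_cos and mpc_div at one fixed precision,
   following steps (1)-(3) of the algorithm described above, but without the
   rounding away from zero, the error bound and the can-round test, so only
   an approximate result is returned together with the inexact flag of the
   division.  The function name is hypothetical and rop is assumed to be
   initialized by the caller. */
#include "mpc.h"

static int
naive_tan (mpc_ptr rop, mpc_srcptr op, mpfr_prec_t prec)
{
   mpc_t s, c;
   int inex;

   mpc_init2 (s, prec);
   mpc_init2 (c, prec);

   mpc_sin_cos (s, c, op, MPC_RNDNN, MPC_RNDNN);   /* s = sin(op), c = cos(op) */
   inex = mpc_div (rop, s, c, MPC_RNDNN);          /* rop = s / c              */

   mpc_clear (s);
   mpc_clear (c);
   return inex;
}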