Example #1
void
fmpr_get_fmpq(fmpq_t y, const fmpr_t x)
{
    if (fmpr_is_zero(x))
    {
        fmpq_zero(y);
    }
    else if (fmpr_is_special(x) || COEFF_IS_MPZ(*fmpr_expref(x)))
    {
        printf("exception: fmpr_get_fmpq: cannot convert to rational\n");
        abort();
    }
    else
    {
        long exp = *fmpr_expref(x);

        fmpz_set_ui(fmpq_denref(y), 1UL);

        if (exp >= 0)
        {
            fmpz_mul_2exp(fmpq_numref(y), fmpr_manref(x), exp);
        }
        else
        {
            fmpz_set(fmpq_numref(y), fmpr_manref(x));
            fmpz_mul_2exp(fmpq_denref(y), fmpq_denref(y), -exp);
        }
    }
}
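
A minimal usage sketch for fmpr_get_fmpq above, assuming the fmpr_set_si, fmpr_mul_2exp_si and fmpq_print helpers from the FLINT/Arb API:

#include "fmpr.h"
#include "fmpq.h"

int main(void)
{
    fmpr_t x;
    fmpq_t q;

    fmpr_init(x);
    fmpq_init(q);

    /* x = 3 * 2^-3 = 3/8 (exact) */
    fmpr_set_si(x, 3);
    fmpr_mul_2exp_si(x, x, -3);

    fmpr_get_fmpq(q, x);
    fmpq_print(q);            /* expected output: 3/8 */
    flint_printf("\n");

    fmpq_clear(q);
    fmpr_clear(x);
    return 0;
}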
Example #2
int
fmpr_get_mpfr(mpfr_t x, const fmpr_t y, mpfr_rnd_t rnd)
{
    int r;

    if (fmpr_is_special(y))
    {
        if (fmpr_is_zero(y)) mpfr_set_zero(x, 0);
        else if (fmpr_is_pos_inf(y)) mpfr_set_inf(x, 1);
        else if (fmpr_is_neg_inf(y)) mpfr_set_inf(x, -1);
        else mpfr_set_nan(x);
        r = 0;
    }
    else if (COEFF_IS_MPZ(*fmpr_expref(y)))
    {
        flint_printf("exception: exponent too large to convert to mpfr");
        abort();
    }
    else
    {
        if (!COEFF_IS_MPZ(*fmpr_manref(y)))
#if defined(__MINGW64__) 
            r = mpfr_set_sj_2exp(x, *fmpr_manref(y), *fmpr_expref(y), rnd);
#else
            r = mpfr_set_si_2exp(x, *fmpr_manref(y), *fmpr_expref(y), rnd);
#endif
        else
            r = mpfr_set_z_2exp(x, COEFF_TO_PTR(*fmpr_manref(y)), *fmpr_expref(y), rnd);

        if (!mpfr_regular_p(x))
        {
            flint_printf("exception: exponent too large to convert to mpfr");
            abort();
        }
    }

    return r;
}
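
A short usage sketch for fmpr_get_mpfr above, assuming fmpr_set_si and fmpr_mul_2exp_si from the fmpr module and MPFR's formatted output:

#include <stdio.h>
#include <mpfr.h>
#include "fmpr.h"

int main(void)
{
    fmpr_t x;
    mpfr_t m;

    fmpr_init(x);
    mpfr_init2(m, 53);

    fmpr_set_si(x, 3);
    fmpr_mul_2exp_si(x, x, -1);     /* x = 1.5 */
    fmpr_get_mpfr(m, x, MPFR_RNDN); /* exact here, so the return value is 0 */

    mpfr_printf("%.10Rf\n", m);     /* 1.5000000000 */

    mpfr_clear(m);
    fmpr_clear(x);
    return 0;
}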
Example #3
void
fmpr_randtest_not_zero(fmpr_t x, flint_rand_t state, slong bits, slong mag_bits)
{
    fmpz_randtest_not_zero(fmpr_manref(x), state, bits);
    fmpz_randtest(fmpr_expref(x), state, mag_bits);
    fmpz_sub_ui(fmpr_expref(x), fmpr_expref(x), fmpz_bits(fmpr_manref(x)));
    _fmpr_normalise(fmpr_manref(x), fmpr_expref(x), bits, FMPR_RND_DOWN);
}
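
A brief sketch of calling fmpr_randtest_not_zero above, assuming flint_randinit/flint_randclear and fmpr_printd from FLINT/Arb:

#include "fmpr.h"

int main(void)
{
    flint_rand_t state;
    fmpr_t x;

    flint_randinit(state);
    fmpr_init(x);

    /* random nonzero value with an up to 64-bit mantissa and a small exponent */
    fmpr_randtest_not_zero(x, state, 64, 10);
    fmpr_printd(x, 15);
    flint_printf("\n");

    fmpr_clear(x);
    flint_randclear(state);
    return 0;
}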
Example #4
slong
fmpr_sub(fmpr_t z, const fmpr_t x, const fmpr_t y, slong prec, fmpr_rnd_t rnd)
{
    slong shift, xn, yn;
    mp_limb_t xtmp, ytmp;
    mp_ptr xptr, yptr;
    fmpz xv, yv;
    const fmpz * xexp;
    const fmpz * yexp;
    int xsign, ysign;

    if (fmpr_is_special(x) || fmpr_is_special(y))
    {
        return _fmpr_sub_special(z, x, y, prec, rnd);
    }

    shift = _fmpz_sub_small(fmpr_expref(y), fmpr_expref(x));

    if (shift >= 0)
    {
        xexp = fmpr_expref(x);
        yexp = fmpr_expref(y);
        xv = *fmpr_manref(x);
        yv = *fmpr_manref(y);
    }
    else
    {
        xexp = fmpr_expref(y);
        yexp = fmpr_expref(x);
        xv = *fmpr_manref(y);
        yv = *fmpr_manref(x);
    }

    FMPZ_GET_MPN_READONLY(xsign, xn, xptr, xtmp, xv)
    FMPZ_GET_MPN_READONLY(ysign, yn, yptr, ytmp, yv)
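    /* subtraction is done as addition of a negated operand: flip the sign of
       y's mantissa, which after the swap above may sit in either slot */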

    if (shift >= 0)
    {
        ysign = !ysign;
    }
    else
    {
        shift = -shift;
        xsign = !xsign;
    }

    if ((xn == 1) && (yn == 1) && (shift < FLINT_BITS))
        return _fmpr_add_1x1(z, xptr[0], xsign, xexp, yptr[0], ysign, yexp, shift, prec, rnd);
    else
        return _fmpr_add_mpn(z, xptr, xn, xsign, xexp, yptr, yn, ysign, yexp, shift, prec, rnd);
}
Example #5
long
_fmpr_mul_mpn(fmpr_t z,
    mp_srcptr xman, mp_size_t xn, const fmpz_t xexp,
    mp_srcptr yman, mp_size_t yn, const fmpz_t yexp,
    int negative, long prec, fmpr_rnd_t rnd)
{
    long zn, alloc, ret, shift;
    mp_limb_t tmp_stack[MUL_STACK_ALLOC];
    mp_ptr tmp;

    zn = xn + yn;
    alloc = zn;

    MUL_TMP_ALLOC

    if (yn == 1)
    {
        mp_limb_t cy = mpn_mul_1(tmp, xman, xn, yman[0]);
        tmp[zn - 1] = cy;
        zn = zn - (cy == 0);
    }
    else
    {
        mpn_mul(tmp, xman, xn, yman, yn);
        zn = zn - (tmp[zn - 1] == 0);
    }

    ret = _fmpr_set_round_mpn(&shift, fmpr_manref(z), tmp, zn, negative, prec, rnd);
    fmpz_add2_fmpz_si_inline(fmpr_expref(z), xexp, yexp, shift);

    MUL_TMP_FREE

    return ret;
}
Example #6
int
fmpr_cmpabs(const fmpr_t x, const fmpr_t y)
{
    int res, xsign, ysign;
    fmpr_t t;

    if (fmpr_equal(x, y))
        return 0;

    if (fmpr_is_special(x) || fmpr_is_special(y))
    {
        if (fmpr_is_nan(x) || fmpr_is_nan(y))
            return 0;
        if (fmpr_is_zero(x)) return -1;
        if (fmpr_is_zero(y)) return 1;
        if (fmpr_is_inf(x)) return fmpr_is_inf(y) ? 0 : 1;
        if (fmpr_is_inf(y)) return -1;
        return -1;
    }

    /* Reduces to integer comparison if bottom exponents are the same */
    if (fmpz_equal(fmpr_expref(x), fmpr_expref(y)))
    {
        res = fmpz_cmpabs(fmpr_manref(x), fmpr_manref(y));
        if (res != 0)
            res = (res < 0) ? -1 : 1;
    }
    else
    {
        /* TODO: compare position of top exponents to avoid subtraction */
        xsign = fmpr_sgn(x);
        ysign = fmpr_sgn(y);

        fmpr_init(t);
        if (xsign == ysign)
            fmpr_sub(t, x, y, 2, FMPR_RND_DOWN);
        else
            fmpr_add(t, x, y, 2, FMPR_RND_DOWN);
        res = fmpr_sgn(t) * xsign;
        fmpr_clear(t);
    }

    return res;
}
Example #7
void
fmprb_add_error_2exp_fmpz(fmprb_t x, const fmpz_t err)
{
    fmpr_t t;
    fmpr_init(t);
    fmpz_one(fmpr_manref(t));
    fmpz_set(fmpr_expref(t), err);
    fmprb_add_error_fmpr(x, t);
    fmpr_clear(t);
}
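
A small sketch of using fmprb_add_error_2exp_fmpz above, assuming fmprb_set_si and fmprb_printd from the (old) fmprb module:

#include "fmprb.h"

int main(void)
{
    fmprb_t x;
    fmpz_t e;

    fmprb_init(x);
    fmpz_init(e);

    fmprb_set_si(x, 1);
    fmpz_set_si(e, -10);
    fmprb_add_error_2exp_fmpz(x, e);  /* radius grows by 2^-10 */

    fmprb_printd(x, 10);
    flint_printf("\n");

    fmprb_clear(x);
    fmpz_clear(e);
    return 0;
}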
Example #8
File: rsqrt.c Project: isuruf/arb
slong
fmpr_rsqrt(fmpr_t y, const fmpr_t x, slong prec, fmpr_rnd_t rnd)
{
    slong r;

    if (fmpr_is_special(x))
    {
        if (fmpr_is_zero(x))
            fmpr_pos_inf(y);
        else if (fmpr_is_pos_inf(x))
            fmpr_zero(y);
        else
            fmpr_nan(y);

        return FMPR_RESULT_EXACT;
    }

    if (fmpr_sgn(x) < 0)
    {
        fmpr_nan(y);
        return FMPR_RESULT_EXACT;
    }

    /* special case: 4^n */
    if (fmpz_is_one(fmpr_manref(x)) && fmpz_is_even(fmpr_expref(x)))
    {
        r = fmpr_set_round(y, x, prec, rnd);
        fmpz_tdiv_q_2exp(fmpr_expref(y), fmpr_expref(y), 1);
        fmpz_neg(fmpr_expref(y), fmpr_expref(y));
        return r;
    }

    {
        fmpr_t t;
        fmpz_t e;

        fmpr_init(t);
        fmpz_init(e);

        fmpz_neg(e, fmpr_expref(x));
        if (fmpz_is_odd(e))
            fmpz_add_ui(e, e, 1);
        fmpr_mul_2exp_fmpz(t, x, e);

        CALL_MPFR_FUNC(r, mpfr_rec_sqrt, y, t, prec, rnd);

        fmpz_tdiv_q_2exp(e, e, 1);
        fmpr_mul_2exp_fmpz(y, y, e);

        fmpr_clear(t);
        fmpz_clear(e);

        return r;
    }
}
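
A short usage sketch for fmpr_rsqrt above, assuming fmpr_set_si and fmpr_printd from the documented fmpr interface:

#include "fmpr.h"

int main(void)
{
    fmpr_t x, y;
    slong res;

    fmpr_init(x);
    fmpr_init(y);

    fmpr_set_si(x, 2);
    res = fmpr_rsqrt(y, x, 53, FMPR_RND_DOWN);  /* y ~= 0.7071067811865475 */

    fmpr_printd(y, 20);
    flint_printf("\nrounding error exponent: %wd\n", res);

    fmpr_clear(x);
    fmpr_clear(y);
    return 0;
}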
Example #9
int
fmpr_cmp(const fmpr_t x, const fmpr_t y)
{
    int res, xsign, ysign;
    fmpr_t t;

    if (fmpr_equal(x, y))
        return 0;

    if (fmpr_is_special(x) || fmpr_is_special(y))
    {
        if (fmpr_is_nan(x) || fmpr_is_nan(y))
            return 0;
        if (fmpr_is_zero(y)) return fmpr_sgn(x);
        if (fmpr_is_zero(x)) return -fmpr_sgn(y);
        if (fmpr_is_pos_inf(x)) return 1;
        if (fmpr_is_neg_inf(y)) return 1;
        return -1;
    }

    xsign = fmpr_sgn(x);
    ysign = fmpr_sgn(y);

    if (xsign != ysign)
        return (xsign < 0) ? -1 : 1;

    /* Reduces to integer comparison if bottom exponents are the same */
    if (fmpz_equal(fmpr_expref(x), fmpr_expref(y)))
        return fmpz_cmp(fmpr_manref(x), fmpr_manref(y)) < 0 ? -1 : 1;

    /* TODO: compare position of top exponents to avoid subtraction */

    fmpr_init(t);
    fmpr_sub(t, x, y, 2, FMPR_RND_DOWN);
    res = fmpr_sgn(t);
    fmpr_clear(t);

    return res;
}
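
A sketch exercising fmpr_cmp (above) together with fmpr_cmpabs from Example #6, assuming fmpr_set_si from the fmpr module:

#include "fmpr.h"

int main(void)
{
    fmpr_t a, b;

    fmpr_init(a);
    fmpr_init(b);

    fmpr_set_si(a, -3);
    fmpr_set_si(b, 2);

    flint_printf("fmpr_cmp:    %d\n", fmpr_cmp(a, b));     /* -1: a < b */
    flint_printf("fmpr_cmpabs: %d\n", fmpr_cmpabs(a, b));  /*  1: |a| > |b| */

    fmpr_clear(a);
    fmpr_clear(b);
    return 0;
}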
Example #10
/* computes x + y * 2^shift (optionally negated) */
slong
_fmpr_add_mpn(fmpr_t z,
        mp_srcptr xman, mp_size_t xn, int xsign, const fmpz_t xexp,
        mp_srcptr yman, mp_size_t yn, int ysign, const fmpz_t yexp,
        slong shift, slong prec, fmpr_rnd_t rnd)
{
    slong tn, zn, alloc, ret, shift_bits, shift_limbs;
    int negative;
    mp_limb_t tmp_stack[ADD_STACK_ALLOC];
    mp_limb_t cy;
    mp_ptr tmp, tmp2;

    shift_limbs = shift / FLINT_BITS;
    shift_bits = shift % FLINT_BITS;

    /* x does not overlap with y or the result -- outcome is
       equivalent to adding/subtracting a small number to/from y
       and rounding */
    if (shift > xn * FLINT_BITS &&
        prec != FMPR_PREC_EXACT &&
        xn * FLINT_BITS + prec - (FLINT_BITS * (yn - 1)) < shift)
    {
        zn = (prec + FLINT_BITS - 1) / FLINT_BITS;
        zn = FLINT_MAX(zn, yn) + 2;
        shift_limbs = zn - yn;
        alloc = zn;

        ADD_TMP_ALLOC

        flint_mpn_zero(tmp, shift_limbs);
        flint_mpn_copyi(tmp + shift_limbs, yman, yn);

        if (xsign == ysign)
        {
            tmp[0] = 1;
        }
        else
        {
            mpn_sub_1(tmp, tmp, zn, 1);
            while (tmp[zn-1] == 0)
                zn--;
        }

        ret = _fmpr_set_round_mpn(&shift, fmpr_manref(z), tmp, zn, ysign, prec, rnd);
        shift -= shift_limbs * FLINT_BITS;
        fmpz_add_si_inline(fmpr_expref(z), yexp, shift);

        ADD_TMP_FREE

        return ret;
    }
Example #11
slong
fmpr_mul_fmpz(fmpr_t z, const fmpr_t x, const fmpz_t y, slong prec, fmpr_rnd_t rnd)
{
    fmpz xv, yv;
    fmpz yexp;

    if (fmpr_is_special(x) || fmpz_is_zero(y))
    {
        if (fmpr_is_zero(x))
        {
            fmpr_zero(z);
        }
        else if (fmpz_is_zero(y) && fmpr_is_finite(x))
        {
            fmpr_zero(z);
        }
        else if (fmpr_is_inf(x) && !fmpz_is_zero(y))
        {
            if (fmpr_sgn(x) == fmpz_sgn(y))
                fmpr_pos_inf(z);
            else
                fmpr_neg_inf(z);
        }
        else
        {
            fmpr_nan(z);
        }

        return FMPR_RESULT_EXACT;
    }

    xv = *fmpr_manref(x);
    yv = *y;

    if (!COEFF_IS_MPZ(xv) && !COEFF_IS_MPZ(yv))
    {
        mp_limb_t ytmp;
        unsigned int bc;
        ytmp = FLINT_ABS(yv);
        count_trailing_zeros(bc, ytmp);
        ytmp >>= bc;
        yexp = bc;

        return _fmpr_mul_1x1(z, FLINT_ABS(xv), fmpr_expref(x),
            ytmp, &yexp, (xv ^ yv) < 0, prec, rnd);
    }
Example #12
void
arf_get_fmpr(fmpr_t y, const arf_t x)
{
    if (arf_is_special(x))
    {
        if (arf_is_zero(x))
            fmpr_zero(y);
        else if (arf_is_pos_inf(x))
            fmpr_pos_inf(y);
        else if (arf_is_neg_inf(x))
            fmpr_neg_inf(y);
        else
            fmpr_nan(y);
    }
    else
    {
        arf_get_fmpz_2exp(fmpr_manref(y), fmpr_expref(y), x);
    }
}
Example #13
void
arf_set_fmpr(arf_t y, const fmpr_t x)
{
    if (fmpr_is_special(x))
    {
        if (fmpr_is_zero(x))
            arf_zero(y);
        else if (fmpr_is_pos_inf(x))
            arf_pos_inf(y);
        else if (fmpr_is_neg_inf(x))
            arf_neg_inf(y);
        else
            arf_nan(y);
    }
    else
    {
        arf_set_fmpz(y, fmpr_manref(x));
        fmpz_add_inline(ARF_EXPREF(y), ARF_EXPREF(y), fmpr_expref(x));
    }
}
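
A round-trip sketch combining arf_get_fmpr (Example #12) and arf_set_fmpr (above), assuming arf_set_d and arf_equal from the arf module:

#include "arf.h"
#include "fmpr.h"

int main(void)
{
    arf_t a, b;
    fmpr_t t;

    arf_init(a);
    arf_init(b);
    fmpr_init(t);

    arf_set_d(a, 1.625);   /* exactly representable in binary */
    arf_get_fmpr(t, a);    /* arf -> fmpr */
    arf_set_fmpr(b, t);    /* fmpr -> arf */

    flint_printf("round trip equal: %d\n", arf_equal(a, b));  /* expected: 1 */

    arf_clear(a);
    arf_clear(b);
    fmpr_clear(t);
    return 0;
}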
Example #14
slong
fmpr_div(fmpr_t z, const fmpr_t x, const fmpr_t y, slong prec, fmpr_rnd_t rnd)
{
    if (fmpr_is_special(x) || fmpr_is_special(y))
    {
        _fmpr_div_special(z, x, y);
        return FMPR_RESULT_EXACT;
    }

    /* division by power of two <=> shift exponents */
    if (fmpz_is_pm1(fmpr_manref(y)))
    {
        if (fmpz_is_one(fmpr_manref(y)))
            fmpz_set(fmpr_manref(z), fmpr_manref(x));
        else
            fmpz_neg(fmpr_manref(z), fmpr_manref(x));
        fmpz_sub(fmpr_expref(z), fmpr_expref(x), fmpr_expref(y));
        return _fmpr_normalise(fmpr_manref(z), fmpr_expref(z), prec, rnd);
    }
    else
    {
        slong xbits, ybits, extra, extra_pad, extra_control;
        int negative;
        fmpz_t t, u;

        /* todo: work out exact needed shift */
        xbits = fmpz_bits(fmpr_manref(x));
        ybits = fmpz_bits(fmpr_manref(y));

        extra = prec - xbits + ybits;
        extra = FLINT_MAX(extra, 0);

        extra_pad = 32;
        extra_control = 24;
        extra += extra_pad;

        fmpz_init(t);
        fmpz_init(u);

        fmpz_mul_2exp(t, fmpr_manref(x), extra);
        fmpz_tdiv_q(u, t, fmpr_manref(y));

        if (low_bits_are_zero(u, extra_control))
        {
            fmpz_t v;
            fmpz_init(v);
            fmpz_mul(v, u, fmpr_manref(y));

            negative = fmpz_sgn(fmpr_manref(x)) != fmpz_sgn(fmpr_manref(y));

            if (!fmpz_equal(t, v))
            {
                if (negative)
                    fmpz_sub_ui(u, u, 1);
                else
                    fmpz_add_ui(u, u, 1);
            }

            fmpz_clear(v);
        }

        fmpz_swap(fmpr_manref(z), u);

        fmpz_clear(t);
        fmpz_clear(u);

        fmpz_sub(fmpr_expref(z), fmpr_expref(x), fmpr_expref(y));
        fmpz_sub_ui(fmpr_expref(z), fmpr_expref(z), extra);

        return _fmpr_normalise(fmpr_manref(z), fmpr_expref(z), prec, rnd);
    }
}
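
A brief sketch of calling fmpr_div above, assuming fmpr_one, fmpr_set_si and fmpr_printd from the documented fmpr interface; the returned slong indicates whether rounding occurred:

#include "fmpr.h"

int main(void)
{
    fmpr_t x, y, z;
    slong res;

    fmpr_init(x);
    fmpr_init(y);
    fmpr_init(z);

    fmpr_one(x);
    fmpr_set_si(y, 3);

    res = fmpr_div(z, x, y, 53, FMPR_RND_DOWN);
    fmpr_printd(z, 20);  /* ~0.3333333333333333333 */
    flint_printf("\nexact: %d\n", res == FMPR_RESULT_EXACT);  /* 0: inexact */

    fmpr_clear(x);
    fmpr_clear(y);
    fmpr_clear(z);
    return 0;
}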
Example #15
slong
fmpr_add_naive(fmpr_t z, const fmpr_t x, const fmpr_t y, slong prec, fmpr_rnd_t rnd)
{
    slong shift, xsize, ysize;

    if (fmpr_is_special(x) || fmpr_is_special(y))
    {
        return _fmpr_add_special(z, x, y, prec, rnd);
    }

    shift = _fmpz_sub_small(fmpr_expref(x), fmpr_expref(y));

    if (shift == 0)
    {
        fmpz_add(fmpr_manref(z), fmpr_manref(x), fmpr_manref(y));
        fmpz_set(fmpr_expref(z), fmpr_expref(x));
    }
    else if (shift > 0)
    {
        ysize = _fmpz_size(fmpr_manref(y)) * FLINT_BITS;

        /* x and y do not overlap */
        if (shift > ysize && prec != FMPR_PREC_EXACT)
        {
            /* y does not overlap with result */
            if (ysize + prec - (slong) fmpz_bits(fmpr_manref(x)) < shift)
            {
                return _fmpr_add_eps(z, x, fmpz_sgn(fmpr_manref(y)), prec, rnd);
            }
        }

        fmpz_add_mul2exp(fmpr_manref(z), fmpr_manref(y), fmpr_manref(x), shift);
        fmpz_set(fmpr_expref(z), fmpr_expref(y));
    }
    else
    {
        shift = -shift;

        xsize = _fmpz_size(fmpr_manref(x)) * FLINT_BITS;

        /* x and y do not overlap */
        if (shift > xsize && prec != FMPR_PREC_EXACT)
        {
            /* y does not overlap with result */
            if (xsize + prec - (slong) fmpz_bits(fmpr_manref(y)) < shift)
            {
                return _fmpr_add_eps(z, y, fmpz_sgn(fmpr_manref(x)), prec, rnd);
            }
        }

        fmpz_add_mul2exp(fmpr_manref(z), fmpr_manref(x), fmpr_manref(y), shift);
        fmpz_set(fmpr_expref(z), fmpr_expref(x));
    }

    return _fmpr_normalise(fmpr_manref(z), fmpr_expref(z), prec, rnd);
}
Example #16
static void
_fmprb_gamma(fmprb_t y, const fmprb_t x, long prec, int inverse)
{
    int reflect;
    long r, n, wp;
    fmprb_t t, u, v;

    if (fmprb_is_exact(x))
    {
        const fmpr_struct * mid = fmprb_midref(x);

        if (fmpr_is_special(mid))
        {
            if (!inverse && fmpr_is_pos_inf(mid))
            {
                fmprb_set(y, x);
            }
            else if (fmpr_is_nan(mid) || fmpr_is_neg_inf(mid) || !inverse)
            {
                fmpr_nan(fmprb_midref(y));
                fmpr_pos_inf(fmprb_radref(y));
            }
            else
            {
                fmprb_zero(y);
            }
            return;
        }
        else
        {
            const fmpz exp = *fmpr_expref(mid);
            const fmpz man = *fmpr_manref(mid);

            /* fast gamma(n), gamma(n/2) or gamma(n/4) */
            if (!COEFF_IS_MPZ(exp) && (exp >= -2) &&
                ((double) fmpz_bits(&man) + exp < prec))
            {
                fmpq_t a;
                fmpq_init(a);
                fmpr_get_fmpq(a, mid);
                fmprb_gamma_fmpq(y, a, prec + 2 * inverse);
                if (inverse)
                    fmprb_ui_div(y, 1, y, prec);
                fmpq_clear(a);
                return;
            }
        }
    }

    wp = prec + FLINT_BIT_COUNT(prec);

    gamma_stirling_choose_param_fmprb(&reflect, &r, &n, x, 1, 0, wp);

    fmprb_init(t);
    fmprb_init(u);
    fmprb_init(v);

    if (reflect)
    {
        /* gamma(x) = (rf(1-x, r) * pi) / (gamma(1-x+r) sin(pi x)) */
        fmprb_sub_ui(t, x, 1, wp);
        fmprb_neg(t, t);
        gamma_rising_fmprb_ui_bsplit(u, t, r, wp);
        fmprb_const_pi(v, wp);
        fmprb_mul(u, u, v, wp);
        fmprb_add_ui(t, t, r, wp);
        gamma_stirling_eval_fmprb(v, t, n, 0, wp);
        fmprb_exp(v, v, wp);
        fmprb_sin_pi(t, x, wp);
        fmprb_mul(v, v, t, wp);
    }
    else
    {
        /* gamma(x) = gamma(x+r) / rf(x,r) */
        fmprb_add_ui(t, x, r, wp);
        gamma_stirling_eval_fmprb(u, t, n, 0, wp);
        fmprb_exp(u, u, prec);
        gamma_rising_fmprb_ui_bsplit(v, x, r, wp);
    }

    if (inverse)
        fmprb_div(y, v, u, prec);
    else
        fmprb_div(y, u, v, prec);

    fmprb_clear(t);
    fmprb_clear(u);
    fmprb_clear(v);
}
Example #17
/* convert to an fmpz poly with a common exponent and coefficients
   at most prec bits, also bounding input error plus rounding error */
void _fmprb_poly_get_fmpz_poly_2exp(fmpr_t error, fmpz_t exp, fmpz  * coeffs,
                            fmprb_srcptr A, long lenA, long prec)
{
    fmpz_t top_exp, bot_exp;
    long shift;
    long i;
    int rounding;

    fmpz_init(top_exp);
    fmpz_init(bot_exp);

    if (!_fmprb_poly_mid_get_hull(bot_exp, top_exp, A, lenA))
    {
        fmpz_zero(exp);
        _fmpz_vec_zero(coeffs, lenA);
        fmpr_zero(error);
        for (i = 0; i < lenA; i++)
        {
            if (fmpr_cmp(fmprb_radref(A + i), error) > 0)
                fmpr_set(error, fmprb_radref(A + i));
        }

        return;   /* no need to clear fmpzs */
    }

    /* only take as much precision as necessary */
    shift = _fmpz_sub_small(top_exp, bot_exp);
    prec = FLINT_MIN(prec, shift);

    fmpz_sub_ui(exp, top_exp, prec);

    /* extract integer polynomial */
    rounding = 0;
    for (i = 0; i < lenA; i++)
        rounding |= fmpr_get_fmpz_fixed_fmpz(coeffs + i,
                        fmprb_midref(A + i), exp);

    fmpr_zero(error);

    /* compute maximum of input errors */
    for (i = 0; i < lenA; i++)
    {
        if (fmpr_cmp(fmprb_radref(A + i), error) > 0)
            fmpr_set(error, fmprb_radref(A + i));
    }

    /* add rounding error */
    if (rounding)
    {
        fmpr_t t;
        fmpr_init(t);

        fmpz_set_ui(fmpr_manref(t), 1UL);
        fmpz_set(fmpr_expref(t), exp);

        fmpr_add(error, error, t, FMPRB_RAD_PREC, FMPR_RND_UP);

        fmpr_clear(t);
    }

    fmpz_clear(top_exp);
    fmpz_clear(bot_exp);
}
Example #18
void
fmpr_divappr_abs_ubound(fmpr_t z, const fmpr_t x, const fmpr_t y, slong prec)
{
    if (fmpr_is_special(x) || fmpr_is_special(y) || fmpz_is_pm1(fmpr_manref(y)))
    {
        fmpr_div(z, x, y, prec, FMPR_RND_UP);
        fmpr_abs(z, z);
    }
    else
    {
        fmpz_t t, u;
        slong xbits, ybits, tbits, ubits, shift;

        xbits = fmpz_bits(fmpr_manref(x));
        ybits = fmpz_bits(fmpr_manref(y));

        fmpz_init(t);
        fmpz_init(u);

        ubits = FLINT_MIN(ybits, prec);
        tbits = prec + ubits + 1;

        /* upper bound for |x|, shifted */
        if (xbits <= tbits)
        {
            fmpz_mul_2exp(t, fmpr_manref(x), tbits - xbits);
            fmpz_abs(t, t);
        }
        else if (fmpz_sgn(fmpr_manref(x)) > 0)
        {
            fmpz_cdiv_q_2exp(t, fmpr_manref(x), xbits - tbits);
        }
        else
        {
            fmpz_fdiv_q_2exp(t, fmpr_manref(x), xbits - tbits);
            fmpz_neg(t, t);
        }

        /* lower bound for |y|, shifted */
        if (ybits <= ubits)
            fmpz_mul_2exp(u, fmpr_manref(y), ubits - ybits);
        else
            fmpz_tdiv_q_2exp(u, fmpr_manref(y), ybits - ubits);
        fmpz_abs(u, u);

        fmpz_cdiv_q(fmpr_manref(z), t, u);

        shift = (ubits - ybits) - (tbits - xbits);
        fmpz_sub(fmpr_expref(z), fmpr_expref(x), fmpr_expref(y));
        if (shift >= 0)
            fmpz_add_ui(fmpr_expref(z), fmpr_expref(z), shift);
        else
            fmpz_sub_ui(fmpr_expref(z), fmpr_expref(z), -shift);

        _fmpr_normalise(fmpr_manref(z), fmpr_expref(z), prec, FMPR_RND_UP);

        fmpz_clear(t);
        fmpz_clear(u);
    }
}
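
A short usage sketch for fmpr_divappr_abs_ubound above, assuming fmpr_set_si and fmpr_printd from the fmpr module; the result is an upper bound for |x/y| at roughly the requested precision:

#include "fmpr.h"

int main(void)
{
    fmpr_t x, y, z;

    fmpr_init(x);
    fmpr_init(y);
    fmpr_init(z);

    fmpr_set_si(x, 1);
    fmpr_set_si(y, 3);

    fmpr_divappr_abs_ubound(z, x, y, 30);  /* z >= |1/3|, ~30-bit precision */
    fmpr_printd(z, 15);
    flint_printf("\n");

    fmpr_clear(x);
    fmpr_clear(y);
    fmpr_clear(z);
    return 0;
}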