/* Multiplies the mantissa limb arrays (xman, xn) and (yman, yn), rounds the
   product to prec bits with rounding mode rnd, and stores the result in z
   with exponent xexp + yexp (adjusted by the normalization shift reported
   by _fmpr_set_round_mpn).

   negative: sign flag forwarded to _fmpr_set_round_mpn (nonzero = result
   is negated).
   Returns the value of _fmpr_set_round_mpn unchanged -- presumably the
   rounding-error exponent offset; cannot confirm from this chunk. */
long _fmpr_mul_mpn(fmpr_t z, mp_srcptr xman, mp_size_t xn, const fmpz_t xexp,
    mp_srcptr yman, mp_size_t yn, const fmpz_t yexp,
    int negative, long prec, fmpr_rnd_t rnd)
{
    long zn, alloc, ret, shift;
    mp_limb_t tmp_stack[MUL_STACK_ALLOC];
    mp_ptr tmp;

    /* A product of xn and yn limbs occupies at most xn + yn limbs. */
    zn = xn + yn;
    alloc = zn;

    /* Project macro: presumably points tmp at tmp_stack for small alloc
       and heap-allocates otherwise -- must be paired with MUL_TMP_FREE
       below. TODO(review): confirm against the macro definition; it
       references the locals alloc/tmp/tmp_stack by name. */
    MUL_TMP_ALLOC

    if (yn == 1)
    {
        /* Single-limb multiplier: mpn_mul_1 writes xn limbs to tmp and
           returns the carry-out limb, which we place on top. */
        mp_limb_t cy = mpn_mul_1(tmp, xman, xn, yman[0]);
        tmp[zn - 1] = cy;
        /* If the carry was zero, the top limb is unused -- shrink zn. */
        zn = zn - (cy == 0);
    }
    else
    {
        /* General case. NOTE(review): mpn_mul requires its first operand
           to be at least as long as its second (xn >= yn) -- presumably
           guaranteed by the caller; verify at call sites. */
        mpn_mul(tmp, xman, xn, yman, yn);
        /* The full product's top limb may be zero; normalize by dropping
           it so the mantissa passed on below is trimmed. */
        zn = zn - (tmp[zn - 1] == 0);
    }

    /* Round the zn-limb product to prec bits into z's mantissa; shift
       receives the exponent correction produced by normalization. */
    ret = _fmpr_set_round_mpn(&shift, fmpr_manref(z), tmp, zn, negative, prec, rnd);

    /* Final exponent: xexp + yexp + shift. */
    fmpz_add2_fmpz_si_inline(fmpr_expref(z), xexp, yexp, shift);

    /* Releases any heap storage obtained by MUL_TMP_ALLOC. */
    MUL_TMP_FREE

    return ret;
}
/* computes x + y * 2^shift (optionally negated) */ slong _fmpr_add_mpn(fmpr_t z, mp_srcptr xman, mp_size_t xn, int xsign, const fmpz_t xexp, mp_srcptr yman, mp_size_t yn, int ysign, const fmpz_t yexp, slong shift, slong prec, fmpr_rnd_t rnd) { slong tn, zn, alloc, ret, shift_bits, shift_limbs; int negative; mp_limb_t tmp_stack[ADD_STACK_ALLOC]; mp_limb_t cy; mp_ptr tmp, tmp2; shift_limbs = shift / FLINT_BITS; shift_bits = shift % FLINT_BITS; /* x does not overlap with y or the result -- outcome is equivalent to adding/subtracting a small number to/from y and rounding */ if (shift > xn * FLINT_BITS && prec != FMPR_PREC_EXACT && xn * FLINT_BITS + prec - (FLINT_BITS * (yn - 1)) < shift) { zn = (prec + FLINT_BITS - 1) / FLINT_BITS; zn = FLINT_MAX(zn, yn) + 2; shift_limbs = zn - yn; alloc = zn; ADD_TMP_ALLOC flint_mpn_zero(tmp, shift_limbs); flint_mpn_copyi(tmp + shift_limbs, yman, yn); if (xsign == ysign) { tmp[0] = 1; } else { mpn_sub_1(tmp, tmp, zn, 1); while (tmp[zn-1] == 0) zn--; } ret = _fmpr_set_round_mpn(&shift, fmpr_manref(z), tmp, zn, ysign, prec, rnd); shift -= shift_limbs * FLINT_BITS; fmpz_add_si_inline(fmpr_expref(z), yexp, shift); ADD_TMP_FREE return ret; }