/* mpq_neg -- set DST to -SRC.  When DST and SRC are the same object only
   the numerator's sign needs flipping; otherwise the limb data of both
   numerator and denominator is copied first.  */
void
mpq_neg (mpq_ptr dst, mpq_srcptr src)
{
  mp_size_t signed_num = src->_mp_num._mp_size;

  if (dst != src)
    {
      mp_size_t nn = ABS (signed_num);
      mp_size_t dn = src->_mp_den._mp_size;

      /* Make room, then duplicate both parts of the rational.  */
      MPZ_REALLOC (mpq_numref (dst), nn);
      MPZ_REALLOC (mpq_denref (dst), dn);
      MPN_COPY (dst->_mp_num._mp_d, src->_mp_num._mp_d, nn);
      MPN_COPY (dst->_mp_den._mp_d, src->_mp_den._mp_d, dn);
      dst->_mp_den._mp_size = dn;
    }

  /* Negation is just flipping the sign-carrying numerator size field.  */
  dst->_mp_num._mp_size = -signed_num;
}
/* mpz_fdiv_r_ui -- set REM to DIVIDEND mod DIVISOR with floor rounding
   (remainder non-negative) and return the remainder as an unsigned long.
   NOTE(review): this fragment appears truncated in this view -- the #endif
   matching BITS_PER_ULONG > GMP_NUMB_BITS, the single-limb path and the
   return statement are not visible.  */
unsigned long int
mpz_fdiv_r_ui (mpz_ptr rem, mpz_srcptr dividend, unsigned long int divisor)
{
  mp_size_t ns, nn;
  mp_ptr np;
  mp_limb_t rl;

  if (divisor == 0)
    DIVIDE_BY_ZERO;

  ns = SIZ(dividend);
  if (ns == 0)
    {
      /* 0 mod anything is 0.  */
      SIZ(rem) = 0;
      return 0;
    }

  nn = ABS(ns);
  np = PTR(dividend);
#if BITS_PER_ULONG > GMP_NUMB_BITS  /* avoid warnings about shift amount */
  if (divisor > GMP_NUMB_MAX)
    {
      /* Divisor spans two limbs: go through mpn_tdiv_qr with a 2-limb
         divisor.  */
      mp_limb_t dp[2];
      mp_ptr rp, qp;
      mp_size_t rn;
      TMP_DECL;

      MPZ_REALLOC (rem, 2);
      rp = PTR(rem);

      if (nn == 1)		/* tdiv_qr requirements; tested above for 0 */
        {
          /* Dividend is a single limb, hence smaller than the divisor.  */
          rl = np[0];
          rp[0] = rl;
        }
      else
        {
          TMP_MARK;
          dp[0] = divisor & GMP_NUMB_MASK;
          dp[1] = divisor >> GMP_NUMB_BITS;
          qp = TMP_ALLOC_LIMBS (nn - 2 + 1);
          mpn_tdiv_qr (qp, rp, (mp_size_t) 0, np, nn, dp, (mp_size_t) 2);
          TMP_FREE;
          rl = rp[0] + (rp[1] << GMP_NUMB_BITS);
        }

      if (rl != 0 && ns < 0)
        {
          /* Floor rounding: negative dividend with nonzero remainder, so
             the true remainder is divisor - |r|.  */
          rl = divisor - rl;
          rp[0] = rl & GMP_NUMB_MASK;
          rp[1] = rl >> GMP_NUMB_BITS;
        }
/* mpz_urandomb -- generate a uniformly distributed random integer
   0 <= ROP < 2^NBITS using the random state RSTATE.  */
void
mpz_urandomb (mpz_ptr rop, gmp_randstate_t rstate, unsigned long int nbits)
{
  mp_size_t limbs = BITS_TO_LIMBS (nbits);
  mp_ptr dst = MPZ_REALLOC (rop, limbs);

  /* Fill the limbs with random bits, then strip high zero limbs.  */
  _gmp_rand (dst, rstate, nbits);
  MPN_NORMALIZE (dst, limbs);
  SIZ (rop) = limbs;
}
/* mpz_rrandomb -- generate a random number of at most NBITS bits whose
   binary representation has long runs of ones and zeros; useful for
   exercising corner cases in tests.  */
void
mpz_rrandomb (mpz_ptr x, gmp_randstate_t rstate, mp_bitcnt_t nbits)
{
  mp_size_t limbs = (nbits + GMP_NUMB_BITS - 1) / GMP_NUMB_BITS;

  if (nbits != 0)
    {
      MPZ_REALLOC (x, limbs);
      gmp_rrandomb (PTR (x), rstate, nbits);
    }
  /* limbs is 0 when nbits is 0, giving the value zero.  */
  SIZ (x) = limbs;
}
/* mpz_fdiv_q_ui -- set QUOT to DIVIDEND/DIVISOR with floor rounding and
   return the (non-negative) remainder.
   NOTE(review): this fragment appears truncated in this view -- the
   #endif/#else path and the function tail are not visible.  */
mpir_ui
mpz_fdiv_q_ui (mpz_ptr quot, mpz_srcptr dividend, mpir_ui divisor)
{
  mp_size_t ns, nn, qn;
  mp_ptr np, qp;
  mp_limb_t rl;

  if (divisor == 0)
    DIVIDE_BY_ZERO;

  ns = SIZ(dividend);
  if (ns == 0)
    {
      SIZ(quot) = 0;
      return 0;
    }

  nn = ABS(ns);
  MPZ_REALLOC (quot, nn);
  qp = PTR(quot);
  np = PTR(dividend);

#if BITS_PER_UI > GMP_NUMB_BITS  /* avoid warnings about shift amount */
  if (divisor > GMP_NUMB_MAX)
    {
      /* Divisor spans two limbs.  */
      mp_limb_t dp[2], rp[2];

      if (nn == 1)		/* tdiv_qr requirements; tested above for 0 */
        {
          qp[0] = 0;
          rl = np[0];
          qn = 1;		/* a white lie, fixed below */
        }
      else
        {
          dp[0] = divisor & GMP_NUMB_MASK;
          dp[1] = divisor >> GMP_NUMB_BITS;
          mpn_tdiv_qr (qp, rp, (mp_size_t) 0, np, nn, dp, (mp_size_t) 2);
          rl = rp[0] + (rp[1] << GMP_NUMB_BITS);
          qn = nn - 2 + 1;
        }

      if (rl != 0 && ns < 0)
        {
          /* Floor rounding: round the quotient away from zero for a
             negative dividend and complement the remainder.  */
          mpn_incr_u (qp, (mp_limb_t) 1);
          rl = divisor - rl;
        }

      /* Strip up to two possible high zero limbs of the quotient.  */
      qn -= qp[qn - 1] == 0;
      qn -= qn != 0 && qp[qn - 1] == 0;
    }
/* mpz_set_ui -- set DEST to the unsigned long VAL.
   NOTE(review): fragment truncated in this view -- the #endif and the
   final "SIZ(dest) = size" style assignment are not visible.  */
void
mpz_set_ui (mpz_ptr dest, unsigned long int val)
{
  mp_size_t size;

  /* Every mpz has at least one limb allocated, so the common case needs
     no realloc.  */
  dest->_mp_d[0] = val & GMP_NUMB_MASK;
  size = val != 0;

#if BITS_PER_ULONG > GMP_NUMB_BITS  /* avoid warnings about shift amount */
  if (val > GMP_NUMB_MAX)
    {
      /* Value does not fit a single numb: store the high part in a
         second limb.  */
      MPZ_REALLOC (dest, 2);
      dest->_mp_d[1] = val >> GMP_NUMB_BITS;
      size = 2;
    }
/* mpz_set -- assign W from U (deep copy of the limb data; sign carried by
   the signed size field).  */
void
mpz_set (mpz_ptr w, mpz_srcptr u)
{
  mp_size_t signed_size = SIZ (u);
  mp_size_t abs_size = ABS (signed_size);
  mp_ptr dst = MPZ_REALLOC (w, abs_size);

  MPN_COPY (dst, PTR (u), abs_size);
  SIZ (w) = signed_size;
}
/* mpz_set_sx -- set Z to the signed value V of type intmax_t.
   NLIMBS is the limb count needed to hold an intmax_t.  */
void
mpz_set_sx (mpz_ptr z, intmax_t v)
{
  /* Compute |v| in the unsigned domain: "-v" as intmax_t is undefined
     behavior when v == INTMAX_MIN, whereas -(uintmax_t)v is well-defined
     modular arithmetic and yields the absolute value for every input.  */
  uintmax_t uv = (v < 0 ? - (uintmax_t) v : (uintmax_t) v);
#if NLIMBS == 1
  z->_mp_d[0] = (mp_limb_t) uv;
  z->_mp_size = v < 0 ? -NLIMBS : v ? NLIMBS : 0;
#else
  mp_size_t i;

  /* Ensure worst-case space before storing any limbs.  */
  if (NLIMBS > z->_mp_alloc)
    MPZ_REALLOC (z, NLIMBS);

  /* Store limbs least-significant first, stopping once the value is
     exhausted; i ends up as the count of significant limbs.  */
  for (i = 0; i < NLIMBS && uv; ++i)
    {
      z->_mp_d[i] = uv & GMP_NUMB_MASK;
      uv >>= GMP_NUMB_BITS;
    }
  /* Negative size encodes a negative value; zero stays size 0.  */
  z->_mp_size = v < 0 ? -i : v ? i : 0;
#endif
}
/* mpz_set_si -- set DEST to the signed long VAL.
   NOTE(review): fragment truncated in this view -- the #endif and the
   final signed-size assignment are not visible.  */
void
mpz_set_si (mpz_ptr dest, signed long int val)
{
  mp_size_t size;
  mp_limb_t vl;

  /* NOTE(review): "-val" overflows (undefined behavior) for
     val == LONG_MIN; the safe idiom negates after converting to
     unsigned long -- confirm against upstream ABS_CAST.  */
  vl = (mp_limb_t) (unsigned long int) (val >= 0 ? val : -val);

  dest->_mp_d[0] = vl & GMP_NUMB_MASK;
  size = vl != 0;

#if GMP_NAIL_BITS != 0
  /* Only with nail bits can a long spill into a second limb.  */
  if (vl > GMP_NUMB_MAX)
    {
      MPZ_REALLOC (dest, 2);
      dest->_mp_d[1] = vl >> GMP_NUMB_BITS;
      size = 2;
    }
/* refmpz_mul -- reference (simple, obviously correct) mpz multiplication,
   used to cross-check the optimized implementation.  Computes the product
   into a freshly allocated buffer via refmpn_mul, then copies the
   normalized result into W.  */
static void
refmpz_mul (mpz_t w, const mpz_t u, const mpz_t v)
{
  mp_size_t un = u->_mp_size;
  mp_size_t vn = v->_mp_size;
  mp_size_t sign = un ^ vn;	/* negative iff operand signs differ */
  mp_size_t rn, alloc;
  mp_ptr rp;

  un = ABS (un);
  vn = ABS (vn);

  if (un == 0 || vn == 0)
    {
      SIZ (w) = 0;
      return;
    }

  alloc = un + vn;
  rp = __GMP_ALLOCATE_FUNC_LIMBS (alloc);

  /* refmpn_mul wants the operand with more limbs first.  */
  if (un > vn)
    refmpn_mul (rp, u->_mp_d, un, v->_mp_d, vn);
  else
    refmpn_mul (rp, v->_mp_d, vn, u->_mp_d, un);

  /* The product occupies un+vn limbs, with possibly one high zero limb.  */
  rn = alloc - (rp[alloc - 1] == 0);

  MPZ_REALLOC (w, rn);
  MPN_COPY (PTR (w), rp, rn);
  SIZ (w) = sign < 0 ? -rn : rn;

  __GMP_FREE_FUNC_LIMBS (rp, alloc);
}
/* mpz_tdiv_q_ui -- set QUOT to DIVIDEND/DIVISOR truncated toward zero and
   return the absolute remainder.
   NOTE(review): fragment truncated in this view -- the #endif/#else path
   and the function tail are not visible.  */
unsigned long int
mpz_tdiv_q_ui (mpz_ptr quot, mpz_srcptr dividend, unsigned long int divisor)
{
  mp_size_t ns, nn, qn;
  mp_ptr np, qp;
  mp_limb_t rl;

  if (divisor == 0)
    DIVIDE_BY_ZERO;

  ns = SIZ(dividend);
  if (ns == 0)
    {
      SIZ(quot) = 0;
      return 0;
    }

  nn = ABS(ns);
  MPZ_REALLOC (quot, nn);
  qp = PTR(quot);
  np = PTR(dividend);

#if BITS_PER_ULONG > GMP_NUMB_BITS  /* avoid warnings about shift amount */
  if (divisor > GMP_NUMB_MAX)
    {
      /* Divisor spans two limbs.  */
      mp_limb_t dp[2], rp[2];

      if (nn == 1)		/* tdiv_qr requirements; tested above for 0 */
        {
          /* Single-limb dividend is smaller than the divisor: quotient 0,
             remainder is the dividend itself.  */
          SIZ(quot) = 0;
          rl = np[0];
          return rl;
        }

      dp[0] = divisor & GMP_NUMB_MASK;
      dp[1] = divisor >> GMP_NUMB_BITS;

      mpn_tdiv_qr (qp, rp, (mp_size_t) 0, np, nn, dp, (mp_size_t) 2);
      rl = rp[0] + (rp[1] << GMP_NUMB_BITS);
      qn = nn - 2 + 1;
      /* Strip up to two possible high zero limbs of the quotient.  */
      qn -= qp[qn - 1] == 0;
      qn -= qn != 0 && qp[qn - 1] == 0;
    }
/* mpz_set_f -- set W to the integer part of the mpf_t U, truncating
   toward zero.  */
void
mpz_set_f (mpz_ptr w, mpf_srcptr u)
{
  mp_ptr dst, src;
  mp_size_t un;
  mp_exp_t ue = EXP (u);

  /* abs(u) < 1 truncates to zero.  */
  if (ue <= 0)
    {
      SIZ (w) = 0;
      return;
    }

  MPZ_REALLOC (w, ue);
  dst = PTR (w);
  src = PTR (u);

  un = SIZ (u);
  SIZ (w) = (un >= 0 ? ue : -ue);
  un = ABS (un);

  if (un < ue)
    {
      /* Fewer mantissa limbs than the exponent: pad with low zero limbs
         to make up a total of "ue" limbs.  */
      mp_size_t pad = ue - un;
      MPN_ZERO (dst, pad);
      dst += pad;
    }
  else
    {
      /* un >= ue: keep only the high "ue" limbs (truncation).  */
      src += un - ue;
      un = ue;
    }
  MPN_COPY (dst, src, un);
}
/* mpfr_get_z_exp -- put the significand of F into Z as an integer and
   return an exponent E such that F = Z * 2^E.  F must be an ordinary
   number (asserted); zero maps to Z = 0 with exponent __gmpfr_emin.  */
mp_exp_t
mpfr_get_z_exp (mpz_ptr z, mpfr_srcptr f)
{
  mp_size_t fn;
  int sh;

  MPFR_ASSERTD (MPFR_IS_FP (f));

  if (MPFR_UNLIKELY (MPFR_IS_ZERO (f)))
    {
      mpz_set_ui (z, 0);
      return __gmpfr_emin;
    }

  fn = MPFR_LIMB_SIZE(f);

  /* check whether allocated space for z is enough */
  if (MPFR_UNLIKELY (ALLOC (z) < fn))
    MPZ_REALLOC (z, fn);

  /* Shift out the low zero bits padding the least significant limb, so
     that Z holds exactly the PREC(f) significand bits.  */
  MPFR_UNSIGNED_MINUS_MODULO (sh, MPFR_PREC (f));
  if (MPFR_LIKELY (sh))
    mpn_rshift (PTR (z), MPFR_MANT (f), fn, sh);
  else
    MPN_COPY (PTR (z), MPFR_MANT (f), fn);

  SIZ(z) = MPFR_IS_NEG (f) ? -fn : fn;

  /* Test if the result is representable. Later, we could choose to return
     MPFR_EXP_MIN if it isn't, or perhaps MPFR_EXP_MAX to signal an error.
     The mantissa would still be meaningful.  */
  MPFR_ASSERTD ((mp_exp_unsigned_t) MPFR_GET_EXP (f) - MPFR_EXP_MIN
                >= (mp_exp_unsigned_t) MPFR_PREC(f));

  return MPFR_GET_EXP (f) - MPFR_PREC (f);
}
/* mpz_powm_ui -- set R to B^EL mod M using plain left-to-right
   square-and-multiply with mpn_tdiv_qr reductions.  Not side-channel
   silent; see mpz_powm_sec for that.  */
void
mpz_powm_ui (mpz_ptr r, mpz_srcptr b, unsigned long int el, mpz_srcptr m)
{
  mp_ptr xp, tp, qp, mp, bp;
  mp_size_t xn, tn, mn, bn;
  int m_zero_cnt;
  int c;
  mp_limb_t e;
  TMP_DECL;

  mp = PTR(m);
  mn = ABSIZ(m);
  if (mn == 0)
    DIVIDE_BY_ZERO;

  if (el == 0)
    {
      /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0 depending on
         if MOD equals 1.  */
      SIZ(r) = (mn == 1 && mp[0] == 1) ? 0 : 1;
      PTR(r)[0] = 1;
      return;
    }

  TMP_MARK;

  /* Normalize m (i.e. make its most significant bit set) as required by
     division functions below.  */
  count_leading_zeros (m_zero_cnt, mp[mn - 1]);
  m_zero_cnt -= GMP_NAIL_BITS;
  if (m_zero_cnt != 0)
    {
      mp_ptr new_mp = TMP_ALLOC_LIMBS (mn);
      mpn_lshift (new_mp, mp, mn, m_zero_cnt);
      mp = new_mp;
    }

  bn = ABSIZ(b);
  bp = PTR(b);
  if (bn > mn)
    {
      /* Reduce possibly huge base.  Use a function call to reduce, since
         we don't want the quotient allocation to live until function
         return.  */
      mp_ptr new_bp = TMP_ALLOC_LIMBS (mn);
      reduce (new_bp, bp, bn, mp, mn);
      bp = new_bp;
      bn = mn;
      /* Canonicalize the base, since we are potentially going to multiply
         with it quite a few times.  */
      MPN_NORMALIZE (bp, bn);
    }

  if (bn == 0)
    {
      /* Base reduced to zero: the result is zero.  */
      SIZ(r) = 0;
      TMP_FREE;
      return;
    }

  /* Scratch: tp for double-size products, xp for the running residue,
     qp for discarded quotients.  */
  tp = TMP_ALLOC_LIMBS (2 * mn + 1);
  xp = TMP_ALLOC_LIMBS (mn);
  qp = TMP_ALLOC_LIMBS (mn + 1);

  MPN_COPY (xp, bp, bn);
  xn = bn;

  /* Left-justify the exponent in e, dropping its (implicit) most
     significant one bit; c counts the remaining bits to process.  */
  e = el;
  count_leading_zeros (c, e);
  e = (e << c) << 1;		/* shift the exp bits to the left, lose msb */
  c = BITS_PER_MP_LIMB - 1 - c;

  /* Main loop. */

  /* If m is already normalized (high bit of high limb set), and b is the
     same size, but a bigger value, and e==1, then there's no modular
     reductions done and we can end up with a result out of range at the
     end. */
  if (c == 0)
    {
      if (xn == mn && mpn_cmp (xp, mp, mn) >= 0)
        mpn_sub_n (xp, xp, mp, mn);
      goto finishup;
    }

  while (c != 0)
    {
      /* Square step: x <- x^2 (mod m when it grows to mn limbs).  */
      mpn_sqr_n (tp, xp, xn);
      tn = 2 * xn;
      tn -= tp[tn - 1] == 0;
      if (tn < mn)
        {
          MPN_COPY (xp, tp, tn);
          xn = tn;
        }
      else
        {
          mpn_tdiv_qr (qp, xp, 0L, tp, tn, mp, mn);
          xn = mn;
        }

      /* Multiply step when the current (top) exponent bit is set.  */
      if ((mp_limb_signed_t) e < 0)
        {
          mpn_mul (tp, xp, xn, bp, bn);
          tn = xn + bn;
          tn -= tp[tn - 1] == 0;
          if (tn < mn)
            {
              MPN_COPY (xp, tp, tn);
              xn = tn;
            }
          else
            {
              mpn_tdiv_qr (qp, xp, 0L, tp, tn, mp, mn);
              xn = mn;
            }
        }
      e <<= 1;
      c--;
    }

 finishup:

  /* We shifted m left m_zero_cnt steps.  Adjust the result by reducing it
     with the original MOD.  */
  if (m_zero_cnt != 0)
    {
      mp_limb_t cy;
      cy = mpn_lshift (tp, xp, xn, m_zero_cnt);
      tp[xn] = cy;
      xn += cy != 0;

      if (xn < mn)
        {
          MPN_COPY (xp, tp, xn);
        }
      else
        {
          mpn_tdiv_qr (qp, xp, 0L, tp, xn, mp, mn);
          xn = mn;
        }
      mpn_rshift (xp, xp, xn, m_zero_cnt);
    }
  MPN_NORMALIZE (xp, xn);

  if ((el & 1) != 0 && SIZ(b) < 0 && xn != 0)
    {
      /* Negative base and odd exponent: result is m - |x|.  */
      mp = PTR(m);	/* want original, unnormalized m */
      mpn_sub (xp, mp, mn, xp, xn);
      xn = mn;
      MPN_NORMALIZE (xp, xn);
    }
  MPZ_REALLOC (r, xn);
  SIZ (r) = xn;
  MPN_COPY (PTR(r), xp, xn);
  TMP_FREE;
}
/* mpz_tdiv_r -- set REM to NUM mod DEN, truncating toward zero, so REM
   carries the sign of NUM.  The quotient is computed into scratch space
   and discarded.  */
void
mpz_tdiv_r (mpz_ptr rem, mpz_srcptr num, mpz_srcptr den)
{
  mp_size_t ql;
  mp_size_t ns, ds, nl, dl;
  mp_ptr np, dp, qp, rp;
  TMP_DECL;

  ns = SIZ (num);
  ds = SIZ (den);
  nl = ABS (ns);
  dl = ABS (ds);
  ql = nl - dl + 1;

  if (dl == 0)
    DIVIDE_BY_ZERO;

  MPZ_REALLOC (rem, dl);

  if (ql <= 0)
    {
      /* |num| < |den|: the remainder is num itself.  */
      if (num != rem)
        {
          mp_ptr np, rp;
          np = PTR (num);
          rp = PTR (rem);
          MPN_COPY (rp, np, nl);
          SIZ (rem) = SIZ (num);
        }
      return;
    }

  TMP_MARK;
  qp = (mp_ptr) TMP_ALLOC (ql * BYTES_PER_MP_LIMB);
  rp = PTR (rem);
  np = PTR (num);
  dp = PTR (den);

  /* FIXME: We should think about how to handle the temporary allocation.
     Perhaps mpn_tdiv_qr should handle it, since it anyway often needs to
     allocate temp space.  */

  /* Copy denominator to temporary space if it overlaps with the
     remainder.  */
  if (dp == rp)
    {
      mp_ptr tp;
      tp = (mp_ptr) TMP_ALLOC (dl * BYTES_PER_MP_LIMB);
      MPN_COPY (tp, dp, dl);
      dp = tp;
    }
  /* Copy numerator to temporary space if it overlaps with the
     remainder.  */
  if (np == rp)
    {
      mp_ptr tp;
      tp = (mp_ptr) TMP_ALLOC (nl * BYTES_PER_MP_LIMB);
      MPN_COPY (tp, np, nl);
      np = tp;
    }

  mpn_tdiv_qr (qp, rp, 0L, np, nl, dp, dl);

  /* Strip high zero limbs; the remainder takes the numerator's sign.  */
  MPN_NORMALIZE (rp, dl);
  SIZ (rem) = ns >= 0 ? dl : -dl;

  TMP_FREE;
}
/* check_z -- exercise gmp_printf-style %Z conversions against expected
   output strings, then repeat each case through the %N (limb array)
   conversion with up to three extra high zero limbs appended.  */
void
check_z (void)
{
  /* fmt: format string; z: decimal input value; want: expected output.  */
  static const struct {
    const char  *fmt;
    const char  *z;
    const char  *want;
  } data[] = {
    { "%Zd", "0",    "0" },
    { "%Zd", "1",    "1" },
    { "%Zd", "123",  "123" },
    { "%Zd", "-1",   "-1" },
    { "%Zd", "-123", "-123" },
    { "%+Zd", "0",    "+0" },
    { "%+Zd", "123",  "+123" },
    { "%+Zd", "-123", "-123" },
    { "%Zx",  "123",  "7b" },
    { "%ZX",  "123",  "7B" },
    { "%Zx",  "-123", "-7b" },
    { "%ZX",  "-123", "-7B" },
    { "%Zo",  "123",  "173" },
    { "%Zo",  "-123", "-173" },
    { "%#Zx",  "0",    "0" },
    { "%#ZX",  "0",    "0" },
    { "%#Zx",  "123",  "0x7b" },
    { "%#ZX",  "123",  "0X7B" },
    { "%#Zx",  "-123", "-0x7b" },
    { "%#ZX",  "-123", "-0X7B" },
    { "%#Zo",  "0",    "0" },
    { "%#Zo",  "123",  "0173" },
    { "%#Zo",  "-123", "-0173" },
    { "%10Zd",  "0",    " 0" },
    { "%10Zd",  "123",  " 123" },
    { "%10Zd",  "-123", " -123" },
    { "%-10Zd", "0",    "0 " },
    { "%-10Zd", "123",  "123 " },
    { "%-10Zd", "-123", "-123 " },
    { "%+10Zd",  "123",  " +123" },
    { "%+-10Zd", "123",  "+123 " },
    { "%+10Zd",  "-123", " -123" },
    { "%+-10Zd", "-123", "-123 " },
    { "%08Zd",  "0",    "00000000" },
    { "%08Zd",  "123",  "00000123" },
    { "%08Zd",  "-123", "-0000123" },
    { "%+08Zd", "0",    "+0000000" },
    { "%+08Zd", "123",  "+0000123" },
    { "%+08Zd", "-123", "-0000123" },
    { "%#08Zx",  "0",    "00000000" },
    { "%#08Zx",  "123",  "0x00007b" },
    { "%#08Zx",  "-123", "-0x0007b" },
    { "%+#08Zx", "0",    "+0000000" },
    { "%+#08Zx", "123",  "+0x0007b" },
    { "%+#08Zx", "-123", "-0x0007b" },
    { "%.0Zd", "0", "" },
    { "%.1Zd", "0", "0" },
    { "%.2Zd", "0", "00" },
    { "%.3Zd", "0", "000" },
  };

  int        i, j;
  mpz_t      z;
  char       *nfmt;
  mp_size_t  nsize, zeros;

  mpz_init (z);

  for (i = 0; i < numberof (data); i++)
    {
      mpz_set_str_or_abort (z, data[i].z, 0);

      /* don't try negatives or forced sign in hex or octal */
      if (mpz_fits_slong_p (z)
          && ! (hex_or_octal_p (data[i].fmt)
                && (strchr (data[i].fmt, '+') != NULL || mpz_sgn(z) < 0)))
        {
          /* Cross-check against the plain C library printf.  */
          check_plain (data[i].want, data[i].fmt, mpz_get_si (z));
        }

      check_one (data[i].want, data[i].fmt, z);

      /* Same again, with %N and possibly some high zero limbs */
      nfmt = __gmp_allocate_strdup (data[i].fmt);
      for (j = 0; nfmt[j] != '\0'; j++)
        if (nfmt[j] == 'Z')
          nfmt[j] = 'N';
      for (zeros = 0; zeros <= 3; zeros++)
        {
          nsize = ABSIZ(z)+zeros;
          MPZ_REALLOC (z, nsize);
          nsize = (SIZ(z) >= 0 ? nsize : -nsize);
          refmpn_zero (PTR(z)+ABSIZ(z), zeros);
          check_one (data[i].want, nfmt, PTR(z), nsize);
        }
      __gmp_free_func (nfmt, strlen(nfmt)+1);
    }

  mpz_clear (z);
}
/* mpz_mul -- set W to U times V.  Handles W overlapping either operand
   and dispatches one- and two-limb multipliers to mpn_mul_1/mpn_mul_2.  */
void
mpz_mul (mpz_ptr w, mpz_srcptr u, mpz_srcptr v)
{
  mp_size_t usize;
  mp_size_t vsize;
  mp_size_t wsize;
  mp_size_t sign_product;
  mp_ptr up, vp;
  mp_ptr wp;
  mp_ptr free_me;
  size_t free_me_size;
  mp_limb_t cy_limb;
  TMP_DECL;

  usize = SIZ (u);
  vsize = SIZ (v);
  /* XOR of the signed sizes is negative iff exactly one operand is
     negative.  */
  sign_product = usize ^ vsize;
  usize = ABS (usize);
  vsize = ABS (vsize);

  /* Make u the operand with the larger number of limbs.  */
  if (usize < vsize)
    {
      MPZ_SRCPTR_SWAP (u, v);
      MP_SIZE_T_SWAP (usize, vsize);
    }

  if (vsize == 0)
    {
      SIZ (w) = 0;
      return;
    }

#if HAVE_NATIVE_mpn_mul_2
  if (vsize <= 2)
    {
      wp = MPZ_REALLOC (w, usize+vsize);
      if (vsize == 1)
        cy_limb = mpn_mul_1 (wp, PTR (u), usize, PTR (v)[0]);
      else
        {
          cy_limb = mpn_mul_2 (wp, PTR (u), usize, PTR (v));
          usize++;
        }
      wp[usize] = cy_limb;
      usize += (cy_limb != 0);
      SIZ (w) = (sign_product >= 0 ? usize : -usize);
      return;
    }
#else
  if (vsize == 1)
    {
      wp = MPZ_REALLOC (w, usize+1);
      cy_limb = mpn_mul_1 (wp, PTR (u), usize, PTR (v)[0]);
      wp[usize] = cy_limb;
      usize += (cy_limb != 0);
      SIZ (w) = (sign_product >= 0 ? usize : -usize);
      return;
    }
#endif

  TMP_MARK;
  free_me = NULL;
  up = PTR (u);
  vp = PTR (v);
  wp = PTR (w);

  /* Ensure W has space enough to store the result.  */
  wsize = usize + vsize;
  if (ALLOC (w) < wsize)
    {
      if (wp == up || wp == vp)
        {
          /* W overlaps an operand: keep the old block alive until the
             product has been computed, free it at the end.  */
          free_me = wp;
          free_me_size = ALLOC (w);
        }
      else
        (*__gmp_free_func) (wp, (size_t) ALLOC (w) * GMP_LIMB_BYTES);

      ALLOC (w) = wsize;
      wp = __GMP_ALLOCATE_FUNC_LIMBS (wsize);
      PTR (w) = wp;
    }
  else
    {
      /* Make U and V not overlap with W.  */
      if (wp == up)
        {
          /* W and U are identical.  Allocate temporary space for U.  */
          up = TMP_ALLOC_LIMBS (usize);
          /* Is V identical too?  Keep it identical with U.  */
          if (wp == vp)
            vp = up;
          /* Copy to the temporary space.  */
          MPN_COPY (up, wp, usize);
        }
      else if (wp == vp)
        {
          /* W and V are identical.  Allocate temporary space for V.  */
          vp = TMP_ALLOC_LIMBS (vsize);
          /* Copy to the temporary space.  */
          MPN_COPY (vp, wp, vsize);
        }
    }

  if (up == vp)
    {
      /* Equal operands: squaring is faster.  */
      mpn_sqr (wp, up, usize);
      cy_limb = wp[wsize - 1];
    }
  else
    {
      cy_limb = mpn_mul (wp, up, usize, vp, vsize);
    }

  /* Drop the high limb if the product did not carry into it.  */
  wsize -= cy_limb == 0;

  SIZ (w) = sign_product < 0 ? -wsize : wsize;

  if (free_me != NULL)
    (*__gmp_free_func) (free_me, free_me_size * GMP_LIMB_BYTES);

  TMP_FREE;
}
/* Get a limb pointer for writing, previous contents is intact.  The mpz
   is grown to at least N limbs; N must be positive.  */
mp_limb_t *
mpz_limbs_modify (mpz_ptr x, mp_size_t n)
{
  mp_limb_t *limbs;

  assert (n > 0);
  limbs = MPZ_REALLOC (x, n);
  return limbs;
}
/* mpz_tdiv_qr(quot,rem,dividend,divisor) -- Set QUOT to DIVIDEND/DIVISOR,
   and REM to DIVIDEND mod DIVISOR.

Copyright 1991, 1993, 1994, 2000, 2001, 2005 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */

#include "gmp.h"
#include "gmp-impl.h"
#include "longlong.h"
#ifdef BERKELEY_MP
#include "mp.h"
#endif

/* Truncating division: quotient rounds toward zero, remainder takes the
   sign of the dividend.  Under BERKELEY_MP the entry point is mdiv() with
   operand order (num, den, quot, rem).  */
void
#ifndef BERKELEY_MP
mpz_tdiv_qr (mpz_ptr quot, mpz_ptr rem, mpz_srcptr num, mpz_srcptr den)
#else /* BERKELEY_MP */
mdiv (mpz_srcptr num, mpz_srcptr den, mpz_ptr quot, mpz_ptr rem)
#endif /* BERKELEY_MP */
{
  mp_size_t ql;
  mp_size_t ns, ds, nl, dl;
  mp_ptr np, dp, qp, rp;
  TMP_DECL;

  ns = SIZ (num);
  ds = SIZ (den);
  nl = ABS (ns);
  dl = ABS (ds);
  ql = nl - dl + 1;

  if (dl == 0)
    DIVIDE_BY_ZERO;

  MPZ_REALLOC (rem, dl);

  if (ql <= 0)
    {
      /* |num| < |den|: quotient 0, remainder is the numerator.  */
      if (num != rem)
        {
          mp_ptr np, rp;
          np = PTR (num);
          rp = PTR (rem);
          MPN_COPY (rp, np, nl);
          SIZ (rem) = SIZ (num);
        }
      /* This needs to follow the assignment to rem, in case the
         numerator and quotient are the same.  */
      SIZ (quot) = 0;
      return;
    }

  MPZ_REALLOC (quot, ql);

  TMP_MARK;
  qp = PTR (quot);
  rp = PTR (rem);
  np = PTR (num);
  dp = PTR (den);

  /* FIXME: We should think about how to handle the temporary allocation.
     Perhaps mpn_tdiv_qr should handle it, since it anyway often needs to
     allocate temp space.  */

  /* Copy denominator to temporary space if it overlaps with the quotient
     or remainder.  */
  if (dp == rp || dp == qp)
    {
      mp_ptr tp;
      tp = (mp_ptr) TMP_ALLOC (dl * BYTES_PER_MP_LIMB);
      MPN_COPY (tp, dp, dl);
      dp = tp;
    }
  /* Copy numerator to temporary space if it overlaps with the quotient
     or remainder.  */
  if (np == rp || np == qp)
    {
      mp_ptr tp;
      tp = (mp_ptr) TMP_ALLOC (nl * BYTES_PER_MP_LIMB);
      MPN_COPY (tp, np, nl);
      np = tp;
    }

  mpn_tdiv_qr (qp, rp, 0L, np, nl, dp, dl);

  /* Drop a possible high zero quotient limb and normalize the
     remainder.  */
  ql -= qp[ql - 1] == 0;
  MPN_NORMALIZE (rp, dl);

  /* Quotient is positive iff the operand signs agree.  */
  SIZ (quot) = (ns ^ ds) >= 0 ? ql : -ql;
  SIZ (rem) = ns >= 0 ? dl : -dl;

  TMP_FREE;
}
/* mpz_setbit -- set bit BIT_IDX of D to 1, treating negative numbers as if
   represented in infinite-precision two's complement.  */
void
mpz_setbit (mpz_ptr d, mp_bitcnt_t bit_idx)
{
  mp_size_t dsize = SIZ (d);
  mp_ptr dp = PTR (d);
  mp_size_t limb_idx;
  mp_limb_t mask;

  limb_idx = bit_idx / GMP_NUMB_BITS;
  mask = CNST_LIMB(1) << (bit_idx % GMP_NUMB_BITS);
  if (dsize >= 0)
    {
      if (limb_idx < dsize)
        {
          /* Bit lies inside the existing limbs: a plain OR suffices.  */
          dp[limb_idx] |= mask;
        }
      else
        {
          /* Ugh.  The bit should be set outside of the end of the
             number.  We have to increase the size of the number.  */
          dp = MPZ_REALLOC (d, limb_idx + 1);
          SIZ (d) = limb_idx + 1;
          MPN_ZERO (dp + dsize, limb_idx - dsize);
          dp[limb_idx] = mask;
        }
    }
  else
    {
      /* Simulate two's complement arithmetic, i.e. simulate
	 1. Set OP = ~(OP - 1) [with infinitely many leading ones].
	 2. Set the bit.
	 3. Set OP = ~OP + 1.  */

      dsize = -dsize;

      if (limb_idx < dsize)
        {
          mp_size_t zero_bound;
          /* No index upper bound on this loop, we're sure there's a
             non-zero limb sooner or later.  */
          zero_bound = 0;
          while (dp[zero_bound] == 0)
            zero_bound++;

          if (limb_idx > zero_bound)
            {
              /* Above the borrow range of the "-1": setting the bit in
                 the complement is clearing it in the stored magnitude.  */
              mp_limb_t dlimb;
              dlimb = dp[limb_idx] & ~mask;
              dp[limb_idx] = dlimb;

              if (UNLIKELY ((dlimb == 0) + limb_idx == dsize)) /* dsize == limb_idx + 1 */
                {
                  /* high limb became zero, must normalize */
                  MPN_NORMALIZE (dp, limb_idx);
                  SIZ (d) = -limb_idx;
                }
            }
          else if (limb_idx == zero_bound)
            {
              /* Bit lands in the limb where the "-1" borrow stops.  */
              dp[limb_idx] = ((dp[limb_idx] - 1) & ~mask) + 1;
              ASSERT (dp[limb_idx] != 0);
            }
          else
            {
              /* Below the first nonzero limb: subtract the bit from the
                 magnitude.  */
              MPN_DECR_U (dp + limb_idx, dsize - limb_idx, mask);
              dsize -= dp[dsize - 1] == 0;
              SIZ (d) = -dsize;
            }
        }
      /* limb_idx >= dsize: the bit is already set in the infinite run of
         leading ones of a negative number -- nothing to do.  */
    }
}
/* mpz_aorsmul -- set W to W + X*Y (sub == 0) or W - X*Y (sub == -1).
   The sign bit of "sub" is successively XORed with the operand signs so
   that at the end it selects add versus subtract of magnitudes.  */
REGPARM_ATTR (1) static void
mpz_aorsmul (mpz_ptr w, mpz_srcptr x, mpz_srcptr y, mp_size_t sub)
{
  mp_size_t xsize, ysize, tsize, wsize, wsize_signed;
  mp_ptr wp, tp;
  mp_limb_t c, high;
  TMP_DECL;

  /* w unaffected if x==0 or y==0 */
  xsize = SIZ(x);
  ysize = SIZ(y);
  if (xsize == 0 || ysize == 0)
    return;

  /* make x the bigger of the two */
  if (ABS(ysize) > ABS(xsize))
    {
      MPZ_SRCPTR_SWAP (x, y);
      MP_SIZE_T_SWAP (xsize, ysize);
    }

  sub ^= ysize;
  ysize = ABS(ysize);

  /* use mpn_addmul_1/mpn_submul_1 if possible */
  if (ysize == 1)
    {
      mpz_aorsmul_1 (w, x, PTR(y)[0], sub);
      return;
    }

  sub ^= xsize;
  xsize = ABS(xsize);

  wsize_signed = SIZ(w);
  sub ^= wsize_signed;
  wsize = ABS(wsize_signed);

  /* Room for the larger of |w| and |x*y|, plus a possible carry limb.  */
  tsize = xsize + ysize;
  wp = MPZ_REALLOC (w, MAX (wsize, tsize) + 1);

  if (wsize_signed == 0)
    {
      /* Nothing to add to, just set w=x*y.  No w==x or w==y overlap here,
	 since we know x,y!=0 but w==0.  */
      high = mpn_mul (wp, PTR(x),xsize, PTR(y),ysize);
      tsize -= (high == 0);
      SIZ(w) = (sub >= 0 ? tsize : -tsize);
      return;
    }

  TMP_MARK;
  tp = TMP_ALLOC_LIMBS (tsize);

  high = mpn_mul (tp, PTR(x),xsize, PTR(y),ysize);
  tsize -= (high == 0);
  ASSERT (tp[tsize-1] != 0);
  if (sub >= 0)
    {
      /* Same effective signs: add magnitudes, larger operand first.  */
      mp_srcptr up = wp;
      mp_size_t usize = wsize;

      if (usize < tsize)
        {
          up = tp;
          usize = tsize;
          tp = wp;
          tsize = wsize;

          wsize = usize;
        }

      c = mpn_add (wp, up,usize, tp,tsize);
      wp[wsize] = c;
      wsize += (c != 0);
    }
  else
    {
      /* Opposite effective signs: subtract the smaller magnitude from
         the larger; flip the result sign when |x*y| wins.  */
      mp_srcptr up = wp;
      mp_size_t usize = wsize;

      if (mpn_cmp_twosizes_lt (up,usize, tp,tsize))
        {
          up = tp;
          usize = tsize;
          tp = wp;
          tsize = wsize;

          wsize = usize;
          wsize_signed = -wsize_signed;
        }

      ASSERT_NOCARRY (mpn_sub (wp, up,usize, tp,tsize));
      wsize = usize;
      MPN_NORMALIZE (wp, wsize);
    }

  SIZ(w) = (wsize_signed >= 0 ? wsize : -wsize);

  TMP_FREE;
}
/* shared by mpq_inp_str */
/* mpz_inp_str_nowhite -- read an integer from STREAM in the given BASE
   (0 means auto-detect from a 0/0x/0b prefix).  C is the first character,
   already read; NREAD is the count of characters consumed so far.
   Returns the total characters read, or 0 on parse error.  */
size_t
mpz_inp_str_nowhite (mpz_ptr x, FILE *stream, int base, int c, size_t nread)
{
  char *str;
  size_t alloc_size, str_size;
  int negative;
  mp_size_t xsize;
  const unsigned char *digit_value;

  ASSERT_ALWAYS (EOF == -1);	/* FIXME: handle this by adding explicit */
				/* comparisons of c and EOF before each  */
				/* read of digit_value[].  */

  digit_value = digit_value_tab;
  if (base > 36)
    {
      /* For bases > 36, use the collating sequence
	 0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz.  */
      digit_value += 224;
      if (base > 62)
	return 0;		/* too large base */
    }

  negative = 0;
  if (c == '-')
    {
      negative = 1;
      c = getc (stream);
      nread++;
    }

  if (c == EOF || digit_value[c] >= (base == 0 ? 10 : base))
    return 0;			/* error if no digits */

  /* If BASE is 0, try to find out the base by looking at the initial
     characters.  */
  if (base == 0)
    {
      base = 10;
      if (c == '0')
        {
          base = 8;
          c = getc (stream);
          nread++;
          if (c == 'x' || c == 'X')
            {
              base = 16;
              c = getc (stream);
              nread++;
            }
          else if (c == 'b' || c == 'B')
            {
              base = 2;
              c = getc (stream);
              nread++;
            }
        }
    }

  /* Skip leading zeros.  */
  while (c == '0')
    {
      c = getc (stream);
      nread++;
    }

  /* Collect digit values (not characters) into a growable byte array.  */
  alloc_size = 100;
  str = (char *) (*__gmp_allocate_func) (alloc_size);
  str_size = 0;

  while (c != EOF)
    {
      int dig;
      dig = digit_value[c];
      if (dig >= base)
        break;
      if (str_size >= alloc_size)
        {
          size_t old_alloc_size = alloc_size;
          alloc_size = alloc_size * 3 / 2;
          str = (char *) (*__gmp_reallocate_func) (str, old_alloc_size, alloc_size);
        }
      str[str_size++] = dig;
      c = getc (stream);
    }
  nread += str_size;

  /* Push back the terminating non-digit; it was never counted in nread
     (the loop read str_size new chars but the first digit was counted
     before the loop), hence the final decrement.  */
  ungetc (c, stream);
  nread--;

  /* Make sure the string is not empty, mpn_set_str would fail.  */
  if (str_size == 0)
    {
      x->_mp_size = 0;
    }
  else
    {
      /* Overestimate of the limb count needed for str_size digits.  */
      xsize = (((mp_size_t)
                (str_size / __mp_bases[base].chars_per_bit_exactly))
               / GMP_NUMB_BITS + 2);
      MPZ_REALLOC (x, xsize);

      /* Convert the byte array in base BASE to our bignum format.  */
      xsize = mpn_set_str (x->_mp_d, (unsigned char *) str, str_size, base);
      x->_mp_size = negative ? -xsize : xsize;
    }
  (*__gmp_free_func) (str, alloc_size);
  return nread;
}
/* cfdiv_r_2exp -- set W to U mod 2^CNT, rounding the implied quotient
   toward +infinity (dir > 0, ceiling) or -infinity (dir < 0, floor).
   When u and dir have the same sign the operation truncates; otherwise
   the low bits are twos-complemented to give 2^cnt - |low bits|.  */
REGPARM_ATTR (1) static void
cfdiv_r_2exp (mpz_ptr w, mpz_srcptr u, unsigned long cnt, int dir)
{
  mp_size_t usize, abs_usize, limb_cnt, i;
  mp_srcptr up;
  mp_ptr wp;
  mp_limb_t high;

  usize = SIZ(u);
  if (usize == 0)
    {
      SIZ(w) = 0;
      return;
    }

  limb_cnt = cnt / GMP_NUMB_BITS;
  cnt %= GMP_NUMB_BITS;
  abs_usize = ABS (usize);

  /* MPZ_REALLOC(w) below is only when w!=u, so we can fetch PTR(u) here
     nice and early */
  up = PTR(u);

  if ((usize ^ dir) < 0)
    {
      /* Round towards zero, means just truncate */

      if (w == u)
        {
          /* if already smaller than limb_cnt then do nothing */
          if (abs_usize <= limb_cnt)
            return;
          wp = PTR(w);
        }
      else
        {
          i = MIN (abs_usize, limb_cnt+1);
          MPZ_REALLOC (w, i);
          wp = PTR(w);
          MPN_COPY (wp, up, i);

          /* if smaller than limb_cnt then only the copy is needed */
          if (abs_usize <= limb_cnt)
            {
              SIZ(w) = usize;
              return;
            }
        }
    }
  else
    {
      /* Round away from zero, means twos complement if non-zero */

      /* if u!=0 and smaller than divisor, then must negate */
      if (abs_usize <= limb_cnt)
        goto negate;

      /* if non-zero low limb, then must negate */
      for (i = 0; i < limb_cnt; i++)
        if (up[i] != 0)
          goto negate;

      /* if non-zero partial limb, then must negate */
      if ((up[limb_cnt] & LOW_MASK (cnt)) != 0)
        goto negate;

      /* otherwise low bits of u are zero, so that's the result */
      SIZ(w) = 0;
      return;

    negate:
      /* twos complement negation to get 2**cnt-u */

      MPZ_REALLOC (w, limb_cnt+1);
      up = PTR(u);
      wp = PTR(w);

      /* Ones complement */
      i = MIN (abs_usize, limb_cnt+1);
      mpn_com_n (wp, up, i);
      for ( ; i <= limb_cnt; i++)
        wp[i] = GMP_NUMB_MAX;

      /* Twos complement.  Since u!=0 in the relevant part, the twos
	 complement never gives 0 and a carry, so can use MPN_INCR_U.  */
      MPN_INCR_U (wp, limb_cnt+1, CNST_LIMB(1));

      usize = -usize;
    }

  /* Mask the high limb */
  high = wp[limb_cnt];
  high &= LOW_MASK (cnt);
  wp[limb_cnt] = high;

  /* Strip any consequent high zeros */
  while (high == 0)
    {
      limb_cnt--;
      if (limb_cnt < 0)
        {
          SIZ(w) = 0;
          return;
        }
      high = wp[limb_cnt];
    }

  limb_cnt++;
  SIZ(w) = (usize >= 0 ? limb_cnt : -limb_cnt);
}
/* mpz_inp_raw -- read X from FP in raw format: 4-byte big-endian signed
   byte count followed by the magnitude bytes, most significant first.
   NOTE(review): this fragment appears truncated in this view -- the close
   of the "abs_xsize != 0" branch, the final normalization/size assignment
   and the return are not visible.  */
size_t
mpz_inp_raw (mpz_ptr x, FILE *fp)
{
  unsigned char  csize_bytes[4];
  mp_size_t      csize, abs_xsize, i;
  size_t         abs_csize;
  char           *cp;
  mp_ptr         xp, sp, ep;
  mp_limb_t      slimb, elimb;

  if (fp == 0)
    fp = stdin;

  /* 4 bytes for size */
  if (fread (csize_bytes, sizeof (csize_bytes), 1, fp) != 1)
    return 0;

  csize =
    (  (mp_size_t) csize_bytes[0] << 24)
    + ((mp_size_t) csize_bytes[1] << 16)
    + ((mp_size_t) csize_bytes[2] << 8)
    + ((mp_size_t) csize_bytes[3]);

  /* Sign extend if necessary.
     Could write "csize -= ((csize & 0x80000000L) << 1)", but that tickles
     a bug in gcc 3.0 for powerpc64 on AIX.  */
  if (sizeof (csize) > 4 && csize & 0x80000000L)
    csize -= 0x80000000L << 1;

  abs_csize = ABS (csize);

  /* round up to a multiple of limbs */
  abs_xsize = (abs_csize*8 + GMP_NUMB_BITS-1) / GMP_NUMB_BITS;

  if (abs_xsize != 0)
    {
      MPZ_REALLOC (x, abs_xsize);
      xp = PTR(x);

      /* Get limb boundaries right in the read, for the benefit of the
	 non-nails case.  */
      xp[0] = 0;
      cp = (char *) (xp + abs_xsize) - abs_csize;
      if (fread (cp, abs_csize, 1, fp) != 1)
        return 0;

      if (GMP_NAIL_BITS == 0)
        {
          /* Reverse limbs to least significant first, and byte swap.  If
	     abs_xsize is odd then on the last iteration elimb and slimb are
	     the same.  It doesn't seem extra code to handle that case
	     separately, to save an NTOH.  */
          sp = xp;
          ep = xp + abs_xsize-1;
          for (i = 0; i < (abs_xsize+1)/2; i++)
            {
              NTOH_LIMB_FETCH (elimb, ep);
              NTOH_LIMB_FETCH (slimb, sp);
              *sp++ = elimb;
              *ep-- = slimb;
            }
        }
      else
        {
          /* It ought to be possible to do the transformation in-place, but
	     for now it's easier to use an extra temporary area.  */
          mp_limb_t  byte, limb;
          int	     bits;
          mp_size_t  tpos;
          mp_ptr     tp;
          TMP_DECL;

          TMP_MARK;
          tp = TMP_ALLOC_LIMBS (abs_xsize);
          limb = 0;
          bits = 0;
          tpos = 0;
          /* Assemble limbs from bytes, least significant byte first,
             packing GMP_NUMB_BITS bits per limb (nails excluded).  */
          for (i = abs_csize-1; i >= 0; i--)
            {
              byte = (unsigned char) cp[i];
              limb |= (byte << bits);
              bits += 8;
              if (bits >= GMP_NUMB_BITS)
                {
                  ASSERT (tpos < abs_xsize);
                  tp[tpos++] = limb & GMP_NUMB_MASK;
                  bits -= GMP_NUMB_BITS;
                  ASSERT (bits < 8);
                  limb = byte >> (8 - bits);
                }
            }
          if (bits != 0)
            {
              ASSERT (tpos < abs_xsize);
              tp[tpos++] = limb;
            }
          ASSERT (tpos == abs_xsize);

          MPN_COPY (xp, tp, abs_xsize);
          TMP_FREE;
        }
/* mpz_urandomm -- generate a uniform random integer 0 <= ROP < N using
   rejection sampling of nbits-sized candidates, with an iteration cap;
   if the cap is hit the last candidate is reduced mod N instead.  */
void
mpz_urandomm (mpz_ptr rop, gmp_randstate_t rstate, mpz_srcptr n)
{
  mp_ptr rp, np, nlast;
  mp_size_t nbits, size;
  int count;
  int pow2;
  int cmp;
  TMP_DECL;

  size = ABSIZ (n);
  if (size == 0)
    DIVIDE_BY_ZERO;

  nlast = &PTR (n)[size - 1];

  /* Detect whether n is a power of 2.  */
  pow2 = POW2_P (*nlast);
  if (pow2 != 0)
    for (np = PTR (n); np < nlast; np++)
      if (*np != 0)
        {
          pow2 = 0;		/* Mark n as `not a power of two'.  */
          break;
        }

  /* For a power of two, one fewer random bit is needed since the result
     range [0, n) excludes n itself.  */
  count_leading_zeros (count, *nlast);
  nbits = size * GMP_NUMB_BITS - (count - GMP_NAIL_BITS) - pow2;
  if (nbits == 0)		/* nbits == 0 means that n was == 1.  */
    {
      SIZ (rop) = 0;
      return;
    }

  TMP_MARK;
  np = PTR (n);
  if (rop == n)
    {
      /* Result aliases the modulus: work on a copy of n's limbs.  */
      mp_ptr tp;
      tp = TMP_ALLOC_LIMBS (size);
      MPN_COPY (tp, np, size);
      np = tp;
    }

  /* Here the allocated size can be one too much if n is a power of
     (2^GMP_NUMB_BITS) but it's convenient for using mpn_cmp below.  */
  rp = MPZ_REALLOC (rop, size);
  /* Clear last limb to prevent the case in which size is one too much.  */
  rp[size - 1] = 0;

  count = MAX_URANDOMM_ITER;	/* Set iteration count limit.  */
  do
    {
      _gmp_rand (rp, rstate, nbits);
      MPN_CMP (cmp, rp, np, size);
    }
  while (cmp >= 0 && --count != 0);

  if (count == 0)
    /* Too many iterations; return result mod n == result - n */
    mpn_sub_n (rp, rp, np, size);

  MPN_NORMALIZE (rp, size);
  SIZ (rop) = size;
  TMP_FREE;
}
/* mpz_gcdext -- set G to gcd(A, B), and S, T (either may be NULL) to
   cofactors with A*S + B*T = G.  The wanted cofactor T is derived from
   the computed one via T = (G - A*S) / B.
   Fix: the temporaries "gtmp"/"stmp" were referenced through the mangled
   token ">mp" (an HTML-escape corruption of "&gtmp"); restored so the
   declared gtmp struct is actually used.  */
void
mpz_gcdext (mpz_ptr g, mpz_ptr s, mpz_ptr t, mpz_srcptr a, mpz_srcptr b)
{
  mp_size_t asize, bsize;
  mp_ptr tmp_ap, tmp_bp;
  mp_size_t gsize, ssize, tmp_ssize;
  mp_ptr gp, tmp_gp, tmp_sp;
  TMP_DECL;

  /* mpn_gcdext requires that Usize >= Vsize.  Therefore, we often
     have to swap U and V.  The computed cofactor will be the
     "smallest" one, which is faster to produce.  The wanted one
     will be computed here; this is needed anyway when both are
     requested.  */

  asize = ABSIZ (a);
  bsize = ABSIZ (b);

  if (asize < bsize)
    {
      MPZ_SRCPTR_SWAP (a, b);
      MP_SIZE_T_SWAP (asize, bsize);
      MPZ_PTR_SWAP (s, t);
    }

  if (bsize == 0)
    {
      /* g = |a|, s = sgn(a), t = 0. */
      ssize = SIZ (a) >= 0 ? (asize != 0) : -1;

      gp = MPZ_REALLOC (g, asize);
      MPN_COPY (gp, PTR (a), asize);
      SIZ (g) = asize;

      if (t != NULL)
        SIZ (t) = 0;
      if (s != NULL)
        {
          SIZ (s) = ssize;
          PTR (s)[0] = 1;
        }
      return;
    }

  TMP_MARK;

  /* Work on copies since mpn_gcdext destroys its inputs.  */
  TMP_ALLOC_LIMBS_2 (tmp_ap, asize, tmp_bp, bsize);
  MPN_COPY (tmp_ap, PTR (a), asize);
  MPN_COPY (tmp_bp, PTR (b), bsize);

  TMP_ALLOC_LIMBS_2 (tmp_gp, bsize, tmp_sp, bsize + 1);

  gsize = mpn_gcdext (tmp_gp, tmp_sp, &tmp_ssize, tmp_ap, asize, tmp_bp, bsize);

  ssize = ABS (tmp_ssize);
  tmp_ssize = SIZ (a) >= 0 ? tmp_ssize : -tmp_ssize;

  if (t != NULL)
    {
      /* Compute t = (g - s*a) / b, wrapping the raw limb results in
         stack-local mpz structs.  */
      mpz_t x;
      __mpz_struct gtmp, stmp;

      PTR (&gtmp) = tmp_gp;
      SIZ (&gtmp) = gsize;

      PTR (&stmp) = tmp_sp;
      SIZ (&stmp) = tmp_ssize;

      MPZ_TMP_INIT (x, ssize + asize + 1);
      mpz_mul (x, &stmp, a);
      mpz_sub (x, &gtmp, x);
      mpz_divexact (t, x, b);
    }

  if (s != NULL)
    {
      mp_ptr sp;

      sp = MPZ_REALLOC (s, ssize);
      MPN_COPY (sp, tmp_sp, ssize);
      SIZ (s) = tmp_ssize;
    }

  gp = MPZ_REALLOC (g, gsize);
  MPN_COPY (gp, tmp_gp, gsize);
  SIZ (g) = gsize;

  TMP_FREE;
}
/* mpz_set_str -- set X from the string STR in the given BASE (0 means
   auto-detect from a 0/0x/0b prefix).  Returns 0 on success, -1 on a
   parse error.
   Fix: the large-base offset into digit_value_tab was 208; the table's
   case-sensitive second half starts at offset 224, as used by
   mpz_inp_str_nowhite -- 208 mis-read digits for bases 37..62.  */
int
mpz_set_str (mpz_ptr x, const char *str, int base)
{
  size_t str_size;
  char *s, *begs;
  size_t i;
  mp_size_t xsize;
  int c;
  int negative;
  const unsigned char *digit_value;
  TMP_DECL;

  digit_value = digit_value_tab;
  if (base > 36)
    {
      /* For bases > 36, use the collating sequence
	 0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz,
	 i.e. the case-sensitive second half of digit_value_tab, which
	 starts 224 entries in (matching mpz_inp_str_nowhite).  */
      digit_value += 224;
      if (base > 62)
        return -1;		/* too large base */
    }

  /* Skip whitespace.  */
  do
    c = (unsigned char) *str++;
  while (isspace (c));

  negative = 0;
  if (c == '-')
    {
      negative = 1;
      c = (unsigned char) *str++;
    }

  if (digit_value[c] >= (base == 0 ? 10 : base))
    return -1;			/* error if no valid digits */

  /* If BASE is 0, try to find out the base by looking at the initial
     characters.  */
  if (base == 0)
    {
      base = 10;
      if (c == '0')
        {
          base = 8;
          c = (unsigned char) *str++;
          if (c == 'x' || c == 'X')
            {
              base = 16;
              c = (unsigned char) *str++;
            }
          else if (c == 'b' || c == 'B')
            {
              base = 2;
              c = (unsigned char) *str++;
            }
        }
    }

  /* Skip leading zeros and white space.  */
  while (c == '0' || isspace (c))
    c = (unsigned char) *str++;
  /* Make sure the string does not become empty, mpn_set_str would fail.  */
  if (c == 0)
    {
      SIZ (x) = 0;
      return 0;
    }

  TMP_MARK;
  str_size = strlen (str - 1);
  s = begs = (char *) TMP_ALLOC (str_size + 1);

  /* Remove spaces from the string and convert the result from ASCII to a
     byte array.  */
  for (i = 0; i < str_size; i++)
    {
      if (!isspace (c))
        {
          int dig = digit_value[c];
          if (dig >= base)
            {
              TMP_FREE;
              return -1;
            }
          *s++ = dig;
        }
      c = (unsigned char) *str++;
    }

  str_size = s - begs;

  /* Upper bound on limbs needed for str_size digits in this base.  */
  LIMBS_PER_DIGIT_IN_BASE (xsize, str_size, base);
  MPZ_REALLOC (x, xsize);

  /* Convert the byte array in base BASE to our bignum format.  */
  xsize = mpn_set_str (PTR (x), (unsigned char *) begs, str_size, base);
  SIZ (x) = negative ? -xsize : xsize;

  TMP_FREE;
  return 0;
}
/* mpz_powm_sec -- set R to B^E mod M using the side-channel-silent
   mpn_sec_powm.  Requires M odd and nonzero and E > 0 (E == 0 yields
   1 mod M; E < 0 raises a divide-by-zero as unsupported).  */
void
mpz_powm_sec (mpz_ptr r, mpz_srcptr b, mpz_srcptr e, mpz_srcptr m)
{
  mp_size_t n;
  mp_ptr rp, tp;
  mp_srcptr bp, ep, mp;
  mp_size_t rn, bn, es, en;
  TMP_DECL;

  n = ABSIZ(m);
  mp = PTR(m);

  /* The sec algorithms requires an odd (hence nonzero) modulus.  */
  if (UNLIKELY ((n == 0) || (mp[0] % 2 == 0)))
    DIVIDE_BY_ZERO;

  es = SIZ(e);
  if (UNLIKELY (es <= 0))
    {
      if (es == 0)
        {
          /* b^0 mod m,  b is anything and m is non-zero.
	     Result is 1 mod m, i.e., 1 or 0 depending on if m = 1.  */
          SIZ(r) = n != 1 || mp[0] != 1;
          PTR(r)[0] = 1;
          return;
        }
      DIVIDE_BY_ZERO;
    }
  en = es;

  bn = ABSIZ(b);

  if (UNLIKELY (bn == 0))
    {
      /* 0^e mod m is 0 for e > 0.  */
      SIZ(r) = 0;
      return;
    }

  TMP_MARK;
  /* One scratch block: n limbs for the result followed by the itch space
     mpn_sec_powm asks for.  */
  tp = TMP_ALLOC_LIMBS (n + mpn_sec_powm_itch (bn, en * GMP_NUMB_BITS, n));
  rp = tp;
  tp += n;

  bp = PTR(b);
  ep = PTR(e);

  mpn_sec_powm (rp, bp, bn, ep, en * GMP_NUMB_BITS, mp, n, tp);

  rn = n;

  MPN_NORMALIZE (rp, rn);

  if ((ep[0] & 1) && SIZ(b) < 0 && rn != 0)
    {
      /* Negative base and odd exponent: result is m - |r|.  */
      mpn_sub (rp, PTR(m), n, rp, rn);
      rn = n;
      MPN_NORMALIZE (rp, rn);
    }

  MPZ_REALLOC (r, rn);
  SIZ(r) = rn;
  MPN_COPY (PTR(r), rp, rn);
  TMP_FREE;
}
/* mpz_combit -- toggle (complement) bit BIT_INDEX of D, treating negative
   numbers as infinite-precision two's complement.  */
void
mpz_combit (mpz_ptr d, mp_bitcnt_t bit_index)
{
  mp_size_t dsize = SIZ(d);
  mp_ptr dp = PTR(d);

  mp_size_t limb_index = bit_index / GMP_NUMB_BITS;
  mp_limb_t bit = (CNST_LIMB (1) << (bit_index % GMP_NUMB_BITS));

  /* Check for the most common case: Positive input, no realloc or
     normalization needed. */
  if (limb_index + 1 < dsize)
    dp[limb_index] ^= bit;

  /* Check for the hairy case. d < 0, and we have all zero bits to the
     right of the bit to toggle. */
  else if (limb_index < -dsize
           && (limb_index == 0 || mpn_zero_p (dp, limb_index))
           && (dp[limb_index] & (bit - 1)) == 0)
    {
      ASSERT (dsize < 0);
      dsize = -dsize;

      if (dp[limb_index] & bit)
        {
          /* We toggle the least significant one bit. Corresponds to
	     an add, with potential carry propagation, on the absolute
	     value. */
          dp = MPZ_REALLOC (d, 1 + dsize);
          dp[dsize] = 0;
          MPN_INCR_U (dp + limb_index, 1 + dsize - limb_index, bit);
          SIZ(d) = - dsize - dp[dsize];
        }
      else
        {
          /* We toggle a zero bit, subtract from the absolute value. */
          MPN_DECR_U (dp + limb_index, dsize - limb_index, bit);
          /* The absolute value shrinked by at most one bit. */
          dsize -= dp[dsize - 1] == 0;
          ASSERT (dsize > 0 && dp[dsize - 1] != 0);
          SIZ (d) = -dsize;
        }
    }
  else
    {
      /* Simple case: Toggle the bit in the absolute value. */
      dsize = ABS(dsize);
      if (limb_index < dsize)
        {
          mp_limb_t dlimb;
          dlimb = dp[limb_index] ^ bit;
          dp[limb_index] = dlimb;

          /* Can happen only when limb_index = dsize - 1. Avoid SIZ(d)
	     bookkeeping in the common case. */
          if (UNLIKELY ((dlimb == 0) + limb_index == dsize)) /* dsize == limb_index + 1 */
            {
              /* high limb became zero, must normalize */
              MPN_NORMALIZE (dp, limb_index);
              SIZ (d) = SIZ (d) >= 0 ? limb_index : -limb_index;
            }
        }
      else
        {
          /* Bit lies beyond the current limbs: grow and zero-fill.  */
          dp = MPZ_REALLOC (d, limb_index + 1);
          MPN_ZERO(dp + dsize, limb_index - dsize);
          dp[limb_index++] = bit;
          SIZ(d) = SIZ(d) >= 0 ? limb_index : -limb_index;
        }
    }
}
/* mpz_rootrem -- set ROOT (may be NULL) to the truncated NTH root of U
   and REM to the remainder U - ROOT^NTH.  Even roots of negatives raise
   SQRT_OF_NEGATIVE; NTH == 0 raises DIVIDE_BY_ZERO.  */
void
mpz_rootrem (mpz_ptr root, mpz_ptr rem, mpz_srcptr u, unsigned long int nth)
{
  mp_ptr rootp, up, remp;
  mp_size_t us, un, rootn, remn;
  TMP_DECL;

  us = SIZ(u);

  /* even roots of negatives provoke an exception */
  if (UNLIKELY (us < 0 && (nth & 1) == 0))
    SQRT_OF_NEGATIVE;

  /* root extraction interpreted as c^(1/nth) means a zeroth root should
     provoke a divide by zero, do this even if c==0 */
  if (UNLIKELY (nth == 0))
    DIVIDE_BY_ZERO;

  if (us == 0)
    {
      if (root != NULL)
        SIZ(root) = 0;
      SIZ(rem) = 0;
      return;
    }

  un = ABS (us);
  /* ceil(un/nth) limbs suffice for the root.  */
  rootn = (un - 1) / nth + 1;

  TMP_MARK;

  /* FIXME: Perhaps disallow root == NULL */
  /* Aliased outputs get scratch buffers; results copied back below.  */
  if (root != NULL && u != root)
    rootp = MPZ_REALLOC (root, rootn);
  else
    rootp = TMP_ALLOC_LIMBS (rootn);

  if (u != rem)
    remp = MPZ_REALLOC (rem, un);
  else
    remp = TMP_ALLOC_LIMBS (un);

  up = PTR(u);

  if (nth == 1)
    {
      /* First root is the number itself, remainder zero.  */
      MPN_COPY (rootp, up, un);
      remn = 0;
    }
  else
    {
      remn = mpn_rootrem (rootp, remp, up, un, (mp_limb_t) nth);
    }

  if (root != NULL)
    {
      SIZ(root) = us >= 0 ? rootn : -rootn;
      if (u == root)
        MPN_COPY (up, rootp, rootn);
    }

  if (u == rem)
    MPN_COPY (up, remp, remn);
  SIZ(rem) = us >= 0 ? remn : -remn;

  TMP_FREE;
}