// Benchmark the non-assembly count_leading_zeros implementation.
// The input is perturbed every iteration so the call cannot be hoisted out
// of the loop, and benchmark::DoNotOptimize keeps the otherwise-unused
// result alive — the original `(void)` cast let the optimizer delete the
// very call being measured.
static void BM_count_leading_zeros_noasm(benchmark::State& state)
{
    UINT32 cnt = 0x332533;
    while (state.KeepRunning())
    {
        benchmark::DoNotOptimize(count_leading_zeros(cnt));
        cnt++;
    }
}
// Count the leading zero bits of x, treated as a 32-bit quantity.
// When a platform-provided count_leading_zeros macro exists, defer to it;
// otherwise narrow down to the byte holding the highest set bit and walk a
// probe mask toward it. Returns 31 for x == 0 (the walk stops at bit 0).
static inline std::size_t countleadbits(std::size_t x)
{
#ifndef count_leading_zeros
    std::size_t probe;
    std::size_t zeros;

    // Pick the starting probe bit and the zeros already accounted for.
    if (x >= 0x1000000)    { probe = 0x80000000; zeros = 0; }
    else if (x >= 0x10000) { probe = 0x800000;   zeros = 8; }
    else if (x >= 0x100)   { probe = 0x8000;     zeros = 16; }
    else                   { probe = 0x80;       zeros = 24; }

    // Slide the probe down until it lands on a set bit (or bit 0).
    for (; zeros < 31 && (x & probe) == 0; ++zeros)
        probe >>= 1;

    return zeros;
#else
    return count_leading_zeros(x);
#endif
}
/* y = u / x rounded in direction rnd_mode; returns the usual MPFR ternary
   value (0 iff the result is exact).  Singular x (NaN/Inf/zero) is handled
   first; otherwise u is packed exactly into a one-limb temporary float and
   the work is delegated to mpfr_div. */
int
mpfr_ui_div (mpfr_ptr y, unsigned long int u, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
{
  mpfr_t uu;
  mp_limb_t up[1];
  unsigned long cnt;

  MPFR_LOG_FUNC
    (("u=%lu x[%Pu]=%.*Rg rnd=%d", u, mpfr_get_prec(x), mpfr_log_prec, x, rnd_mode),
     ("y[%Pu]=%.*Rg", mpfr_get_prec(y), mpfr_log_prec, y));

  if (MPFR_UNLIKELY(MPFR_IS_SINGULAR(x)))
    {
      if (MPFR_IS_NAN(x))
        {
          MPFR_SET_NAN(y);
          MPFR_RET_NAN;
        }
      else if (MPFR_IS_INF(x)) /* u/Inf = 0 */
        {
          MPFR_SET_ZERO(y);
          MPFR_SET_SAME_SIGN(y,x);
          MPFR_RET(0);
        }
      else /* u / 0 */
        {
          MPFR_ASSERTD(MPFR_IS_ZERO(x));
          if (u)
            {
              /* u > 0, so y = sign(x) * Inf */
              MPFR_SET_SAME_SIGN(y, x);
              MPFR_SET_INF(y);
              mpfr_set_divby0 (); /* raise the divide-by-zero flag */
              MPFR_RET(0);
            }
          else
            {
              /* 0 / 0 */
              MPFR_SET_NAN(y);
              MPFR_RET_NAN;
            }
        }
    }
  else if (MPFR_LIKELY(u != 0))
    {
      /* Build uu = u exactly: shift u so its most significant bit sits at
         the top of the limb, and compensate in the exponent. */
      MPFR_TMP_INIT1(up, uu, GMP_NUMB_BITS);
      MPFR_ASSERTN(u == (mp_limb_t) u); /* u must fit in one limb */
      count_leading_zeros(cnt, (mp_limb_t) u);
      up[0] = (mp_limb_t) u << cnt;
      MPFR_SET_EXP (uu, GMP_NUMB_BITS - cnt);
      return mpfr_div (y, uu, x, rnd_mode);
    }
  else /* u = 0, and x != 0 */
    {
      MPFR_SET_ZERO(y); /* if u=0, then set y to 0 */
      MPFR_SET_SAME_SIGN(y, x); /* u considered as +0: sign(+0/x) = sign(x) */
      MPFR_RET(0);
    }
}
/* Fill rop with a uniformly distributed random number in [0, 1).
   Returns 0 on success, or 1 (with rop set to NaN) if the normalized
   exponent falls outside the current exponent range. */
int
mpfr_urandomb (mpfr_ptr rop, gmp_randstate_t rstate)
{
  mp_ptr rp;
  mp_prec_t nbits;
  mp_size_t nlimbs;
  mp_size_t k; /* number of high zero limbs */
  mp_exp_t exp;
  int cnt;

  MPFR_CLEAR_FLAGS (rop);

  rp = MPFR_MANT (rop);
  nbits = MPFR_PREC (rop);
  nlimbs = MPFR_LIMB_SIZE (rop);
  MPFR_SET_POS (rop);

  /* Uniform non-normalized significand */
  _gmp_rand (rp, rstate, nlimbs * BITS_PER_MP_LIMB);

  /* If nbits isn't a multiple of BITS_PER_MP_LIMB, mask the low bits */
  cnt = nlimbs * BITS_PER_MP_LIMB - nbits;
  if (MPFR_LIKELY (cnt != 0))
    rp[0] &= ~MPFR_LIMB_MASK (cnt);

  /* Count the null significant limbs and remaining limbs */
  exp = 0;
  k = 0;
  while (nlimbs != 0 && rp[nlimbs - 1] == 0)
    {
      k ++;
      nlimbs --;
      exp -= BITS_PER_MP_LIMB; /* each zero limb lowers the exponent */
    }

  if (MPFR_LIKELY (nlimbs != 0)) /* otherwise value is zero */
    {
      count_leading_zeros (cnt, rp[nlimbs - 1]);
      /* Normalization */
      if (mpfr_set_exp (rop, exp - cnt))
        {
          /* If the exponent is not in the current exponent range, we
             choose to return a NaN as this is probably a user error.
             Indeed this can happen only if the exponent range has been
             reduced to a very small interval and/or the precision is huge
             (very unlikely). */
          MPFR_SET_NAN (rop);
          __gmpfr_flags |= MPFR_FLAGS_NAN; /* Can't use MPFR_RET_NAN */
          return 1;
        }
      if (cnt != 0)
        mpn_lshift (rp + k, rp, nlimbs, cnt); /* set the top bit of the
                                                 significand */
      if (k != 0)
        MPN_ZERO (rp, k); /* clear the low limbs vacated above */
    }
  else
    MPFR_SET_ZERO (rop);

  return 0;
}
// Decode a base58 string into raw bytes.  Leading '1' characters map to
// leading 0x00 bytes; an invalid character yields an empty chunk.
data_chunk decode_base58(std::string encoded)
{
    // Strip surrounding whitespace before interpreting the digits.
    boost::algorithm::trim(encoded);

    size_t leading_zeros = count_leading_zeros(encoded);

    // Upper bound on the decoded length: log(58)/log(256) ~ 0.733.
    const size_t data_size = encoded.size() * 733 / 1000 + 1;

    // Big-endian base-256 accumulator, initially all zero.
    data_chunk data(data_size);

    // Fold every digit after the leading zeros into the accumulator.
    for (size_t position = leading_zeros; position < encoded.size(); ++position)
    {
        const size_t carry = base58_chars.find(encoded[position]);
        if (carry == std::string::npos)
            return data_chunk();
        unpack_char(data, carry);
    }

    // Drop the zero prefix produced by the over-allocation above.
    auto first_nonzero = search_first_nonzero(data);

    // Emit one 0x00 per leading '1', then the significant payload bytes.
    data_chunk decoded;
    const size_t estimated_size = leading_zeros + (data.end() - first_nonzero);
    decoded.reserve(estimated_size);
    decoded.assign(leading_zeros, 0x00);
    decoded.insert(decoded.end(), first_nonzero, data.cend());
    return decoded;
}
/* Fill x with a random value having long strings of zeros and ones in its
   binary representation (stress-test values), using at most |size| limbs,
   and with a random exponent in (-exp, exp) when exp != 0.
   NOTE(review): if size == 0 then xn == 0 and count_leading_zeros below
   reads xp[-1]; presumably callers never pass size == 0 — confirm. */
void
mpfr_random2 (mpfr_ptr x, mp_size_t size, mp_exp_t exp)
{
  mp_size_t xn;
  unsigned long cnt;
  mp_ptr xp = MPFR_MANT(x), yp[1];
  mp_size_t prec = (MPFR_PREC(x) - 1)/BITS_PER_MP_LIMB;

  MPFR_CLEAR_FLAGS(x);
  xn = ABS (size);
  if (xn != 0)
    {
      if (xn > prec + 1)
        xn = prec + 1; /* never generate more limbs than x can hold */
      mpn_random2 (xp, xn);
    }
  if (exp != 0)
    {
      /* use mpn_random instead of random since that function is not
         available on all platforms (for example HPUX, DEC OSF, ...) */
      mpn_random ((mp_limb_t*) yp, 1);
      exp = (mp_exp_t) yp[0] % (2 * exp) - exp; /* uniform in (-exp, exp) */
    }
  /* Normalize: shift so the top bit of the high limb is set. */
  count_leading_zeros(cnt, xp[xn - 1]);
  if (cnt)
    mpn_lshift(xp, xp, xn, cnt);
  MPFR_EXP(x) = exp-cnt;
  /* cnt is the number of non significant bits in the low limb */
  cnt = xn*BITS_PER_MP_LIMB - prec;
  xp[0] &= ~((MP_LIMB_T_ONE << cnt) - MP_LIMB_T_ONE);
}
// Count the leading zero bits of a 32-bit unsigned value.
// Falls back to a portable probe-mask walk when no platform
// count_leading_zeros macro is available; returns 31 for x == 0.
static inline unsigned countleadbits(unsigned x)
{
#ifndef count_leading_zeros
    unsigned probe = 0x80000000;
    unsigned zeros = 0;

    // Coarse step: jump the probe into the byte holding the top set bit.
    if (x < 0x100)          { probe = 0x80;     zeros = 24; }
    else if (x < 0x10000)   { probe = 0x8000;   zeros = 16; }
    else if (x < 0x1000000) { probe = 0x800000; zeros = 8; }

    // Fine step: move bit by bit until the probe meets a set bit.
    while (zeros < 31 && (x & probe) == 0)
    {
        probe >>= 1;
        zeros++;
    }
    return zeros;
#else
    return count_leading_zeros(x);
#endif
}
std::string encode_base58(const data_chunk& unencoded) { size_t leading_zeros = count_leading_zeros(unencoded); // size = log(256) / log(58), rounded up. const size_t number_nonzero = unencoded.size() - leading_zeros; const size_t indexes_size = number_nonzero * 138 / 100 + 1; // Allocate enough space in big-endian base58 representation. data_chunk indexes(indexes_size); // Process the bytes. for (auto it = unencoded.begin() + leading_zeros; it != unencoded.end(); ++it) { pack_value(indexes, *it); } // Skip leading zeroes in base58 result. auto first_nonzero = search_first_nonzero(indexes); // Translate the result into a string. std::string encoded; const size_t estimated_size = leading_zeros + (indexes.end() - first_nonzero); encoded.reserve(estimated_size); encoded.assign(leading_zeros, '1'); // Set actual main bytes. for (auto it = first_nonzero; it != indexes.end(); ++it) { const size_t index = *it; encoded += base58_chars[index]; } return encoded; }
/* Return d and set *exp2 so that src = d * 2^(*exp2) with 0.5 <= |d| < 1.
   Only the LIMBS_PER_DOUBLE most significant limbs contribute to d. */
double
mpz_get_d_2exp (signed long int *exp2, mpz_srcptr src)
{
  double res;
  mp_size_t size, i, n_limbs_to_use;
  int negative;
  mp_ptr qp;
  int cnt;

  size = SIZ(src);
  if (size == 0)
    {
      *exp2 = 0;
      return 0.0;
    }

  negative = size < 0; /* sign is carried by the size field */
  size = ABS (size);
  qp = PTR(src);

  n_limbs_to_use = MIN (LIMBS_PER_DOUBLE, size);
  qp += size - n_limbs_to_use; /* point at the lowest limb we will use */
  /* Accumulate from least to most significant of the chosen limbs,
     dividing by the limb base each step so res ends up in [0, 1). */
  res = qp[0] / MP_BASE_AS_DOUBLE;
  for (i = 1; i < n_limbs_to_use; i++)
    res = (res + qp[i]) / MP_BASE_AS_DOUBLE;

  count_leading_zeros (cnt, qp[n_limbs_to_use - 1]);
  *exp2 = size * BITS_PER_MP_LIMB - cnt; /* exponent of the top set bit */
  /* Scale up by 2^cnt so that 0.5 <= res < 1. */
  res = res * ((mp_limb_t) 1 << cnt);

  return negative ? -res : res;
}
/* Return d and set *exp2 so that src = d * 2^(*exp2) with 0.5 <= |d| < 1
   (0.0 with *exp2 = 0 when src is zero). */
double
mpf_get_d_2exp (signed long *exp2, mpf_srcptr src)
{
  mp_size_t size, abs_size;
  mp_srcptr ptr;
  int cnt;
  signed long exp;

  size = SIZ(src);
  if (UNLIKELY (size == 0))
    {
      *exp2 = 0;
      return 0.0;
    }

  ptr = PTR(src);
  abs_size = ABS (size);
  count_leading_zeros (cnt, ptr[abs_size - 1]);
  cnt -= GMP_NAIL_BITS; /* only numb bits are significant in a nails build */

  exp = EXP(src) * GMP_NUMB_BITS - cnt; /* exponent of the top set bit */
  *exp2 = exp;

  /* Pass mpn_get_d the (negative) scaling that normalizes the fraction
     into [0.5, 1); the sign of `size` carries the sign of the result. */
  return mpn_get_d (ptr, abs_size, size,
                    (long) - (abs_size * GMP_NUMB_BITS - cnt));
}
/* y = x - u rounded in direction rnd_mode; returns the MPFR ternary value.
   u is packed exactly into a one-limb temporary float and the subtraction
   is delegated to mpfr_sub. */
int
mpfr_sub_ui (mpfr_ptr y, mpfr_srcptr x, unsigned long int u, mp_rnd_t rnd_mode)
{
  if (MPFR_LIKELY (u != 0)) /* if u=0, do nothing */
    {
      mpfr_t uu;
      mp_limb_t up[1];
      unsigned long cnt;
      int inex;

      MPFR_SAVE_EXPO_DECL (expo);

      /* Build uu = u exactly: normalize u to the top of the limb and
         record the shift in the exponent. */
      MPFR_TMP_INIT1 (up, uu, BITS_PER_MP_LIMB);
      MPFR_ASSERTN (u == (mp_limb_t) u); /* u must fit in one limb */
      count_leading_zeros (cnt, (mp_limb_t) u);
      *up = (mp_limb_t) u << cnt;

      /* Optimization note: Exponent save/restore operations may be
         removed if mpfr_sub works even when uu is out-of-range. */
      MPFR_SAVE_EXPO_MARK (expo);
      MPFR_SET_EXP (uu, BITS_PER_MP_LIMB - cnt);
      inex = mpfr_sub (y, x, uu, rnd_mode);
      MPFR_SAVE_EXPO_FREE (expo);
      return mpfr_check_range (y, inex, rnd_mode);
    }
  else
    return mpfr_set (y, x, rnd_mode);
}
/* Return the number of digits required to represent {xp, xsize} in the
   given base.  Exact for power-of-2 bases; otherwise an upper bound
   derived from chars_per_bit_exactly.  X == 0 needs one digit. */
size_t
mpn_sizeinbase (mp_srcptr xp, mp_size_t xsize, int base)
{
  int lb_base, cnt;
  mp_size_t totbits;

  ASSERT (xsize >= 0);
  ASSERT (base >= 2);
  ASSERT (base < numberof (__mp_bases));

  /* Special case for X == 0. */
  if (xsize == 0)
    return 1;

  /* Calculate the total number of significant bits of X. */
  count_leading_zeros (cnt, xp[xsize-1]);
  totbits = xsize * BITS_PER_MP_LIMB - cnt;

  if (POW2_P (base))
    {
      /* Special case for powers of 2, giving exact result. */
      lb_base = __mp_bases[base].big_base; /* holds log2(base) here */
      return (totbits + lb_base - 1) / lb_base; /* ceil division */
    }
  else
    return (size_t) (totbits * __mp_bases[base].chars_per_bit_exactly) + 1;
}
/* y = x + u rounded in direction rnd_mode; returns the MPFR ternary value.
   u is packed exactly into a one-limb temporary float and the addition
   is delegated to mpfr_add. */
int
mpfr_add_ui (mpfr_ptr y, mpfr_srcptr x, unsigned long int u, mp_rnd_t rnd_mode)
{
  if (u) /* if u=0, do nothing */
    {
      mpfr_t uu;
      mp_limb_t up[1];
      unsigned long cnt;
      int inex;

      /* Build uu = u exactly: normalize u to the top of the limb and
         record the shift in the exponent. */
      MPFR_INIT1(up, uu, BITS_PER_MP_LIMB, 1);
      count_leading_zeros(cnt, (mp_limb_t) u);
      *up = (mp_limb_t) u << cnt;
      MPFR_EXP(uu) = BITS_PER_MP_LIMB - cnt;

      /* Optimization note: Exponent operations may be removed
         if mpfr_add works even when uu is out-of-range. */
      mpfr_save_emin_emax();
      inex = mpfr_add(y, x, uu, rnd_mode);
      MPFR_RESTORE_RET(inex, y, rnd_mode);
    }
  else
    return mpfr_set (y, x, rnd_mode);
}
/*
 * Set f to z, choosing the smallest precision for f
 * so that z = f*(2^BPML)*zs*2^(RetVal)
 */
static int
set_z (mpfr_ptr f, mpz_srcptr z, mp_size_t *zs)
{
  mp_limb_t *p;
  mp_size_t s;
  int c;
  mp_prec_t pf;

  MPFR_ASSERTD (mpz_sgn (z) != 0); /* caller guarantees z != 0 */

  /* Remove useless ending 0 */
  /* Skip low zero limbs of z; s tracks the remaining limb count and *zs
     keeps the original (absolute) size for the caller's scaling. */
  for (p = PTR (z), s = *zs = ABS (SIZ (z)) ; *p == 0; p++, s--)
    MPFR_ASSERTD (s >= 0);

  /* Get working precision */
  /* Precision is exactly the bit length of the remaining limbs,
     clamped up to the library minimum. */
  count_leading_zeros (c, p[s-1]);
  pf = s * BITS_PER_MP_LIMB - c;
  if (pf < MPFR_PREC_MIN)
    pf = MPFR_PREC_MIN;
  mpfr_init2 (f, pf);

  /* Copy Mantissa */
  if (MPFR_LIKELY (c))
    mpn_lshift (MPFR_MANT (f), p, s, c); /* normalize top bit */
  else
    MPN_COPY (MPFR_MANT (f), p, s);

  MPFR_SET_SIGN (f, mpz_sgn (z));
  MPFR_SET_EXP (f, 0);

  return -c; /* caller compensates the normalization shift */
}
/* y = x + u rounded in direction rnd_mode; returns the MPFR ternary value.
   u is packed exactly into a one-limb temporary float and the addition
   is delegated to mpfr_add. */
int
mpfr_add_ui (mpfr_ptr y, mpfr_srcptr x, unsigned long int u, mpfr_rnd_t rnd_mode)
{
  MPFR_LOG_FUNC
    (("x[%Pu]=%.*Rg u=%lu rnd=%d",
      mpfr_get_prec(x), mpfr_log_prec, x, u, rnd_mode),
     ("y[%Pu]=%.*Rg", mpfr_get_prec (y), mpfr_log_prec, y));

  if (MPFR_LIKELY(u != 0) )  /* if u=0, do nothing */
    {
      mpfr_t uu;
      mp_limb_t up[1];
      unsigned long cnt;
      int inex;

      MPFR_SAVE_EXPO_DECL (expo);

      /* Build uu = u exactly: normalize u to the top of the limb and
         record the shift in the exponent. */
      MPFR_TMP_INIT1 (up, uu, GMP_NUMB_BITS);
      MPFR_ASSERTD (u == (mp_limb_t) u); /* u must fit in one limb */
      count_leading_zeros(cnt, (mp_limb_t) u);
      up[0] = (mp_limb_t) u << cnt;

      /* Optimization note: Exponent save/restore operations may be
         removed if mpfr_add works even when uu is out-of-range. */
      MPFR_SAVE_EXPO_MARK (expo);
      MPFR_SET_EXP (uu, GMP_NUMB_BITS - cnt);
      inex = mpfr_add(y, x, uu, rnd_mode);
      MPFR_SAVE_EXPO_FREE (expo);
      return mpfr_check_range(y, inex, rnd_mode);
    }
  else
    /* (unsigned long) 0 is assumed to be a real 0 (unsigned) */
    return mpfr_set (y, x, rnd_mode);
}
/* r = sqrt(u) rounded in direction rnd_mode; returns the MPFR ternary
   value.  u is packed exactly into a one-limb temporary float and the
   work is delegated to mpfr_sqrt; sqrt(0) = +0. */
int
mpfr_sqrt_ui (mpfr_ptr r, unsigned long u, mpfr_rnd_t rnd_mode)
{
  if (u)
    {
      mpfr_t uu;
      mp_limb_t up[1];
      unsigned long cnt;
      int inex;

      MPFR_SAVE_EXPO_DECL (expo);

      /* Build uu = u exactly: normalize u to the top of the limb and
         record the shift in the exponent. */
      MPFR_TMP_INIT1 (up, uu, GMP_NUMB_BITS);
      MPFR_ASSERTN (u == (mp_limb_t) u); /* u must fit in one limb */
      count_leading_zeros (cnt, (mp_limb_t) u);
      *up = (mp_limb_t) u << cnt;

      MPFR_SAVE_EXPO_MARK (expo);
      MPFR_SET_EXP (uu, GMP_NUMB_BITS - cnt);
      inex = mpfr_sqrt(r, uu, rnd_mode);
      MPFR_SAVE_EXPO_FREE (expo);
      return mpfr_check_range(r, inex, rnd_mode);
    }
  else /* sqrt(0) = 0 */
    {
      MPFR_SET_ZERO(r);
      MPFR_SET_POS(r);
      MPFR_RET(0);
    }
}
/* set f to the integer z multiplied by 2^e */ int mpfr_set_z_2exp (mpfr_ptr f, mpz_srcptr z, mpfr_exp_t e, mpfr_rnd_t rnd_mode) { mp_size_t fn, zn, dif, en; int k, sign_z, inex; mp_limb_t *fp, *zp; mpfr_exp_t exp; sign_z = mpz_sgn (z); if (MPFR_UNLIKELY (sign_z == 0)) /* ignore the exponent for 0 */ { MPFR_SET_ZERO(f); MPFR_SET_POS(f); MPFR_RET(0); } MPFR_ASSERTD (sign_z == MPFR_SIGN_POS || sign_z == MPFR_SIGN_NEG); zn = ABS(SIZ(z)); /* limb size of z */ /* compute en = floor(e/GMP_NUMB_BITS) */ en = (e >= 0) ? e / GMP_NUMB_BITS : (e + 1) / GMP_NUMB_BITS - 1; MPFR_ASSERTD (zn >= 1); if (MPFR_UNLIKELY (zn + en > MPFR_EMAX_MAX / GMP_NUMB_BITS + 1)) return mpfr_overflow (f, rnd_mode, sign_z); /* because zn + en >= MPFR_EMAX_MAX / GMP_NUMB_BITS + 2 implies (zn + en) * GMP_NUMB_BITS >= MPFR_EMAX_MAX + GMP_NUMB_BITS + 1 and exp = zn * GMP_NUMB_BITS + e - k >= (zn + en) * GMP_NUMB_BITS - k > MPFR_EMAX_MAX */ fp = MPFR_MANT (f); fn = MPFR_LIMB_SIZE (f); dif = zn - fn; zp = PTR(z); count_leading_zeros (k, zp[zn-1]); /* now zn + en <= MPFR_EMAX_MAX / GMP_NUMB_BITS + 1 thus (zn + en) * GMP_NUMB_BITS <= MPFR_EMAX_MAX + GMP_NUMB_BITS and exp = zn * GMP_NUMB_BITS + e - k <= (zn + en) * GMP_NUMB_BITS - k + GMP_NUMB_BITS - 1 <= MPFR_EMAX_MAX + 2 * GMP_NUMB_BITS - 1 */ exp = (mpfr_prec_t) zn * GMP_NUMB_BITS + e - k; /* The exponent will be exp or exp + 1 (due to rounding) */ if (MPFR_UNLIKELY (exp > __gmpfr_emax)) return mpfr_overflow (f, rnd_mode, sign_z); if (MPFR_UNLIKELY (exp + 1 < __gmpfr_emin)) return mpfr_underflow (f, rnd_mode == MPFR_RNDN ? MPFR_RNDZ : rnd_mode, sign_z); if (MPFR_LIKELY (dif >= 0)) { mp_limb_t rb, sb, ulp; int sh; /* number has to be truncated */ if (MPFR_LIKELY (k != 0)) { mpn_lshift (fp, &zp[dif], fn, k); if (MPFR_LIKELY (dif > 0)) fp[0] |= zp[dif - 1] >> (GMP_NUMB_BITS - k); }
/* Debug-instrumented schoolbook division: compute quotient {qp} and
   remainder {rp} of {np, nn} / {dp, dn}, printing intermediate state.
   NOTE(review): the printf loops use arithmetic on void* and read 4 bytes
   at a time — a GNU extension that also assumes 32-bit limbs; confirm the
   target before relying on the dumps.
   NOTE(review): `n2p[nn] = cy;` is repeated after `nn += adjust`, so the
   second store lands one limb higher than the first — presumably
   intentional for the adjusted size, but verify against the reference
   mpn_tdiv_qr. */
void
my__gmpn_tdiv_qr (mp_ptr qp, mp_ptr rp, mp_size_t qxn,
                  mp_srcptr np, mp_size_t nn, mp_srcptr dp, mp_size_t dn)
{
  ASSERT_ALWAYS (qxn == 0);
  ASSERT (nn >= 0);
  ASSERT (dn >= 0);
  ASSERT (dn == 0 || dp[dn - 1] != 0);
  ASSERT (! MPN_OVERLAP_P (qp, nn - dn + 1 + qxn, np, nn));
  ASSERT (! MPN_OVERLAP_P (qp, nn - dn + 1 + qxn, dp, dn));

  int adjust;
  gmp_pi1_t dinv;
  TMP_DECL;
  TMP_MARK;

  /* conservative tests for quotient size */
  adjust = np[nn - 1] >= dp[dn - 1];

  mp_ptr n2p, d2p;
  mp_limb_t cy;
  int cnt;

  qp[nn - dn] = 0; /* zero high quotient limb */
  /* Normalize the divisor so its top bit is set. */
  count_leading_zeros (cnt, dp[dn - 1]);
  cnt -= GMP_NAIL_BITS;
  d2p = TMP_ALLOC_LIMBS (dn);
  mpn_lshift (d2p, dp, dn, cnt);

  for (int i=0; i<dn; i+=1) {
    printf("d2p %08x\n", *( (int*) (((void*)(d2p))+(i*4))));
  }

  /* Shift the dividend by the same amount, keeping the carried-out bits. */
  n2p = TMP_ALLOC_LIMBS (nn + 1);
  cy = mpn_lshift (n2p, np, nn, cnt);

  for (int i=0; i<nn; i+=1) {
    printf("n2p %08x\n", *( (int*) (((void*)(n2p))+(i*4))));
  }

  n2p[nn] = cy;
  nn += adjust;

  printf("d2p[dn-1] = %08lx\nd2p[dn-2] = %08lx\n", d2p[dn-1], d2p[dn-2]);
  /* Precompute the divisor inverse used by the schoolbook loop. */
  invert_pi1 (dinv, d2p[dn - 1], d2p[dn - 2]);
  printf("dinv %08lx\n", dinv.inv32);
  my_mpn_sbpi1_div_qr (qp, n2p, nn, d2p, dn, dinv.inv32);

  for (int i=0; i<nn; i+=1) {
    printf("inside qp %08x\n", *( (int*) (((void*)(qp))+(i*4))));
  }

  n2p[nn] = cy;
  /* Denormalize the remainder back by the normalization shift. */
  mpn_rshift (rp, n2p, dn, cnt);
  TMP_FREE;
  return;
}
/* Fill rop with a uniformly distributed random number in [0, 1).
   Exactly PREC(rop) random bits are consumed so the generator state is
   independent of the machine word size.  Returns 0 on success, or 1
   (with rop set to NaN) if the normalized exponent is out of range. */
int
mpfr_urandomb (mpfr_ptr rop, gmp_randstate_t rstate)
{
  mpfr_limb_ptr rp;
  mpfr_prec_t nbits;
  mp_size_t nlimbs;
  mp_size_t k; /* number of high zero limbs */
  mpfr_exp_t exp;
  int cnt;

  rp = MPFR_MANT (rop);
  nbits = MPFR_PREC (rop);
  nlimbs = MPFR_LIMB_SIZE (rop);
  MPFR_SET_POS (rop);
  cnt = nlimbs * GMP_NUMB_BITS - nbits; /* unused low bits in the last limb */

  /* Uniform non-normalized significand */
  /* generate exactly nbits so that the random generator stays in the
     same state, independent of the machine word size GMP_NUMB_BITS */
  mpfr_rand_raw (rp, rstate, nbits);
  if (MPFR_LIKELY (cnt != 0)) /* this will put the low bits to zero */
    mpn_lshift (rp, rp, nlimbs, cnt);

  /* Count the null significant limbs and remaining limbs */
  exp = 0;
  k = 0;
  while (nlimbs != 0 && rp[nlimbs - 1] == 0)
    {
      k ++;
      nlimbs --;
      exp -= GMP_NUMB_BITS; /* each zero limb lowers the exponent */
    }

  if (MPFR_LIKELY (nlimbs != 0)) /* otherwise value is zero */
    {
      count_leading_zeros (cnt, rp[nlimbs - 1]);
      /* Normalization */
      if (mpfr_set_exp (rop, exp - cnt))
        {
          /* If the exponent is not in the current exponent range, we
             choose to return a NaN as this is probably a user error.
             Indeed this can happen only if the exponent range has been
             reduced to a very small interval and/or the precision is huge
             (very unlikely). */
          MPFR_SET_NAN (rop);
          __gmpfr_flags |= MPFR_FLAGS_NAN; /* Can't use MPFR_RET_NAN */
          return 1;
        }
      if (cnt != 0)
        mpn_lshift (rp + k, rp, nlimbs, cnt); /* set the top significand bit */
      if (k != 0)
        MPN_ZERO (rp, k); /* clear the low limbs vacated above */
    }
  else
    MPFR_SET_ZERO (rop);

  return 0;
}
/* 128-bit unsigned divide with remainder for the SPU (Cell) target,
   implemented as a classic shift-and-subtract (restoring) division using
   si_* SIMD intrinsics.  Returns num / den; if rp is non-null, stores
   num % den through it.
   NOTE(review): the si_bg/si_bgx/si_sf/si_sfx ladder presumably ripples a
   borrow across the four 32-bit slots to form a full 128-bit subtract —
   confirm against the SPU ISA documentation. */
UTItype
__udivmodti4 (UTItype num, UTItype den, UTItype * rp)
{
  /* Align den's top bit with num's top bit; `bit` tracks the quotient
     bit being decided at each step. */
  qword shift =
    si_from_uint (count_leading_zeros (den) - count_leading_zeros (num));
  qword n0 = si_from_UTItype (num);
  qword d0 = si_from_UTItype (den);
  qword bit = si_andi (si_fsmbi (1), 1);
  qword r0 = si_il (0);
  qword m1 = si_fsmbi (0x000f);
  qword mask, r1, n1;

  d0 = si_shlqbybi (si_shlqbi (d0, shift), shift);
  bit = si_shlqbybi (si_shlqbi (bit, shift), shift);

  do
    {
      r1 = si_or (r0, bit);

      /* n1 = n0 - d0 in TImode */
      n1 = si_bg (d0, n0);
      n1 = si_shlqbyi (n1, 4);
      n1 = si_sf (m1, n1);
      n1 = si_bgx (d0, n0, n1);
      n1 = si_shlqbyi (n1, 4);
      n1 = si_sf (m1, n1);
      n1 = si_bgx (d0, n0, n1);
      n1 = si_shlqbyi (n1, 4);
      n1 = si_sf (m1, n1);
      n1 = si_sfx (d0, n0, n1);

      /* Keep the subtraction only when it did not underflow. */
      mask = si_fsm (si_cgti (n1, -1));
      r0 = si_selb (r0, r1, mask);
      n0 = si_selb (n0, n1, mask);

      /* Move to the next (lower) quotient bit. */
      bit = si_rotqmbii (bit, -1);
      d0 = si_rotqmbii (d0, -1);
    }
  while (si_to_uint (si_orx (bit)));
  if (rp)
    *rp = si_to_UTItype (n0);
  return si_to_UTItype (r0);
}
/* Set x to the signed long i, rounded in direction rnd_mode.
   Returns the MPFR ternary value (0 iff exact). */
int
mpfr_set_si (mpfr_ptr x, long i, mp_rnd_t rnd_mode)
{
  int inex;
  mp_size_t xn;
  unsigned int cnt, nbits;
  mp_limb_t ai, *xp;

  MPFR_CLEAR_FLAGS(x);
  if (i == 0)
    {
      MPFR_SET_ZERO(x);
      MPFR_SET_POS(x);
      MPFR_RET(0);
    }

  xn = (MPFR_PREC(x)-1)/BITS_PER_MP_LIMB; /* index of the highest limb */
  ai = SAFE_ABS(long, i); /* |i|, safe even for the most negative long */
  count_leading_zeros(cnt, ai);

  /* Store |i| normalized into the top limb. */
  xp = MPFR_MANT(x);
  xp[xn] = ai << cnt;
  /* don't forget to put zero in lower limbs */
  MPN_ZERO(xp, xn);

  /* set sign */
  if ((i < 0) ^ (MPFR_SIGN(x) < 0))
    MPFR_CHANGE_SIGN(x);

  MPFR_EXP(x) = nbits = BITS_PER_MP_LIMB - cnt; /* bit length of |i| */
  inex = mpfr_check_range(x, rnd_mode);
  if (inex)
    return inex; /* underflow or overflow */

  /* round if MPFR_PREC(x) smaller than length of i */
  if (MPFR_PREC(x) < nbits)
    {
      int carry;
      carry = mpfr_round_raw(xp+xn, xp+xn, nbits, (i < 0), MPFR_PREC(x),
                             rnd_mode, &inex);
      if (carry)
        {
          /* Rounding propagated past the top bit: bump the exponent
             and reset the significand to 0.5 (in limb form). */
          mp_exp_t exp = MPFR_EXP(x);
          if (exp == __mpfr_emax)
            return mpfr_set_overflow(x, rnd_mode, (i < 0 ? -1 : 1));
          MPFR_EXP(x)++;
          xp[xn] = GMP_LIMB_HIGHBIT;
        }
    }

  MPFR_RET(inex);
}
/* y = u / x rounded in direction rnd_mode; returns the MPFR ternary value.
   Singular x (NaN/Inf/zero) is handled first; otherwise u is packed
   exactly into a one-limb temporary float and the work is delegated to
   mpfr_div. */
int
mpfr_ui_div (mpfr_ptr y, unsigned long int u, mpfr_srcptr x, mp_rnd_t rnd_mode)
{
  mpfr_t uu;
  mp_limb_t up[1];
  unsigned long cnt;

  if (MPFR_UNLIKELY(MPFR_IS_SINGULAR(x)))
    {
      if (MPFR_IS_NAN(x))
        {
          MPFR_SET_NAN(y);
          MPFR_RET_NAN;
        }
      else if (MPFR_IS_INF(x)) /* u/Inf = 0 */
        {
          MPFR_SET_ZERO(y);
          MPFR_SET_SAME_SIGN(y,x);
          MPFR_RET(0);
        }
      else /* u / 0 */
        {
          MPFR_ASSERTD(MPFR_IS_ZERO(x));
          if (u)
            {
              /* u > 0, so y = sign(x) * Inf */
              MPFR_SET_SAME_SIGN(y, x);
              MPFR_SET_INF(y);
              MPFR_RET(0);
            }
          else
            {
              /* 0 / 0 */
              MPFR_SET_NAN(y);
              MPFR_RET_NAN;
            }
        }
    }
  else if (MPFR_LIKELY(u != 0))
    {
      /* Build uu = u exactly: normalize u to the top of the limb and
         record the shift in the exponent. */
      MPFR_TMP_INIT1(up, uu, BITS_PER_MP_LIMB);
      MPFR_ASSERTN(u == (mp_limb_t) u); /* u must fit in one limb */
      count_leading_zeros(cnt, (mp_limb_t) u);
      up[0] = (mp_limb_t) u << cnt;
      MPFR_SET_EXP (uu, BITS_PER_MP_LIMB - cnt);
      return mpfr_div (y, uu, x, rnd_mode);
    }
  else /* u = 0, and x != 0 */
    {
      MPFR_SET_ZERO(y); /* if u=0, then set y to 0 */
      MPFR_SET_SAME_SIGN(y, x); /* u considered as +0: sign(+0/x) = sign(x) */
      MPFR_RET(0);
    }
}
/* Perform one reduction step of the half-gcd algorithm on {ap, n} and
   {bp, n}, updating the transformation matrix M.  First tries a cheap
   2x2 step (mpn_hgcd2) on the two most significant limbs — extracted at a
   common shift when the high limbs are not normalized — and falls back to
   a subtraction/division step when that fails.  Returns the reduced size. */
mp_size_t
mpn_hgcd_step (mp_size_t n, mp_ptr ap, mp_ptr bp, mp_size_t s,
               struct hgcd_matrix *M, mp_ptr tp)
{
  struct hgcd_matrix1 M1;
  mp_limb_t mask;
  mp_limb_t ah, al, bh, bl;

  ASSERT (n > s);

  mask = ap[n-1] | bp[n-1];
  ASSERT (mask > 0);

  if (n == s + 1)
    {
      if (mask < 4)
        goto subtract; /* too few significant bits for an hgcd2 step */

      ah = ap[n-1]; al = ap[n-2];
      bh = bp[n-1]; bl = bp[n-2];
    }
  else if (mask & GMP_NUMB_HIGHBIT)
    {
      /* High limbs already normalized: use them directly. */
      ah = ap[n-1]; al = ap[n-2];
      bh = bp[n-1]; bl = bp[n-2];
    }
  else
    {
      /* Shift both operands left by the shared leading-zero count so the
         extracted double-limb values are normalized. */
      int shift;

      count_leading_zeros (shift, mask);
      ah = MPN_EXTRACT_NUMB (shift, ap[n-1], ap[n-2]);
      al = MPN_EXTRACT_NUMB (shift, ap[n-2], ap[n-3]);
      bh = MPN_EXTRACT_NUMB (shift, bp[n-1], bp[n-2]);
      bl = MPN_EXTRACT_NUMB (shift, bp[n-2], bp[n-3]);
    }

  /* Try an mpn_hgcd2 step */
  if (mpn_hgcd2 (ah, al, bh, bl, &M1))
    {
      /* Multiply M <- M * M1 */
      mpn_hgcd_matrix_mul_1 (M, &M1, tp);

      /* Can't swap inputs, so we need to copy. */
      MPN_COPY (tp, ap, n);

      /* Multiply M1^{-1} (a;b) */
      return mpn_matrix22_mul1_inverse_vector (&M1, ap, tp, bp, n);
    }

 subtract:
  return mpn_gcd_subdiv_step (ap, bp, n, s, hgcd_hook, M, tp);
}
/* set f to the integer z */ int mpfr_set_z (mpfr_ptr f, mpz_srcptr z, mp_rnd_t rnd_mode) { mp_size_t fn, zn, dif; int k, sign_z, inex; mp_limb_t *fp, *zp; mp_exp_t exp; MPFR_CLEAR_FLAGS (f); /* z cannot be NaN nor Inf */ sign_z = mpz_cmp_ui (z, 0); if (sign_z == 0) { MPFR_SET_ZERO(f); MPFR_SET_POS(f); MPFR_RET(0); } fp = MPFR_MANT(f); fn = 1 + (MPFR_PREC(f) - 1) / BITS_PER_MP_LIMB; zn = ABS(SIZ(z)); dif = zn - fn; zp = PTR(z); count_leading_zeros(k, zp[zn-1]); exp = (mp_prec_t) zn * BITS_PER_MP_LIMB - k; /* The exponent will be exp or exp + 1 (due to rounding) */ if (exp > __mpfr_emax) return mpfr_set_overflow(f, rnd_mode, sign_z); if (exp + 1 < __mpfr_emin) return mpfr_set_underflow(f, rnd_mode, sign_z); if (MPFR_SIGN(f) * sign_z < 0) MPFR_CHANGE_SIGN(f); if (dif >= 0) { mp_limb_t cc; int sh; /* number has to be truncated */ if (k != 0) { mpn_lshift(fp, zp + dif, fn, k); if (dif != 0) fp[0] += zp[dif - 1] >> (BITS_PER_MP_LIMB - k); }
/* Set x to i * 2^e, rounded in direction rnd_mode.
   Returns the MPFR ternary value (0 iff exact). */
int
mpfr_set_si_2exp (mpfr_ptr x, long i, mp_exp_t e, mp_rnd_t rnd_mode)
{
  if (i == 0)
    {
      MPFR_SET_ZERO (x);
      MPFR_SET_POS (x);
      MPFR_RET (0);
    }
  else
    {
      mp_size_t xn;
      unsigned int cnt, nbits;
      mp_limb_t ai, *xp;
      int inex = 0;

      /* FIXME: support int limbs (e.g. 16-bit limbs on 16-bit proc) */
      ai = SAFE_ABS (unsigned long, i); /* |i|, safe for LONG_MIN */
      MPFR_ASSERTN (SAFE_ABS (unsigned long, i) == ai);

      /* Position of the highest limb */
      xn = (MPFR_PREC (x) - 1) / BITS_PER_MP_LIMB;
      count_leading_zeros (cnt, ai);
      MPFR_ASSERTD (cnt < BITS_PER_MP_LIMB);  /* OK since i != 0 */

      /* Store |i| normalized into the top limb. */
      xp = MPFR_MANT(x);
      xp[xn] = ai << cnt;
      /* Zero the xn lower limbs. */
      MPN_ZERO(xp, xn);
      MPFR_SET_SIGN (x, i < 0 ? MPFR_SIGN_NEG : MPFR_SIGN_POS);

      nbits = BITS_PER_MP_LIMB - cnt; /* bit length of |i| */
      e += nbits;  /* exponent _before_ the rounding */

      /* round if MPFR_PREC(x) smaller than length of i */
      if (MPFR_UNLIKELY (MPFR_PREC (x) < nbits) &&
          MPFR_UNLIKELY (mpfr_round_raw (xp + xn, xp + xn, nbits, i < 0,
                                         MPFR_PREC (x), rnd_mode, &inex)))
        {
          /* Rounding carried past the top bit: bump the exponent and
             reset the significand to 0.5 (in limb form). */
          e++;
          xp[xn] = MPFR_LIMB_HIGHBIT;
        }

      MPFR_CLEAR_FLAGS (x);
      MPFR_EXP (x) = e;
      return mpfr_check_range (x, inex, rnd_mode);
    }
}
// Flush the bits currently held in `base` and renormalize the coding
// interval.  Returns the emitted bits and how many of them are valid.
std::pair<Interval::uint64, int> CodeInterval::normalize() noexcept
{
    const uint64 bits = base;
    if(range == 0) {
        // Degenerate interval: emit all 64 bits and reset to full range.
        base = 0;
        range = max;
        return {bits, 64};
    } else {
        // Shift out the leading zeros of `range`, padding it with ones so
        // it stays maximal.  The one must be widened to uint64 before the
        // shift: `1UL << n` is undefined for n up to 63 on platforms where
        // unsigned long is 32 bits.
        const int n = count_leading_zeros(range);
        base <<= n;
        range <<= n;
        range |= (static_cast<uint64>(1) << n) - static_cast<uint64>(1);
        assert(range >= msb);
        return {bits, n};
    }
}
/* Return the number of leading zero *bytes* of the MPI value a: each
   all-zero high limb contributes sizeof(mpi_limb_t) bytes, and the most
   significant non-zero limb contributes its leading zero bits rounded
   down to whole bytes. */
static int count_lzeros(MPI a)
{
	int zero_bytes = 0;
	int idx;

	for (idx = a->nlimbs - 1; idx >= 0; idx--) {
		mpi_limb_t limb = a->d[idx];

		if (limb != 0) {
			/* First non-zero limb: add its partial count and stop. */
			zero_bytes += count_leading_zeros(limb) / 8;
			break;
		}
		zero_bytes += sizeof(mpi_limb_t);
	}
	return zero_bytes;
}
/**
 * mpi_read_raw_data - Read a raw byte stream as a positive integer
 * @xbuffer: The data to read
 * @nbytes: The amount of data to read
 */
MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes)
{
	const uint8_t *buffer = xbuffer;
	int i, j;
	unsigned nbits, nlimbs;
	mpi_limb_t a;
	MPI val = NULL;

	/* Skip leading zero bytes; they carry no information. */
	while (nbytes > 0 && buffer[0] == 0) {
		buffer++;
		nbytes--;
	}

	nbits = nbytes * 8;
	if (nbits > MAX_EXTERN_MPI_BITS) {
		pr_info("MPI: mpi too large (%u bits)\n", nbits);
		return NULL;
	}
	if (nbytes > 0)
		/* count_leading_zeros() operates on an unsigned long, so a
		 * single byte is counted with BITS_PER_LONG - 8 spurious
		 * leading zeros; subtract that bias or nbits goes negative
		 * (wraps) for any leading byte below 0x80.
		 */
		nbits -= count_leading_zeros(buffer[0]) - (BITS_PER_LONG - 8);
	else
		nbits = 0;

	nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
	val = mpi_alloc(nlimbs);
	if (!val)
		return NULL;
	val->nbits = nbits;
	val->sign = 0;
	val->nlimbs = nlimbs;

	if (nbytes > 0) {
		/* Number of pad bytes needed so the first (partial) limb
		 * consumes exactly the leftover bytes.
		 */
		i = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
		i %= BYTES_PER_MPI_LIMB;
		/* Fill limbs from most significant to least, big-endian. */
		for (j = nlimbs; j > 0; j--) {
			a = 0;
			for (; i < BYTES_PER_MPI_LIMB; i++) {
				a <<= 8;
				a |= *buffer++;
			}
			i = 0;
			val->d[j - 1] = a;
		}
	}
	return val;
}
/* Set x to i * 2^e, rounded in direction rnd_mode.
   Returns the MPFR ternary value (0 iff exact). */
int
mpfr_set_ui_2exp (mpfr_ptr x, unsigned long i, mpfr_exp_t e, mpfr_rnd_t rnd_mode)
{
  MPFR_SET_POS (x);

  if (i == 0)
    {
      MPFR_SET_ZERO (x);
      MPFR_RET (0);
    }
  else
    {
      mp_size_t xn;
      unsigned int cnt, nbits;
      mp_limb_t *xp;
      int inex = 0;

      /* FIXME: support int limbs (e.g. 16-bit limbs on 16-bit proc) */
      MPFR_ASSERTD (i == (mp_limb_t) i); /* i must fit in one limb */

      /* Position of the highest limb */
      xn = (MPFR_PREC (x) - 1) / GMP_NUMB_BITS;
      count_leading_zeros (cnt, (mp_limb_t) i);
      MPFR_ASSERTD (cnt < GMP_NUMB_BITS);  /* OK since i != 0 */

      /* Store i normalized into the top limb. */
      xp = MPFR_MANT(x);
      xp[xn] = ((mp_limb_t) i) << cnt;
      /* Zero the xn lower limbs. */
      MPN_ZERO(xp, xn);

      nbits = GMP_NUMB_BITS - cnt; /* bit length of i */
      e += nbits;  /* exponent _before_ the rounding */

      /* round if MPFR_PREC(x) smaller than length of i */
      if (MPFR_UNLIKELY (MPFR_PREC (x) < nbits) &&
          MPFR_UNLIKELY (mpfr_round_raw (xp + xn, xp + xn, nbits, 0,
                                         MPFR_PREC (x), rnd_mode, &inex)))
        {
          /* Rounding carried past the top bit: bump the exponent and
             reset the significand to 0.5 (in limb form). */
          e++;
          xp[xn] = MPFR_LIMB_HIGHBIT;
        }

      MPFR_EXP (x) = e;
      return mpfr_check_range (x, inex, rnd_mode);
    }
}
/* Return ceil(log2(n)) for n >= 1, i.e. the bit length of n - 1
   (and 0 for n == 1). */
int
__gmpfr_int_ceil_log2 (unsigned long n)
{
  int zeros;
  mp_limb_t v;

  if (MPFR_UNLIKELY (n == 1))
    return 0;

  MPFR_ASSERTN (n > 1);
  v = n - 1;
  MPFR_ASSERTN (v == n - 1); /* n - 1 must fit in a limb */
  count_leading_zeros (zeros, v);
  return GMP_NUMB_BITS - zeros;
}