Example #1
/* This should be made into an inline function in gmp.h.  */
void
mpn_mul_n (mp_ptr prodp, mp_srcptr up, mp_srcptr vp, mp_size_t size)
{
  TMP_DECL (marker);
  TMP_MARK (marker);
  if (up == vp)
    {
      if (size < KARATSUBA_THRESHOLD)
	{
	  impn_sqr_n_basecase (prodp, up, size);
	}
      else
	{
	  mp_ptr tspace;
	  tspace = (mp_ptr) TMP_ALLOC (2 * size * BYTES_PER_MP_LIMB);
	  impn_sqr_n (prodp, up, size, tspace);
	}
    }
  else
    {
      if (size < KARATSUBA_THRESHOLD)
	{
	  impn_mul_n_basecase (prodp, up, vp, size);
	}
      else
	{
	  mp_ptr tspace;
	  tspace = (mp_ptr) TMP_ALLOC (2 * size * BYTES_PER_MP_LIMB);
	  impn_mul_n (prodp, up, vp, size, tspace);
	}
    }
  TMP_FREE (marker);
}
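Every example on this page uses GMP's internal scratch-space idiom: TMP_DECL declares a marker, TMP_MARK records the current allocation point, TMP_ALLOC hands out temporary limbs, and TMP_FREE releases everything allocated since the mark. A minimal sketch of the pattern, with a hypothetical helper name and sizes chosen only for illustration:

/* Hypothetical helper: squares {up, n} into {prodp, 2n} through a private
   copy, purely to show the TMP_DECL / TMP_MARK / TMP_ALLOC / TMP_FREE idiom. */
static void
example_sqr_via_tmp (mp_ptr prodp, mp_srcptr up, mp_size_t n)
{
  mp_ptr scratch;
  TMP_DECL (marker);

  TMP_MARK (marker);
  scratch = (mp_ptr) TMP_ALLOC (n * BYTES_PER_MP_LIMB);
  MPN_COPY (scratch, up, n);              /* work on a private copy */
  mpn_mul_n (prodp, scratch, scratch, n); /* prodp must not overlap scratch */
  TMP_FREE (marker);                      /* releases scratch */
}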
Example #2
void
mpz_fdiv_qr (mpz_ptr quot, mpz_ptr rem, mpz_srcptr dividend, mpz_srcptr divisor)
{
  mp_size_t divisor_size = divisor->_mp_size;
  mp_size_t xsize;
  mpz_t temp_divisor;		/* N.B.: lives until function returns! */
  TMP_DECL (marker);

  TMP_MARK (marker);

  /* We need the original value of the divisor after the quotient and
     remainder have been preliminary calculated.  We have to copy it to
     temporary space if it's the same variable as either QUOT or REM.  */
  if (quot == divisor || rem == divisor)
    {
      MPZ_TMP_INIT (temp_divisor, ABS (divisor_size));
      mpz_set (temp_divisor, divisor);
      divisor = temp_divisor;
    }

  xsize = dividend->_mp_size ^ divisor_size;
  mpz_tdiv_qr (quot, rem, dividend, divisor);

  if (xsize < 0 && rem->_mp_size != 0)
    {
      mpz_sub_ui (quot, quot, 1L);
      mpz_add (rem, rem, divisor);
    }

  TMP_FREE (marker);
}
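The test on xsize (the XOR of the two size fields is negative when the signs differ) is what turns truncated division into floor division: the quotient is decremented and the divisor added back to the remainder. A caller-side sketch, with values chosen only for illustration:

#include <gmp.h>

/* Illustrative: floor division of -7 by 3 gives q = -3, r = 2, whereas
   truncated division (mpz_tdiv_qr) would give q = -2, r = -1.  */
void
fdiv_demo (void)
{
  mpz_t q, r, n, d;
  mpz_init (q);
  mpz_init (r);
  mpz_init_set_si (n, -7);
  mpz_init_set_si (d, 3);
  mpz_fdiv_qr (q, r, n, d);   /* q = -3, r = 2 */
  mpz_clear (q);
  mpz_clear (r);
  mpz_clear (n);
  mpz_clear (d);
}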
Example #3
File: mod.c Project: mahdiz/mpclib
void
mpz_mod (mpz_ptr rem, mpz_srcptr dividend, mpz_srcptr divisor)
{
  mp_size_t divisor_size = divisor->_mp_size;
  mpz_t temp_divisor;		/* N.B.: lives until function returns! */
  TMP_DECL (marker);

  TMP_MARK (marker);

  /* We need the original value of the divisor after the remainder has been
     preliminary calculated.  We have to copy it to temporary space if it's
     the same variable as REM.  */
  if (rem == divisor)
    {
      MPZ_TMP_INIT (temp_divisor, ABS (divisor_size));
      mpz_set (temp_divisor, divisor);
      divisor = temp_divisor;
    }

  mpz_tdiv_r (rem, dividend, divisor);

  if (rem->_mp_size != 0)
    {
      if (dividend->_mp_size < 0)
	{
	  if (divisor->_mp_size < 0)
	    mpz_sub (rem, rem, divisor);
	  else
	    mpz_add (rem, rem, divisor);
	}
    }

  TMP_FREE (marker);
}
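Unlike mpz_tdiv_r, mpz_mod always leaves a non-negative remainder; the fix-up above adds |divisor| whenever the truncated remainder of a negative dividend is non-zero. Illustratively (values are arbitrary):

#include <gmp.h>

void
mod_demo (void)
{
  mpz_t r, n, d;
  mpz_init (r);
  mpz_init_set_si (n, -10);
  mpz_init_set_si (d, 3);
  mpz_mod (r, n, d);          /* r = 2; mpz_tdiv_r would give -1 */
  mpz_clear (r);
  mpz_clear (n);
  mpz_clear (d);
}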
Example #4
File: sqrt.c Project: mahdiz/mpclib
void
mpz_sqrt (mpz_ptr root, mpz_srcptr op)
{
  mp_size_t op_size, root_size;
  mp_ptr root_ptr, op_ptr;
  mp_ptr free_me = NULL;
  mp_size_t free_me_size;
  TMP_DECL (marker);

  TMP_MARK (marker);
  op_size = op->_mp_size;
  if (op_size <= 0)
    {
      if (op_size < 0)
        SQRT_OF_NEGATIVE;
      SIZ(root) = 0;
      return;
    }

  /* The size of the root is accurate after this simple calculation.  */
  root_size = (op_size + 1) / 2;

  root_ptr = root->_mp_d;
  op_ptr = op->_mp_d;

  if (root->_mp_alloc < root_size)
    {
      if (root_ptr == op_ptr)
	{
	  free_me = root_ptr;
	  free_me_size = root->_mp_alloc;
	}
      else
	(*__gmp_free_func) (root_ptr, root->_mp_alloc * BYTES_PER_MP_LIMB);

      root->_mp_alloc = root_size;
      root_ptr = (mp_ptr) (*__gmp_allocate_func) (root_size * BYTES_PER_MP_LIMB);
      root->_mp_d = root_ptr;
    }
  else
    {
      /* Make OP not overlap with ROOT.  */
      if (root_ptr == op_ptr)
	{
	  /* ROOT and OP are identical.  Allocate temporary space for OP.  */
	  op_ptr = (mp_ptr) TMP_ALLOC (op_size * BYTES_PER_MP_LIMB);
	  /* Copy to the temporary space.  Hack: Avoid temporary variable
	     by using ROOT_PTR.  */
	  MPN_COPY (op_ptr, root_ptr, op_size);
	}
    }

  mpn_sqrtrem (root_ptr, NULL, op_ptr, op_size);

  root->_mp_size = root_size;

  if (free_me != NULL)
    (*__gmp_free_func) (free_me, free_me_size * BYTES_PER_MP_LIMB);
  TMP_FREE (marker);
}
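mpz_sqrt truncates, so for a non-square operand the result is floor(sqrt(op)); negative operands trigger SQRT_OF_NEGATIVE above. A small usage sketch:

#include <gmp.h>

void
sqrt_demo (void)
{
  mpz_t root, op;
  mpz_init (root);
  mpz_init_set_ui (op, 10);
  mpz_sqrt (root, op);        /* root = 3, since floor(sqrt(10)) = 3 */
  mpz_clear (root);
  mpz_clear (op);
}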
Example #5
unsigned long int
mpz_fdiv_r_ui (mpz_ptr rem, mpz_srcptr dividend, unsigned long int divisor)
{
  mp_size_t ns, nn;
  mp_ptr np;
  mp_limb_t rl;

  if (divisor == 0)
    DIVIDE_BY_ZERO;

  ns = SIZ(dividend);
  if (ns == 0)
    {
      SIZ(rem) = 0;
      return 0;
    }

  nn = ABS(ns);
  np = PTR(dividend);
#if GMP_NAIL_BITS != 0
  if (divisor > GMP_NUMB_MAX)
    {
      mp_limb_t dp[2];
      mp_ptr rp, qp;
      mp_size_t rn;
      TMP_DECL (mark);

      MPZ_REALLOC (rem, 2);
      rp = PTR(rem);

      if (nn == 1)		/* tdiv_qr requirements; tested above for 0 */
	{
	  rl = np[0];
	  rp[0] = rl;
	}
      else
	{
	  TMP_MARK (mark);
	  dp[0] = divisor & GMP_NUMB_MASK;
	  dp[1] = divisor >> GMP_NUMB_BITS;
	  qp = TMP_ALLOC_LIMBS (nn - 2 + 1);
	  mpn_tdiv_qr (qp, rp, (mp_size_t) 0, np, nn, dp, (mp_size_t) 2);
	  TMP_FREE (mark);
	  rl = rp[0] + (rp[1] << GMP_NUMB_BITS);
	}

      if (rl != 0 && ns < 0)
	{
	  rl = divisor - rl;
	  rp[0] = rl & GMP_NUMB_MASK;
	  rp[1] = rl >> GMP_NUMB_BITS;
	}
Example #6
File: powm.c Project: mahdiz/mpclib
/* Compute t = a mod m, a is defined by (ap,an), m is defined by (mp,mn), and
   t is defined by (tp,mn).  */
static void
reduce (mp_ptr tp, mp_srcptr ap, mp_size_t an, mp_srcptr mp, mp_size_t mn)
{
  mp_ptr qp;
  TMP_DECL (marker);

  TMP_MARK (marker);
  qp = TMP_ALLOC_LIMBS (an - mn + 1);

  mpn_tdiv_qr (qp, tp, 0L, ap, an, mp, mn);

  TMP_FREE (marker);
}
Example #7
File: mul.c Project: mahdiz/mpclib
void
mpn_sqr_n (mp_ptr prodp,
	   mp_srcptr up, mp_size_t un)
{
  ASSERT (un >= 1);
  ASSERT (! MPN_OVERLAP_P (prodp, 2*un, up, un));

  /* FIXME: Can this be removed? */
  if (un == 0)
    return;

  if (BELOW_THRESHOLD (un, SQR_BASECASE_THRESHOLD))
    { /* mul_basecase is faster than sqr_basecase on small sizes sometimes */
      mpn_mul_basecase (prodp, up, un, up, un);
    }
  else if (BELOW_THRESHOLD (un, SQR_KARATSUBA_THRESHOLD))
    { /* plain schoolbook multiplication */
      mpn_sqr_basecase (prodp, up, un);
    }
  else if (BELOW_THRESHOLD (un, SQR_TOOM3_THRESHOLD))
    { /* karatsuba multiplication */
      mp_ptr tspace;
      TMP_DECL (marker);
      TMP_MARK (marker);
      tspace = TMP_ALLOC_LIMBS (MPN_KARA_SQR_N_TSIZE (un));
      mpn_kara_sqr_n (prodp, up, un, tspace);
      TMP_FREE (marker);
    }
#if WANT_FFT || TUNE_PROGRAM_BUILD
  else if (BELOW_THRESHOLD (un, SQR_FFT_THRESHOLD))
#else
  else
#endif
    { /* Toom3 multiplication.
	 Use workspace from the heap, as stack may be limited.  Since n is
	 at least MUL_TOOM3_THRESHOLD, the multiplication will take much
	 longer than malloc()/free().  */
      mp_ptr     tspace;
      mp_size_t  tsize;
      tsize = MPN_TOOM3_SQR_N_TSIZE (un);
      tspace = __GMP_ALLOCATE_FUNC_LIMBS (tsize);
      mpn_toom3_sqr_n (prodp, up, un, tspace);
      __GMP_FREE_FUNC_LIMBS (tspace, tsize);
    }
#if WANT_FFT || TUNE_PROGRAM_BUILD
  else
    {
Example #8
static int mpfr_list_sum (mpfr_ptr ret, mpfr_t *tab, unsigned long n, 
                          mp_rnd_t rnd)
{
    mpfr_ptr *tabtmp;
    unsigned long i;
    int inexact;
    TMP_DECL(marker);
    
    TMP_MARK(marker);
    tabtmp = (mpfr_ptr *) TMP_ALLOC(n * sizeof(mpfr_srcptr));
    for (i = 0; i < n; i++)
        tabtmp[i] = tab[i];
    
    inexact = mpfr_sum (ret, tabtmp, n, rnd);
    TMP_FREE(marker);
    return inexact;
}
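The wrapper exists because this vintage of mpfr_sum takes an array of pointers rather than an array of mpfr_t; the loop simply builds that pointer array in scratch space. A hedged caller-side sketch, assuming the mpfr_list_sum signature shown above:

/* Illustrative: sum three values through the wrapper above.  */
mpfr_t vals[3], total;
int i, inexact;

for (i = 0; i < 3; i++)
  {
    mpfr_init2 (vals[i], 53);
    mpfr_set_ui (vals[i], i + 1, GMP_RNDN);
  }
mpfr_init2 (total, 53);
inexact = mpfr_list_sum (total, vals, 3, GMP_RNDN);   /* total = 6, exactly */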
Example #9
int
mpz_invert (mpz_ptr inverse, mpz_srcptr x, mpz_srcptr n)
{
  mpz_t gcd, tmp;
  mp_size_t xsize, nsize, size;
  TMP_DECL (marker);

  xsize = SIZ (x);
  nsize = SIZ (n);
  xsize = ABS (xsize);
  nsize = ABS (nsize);
  size = MAX (xsize, nsize) + 1;

  /* No inverse exists if the leftside operand is 0.  Likewise, no
     inverse exists if the mod operand is 1.  */
  if (xsize == 0 || (nsize == 1 && (PTR (n))[0] == 1))
    return 0;

  TMP_MARK (marker);

  MPZ_TMP_INIT (gcd, size);
  MPZ_TMP_INIT (tmp, size);
  mpz_gcdext (gcd, tmp, (mpz_ptr) 0, x, n);

  /* If no inverse existed, return with an indication of that.  */
  if (SIZ (gcd) != 1 || PTR(gcd)[0] != 1)
    {
      TMP_FREE (marker);
      return 0;
    }

  /* Make sure we return a positive inverse.  */
  if (SIZ (tmp) < 0)
    {
      if (SIZ (n) < 0)
	mpz_sub (inverse, tmp, n);
      else
	mpz_add (inverse, tmp, n);
    }
  else
    mpz_set (inverse, tmp);

  TMP_FREE (marker);
  return 1;
}
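mpz_invert returns non-zero exactly when the inverse exists, and the final sign fix-up guarantees a positive result below |n|. A small usage sketch:

#include <gmp.h>

void
invert_demo (void)
{
  mpz_t inv, x, n;
  mpz_init (inv);
  mpz_init_set_ui (x, 3);
  mpz_init_set_ui (n, 7);
  if (mpz_invert (inv, x, n))
    {
      /* inv = 5, since 3 * 5 = 15 == 1 (mod 7) */
    }
  mpz_clear (inv);
  mpz_clear (x);
  mpz_clear (n);
}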
Example #10
/* s <- 1 + r/1! + r^2/2! + ... + r^l/l! while MPFR_EXP(r^l/l!)+MPFR_EXPR(r)>-q
   using naive method with O(l) multiplications.
   Return the number of iterations l.
   The absolute error on s is less than 3*l*(l+1)*2^(-q).
   Version using fixed-point arithmetic with mpz instead
   of mpfr for internal computations.
   s must have at least qn+1 limbs (qn should be enough, but currently fails
   since mpz_mul_2exp(s, s, q-1) reallocates qn+1 limbs)
*/
static unsigned long
mpfr_exp2_aux (mpz_t s, mpfr_srcptr r, mp_prec_t q, mp_exp_t *exps)
{
  unsigned long l;
  mp_exp_t dif;
  mp_size_t qn;
  mpz_t t, rr;
  mp_exp_t expt, expr;
  TMP_DECL(marker);

  TMP_MARK(marker);
  qn = 1 + (q-1)/BITS_PER_MP_LIMB;
  expt = 0;
  *exps = 1 - (mp_exp_t) q;                   /* s = 2^(q-1) */
  MY_INIT_MPZ(t, 2*qn+1);
  MY_INIT_MPZ(rr, qn+1);
  mpz_set_ui(t, 1); 
  mpz_set_ui(s, 1); 
  mpz_mul_2exp(s, s, q-1); 
  expr = mpfr_get_z_exp(rr, r);               /* no error here */

  l = 0;
  do {
    l++;
    mpz_mul(t, t, rr); 
    expt += expr;
    dif = *exps + mpz_sizeinbase(s, 2) - expt - mpz_sizeinbase(t, 2);
    /* truncates the bits of t which are < ulp(s) = 2^(1-q) */
    expt += mpz_normalize(t, t, (mp_exp_t) q-dif); /* error at most 2^(1-q) */
    mpz_div_ui(t, t, l);                   /* error at most 2^(1-q) */
    /* the error wrt t^l/l! is here at most 3*l*ulp(s) */
    MPFR_ASSERTD (expt == *exps);
    mpz_add(s, s, t);                      /* no error here: exact */
    /* ensures rr has the same size as t: after several shifts, the error
       on rr is still at most ulp(t)=ulp(s) */
    expr += mpz_normalize(rr, rr, mpz_sizeinbase(t, 2));
  } while (mpz_cmp_ui(t, 0));

  TMP_FREE(marker);
  return l;
}
Example #11
mp_limb_t
mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t limb)
{
  mp_ptr p0, p1, tp;
  mp_limb_t cy_limb;
  TMP_DECL (marker);
  TMP_MARK (marker);

  p1 = TMP_ALLOC (n * BYTES_PER_MP_LIMB);
  p0 = TMP_ALLOC (n * BYTES_PER_MP_LIMB);
  tp = TMP_ALLOC (n * BYTES_PER_MP_LIMB);

  GMPN_MULWW (p1, p0, up, &n, &limb);
  cy_limb = mpn_add_n (tp, rp, p0, n);
  rp[0] = tp[0];
  cy_limb += mpn_add_n (rp + 1, tp + 1, p1, n - 1);
  cy_limb += p1[n - 1];

  TMP_FREE (marker);
  return cy_limb;
}
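In its usual form mpn_addmul_1 computes {rp, n} += {up, n} * limb and returns the carry limb out of the top; this variant just routes the work through GMPN_MULWW and two mpn_add_n calls. A hedged usage sketch, with small two-limb numbers chosen so no carry occurs:

mp_limb_t a[2] = { 5, 0 };    /* the number 5 */
mp_limb_t b[2] = { 7, 0 };    /* the number 7 */
mp_limb_t carry;

carry = mpn_addmul_1 (a, b, 2, 3);   /* a += 7 * 3, so a now holds 26; carry = 0 */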
Example #12
/* s <- 1 + r/1! + r^2/2! + ... + r^l/l! while MPFR_EXP(r^l/l!)+MPFR_EXPR(r)>-q
   using naive method with O(l) multiplications.
   Return the number of iterations l.
   The absolute error on s is less than 3*l*(l+1)*2^(-q).
   Version using fixed-point arithmetic with mpz instead 
   of mpfr for internal computations.
   s must have at least qn+1 limbs (qn should be enough, but currently fails
   since mpz_mul_2exp(s, s, q-1) reallocates qn+1 limbs)
*/
static int
mpfr_exp2_aux (mpz_t s, mpfr_srcptr r, int q, int *exps)
{
  int l, dif, qn;
  mpz_t t, rr; mp_exp_t expt, expr;
  TMP_DECL(marker);

  TMP_MARK(marker);
  qn = 1 + (q-1)/BITS_PER_MP_LIMB;
  MY_INIT_MPZ(t, 2*qn+1); /* 2*qn+1 is needed since mpz_div_2exp may 
			      need an extra limb */
  MY_INIT_MPZ(rr, qn+1);
  mpz_set_ui(t, 1); expt=0;
  mpz_set_ui(s, 1); mpz_mul_2exp(s, s, q-1); *exps = 1-q; /* s = 2^(q-1) */
  expr = mpfr_get_z_exp(rr, r); /* no error here */

  l = 0;
  do {
    l++;
    mpz_mul(t, t, rr); expt=expt+expr;
    dif = *exps + mpz_sizeinbase(s, 2) - expt - mpz_sizeinbase(t, 2);
    /* truncates the bits of t which are < ulp(s) = 2^(1-q) */
    expt += mpz_normalize(t, t, q-dif); /* error at most 2^(1-q) */
    mpz_div_ui(t, t, l); /* error at most 2^(1-q) */
    /* the error wrt t^l/l! is here at most 3*l*ulp(s) */
#ifdef DEBUG
    if (expt != *exps) {
      fprintf(stderr, "Error: expt != exps %d %d\n", expt, *exps); exit(1);
    }
#endif
    mpz_add(s, s, t); /* no error here: exact */
    /* ensures rr has the same size as t: after several shifts, the error
       on rr is still at most ulp(t)=ulp(s) */
    expr += mpz_normalize(rr, rr, mpz_sizeinbase(t, 2));
  } while (mpz_cmp_ui(t, 0));

  TMP_FREE(marker);
  return l;
}
Example #13
static
unsigned long int
lc (mp_ptr rp, gmp_randstate_t rstate)
{
  mp_ptr tp, seedp, ap;
  mp_size_t ta;
  mp_size_t tn, seedn, an;
  unsigned long int m2exp;
  mp_limb_t c;
  TMP_DECL (mark);

  m2exp = rstate->_mp_algdata._mp_lc->_mp_m2exp;

  /* The code below assumes the mod part is a power of two.  Make sure
     that is the case.  */
  ASSERT_ALWAYS (m2exp != 0);

  c = (mp_limb_t) rstate->_mp_algdata._mp_lc->_mp_c;

  seedp = PTR (rstate->_mp_seed);
  seedn = SIZ (rstate->_mp_seed);

  if (seedn == 0)
    {
      /* Seed is 0.  Result is C % M.  Assume table is sensibly stored,
	 with C smaller than M.  */
      *rp = c;

      *seedp = c;
      SIZ (rstate->_mp_seed) = 1;
      return m2exp;
    }

  ap = PTR (rstate->_mp_algdata._mp_lc->_mp_a);
  an = SIZ (rstate->_mp_algdata._mp_lc->_mp_a);

  /* Allocate temporary storage.  Let there be room for calculation of
     (A * seed + C) % M, or M if bigger than that.  */

  TMP_MARK (mark);
  ta = an + seedn + 1;
  tp = (mp_ptr) TMP_ALLOC (ta * BYTES_PER_MP_LIMB);

  /* t = a * seed */
  if (seedn >= an)
    mpn_mul (tp, seedp, seedn, ap, an);
  else
    mpn_mul (tp, ap, an, seedp, seedn);
  tn = an + seedn;

  /* t = t + c */
  tp[tn] = 0;			/* sentinel, stops MPN_INCR_U */
  MPN_INCR_U (tp, tn, c);

  ASSERT_ALWAYS (m2exp / GMP_NUMB_BITS < ta);

  /* t = t % m */
  tp[m2exp / GMP_NUMB_BITS] &= ((mp_limb_t) 1 << m2exp % GMP_NUMB_BITS) - 1;
  tn = (m2exp + GMP_NUMB_BITS - 1) / GMP_NUMB_BITS;

  /* Save result as next seed.  */
  MPN_COPY (PTR (rstate->_mp_seed), tp, tn);
  SIZ (rstate->_mp_seed) = tn;

  {
    /* Discard the lower m2exp/2 bits of result.  */
    unsigned long int bits = m2exp / 2;
    mp_size_t xn = bits / GMP_NUMB_BITS;

    tn -= xn;
    if (tn > 0)
      {
	unsigned int cnt = bits % GMP_NUMB_BITS;
	if (cnt != 0)
	  { 
	    mpn_rshift (tp, tp + xn, tn, cnt);
	    MPN_COPY_INCR (rp, tp, xn + 1); 
	  }
	else			/* Even limb boundary.  */
	  MPN_COPY_INCR (rp, tp + xn, tn);
      }
  }

  TMP_FREE (mark);

  /* Return number of valid bits in the result.  */
  return (m2exp + 1) / 2;
}
Example #14
void
_gmp_rand (mp_ptr rp, gmp_randstate_t rstate, unsigned long int nbits)
{
  mp_size_t rn;			/* Size of R.  */

  rn = (nbits + GMP_NUMB_BITS - 1) / GMP_NUMB_BITS;

  switch (rstate->_mp_alg)
    {
    case GMP_RAND_ALG_LC:
      {
	unsigned long int rbitpos;
	int chunk_nbits;
	mp_ptr tp;
	mp_size_t tn;
	TMP_DECL (lcmark);

	TMP_MARK (lcmark);

	chunk_nbits = rstate->_mp_algdata._mp_lc->_mp_m2exp / 2;
	tn = (chunk_nbits + GMP_NUMB_BITS - 1) / GMP_NUMB_BITS;

	tp = (mp_ptr) TMP_ALLOC (tn * BYTES_PER_MP_LIMB);

	rbitpos = 0;
	while (rbitpos + chunk_nbits <= nbits)
	  {
	    mp_ptr r2p = rp + rbitpos / GMP_NUMB_BITS;

	    if (rbitpos % GMP_NUMB_BITS != 0)
	      {
		mp_limb_t savelimb, rcy;
		/* Target of new chunk is not bit aligned.  Use temp space
		   and align things by shifting it up.  */
		lc (tp, rstate);
		savelimb = r2p[0];
		rcy = mpn_lshift (r2p, tp, tn, rbitpos % GMP_NUMB_BITS);
		r2p[0] |= savelimb;
/* bogus */	if ((chunk_nbits % GMP_NUMB_BITS + rbitpos % GMP_NUMB_BITS)
		    > GMP_NUMB_BITS)
		  r2p[tn] = rcy;
	      }
	    else
	      {
		/* Target of new chunk is bit aligned.  Let `lc' put bits
		   directly into our target variable.  */
		lc (r2p, rstate);
	      }
	    rbitpos += chunk_nbits;
	  }

	/* Handle last [0..chunk_nbits) bits.  */
	if (rbitpos != nbits)
	  {
	    mp_ptr r2p = rp + rbitpos / GMP_NUMB_BITS;
	    int last_nbits = nbits - rbitpos;
	    tn = (last_nbits + GMP_NUMB_BITS - 1) / GMP_NUMB_BITS;
	    lc (tp, rstate);
	    if (rbitpos % GMP_NUMB_BITS != 0)
	      {
		mp_limb_t savelimb, rcy;
		/* Target of new chunk is not bit aligned.  Use temp space
		   and align things by shifting it up.  */
		savelimb = r2p[0];
		rcy = mpn_lshift (r2p, tp, tn, rbitpos % GMP_NUMB_BITS);
		r2p[0] |= savelimb;
		if (rbitpos + tn * GMP_NUMB_BITS - rbitpos % GMP_NUMB_BITS < nbits)
		  r2p[tn] = rcy;
	      }
	    else
	      {
		MPN_COPY (r2p, tp, tn);
	      }
	    /* Mask off top bits if needed.  */
	    if (nbits % GMP_NUMB_BITS != 0)
	      rp[nbits / GMP_NUMB_BITS]
		&= ~ ((~(mp_limb_t) 0) << nbits % GMP_NUMB_BITS);
	  }

	TMP_FREE (lcmark);
	break;
      }

    default:
      ASSERT (0);
      break;
    }
}
Example #15
/* use Brent's formula exp(x) = (1+r+r^2/2!+r^3/3!+...)^(2^K)*2^n
   where x = n*log(2)+(2^K)*r
   together with Brent-Kung O(t^(1/2)) algorithm for the evaluation of
   power series. The resulting complexity is O(n^(1/3)*M(n)).
*/
int
mpfr_exp_2 (mpfr_ptr y, mpfr_srcptr x, mp_rnd_t rnd_mode)
{
  long n;
  unsigned long K, k, l, err; /* FIXME: Which type ? */
  int error_r;
  mp_exp_t exps;
  mp_prec_t q, precy;
  int inexact;
  mpfr_t r, s, t;
  mpz_t ss;
  TMP_DECL(marker);

  precy = MPFR_PREC(y);
  
  MPFR_TRACE ( printf("Py=%d Px=%d", MPFR_PREC(y), MPFR_PREC(x)) );
  MPFR_TRACE ( MPFR_DUMP (x) );

  n = (long) (mpfr_get_d1 (x) / LOG2);

  /* error bounds the cancelled bits in x - n*log(2) */
  if (MPFR_UNLIKELY(n == 0))
    error_r = 0;
  else
    count_leading_zeros (error_r, (mp_limb_t) ((n < 0) ? -n : n));
  error_r = BITS_PER_MP_LIMB - error_r + 2;

  /* for the O(n^(1/2)*M(n)) method, the Taylor series computation of
     n/K terms costs about n/(2K) multiplications when computed in fixed
     point */
  K = (precy < SWITCH) ? __gmpfr_isqrt ((precy + 1) / 2)
    : __gmpfr_cuberoot (4*precy);
  l = (precy - 1) / K + 1;
  err = K + MPFR_INT_CEIL_LOG2 (2 * l + 18);
  /* add K extra bits, i.e. failure probability <= 1/2^K = O(1/precy) */
  q = precy + err + K + 5;
  
  /*q = ( (q-1)/BITS_PER_MP_LIMB + 1) * BITS_PER_MP_LIMB; */

  mpfr_init2 (r, q + error_r);
  mpfr_init2 (s, q + error_r);
  mpfr_init2 (t, q);

  /* the algorithm consists in computing an upper bound of exp(x) using
     a precision of q bits, and see if we can round to MPFR_PREC(y) taking
     into account the maximal error. Otherwise we increase q. */
  for (;;)
    {
      MPFR_TRACE ( printf("n=%d K=%d l=%d q=%d\n",n,K,l,q) );
      
      /* if n<0, we have to get an upper bound of log(2)
	 in order to get an upper bound of r = x-n*log(2) */
      mpfr_const_log2 (s, (n >= 0) ? GMP_RNDZ : GMP_RNDU);
      /* s is within 1 ulp of log(2) */
      
      mpfr_mul_ui (r, s, (n < 0) ? -n : n, (n >= 0) ? GMP_RNDZ : GMP_RNDU);
      /* r is within 3 ulps of n*log(2) */
      if (n < 0)
	mpfr_neg (r, r, GMP_RNDD); /* exact */
      /* r = floor(n*log(2)), within 3 ulps */
      
      MPFR_TRACE ( MPFR_DUMP (x) );
      MPFR_TRACE ( MPFR_DUMP (r) );
      
      mpfr_sub (r, x, r, GMP_RNDU);
      /* possible cancellation here: the error on r is at most
	 3*2^(EXP(old_r)-EXP(new_r)) */
      while (MPFR_IS_NEG (r))
	{ /* initial approximation n was too large */
	  n--;
	  mpfr_add (r, r, s, GMP_RNDU);
	}
      mpfr_prec_round (r, q, GMP_RNDU);
      MPFR_TRACE ( MPFR_DUMP (r) );
      MPFR_ASSERTD (MPFR_IS_POS (r));
      mpfr_div_2ui (r, r, K, GMP_RNDU); /* r = (x-n*log(2))/2^K, exact */
      
      TMP_MARK(marker);
      MY_INIT_MPZ(ss, 3 + 2*((q-1)/BITS_PER_MP_LIMB));
      exps = mpfr_get_z_exp (ss, s);
      /* s <- 1 + r/1! + r^2/2! + ... + r^l/l! */
      l = (precy < SWITCH) ? 
	mpfr_exp2_aux (ss, r, q, &exps)      /* naive method */
	: mpfr_exp2_aux2 (ss, r, q, &exps);  /* Brent/Kung method */
      
      MPFR_TRACE(printf("l=%d q=%d (K+l)*q^2=%1.3e\n", l, q, (K+l)*(double)q*q));
      
      for (k = 0; k < K; k++)
	{
	  mpz_mul (ss, ss, ss);
	  exps <<= 1;
	  exps += mpz_normalize (ss, ss, q);
	}
      mpfr_set_z (s, ss, GMP_RNDN);
      
      MPFR_SET_EXP(s, MPFR_GET_EXP (s) + exps);
      TMP_FREE(marker); /* don't need ss anymore */
      
      if (n>0) 
	mpfr_mul_2ui(s, s, n, GMP_RNDU);
      else 
	mpfr_div_2ui(s, s, -n, GMP_RNDU);
      
      /* error is at most 2^K*(3l*(l+1)) ulp for mpfr_exp2_aux */
      l = (precy < SWITCH) ? 3*l*(l+1) : l*(l+4) ;
      k = MPFR_INT_CEIL_LOG2 (l);
      /* k = 0; while (l) { k++; l >>= 1; } */

      /* now k = ceil(log(error in ulps)/log(2)) */
      K += k;

      MPFR_TRACE ( printf("after mult. by 2^n:\n") );
      MPFR_TRACE ( MPFR_DUMP (s) );
      MPFR_TRACE ( printf("err=%d bits\n", K) );
      
      if (mpfr_can_round (s, q - K, GMP_RNDN, GMP_RNDZ,
			  precy + (rnd_mode == GMP_RNDN)) )
	break;
      MPFR_TRACE (printf("prec++, use %d\n", q+BITS_PER_MP_LIMB) );
      MPFR_TRACE (printf("q=%d q-K=%d precy=%d\n",q,q-K,precy) );
      q += BITS_PER_MP_LIMB;
      mpfr_set_prec (r, q);
      mpfr_set_prec (s, q);
      mpfr_set_prec (t, q);
    }
  
  inexact = mpfr_set (y, s, rnd_mode);

  mpfr_clear (r); 
  mpfr_clear (s); 
  mpfr_clear (t);

  return inexact;
}
Example #16
/* s <- 1 + r/1! + r^2/2! + ... + r^l/l! while MPFR_EXP(r^l/l!)+MPFR_EXPR(r)>-q
   using Brent/Kung method with O(sqrt(l)) multiplications.
   Return l.
   Uses m multiplications of full size and 2l/m of decreasing size,
   i.e. a total equivalent to about m+l/m full multiplications,
   i.e. 2*sqrt(l) for m=sqrt(l).
   Version using mpz. ss must have at least (sizer+1) limbs.
   The error is bounded by (l^2+4*l) ulps where l is the return value.
*/
static unsigned long
mpfr_exp2_aux2 (mpz_t s, mpfr_srcptr r, mp_prec_t q, mp_exp_t *exps)
{
  mp_exp_t expr, *expR, expt;
  mp_size_t sizer;
  mp_prec_t ql;
  unsigned long l, m, i;
  mpz_t t, *R, rr, tmp;
  TMP_DECL(marker);

  /* estimate value of l */
  MPFR_ASSERTD (MPFR_GET_EXP (r) < 0);
  l = q / (- MPFR_GET_EXP (r));
  m = __gmpfr_isqrt (l);
  /* we access R[2], thus we need m >= 2 */
  if (m < 2)
    m = 2;

  TMP_MARK(marker);
  R = (mpz_t*) TMP_ALLOC((m+1)*sizeof(mpz_t));          /* R[i] is r^i */
  expR = (mp_exp_t*) TMP_ALLOC((m+1)*sizeof(mp_exp_t)); /* exponent for R[i] */
  sizer = 1 + (MPFR_PREC(r)-1)/BITS_PER_MP_LIMB;
  mpz_init(tmp);
  MY_INIT_MPZ(rr, sizer+2);
  MY_INIT_MPZ(t, 2*sizer);            /* double size for products */
  mpz_set_ui(s, 0); 
  *exps = 1-q;                        /* 1 ulp = 2^(1-q) */
  for (i = 0 ; i <= m ; i++)
    MY_INIT_MPZ(R[i], sizer+2);
  expR[1] = mpfr_get_z_exp(R[1], r); /* exact operation: no error */
  expR[1] = mpz_normalize2(R[1], R[1], expR[1], 1-q); /* error <= 1 ulp */
  mpz_mul(t, R[1], R[1]); /* err(t) <= 2 ulps */
  mpz_div_2exp(R[2], t, q-1); /* err(R[2]) <= 3 ulps */
  expR[2] = 1-q;
  for (i = 3 ; i <= m ; i++)
    {
      mpz_mul(t, R[i-1], R[1]); /* err(t) <= 2*i-2 */
      mpz_div_2exp(R[i], t, q-1); /* err(R[i]) <= 2*i-1 ulps */
      expR[i] = 1-q;
    }
  mpz_set_ui (R[0], 1);
  mpz_mul_2exp (R[0], R[0], q-1);
  expR[0] = 1-q; /* R[0]=1 */
  mpz_set_ui (rr, 1);
  expr = 0; /* rr contains r^l/l! */
  /* by induction: err(rr) <= 2*l ulps */

  l = 0;
  ql = q; /* precision used for current giant step */
  do
    {
      /* all R[i] must have exponent 1-ql */
      if (l != 0)
        for (i = 0 ; i < m ; i++)
	  expR[i] = mpz_normalize2 (R[i], R[i], expR[i], 1-ql);
      /* the absolute error on R[i]*rr is still 2*i-1 ulps */
      expt = mpz_normalize2 (t, R[m-1], expR[m-1], 1-ql);
      /* err(t) <= 2*m-1 ulps */
      /* computes t = 1 + r/(l+1) + ... + r^(m-1)*l!/(l+m-1)!
         using Horner's scheme */
      for (i = m-1 ; i-- != 0 ; )
        {
          mpz_div_ui(t, t, l+i+1); /* err(t) += 1 ulp */
          mpz_add(t, t, R[i]);
        }
      /* now err(t) <= (3m-2) ulps */

      /* now multiplies t by r^l/l! and adds to s */
      mpz_mul(t, t, rr);
      expt += expr;
      expt = mpz_normalize2(t, t, expt, *exps);
      /* err(t) <= (3m-1) + err_rr(l) <= (3m-2) + 2*l */
      MPFR_ASSERTD (expt == *exps);
      mpz_add(s, s, t); /* no error here */

      /* updates rr, the multiplication of the factors l+i could be done
         using binary splitting too, but it is not sure it would save much */
      mpz_mul(t, rr, R[m]); /* err(t) <= err(rr) + 2m-1 */
      expr += expR[m];
      mpz_set_ui (tmp, 1);
      for (i = 1 ; i <= m ; i++)
	mpz_mul_ui (tmp, tmp, l + i);
      mpz_fdiv_q(t, t, tmp); /* err(t) <= err(rr) + 2m */
      expr += mpz_normalize(rr, t, ql); /* err_rr(l+1) <= err_rr(l) + 2m+1 */
      ql = q - *exps - mpz_sizeinbase(s, 2) + expr + mpz_sizeinbase(rr, 2);
      l += m;
    }
  while ((size_t) expr+mpz_sizeinbase(rr, 2) > (size_t)((int)-q));

  TMP_FREE(marker);
  mpz_clear(tmp);
  return l;
}
Example #17
/* Compute the first 2^m terms from the hypergeometric series
   with x = p / 2^r */
static int
GENERIC (mpfr_ptr y, mpz_srcptr p, long r, int m)
{
  int n,i,k,j,l;
  int is_p_one = 0;
  mpz_t* P,*S;
#ifdef A
  mpz_t *T;
#endif
  mpz_t* ptoj;
#ifdef R_IS_RATIONAL
  mpz_t* qtoj;
  mpfr_t tmp;
#endif
  int diff, expo;
  int precy = MPFR_PREC(y);
  TMP_DECL(marker);

  TMP_MARK(marker);
  MPFR_CLEAR_FLAGS(y); 
  n = 1 << m;
  P = (mpz_t*) TMP_ALLOC ((m+1) * sizeof(mpz_t));
  S = (mpz_t*) TMP_ALLOC ((m+1) * sizeof(mpz_t));
  ptoj = (mpz_t*) TMP_ALLOC ((m+1) * sizeof(mpz_t)); /* ptoj[i] = mantissa^(2^i) */
#ifdef A
  T = (mpz_t*) TMP_ALLOC ((m+1) * sizeof(mpz_t));
#endif
#ifdef R_IS_RATIONAL
  qtoj = (mpz_t*) TMP_ALLOC ((m+1) * sizeof(mpz_t));
#endif
  for (i=0;i<=m;i++)
    {
      mpz_init (P[i]);
      mpz_init (S[i]);
      mpz_init (ptoj[i]);
#ifdef R_IS_RATIONAL
      mpz_init (qtoj[i]);
#endif
#ifdef A
      mpz_init (T[i]);
#endif
    }
  mpz_set (ptoj[0], p);
#ifdef C
#  if C2 != 1
  mpz_mul_ui(ptoj[0], ptoj[0], C2);
#  endif
#endif
  is_p_one = !mpz_cmp_si(ptoj[0], 1);
#ifdef A
#  ifdef B
  mpz_set_ui(T[0], A1 * B1);
#  else
  mpz_set_ui(T[0], A1);
#  endif
#endif
  if (!is_p_one)
    for (i=1;i<m;i++) mpz_mul(ptoj[i], ptoj[i-1], ptoj[i-1]);
#ifdef R_IS_RATIONAL
  mpz_set_si(qtoj[0], r);
  for (i=1;i<=m;i++) 
    {
      mpz_mul(qtoj[i], qtoj[i-1], qtoj[i-1]);
    }
#endif

  mpz_set_ui(P[0], 1);
  mpz_set_ui(S[0], 1);
  k = 0;
  for (i=1;(i < n) ;i++) {
    k++;
    
#ifdef A
#  ifdef B 
    mpz_set_ui(T[k], (A1 + A2*i)*(B1+B2*i));
#  else
    mpz_set_ui(T[k], A1 + A2*i);
#  endif
#endif
    
#ifdef C
#  ifdef NO_FACTORIAL
    mpz_set_ui(P[k], (C1 + C2 * (i-1)));
    mpz_set_ui(S[k], 1);
#  else
    mpz_set_ui(P[k], (i+1) * (C1 + C2 * (i-1)));
    mpz_set_ui(S[k], i+1);
#  endif
#else
#  ifdef NO_FACTORIAL
    mpz_set_ui(P[k], 1);
#  else
    mpz_set_ui(P[k], i+1);
#  endif
    mpz_set(S[k], P[k]);
#endif
    j=i+1; l=0; while ((j & 1) == 0) {      
      if (!is_p_one) 
	mpz_mul(S[k], S[k], ptoj[l]);
#ifdef A
#  ifdef B
#    if (A2*B2) != 1
      mpz_mul_ui(P[k], P[k], A2*B2);
#    endif
#  else
#    if A2 != 1 
      mpz_mul_ui(P[k], P[k], A2);
#  endif
#endif
      mpz_mul(S[k], S[k], T[k-1]);
#endif
      mpz_mul(S[k-1], S[k-1], P[k]);
#ifdef R_IS_RATIONAL
      mpz_mul(S[k-1], S[k-1], qtoj[l]);
#else
      mpz_mul_2exp(S[k-1], S[k-1], r*(1<<l));
#endif
      mpz_add(S[k-1], S[k-1], S[k]);
      mpz_mul(P[k-1], P[k-1], P[k]);
#ifdef A
      mpz_mul(T[k-1], T[k-1], T[k]);
#endif
      l++; j>>=1; k--;
    }
  }

  diff = mpz_sizeinbase(S[0],2) - 2*precy;
  expo = diff;
  if (diff >= 0)
    {
      mpz_div_2exp(S[0], S[0], diff);
    }
  else
    {
      mpz_mul_2exp(S[0], S[0], -diff);
    }
  diff = mpz_sizeinbase(P[0],2) - precy;
  expo -= diff;
  if (diff >= 0)
    {
      mpz_div_2exp(P[0], P[0], diff);
    }
  else
    {
      mpz_mul_2exp(P[0], P[0], -diff);
    }

  mpz_tdiv_q(S[0], S[0], P[0]);
  mpfr_set_z(y, S[0], GMP_RNDD);
  MPFR_SET_EXP (y, MPFR_GET_EXP (y) + expo);

#ifdef R_IS_RATIONAL
  /* exact division */
  mpz_div_ui (qtoj[m], qtoj[m], r);
  mpfr_init2 (tmp, MPFR_PREC(y));
  mpfr_set_z (tmp, qtoj[m] , GMP_RNDD);
  mpfr_div (y, y, tmp, GMP_RNDD);
  mpfr_clear (tmp);
#else
  mpfr_div_2ui(y, y, r*(i-1), GMP_RNDN);
#endif
  for (i=0;i<=m;i++)
    {
      mpz_clear (P[i]);
      mpz_clear (S[i]);
      mpz_clear (ptoj[i]); 
#ifdef R_IS_RATIONAL
      mpz_clear (qtoj[i]);
#endif
#ifdef A
      mpz_clear (T[i]);
#endif
    }
  TMP_FREE(marker);
  return 0;
}
Example #18
File: add.c Project: mahdiz/mpclib
void
mpf_add (mpf_ptr r, mpf_srcptr u, mpf_srcptr v)
{
  mp_srcptr up, vp;
  mp_ptr rp, tp;
  mp_size_t usize, vsize, rsize;
  mp_size_t prec;
  mp_exp_t uexp;
  mp_size_t ediff;
  mp_limb_t cy;
  int negate;
  TMP_DECL (marker);

  usize = u->_mp_size;
  vsize = v->_mp_size;

  /* Handle special cases that don't work in generic code below.  */
  if (usize == 0)
    {
    set_r_v_maybe:
      if (r != v)
        mpf_set (r, v);
      return;
    }
  if (vsize == 0)
    {
      v = u;
      goto set_r_v_maybe;
    }

  /* If signs of U and V are different, perform subtraction.  */
  if ((usize ^ vsize) < 0)
    {
      __mpf_struct v_negated;
      v_negated._mp_size = -vsize;
      v_negated._mp_exp = v->_mp_exp;
      v_negated._mp_d = v->_mp_d;
      mpf_sub (r, u, &v_negated);
      return;
    }

  TMP_MARK (marker);

  /* Signs are now known to be the same.  */
  negate = usize < 0;

  /* Make U be the operand with the largest exponent.  */
  if (u->_mp_exp < v->_mp_exp)
    {
      mpf_srcptr t;
      t = u; u = v; v = t;
      usize = u->_mp_size;
      vsize = v->_mp_size;
    }

  usize = ABS (usize);
  vsize = ABS (vsize);
  up = u->_mp_d;
  vp = v->_mp_d;
  rp = r->_mp_d;
  prec = r->_mp_prec;
  uexp = u->_mp_exp;
  ediff = u->_mp_exp - v->_mp_exp;

  /* If U extends beyond PREC, ignore the part that does.  */
  if (usize > prec)
    {
      up += usize - prec;
      usize = prec;
    }

  /* If V extends beyond PREC, ignore the part that does.
     Note that this may make vsize negative.  */
  if (vsize + ediff > prec)
    {
      vp += vsize + ediff - prec;
      vsize = prec - ediff;
    }

#if 0
  /* Locate the least significant non-zero limb in (the needed parts
     of) U and V, to simplify the code below.  */
  while (up[0] == 0)
    up++, usize--;
  while (vp[0] == 0)
    vp++, vsize--;
#endif

  /* Allocate temp space for the result.  Allocate
     just vsize + ediff later???  */
  tp = (mp_ptr) TMP_ALLOC (prec * BYTES_PER_MP_LIMB);

  if (ediff >= prec)
    {
      /* V completely cancelled.  */
      if (rp != up)
	MPN_COPY_INCR (rp, up, usize);
      rsize = usize;
    }
  else
    {
      /* uuuu     |  uuuu     |  uuuu     |  uuuu     |  uuuu    */
      /* vvvvvvv  |  vv       |    vvvvv  |    v      |       vv */

      if (usize > ediff)
	{
	  /* U and V partially overlaps.  */
	  if (vsize + ediff <= usize)
	    {
	      /* uuuu     */
	      /*   v      */
	      mp_size_t size;
	      size = usize - ediff - vsize;
	      MPN_COPY (tp, up, size);
	      cy = mpn_add (tp + size, up + size, usize - size, vp, vsize);
	      rsize = usize;
	    }
	  else
	    {
	      /* uuuu     */
	      /*   vvvvv  */
	      mp_size_t size;
	      size = vsize + ediff - usize;
	      MPN_COPY (tp, vp, size);
	      cy = mpn_add (tp + size, up, usize, vp + size, usize - ediff);
	      rsize = vsize + ediff;
	    }
	}
      else
	{
	  /* uuuu     */
	  /*      vv  */
	  mp_size_t size;
	  size = vsize + ediff - usize;
	  MPN_COPY (tp, vp, vsize);
	  MPN_ZERO (tp + vsize, ediff - usize);
	  MPN_COPY (tp + size, up, usize);
	  cy = 0;
	  rsize = size + usize;
	}

      MPN_COPY (rp, tp, rsize);
      rp[rsize] = cy;
      rsize += cy;
      uexp += cy;
    }

  r->_mp_size = negate ? -rsize : rsize;
  r->_mp_exp = uexp;
  TMP_FREE (marker);
}
Example #19
/* use Brent's formula exp(x) = (1+r+r^2/2!+r^3/3!+...)^(2^K)*2^n
   where x = n*log(2)+(2^K)*r
   together with Brent-Kung O(t^(1/2)) algorithm for the evaluation of
   power series. The resulting complexity is O(n^(1/3)*M(n)).
*/
int 
mpfr_exp_2 (mpfr_ptr y, mpfr_srcptr x, mp_rnd_t rnd_mode) 
{
  int n, K, precy, q, k, l, err, exps, inexact;
  mpfr_t r, s, t; mpz_t ss;
  TMP_DECL(marker);

  precy = MPFR_PREC(y);

  n = (int) (mpfr_get_d1 (x) / LOG2);

  /* for the O(n^(1/2)*M(n)) method, the Taylor series computation of
     n/K terms costs about n/(2K) multiplications when computed in fixed
     point */
  K = (precy<SWITCH) ? _mpfr_isqrt((precy + 1) / 2) : _mpfr_cuberoot (4*precy);
  l = (precy-1)/K + 1;
  err = K + (int) _mpfr_ceil_log2 (2.0 * (double) l + 18.0);
  /* add K extra bits, i.e. failure probability <= 1/2^K = O(1/precy) */
  q = precy + err + K + 3;
  mpfr_init2 (r, q);
  mpfr_init2 (s, q);
  mpfr_init2 (t, q);
  /* the algorithm consists in computing an upper bound of exp(x) using
     a precision of q bits, and see if we can round to MPFR_PREC(y) taking
     into account the maximal error. Otherwise we increase q. */
  do {
#ifdef DEBUG
  printf("n=%d K=%d l=%d q=%d\n",n,K,l,q);
#endif

  /* if n<0, we have to get an upper bound of log(2)
     in order to get an upper bound of r = x-n*log(2) */
  mpfr_const_log2 (s, (n>=0) ? GMP_RNDZ : GMP_RNDU);
#ifdef DEBUG
  printf("n=%d log(2)=",n); mpfr_print_binary(s); putchar('\n');
#endif
  mpfr_mul_ui (r, s, (n<0) ? -n : n, (n>=0) ? GMP_RNDZ : GMP_RNDU); 
  if (n<0) mpfr_neg(r, r, GMP_RNDD);
  /* r = floor(n*log(2)) */

#ifdef DEBUG
  printf("x=%1.20e\n", mpfr_get_d1 (x));
  printf(" ="); mpfr_print_binary(x); putchar('\n');
  printf("r=%1.20e\n", mpfr_get_d1 (r));
  printf(" ="); mpfr_print_binary(r); putchar('\n');
#endif
  mpfr_sub(r, x, r, GMP_RNDU);
  if (MPFR_SIGN(r)<0) { /* initial approximation n was too large */
    n--;
    mpfr_mul_ui(r, s, (n<0) ? -n : n, GMP_RNDZ);
    if (n<0) mpfr_neg(r, r, GMP_RNDD);
    mpfr_sub(r, x, r, GMP_RNDU);
  }
#ifdef DEBUG
  printf("x-r=%1.20e\n", mpfr_get_d1 (r));
  printf(" ="); mpfr_print_binary(r); putchar('\n');
  if (MPFR_SIGN(r)<0) { fprintf(stderr,"Error in mpfr_exp: r<0\n"); exit(1); }
#endif
  mpfr_div_2ui(r, r, K, GMP_RNDU); /* r = (x-n*log(2))/2^K */

  TMP_MARK(marker);
  MY_INIT_MPZ(ss, 3 + 2*((q-1)/BITS_PER_MP_LIMB));
  exps = mpfr_get_z_exp(ss, s);
  /* s <- 1 + r/1! + r^2/2! + ... + r^l/l! */
  l = (precy<SWITCH) ? mpfr_exp2_aux(ss, r, q, &exps) /* naive method */
    : mpfr_exp2_aux2(ss, r, q, &exps); /* Brent/Kung method */

#ifdef DEBUG
  printf("l=%d q=%d (K+l)*q^2=%1.3e\n", l, q, (K+l)*(double)q*q);
#endif

  for (k=0;k<K;k++) {
    mpz_mul(ss, ss, ss); exps <<= 1;
    exps += mpz_normalize(ss, ss, q);
  }
  mpfr_set_z(s, ss, GMP_RNDN); MPFR_EXP(s) += exps;
  TMP_FREE(marker); /* don't need ss anymore */

  if (n>0) mpfr_mul_2ui(s, s, n, GMP_RNDU);
  else mpfr_div_2ui(s, s, -n, GMP_RNDU);

  /* error is at most 2^K*(3l*(l+1)) ulp for mpfr_exp2_aux */
  if (precy<SWITCH) l = 3*l*(l+1);
  else l = l*(l+4);
  k=0; while (l) { k++; l >>= 1; }
  /* now k = ceil(log(error in ulps)/log(2)) */
  K += k;
#ifdef DEBUG
    printf("after mult. by 2^n:\n");
    if (MPFR_EXP(s) > -1024)
      printf("s=%1.20e\n", mpfr_get_d1 (s));
    printf(" ="); mpfr_print_binary(s); putchar('\n');
    printf("err=%d bits\n", K);
#endif

  l = mpfr_can_round(s, q-K, GMP_RNDN, rnd_mode, precy);
  if (l==0) {
#ifdef DEBUG
     printf("not enough precision, use %d\n", q+BITS_PER_MP_LIMB);
     printf("q=%d q-K=%d precy=%d\n",q,q-K,precy);
#endif
     q += BITS_PER_MP_LIMB;
     mpfr_set_prec(r, q); mpfr_set_prec(s, q); mpfr_set_prec(t, q);
  }
  } while (l==0);

  inexact = mpfr_set (y, s, rnd_mode);

  mpfr_clear(r); mpfr_clear(s); mpfr_clear(t);
  return inexact;
}
Example #20
int
mpfr_cos (mpfr_ptr y, mpfr_srcptr x, mp_rnd_t rnd_mode)
{
  int K0, K, precy, m, k, l;
  int inexact;
  mpfr_t r, s;
  mp_limb_t *rp, *sp;
  mp_size_t sm;
  mp_exp_t exps, cancel = 0;
  TMP_DECL (marker);

  if (MPFR_UNLIKELY(MPFR_IS_SINGULAR(x)))
    {
      if (MPFR_IS_NAN(x) || MPFR_IS_INF(x))
	{
	  MPFR_SET_NAN(y);
	  MPFR_RET_NAN;
	}
      else
        {
          MPFR_ASSERTD(MPFR_IS_ZERO(x));
	  return mpfr_set_ui (y, 1, GMP_RNDN);
        }
    }

  mpfr_save_emin_emax ();

  precy = MPFR_PREC(y);
  K0 = __gmpfr_isqrt(precy / 2); /* Need K + log2(precy/K) extra bits */
  m = precy + 3 * (K0 + 2 * MAX(MPFR_GET_EXP (x), 0)) + 3;

  TMP_MARK(marker);
  sm = (m + BITS_PER_MP_LIMB - 1) / BITS_PER_MP_LIMB;
  MPFR_TMP_INIT(rp, r, m, sm);
  MPFR_TMP_INIT(sp, s, m, sm);

  for (;;)
    {
      mpfr_mul (r, x, x, GMP_RNDU); /* err <= 1 ulp */

      /* we need that |r| < 1 for mpfr_cos2_aux, i.e. up(x^2)/2^(2K) < 1 */
      K = K0 + MAX (MPFR_GET_EXP (r), 0);

      mpfr_div_2ui (r, r, 2 * K, GMP_RNDN); /* r = (x/2^K)^2, err <= 1 ulp */

      /* s <- 1 - r/2! + ... + (-1)^l r^l/(2l)! */
      l = mpfr_cos2_aux (s, r);

      MPFR_SET_ONE (r);
      for (k = 0; k < K; k++)
	{
	  mpfr_mul (s, s, s, GMP_RNDU);       /* err <= 2*olderr */
	  mpfr_mul_2ui (s, s, 1, GMP_RNDU);   /* err <= 4*olderr */
	  mpfr_sub (s, s, r, GMP_RNDN);
	}

      /* absolute error on s is bounded by (2l+1/3)*2^(2K-m) */
      for (k = 2 * K, l = 2 * l + 1; l > 1; l = (l + 1) >> 1)
	k++;
      /* now the error is bounded by 2^(k-m) = 2^(EXP(s)-err) */

      exps = MPFR_GET_EXP(s);
      if (MPFR_LIKELY(mpfr_can_round (s, exps + m - k, GMP_RNDN, GMP_RNDZ,
				      precy + (rnd_mode == GMP_RNDN))))
	break;

      m += BITS_PER_MP_LIMB;
      if (exps < cancel)
        {
          m += cancel - exps;
          cancel = exps;
        }
      sm = (m + BITS_PER_MP_LIMB - 1) / BITS_PER_MP_LIMB;
      MPFR_TMP_INIT(rp, r, m, sm);
      MPFR_TMP_INIT(sp, s, m, sm);
    }

  mpfr_restore_emin_emax ();
  inexact = mpfr_set (y, s, rnd_mode); /* FIXME: Don't we need to check the range? */

  TMP_FREE(marker);
  
  return inexact;
}
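mpfr_cos follows the standard MPFR convention: the ternary return value reports whether the rounded result is exact, below, or above the true value. A minimal usage sketch (precision chosen arbitrarily):

#include <mpfr.h>

void
cos_demo (void)
{
  mpfr_t x, y;
  int inexact;
  mpfr_init2 (x, 100);
  mpfr_init2 (y, 100);
  mpfr_set_ui (x, 1, GMP_RNDN);
  inexact = mpfr_cos (y, x, GMP_RNDN);   /* y ~= 0.54030, i.e. cos(1) */
  mpfr_clear (x);
  mpfr_clear (y);
}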
Example #21
File: powm.c Project: mahdiz/mpclib
/* For now, also disable REDC when MOD is even, as the inverse can't handle
   that.  At some point, we might want to make the code faster for that case,
   perhaps using CRR.  */

#ifndef POWM_THRESHOLD
#define POWM_THRESHOLD  ((8 * SQR_KARATSUBA_THRESHOLD) / 3)
#endif

#define HANDLE_NEGATIVE_EXPONENT 1
#undef REDUCE_EXPONENT

void
#ifndef BERKELEY_MP
mpz_powm (mpz_ptr r, mpz_srcptr b, mpz_srcptr e, mpz_srcptr m)
#else /* BERKELEY_MP */
pow (mpz_srcptr b, mpz_srcptr e, mpz_srcptr m, mpz_ptr r)
#endif /* BERKELEY_MP */
{
  mp_ptr xp, tp, qp, gp, this_gp;
  mp_srcptr bp, ep, mp;
  mp_size_t bn, es, en, mn, xn;
  mp_limb_t invm, c;
  unsigned long int enb;
  mp_size_t i, K, j, l, k;
  int m_zero_cnt, e_zero_cnt;
  int sh;
  int use_redc;
#if HANDLE_NEGATIVE_EXPONENT
  mpz_t new_b;
#endif
#if REDUCE_EXPONENT
  mpz_t new_e;
#endif
  TMP_DECL (marker);

  mp = PTR(m);
  mn = ABSIZ (m);
  if (mn == 0)
    DIVIDE_BY_ZERO;

  TMP_MARK (marker);

  es = SIZ (e);
  if (es <= 0)
    {
      if (es == 0)
	{
	  /* Exponent is zero, result is 1 mod m, i.e., 1 or 0 depending on
	     whether m equals 1.  */
	  SIZ(r) = (mn == 1 && mp[0] == 1) ? 0 : 1;
	  PTR(r)[0] = 1;
	  TMP_FREE (marker);	/* we haven't really allocated anything here */
	  return;
	}
#if HANDLE_NEGATIVE_EXPONENT
      MPZ_TMP_INIT (new_b, mn + 1);

      if (! mpz_invert (new_b, b, m))
	DIVIDE_BY_ZERO;
      b = new_b;
      es = -es;
#else
      DIVIDE_BY_ZERO;
#endif
    }
  en = es;

#if REDUCE_EXPONENT
  /* Reduce exponent by dividing it by phi(m) when m small.  */
  if (mn == 1 && mp[0] < 0x7fffffffL && en * GMP_NUMB_BITS > 150)
    {
      MPZ_TMP_INIT (new_e, 2);
      mpz_mod_ui (new_e, e, phi (mp[0]));
      e = new_e;
    }
#endif

  use_redc = mn < POWM_THRESHOLD && mp[0] % 2 != 0;
  if (use_redc)
    {
      /* invm = -1/m mod 2^BITS_PER_MP_LIMB, must have m odd */
      modlimb_invert (invm, mp[0]);
      invm = -invm;
    }
  else
    {
      /* Normalize m (i.e. make its most significant bit set) as required by
	 division functions below.  */
      count_leading_zeros (m_zero_cnt, mp[mn - 1]);
      m_zero_cnt -= GMP_NAIL_BITS;
      if (m_zero_cnt != 0)
	{
	  mp_ptr new_mp;
	  new_mp = TMP_ALLOC_LIMBS (mn);
	  mpn_lshift (new_mp, mp, mn, m_zero_cnt);
	  mp = new_mp;
	}
    }

  /* Determine optimal value of k, the number of exponent bits we look at
     at a time.  */
  count_leading_zeros (e_zero_cnt, PTR(e)[en - 1]);
  e_zero_cnt -= GMP_NAIL_BITS;
  enb = en * GMP_NUMB_BITS - e_zero_cnt; /* number of bits of exponent */
  k = 1;
  K = 2;
  while (2 * enb > K * (2 + k * (3 + k)))
    {
      k++;
      K *= 2;
    }

  tp = TMP_ALLOC_LIMBS (2 * mn + 1);
  qp = TMP_ALLOC_LIMBS (mn + 1);

  gp = __GMP_ALLOCATE_FUNC_LIMBS (K / 2 * mn);

  /* Compute x*R^n where R=2^BITS_PER_MP_LIMB.  */
  bn = ABSIZ (b);
  bp = PTR(b);
  /* Handle |b| >= m by computing b mod m.  FIXME: It is not strictly necessary
     for speed or correctness to do this when b and m have the same number of
     limbs, perhaps remove mpn_cmp call.  */
  if (bn > mn || (bn == mn && mpn_cmp (bp, mp, mn) >= 0))
    {
      /* Reduce possibly huge base while moving it to gp[0].  Use a function
	 call to reduce, since we don't want the quotient allocation to
	 live until function return.  */
      if (use_redc)
	{
	  reduce (tp + mn, bp, bn, mp, mn);	/* b mod m */
	  MPN_ZERO (tp, mn);
	  mpn_tdiv_qr (qp, gp, 0L, tp, 2 * mn, mp, mn); /* unnormalized! */
	}
      else
	{
	  reduce (gp, bp, bn, mp, mn);
	}
    }
  else
    {
      /* |b| < m.  We pad out operands to become mn limbs, which simplifies
	 the rest of the function, but slows things down when |b| << m.  */
      if (use_redc)
	{
	  MPN_ZERO (tp, mn);
	  MPN_COPY (tp + mn, bp, bn);
	  MPN_ZERO (tp + mn + bn, mn - bn);
	  mpn_tdiv_qr (qp, gp, 0L, tp, 2 * mn, mp, mn);
	}
      else
	{
	  MPN_COPY (gp, bp, bn);
	  MPN_ZERO (gp + bn, mn - bn);
	}
    }

  /* Compute xx^i for odd g < 2^i.  */

  xp = TMP_ALLOC_LIMBS (mn);
  mpn_sqr_n (tp, gp, mn);
  if (use_redc)
    redc (xp, mp, mn, invm, tp);		/* xx = x^2*R^n */
  else
    mpn_tdiv_qr (qp, xp, 0L, tp, 2 * mn, mp, mn);
  this_gp = gp;
  for (i = 1; i < K / 2; i++)
    {
      mpn_mul_n (tp, this_gp, xp, mn);
      this_gp += mn;
      if (use_redc)
	redc (this_gp, mp, mn, invm, tp);	/* g[i] = x^(2i+1)*R^n */
      else
	mpn_tdiv_qr (qp, this_gp, 0L, tp, 2 * mn, mp, mn);
    }

  /* Start the real stuff.  */
  ep = PTR (e);
  i = en - 1;				/* current index */
  c = ep[i];				/* current limb */
  sh = GMP_NUMB_BITS - e_zero_cnt;	/* significant bits in ep[i] */
  sh -= k;				/* index of lower bit of ep[i] to take into account */
  if (sh < 0)
    {					/* k-sh extra bits are needed */
      if (i > 0)
	{
	  i--;
	  c <<= (-sh);
	  sh += GMP_NUMB_BITS;
	  c |= ep[i] >> sh;
	}
    }
Example #22
int
mpz_perfect_power_p (mpz_srcptr u)
{
    unsigned long int prime;
    unsigned long int n, n2;
    int i;
    unsigned long int rem;
    mpz_t u2, q;
    int exact;
    mp_size_t uns;
    mp_size_t usize = SIZ (u);
    TMP_DECL (marker);

    if (usize == 0)
        return 1;			/* consider 0 a perfect power */

    n2 = mpz_scan1 (u, 0);
    if (n2 == 1)
        return 0;			/* 2 divides exactly once.  */

    if (n2 != 0 && (n2 & 1) == 0 && usize < 0)
        return 0;			/* 2 has even multiplicity with negative U */

    TMP_MARK (marker);

    uns = ABS (usize) - n2 / BITS_PER_MP_LIMB;
    MPZ_TMP_INIT (q, uns);
    MPZ_TMP_INIT (u2, uns);

    mpz_tdiv_q_2exp (u2, u, n2);

    if (isprime (n2))
        goto n2prime;

    for (i = 1; primes[i] != 0; i++)
    {
        prime = primes[i];
        rem = mpz_tdiv_ui (u2, prime);
        if (rem == 0)		/* divisible by this prime? */
        {
            rem = mpz_tdiv_q_ui (q, u2, prime * prime);
            if (rem != 0)
            {
                TMP_FREE (marker);
                return 0;		/* prime divides exactly once, reject */
            }
            mpz_swap (q, u2);
            for (n = 2;;)
            {
                rem = mpz_tdiv_q_ui (q, u2, prime);
                if (rem != 0)
                    break;
                mpz_swap (q, u2);
                n++;
            }

            if ((n & 1) == 0 && usize < 0)
            {
                TMP_FREE (marker);
                return 0;		/* even multiplicity with negative U, reject */
            }

            n2 = gcd (n2, n);
            if (n2 == 1)
            {
                TMP_FREE (marker);
                return 0;		/* we have multiplicity 1 of some factor */
            }

            if (mpz_cmpabs_ui (u2, 1) == 0)
            {
                TMP_FREE (marker);
                return 1;		/* factoring completed; consistent power */
            }

            /* As soon as n2 becomes a prime number, stop factoring.
               Either we have u=x^n2 or u is not a perfect power.  */
            if (isprime (n2))
                goto n2prime;
        }
    }

    if (n2 == 0)
    {
        /* We found no factors above; have to check all values of n.  */
        unsigned long int nth;
        for (nth = usize < 0 ? 3 : 2;; nth++)
        {
            if (! isprime (nth))
                continue;
#if 0
            exact = mpz_padic_root (q, u2, nth, PTH);
            if (exact)
#endif
                exact = mpz_root (q, u2, nth);
            if (exact)
            {
                TMP_FREE (marker);
                return 1;
            }
            if (mpz_cmp_ui (q, SMALLEST_OMITTED_PRIME) < 0)
            {
                TMP_FREE (marker);
                return 0;
            }
        }
    }
    else
    {
        unsigned long int nth;
        /* We found some factors above.  We just need to consider values of n
           that divide n2.  */
        for (nth = 2; nth <= n2; nth++)
        {
            if (! isprime (nth))
                continue;
            if (n2 % nth != 0)
                continue;
#if 0
            exact = mpz_padic_root (q, u2, nth, PTH);
            if (exact)
#endif
                exact = mpz_root (q, u2, nth);
            if (exact)
            {
                TMP_FREE (marker);
                return 1;
            }
            if (mpz_cmp_ui (q, SMALLEST_OMITTED_PRIME) < 0)
            {
                TMP_FREE (marker);
                return 0;
            }
        }

        TMP_FREE (marker);
        return 0;
    }

n2prime:
    exact = mpz_root (NULL, u2, n2);
    TMP_FREE (marker);
    return exact;
}
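mpz_perfect_power_p returns non-zero when u can be written as x^k with k > 1 (this code counts 0 and 1 as perfect powers). A small usage sketch:

#include <gmp.h>

void
perfect_power_demo (void)
{
  mpz_t u;
  mpz_init_set_ui (u, 27);
  /* 27 = 3^3, so this returns non-zero */
  if (mpz_perfect_power_p (u))
    { /* ... */ }
  mpz_set_ui (u, 12);
  /* 12 = 2^2 * 3 is not a perfect power, so this returns 0 */
  if (!mpz_perfect_power_p (u))
    { /* ... */ }
  mpz_clear (u);
}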
Example #23
/* s <- 1 + r/1! + r^2/2! + ... + r^l/l! while MPFR_EXP(r^l/l!)+MPFR_EXPR(r)>-q
   using Brent/Kung method with O(sqrt(l)) multiplications.
   Return l.
   Uses m multiplications of full size and 2l/m of decreasing size, 
   i.e. a total equivalent to about m+l/m full multiplications,
   i.e. 2*sqrt(l) for m=sqrt(l).
   Version using mpz. ss must have at least (sizer+1) limbs.
   The error is bounded by (l^2+4*l) ulps where l is the return value.
*/
static int
mpfr_exp2_aux2 (mpz_t s, mpfr_srcptr r, int q, int *exps)
{
  int expr, l, m, i, sizer, *expR, expt, ql;
  unsigned long int c;
  mpz_t t, *R, rr, tmp;
  TMP_DECL(marker);

  /* estimate value of l */
  l = q / (-MPFR_EXP(r));
  m = (int) _mpfr_isqrt (l);
  /* we access R[2], thus we need m >= 2 */
  if (m < 2) m = 2;
  TMP_MARK(marker);
  R = (mpz_t*) TMP_ALLOC((m+1)*sizeof(mpz_t)); /* R[i] stands for r^i */
  expR = (int*) TMP_ALLOC((m+1)*sizeof(int)); /* exponent for R[i] */
  sizer = 1 + (MPFR_PREC(r)-1)/BITS_PER_MP_LIMB;
  mpz_init(tmp);
  MY_INIT_MPZ(rr, sizer+2);
  MY_INIT_MPZ(t, 2*sizer); /* double size for products */
  mpz_set_ui(s, 0); *exps = 1-q; /* initialize s to zero, 1 ulp = 2^(1-q) */
  for (i=0;i<=m;i++) MY_INIT_MPZ(R[i], sizer+2);
  expR[1] = mpfr_get_z_exp(R[1], r); /* exact operation: no error */
  expR[1] = mpz_normalize2(R[1], R[1], expR[1], 1-q); /* error <= 1 ulp */
  mpz_mul(t, R[1], R[1]); /* err(t) <= 2 ulps */
  mpz_div_2exp(R[2], t, q-1); /* err(R[2]) <= 3 ulps */
  expR[2] = 1-q;
  for (i=3;i<=m;i++) {
    mpz_mul(t, R[i-1], R[1]); /* err(t) <= 2*i-2 */
    mpz_div_2exp(R[i], t, q-1); /* err(R[i]) <= 2*i-1 ulps */
    expR[i] = 1-q;
  }
  mpz_set_ui(R[0], 1); mpz_mul_2exp(R[0], R[0], q-1); expR[0]=1-q; /* R[0]=1 */
  mpz_set_ui(rr, 1); expr=0; /* rr contains r^l/l! */
  /* by induction: err(rr) <= 2*l ulps */

  l = 0;
  ql = q; /* precision used for current giant step */
  do {
    /* all R[i] must have exponent 1-ql */
    if (l) for (i=0;i<m;i++) {
      expR[i] = mpz_normalize2(R[i], R[i], expR[i], 1-ql);
    }
    /* the absolute error on R[i]*rr is still 2*i-1 ulps */
    expt = mpz_normalize2(t, R[m-1], expR[m-1], 1-ql); 
    /* err(t) <= 2*m-1 ulps */
    /* computes t = 1 + r/(l+1) + ... + r^(m-1)*l!/(l+m-1)! 
       using Horner's scheme */
    for (i=m-2;i>=0;i--) {
      mpz_div_ui(t, t, l+i+1); /* err(t) += 1 ulp */
      mpz_add(t, t, R[i]);
    }
    /* now err(t) <= (3m-2) ulps */
    
    /* now multiplies t by r^l/l! and adds to s */
    mpz_mul(t, t, rr); expt += expr;
    expt = mpz_normalize2(t, t, expt, *exps);
    /* err(t) <= (3m-1) + err_rr(l) <= (3m-2) + 2*l */
#ifdef DEBUG
    if (expt != *exps) {
      fprintf(stderr, "Error: expt != exps %d %d\n", expt, *exps); exit(1);
    }
#endif
    mpz_add(s, s, t); /* no error here */

    /* updates rr, the multiplication of the factors l+i could be done 
       using binary splitting too, but it is not sure it would save much */
    mpz_mul(t, rr, R[m]); /* err(t) <= err(rr) + 2m-1 */
    expr += expR[m];
    mpz_set_ui (tmp, 1);
    for (i=1, c=1; i<=m; i++) {
      if (l+i > ~((unsigned long int) 0)/c) {
	mpz_mul_ui(tmp, tmp, c);
	c = l+i;
      }
      else c *= (unsigned long int) l+i;
    }
    if (c != 1) mpz_mul_ui (tmp, tmp, c); /* tmp is exact */
    mpz_fdiv_q(t, t, tmp); /* err(t) <= err(rr) + 2m */
    expr += mpz_normalize(rr, t, ql); /* err_rr(l+1) <= err_rr(l) + 2m+1 */
    ql = q - *exps - mpz_sizeinbase(s, 2) + expr + mpz_sizeinbase(rr, 2);
    l+=m;
  } while (expr+mpz_sizeinbase(rr, 2) > -q);

  TMP_FREE(marker);
  mpz_clear(tmp);
  return l;
}
Example #24
int
mpfr_div (mpfr_ptr q, mpfr_srcptr u, mpfr_srcptr v, mp_rnd_t rnd_mode)
{
  mp_srcptr up, vp, bp;
  mp_size_t usize, vsize;

  mp_ptr ap, qp, rp;
  mp_size_t asize, bsize, qsize, rsize;
  mp_exp_t qexp;

  mp_size_t err, k;
  mp_limb_t tonearest;
  int inex, sh, can_round = 0, sign_quotient;
  unsigned int cc = 0, rw;

  TMP_DECL (marker);


  /**************************************************************************
   *                                                                        *
   *              This part of the code deals with special cases            *
   *                                                                        *
   **************************************************************************/

  if (MPFR_ARE_SINGULAR(u,v))
    {
      if (MPFR_IS_NAN(u) || MPFR_IS_NAN(v))
	{
	  MPFR_SET_NAN(q);
	  MPFR_RET_NAN;
	}
      sign_quotient = MPFR_MULT_SIGN( MPFR_SIGN(u) , MPFR_SIGN(v) );
      MPFR_SET_SIGN(q, sign_quotient);
      if (MPFR_IS_INF(u))
	{
	  if (MPFR_IS_INF(v))
	    {
	      MPFR_SET_NAN(q);
	      MPFR_RET_NAN;
	    }
	  else
	    {
	      MPFR_SET_INF(q);
	      MPFR_RET(0);
	    }
	}
      else if (MPFR_IS_INF(v))
	{
	  MPFR_SET_ZERO(q);
	  MPFR_RET(0);
	}
      else if (MPFR_IS_ZERO(v))
	{
	  if (MPFR_IS_ZERO(u))
	    {
	      MPFR_SET_NAN(q);
	      MPFR_RET_NAN;
	    }
	  else
	    {
	      MPFR_SET_INF(q);
	      MPFR_RET(0);
	    }
	}
      else
	{
	  MPFR_ASSERTD(MPFR_IS_ZERO(u));
	  MPFR_SET_ZERO(q);
	  MPFR_RET(0);
	}
    }
  MPFR_CLEAR_FLAGS(q);

  /**************************************************************************
   *                                                                        *
   *              End of the part concerning special values.                *
   *                                                                        *
   **************************************************************************/

  sign_quotient = MPFR_MULT_SIGN( MPFR_SIGN(u) , MPFR_SIGN(v) );
  up = MPFR_MANT(u);
  vp = MPFR_MANT(v);
  MPFR_SET_SIGN(q, sign_quotient);

  TMP_MARK (marker);
  usize = MPFR_LIMB_SIZE(u);
  vsize = MPFR_LIMB_SIZE(v);

  /**************************************************************************
   *                                                                        *
   *   First try to use only part of u, v. If this is not sufficient,       *
   *   use the full u and v, to avoid long computations eg. in the case     *
   *   u = v.                                                               *
   *                                                                        *
   **************************************************************************/

  /* The dividend is a, length asize. The divisor is b, length bsize. */

  qsize = (MPFR_PREC(q) + 3) / BITS_PER_MP_LIMB + 1;

  /* in case PREC(q)=PREC(v), then vsize=qsize with probability 1-4/b
     where b is the number of bits per limb */
  if (MPFR_LIKELY(vsize <= qsize))
    {
      bsize = vsize;
      bp = vp;
    }
  else /* qsize < vsize: take only the qsize high limbs of the divisor */
    {
      bsize = qsize;
      bp = (mp_srcptr) vp + (vsize - qsize);
    }

  /* we have {bp, bsize} * (1 + errb) = (true divisor)
     with 0 <= errb < 2^(-qsize*BITS_PER_MP_LIMB+1) */

  asize = bsize + qsize;
  ap = (mp_ptr) TMP_ALLOC (asize * BYTES_PER_MP_LIMB);
  /* if all arguments have same precision, then asize will be about 2*usize */
  if (MPFR_LIKELY(asize > usize))
    {
      /* copy u into the high limbs of {ap, asize}, and pad with zeroes */
      /* FIXME: could we copy only the qsize high limbs of the dividend? */
      MPN_COPY (ap + asize - usize, up, usize);
      MPN_ZERO (ap, asize - usize);
    }
  else /* truncate the high asize limbs of u into {ap, asize} */
    MPN_COPY (ap, up + usize - asize, asize);

  /* we have {ap, asize} = (true dividend) * (1 - erra)
     with 0 <= erra < 2^(-asize*BITS_PER_MP_LIMB).
     This {ap, asize} / {bp, bsize} =
     (true dividend) / (true divisor) * (1 - erra) (1 + errb) */

  /* Allocate limbs for quotient and remainder. */
  qp = (mp_ptr) TMP_ALLOC ((qsize + 1) * BYTES_PER_MP_LIMB);
  rp = (mp_ptr) TMP_ALLOC (bsize * BYTES_PER_MP_LIMB);
  rsize = bsize;

  mpn_tdiv_qr (qp, rp, 0, ap, asize, bp, bsize);
  sh = - (int) qp[qsize];
  /* since u and v are normalized, sh is 0 or -1 */

  /* we have {qp, qsize + 1} = {ap, asize} / {bp, bsize} (1 - errq)
     with 0 <= errq < 2^(-qsize*BITS_PER_MP_LIMB+1+sh)
     thus {qp, qsize + 1} =
     (true dividend) / (true divisor) * (1 - erra) (1 + errb) (1 - errq).
     
     In fact, since the truncated dividend and {rp, bsize} do not overlap,
     we have: {qp, qsize + 1} =
     (true dividend) / (true divisor) * (1 - erra') (1 + errb)
     where 0 <= erra' < 2^(-qsize*BITS_PER_MP_LIMB+sh) */

  /* Estimate number of correct bits. */

  err = qsize * BITS_PER_MP_LIMB;

  /* We want to check if rounding is possible, but without normalizing
     because we might have to divide again if rounding is impossible, or
     if the result might be exact. We have however to mimic normalization */

  /*
     To detect asap if the result is inexact, so as to avoid doing the
     division completely, we perform the following check :

     - if rnd_mode != GMP_RNDN, and the result is exact, we are unable
     to round simultaneously to zero and to infinity;

     - if rnd_mode == GMP_RNDN, and if we can round to zero with one extra
     bit of precision, we can decide rounding. Hence in that case, check
     as in the case of GMP_RNDN, with one extra bit. Note that in the case
     of close to even rounding we shall do the division completely, but
     this is necessary anyway: we need to know whether this is really
     even rounding or not.
  */

  if (MPFR_UNLIKELY(asize < usize || bsize < vsize))
    {
      {
	mp_rnd_t  rnd_mode1, rnd_mode2;
	mp_exp_t  tmp_exp;
	mp_prec_t tmp_prec;

        if (bsize < vsize)
          err -= 2; /* divisor is truncated */
#if 0 /* commented this out since the truncation of the dividend is already
         taken into account in {rp, bsize}, which does not overlap with the
         neglected part of the dividend */
        else if (asize < usize)
          err --;   /* dividend is truncated */
#endif

	if (MPFR_LIKELY(rnd_mode == GMP_RNDN))
	  {
	    rnd_mode1 = GMP_RNDZ;
	    rnd_mode2 = MPFR_IS_POS_SIGN(sign_quotient) ? GMP_RNDU : GMP_RNDD;
	    sh++;
	  }
	else
	  {
	    rnd_mode1 = rnd_mode;
	    switch (rnd_mode)
	      {
	      case GMP_RNDU:
		rnd_mode2 = GMP_RNDD; break;
	      case GMP_RNDD:
		rnd_mode2 = GMP_RNDU; break;
	      default:
		rnd_mode2 = MPFR_IS_POS_SIGN(sign_quotient) ?
		  GMP_RNDU : GMP_RNDD;
		break;
	      }
	  }

	tmp_exp  = err + sh + BITS_PER_MP_LIMB;
	tmp_prec = MPFR_PREC(q) + sh + BITS_PER_MP_LIMB;
	
	can_round =
	  mpfr_can_round_raw (qp, qsize + 1, sign_quotient, tmp_exp,
                              GMP_RNDN, rnd_mode1, tmp_prec)
	  & mpfr_can_round_raw (qp, qsize + 1, sign_quotient, tmp_exp,
                                GMP_RNDN, rnd_mode2, tmp_prec);

        /* restore original value of sh, i.e. sh = - qp[qsize] */
	sh -= (rnd_mode == GMP_RNDN);
      }
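
The excerpt above ends just after the mpfr_can_round_raw test that decides whether the truncated quotient already determines the correctly rounded result; per the comments, the division may otherwise have to be redone. What a caller observes is the dependence on the rounding mode and the ternary (inexact) return value. Below is a minimal sketch of that behaviour through the public API, assuming a standard MPFR installation; the operands and the 53-bit precision are arbitrary, and the legacy GMP_RND* names are used to match the code above.

#include <stdio.h>
#include <mpfr.h>

int
main (void)
{
  mpfr_t x, y, q;
  int inex_down, inex_up;

  mpfr_init2 (x, 53);
  mpfr_init2 (y, 53);
  mpfr_init2 (q, 53);

  mpfr_set_ui (x, 1, GMP_RNDN);
  mpfr_set_ui (y, 3, GMP_RNDN);

  /* 1/3 is not exactly representable in 53 bits, so the ternary value is
     non-zero and the two directed roundings differ in the last bit */
  inex_down = mpfr_div (q, x, y, GMP_RNDD);
  printf ("round down: %.20g (inexact %d)\n", mpfr_get_d (q, GMP_RNDN), inex_down);

  inex_up = mpfr_div (q, x, y, GMP_RNDU);
  printf ("round up:   %.20g (inexact %d)\n", mpfr_get_d (q, GMP_RNDN), inex_up);

  mpfr_clear (x);
  mpfr_clear (y);
  mpfr_clear (q);
  return 0;
}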
Example No. 25
mp_limb_t
mpn_divrem (mp_ptr qp, mp_size_t qxn,
	    mp_ptr np, mp_size_t nn,
	    mp_srcptr dp, mp_size_t dn)
{
  ASSERT (qxn >= 0);
  ASSERT (nn >= dn);
  ASSERT (dn >= 1);
  ASSERT (dp[dn-1] & GMP_NUMB_HIGHBIT);
  ASSERT (! MPN_OVERLAP_P (np, nn, dp, dn));
  ASSERT (! MPN_OVERLAP_P (qp, nn-dn+qxn, np, nn) || qp==np+dn+qxn);
  ASSERT (! MPN_OVERLAP_P (qp, nn-dn+qxn, dp, dn));
  ASSERT_MPN (np, nn);
  ASSERT_MPN (dp, dn);

  if (dn == 1)
    {
      mp_limb_t ret;
      mp_ptr q2p;
      mp_size_t qn;
      TMP_DECL (marker);

      TMP_MARK (marker);
      q2p = (mp_ptr) TMP_ALLOC ((nn + qxn) * BYTES_PER_MP_LIMB);

      np[0] = mpn_divrem_1 (q2p, qxn, np, nn, dp[0]);
      qn = nn + qxn - 1;
      MPN_COPY (qp, q2p, qn);
      ret = q2p[qn];

      TMP_FREE (marker);
      return ret;
    }
  else if (dn == 2)
    {
      return mpn_divrem_2 (qp, qxn, np, nn, dp);
    }
  else
    {
      mp_ptr rp, q2p;
      mp_limb_t qhl;
      mp_size_t qn;
      TMP_DECL (marker);

      TMP_MARK (marker);
      if (qxn != 0)
	{
	  mp_ptr n2p;
	  n2p = (mp_ptr) TMP_ALLOC ((nn + qxn) * BYTES_PER_MP_LIMB);
	  MPN_ZERO (n2p, qxn);
	  MPN_COPY (n2p + qxn, np, nn);
	  q2p = (mp_ptr) TMP_ALLOC ((nn - dn + qxn + 1) * BYTES_PER_MP_LIMB);
	  rp = (mp_ptr) TMP_ALLOC (dn * BYTES_PER_MP_LIMB);
	  mpn_tdiv_qr (q2p, rp, 0L, n2p, nn + qxn, dp, dn);
	  MPN_COPY (np, rp, dn);
	  qn = nn - dn + qxn;
	  MPN_COPY (qp, q2p, qn);
	  qhl = q2p[qn];
	}
      else
	{
	  q2p = (mp_ptr) TMP_ALLOC ((nn - dn + 1) * BYTES_PER_MP_LIMB);
	  rp = (mp_ptr) TMP_ALLOC (dn * BYTES_PER_MP_LIMB);
	  mpn_tdiv_qr (q2p, rp, 0L, np, nn, dp, dn);
	  MPN_COPY (np, rp, dn);	/* overwrite np area with remainder */
	  qn = nn - dn;
	  MPN_COPY (qp, q2p, qn);
	  qhl = q2p[qn];
	}
      TMP_FREE (marker);
      return qhl;
    }
}
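
Apart from the one- and two-limb divisor shortcuts, mpn_divrem forwards the work to mpn_tdiv_qr and copies the remainder back over the dividend. Below is a minimal sketch of calling mpn_tdiv_qr directly, assuming a standard GMP installation; the limb values are arbitrary, and gmp_printf's %Mu conversion (the length modifier for mp_limb_t) is used so the limbs print portably.

#include <stdio.h>
#include <gmp.h>

int
main (void)
{
  /* {np,3} is the dividend and {dp,2} the divisor, least significant limb
     first; the divisor's most significant limb must be non-zero */
  mp_limb_t np[3] = { 5, 7, 9 };
  mp_limb_t dp[2] = { 3, 4 };
  mp_limb_t qp[2];   /* quotient needs nn - dn + 1 = 2 limbs */
  mp_limb_t rp[2];   /* remainder needs dn = 2 limbs */
  int i;

  mpn_tdiv_qr (qp, rp, 0, np, 3, dp, 2);   /* qxn must be 0 */

  for (i = 1; i >= 0; i--)
    gmp_printf ("q[%d] = %Mu\n", i, qp[i]);
  for (i = 1; i >= 0; i--)
    gmp_printf ("r[%d] = %Mu\n", i, rp[i]);
  return 0;
}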
Example No. 26
File: mout.c Project: mahdiz/mpclib
void
mout (const MINT *x)
{
  mp_ptr xp;
  mp_srcptr x_ptr;
  mp_size_t x_size;
  unsigned char *str;
  size_t str_size;
  int i;
  TMP_DECL (marker);

  x_size = x->_mp_size;
  if (x_size == 0)
    {
      fputc ('0', stdout);
      fputc ('\n', stdout);
      return;
    }
  if (x_size < 0)
    {
      fputc ('-', stdout);
      x_size = -x_size;
    }

  TMP_MARK (marker);
  x_ptr = x->_mp_d;
  MPN_SIZEINBASE (str_size, x_ptr, x_size, 10);
  str_size += 2;
  str = (unsigned char *) TMP_ALLOC (str_size);

  /* mpn_get_str clobbers its argument */
  xp = TMP_ALLOC_LIMBS (x_size);
  MPN_COPY (xp, x_ptr, x_size);

  str_size = mpn_get_str (str, 10, xp, x_size);

  /* mpn_get_str might make a leading zero, skip it.  */
  str_size -= (*str == 0);
  str += (*str == 0);
  ASSERT (*str != 0);

  /* Translate to printable chars.  */
  for (i = 0; i < str_size; i++)
    str[i] = "0123456789"[str[i]];
  str[str_size] = 0;

  str_size = strlen ((char *) str);
  if (str_size % 10 != 0)
    {
      fwrite (str, 1, str_size % 10, stdout);
      str += str_size % 10;
      str_size -= str_size % 10;
      if (str_size != 0)
	fputc (' ', stdout);
    }
  for (i = 0; i < str_size; i += 10)
    {
      fwrite (str, 1, 10, stdout);
      str += 10;
      if (i + 10 < str_size)
	fputc (' ', stdout);
    }
  fputc ('\n', stdout);
  TMP_FREE (marker);
}
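
mout belongs to the old Berkeley mp compatibility layer: it converts the operand to decimal and prints it in space-separated groups of ten digits. The same output can be produced with the documented mpz interface; below is a minimal sketch assuming a standard GMP installation, with an arbitrary positive test value (a negative sign would need the separate handling mout performs above).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <gmp.h>

int
main (void)
{
  mpz_t x;
  char *str;
  size_t len, i;

  mpz_init_set_str (x, "123456789012345678901234567890123", 10);

  /* mpz_sizeinbase may overestimate by one digit; add room for a
     possible minus sign and the terminating '\0' */
  str = malloc (mpz_sizeinbase (x, 10) + 2);
  mpz_get_str (str, 10, x);
  len = strlen (str);

  /* print in groups of ten digits, as mout does */
  for (i = 0; i < len; i++)
    {
      putchar (str[i]);
      if (i + 1 < len && (len - 1 - i) % 10 == 0)
        putchar (' ');
    }
  putchar ('\n');

  free (str);
  mpz_clear (x);
  return 0;
}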
Example No. 27
REGPARM_ATTR (1) static void
mpz_aorsmul (mpz_ptr w, mpz_srcptr x, mpz_srcptr y, mp_size_t sub)
{
  mp_size_t  xsize, ysize, tsize, wsize, wsize_signed;
  mp_ptr     wp, tp;
  mp_limb_t  c, high;
  TMP_DECL (marker);

  /* w unaffected if x==0 or y==0 */
  xsize = SIZ(x);
  ysize = SIZ(y);
  if (xsize == 0 || ysize == 0)
    return;

  /* make x the bigger of the two */
  if (ABS(ysize) > ABS(xsize))
    {
      MPZ_SRCPTR_SWAP (x, y);
      MP_SIZE_T_SWAP (xsize, ysize);
    }

  sub ^= ysize;
  ysize = ABS(ysize);

  /* use mpn_addmul_1/mpn_submul_1 if possible */
  if (ysize == 1)
    {
      mpz_aorsmul_1 (w, x, PTR(y)[0], sub);
      return;
    }

  sub ^= xsize;
  xsize = ABS(xsize);

  wsize_signed = SIZ(w);
  sub ^= wsize_signed;
  wsize = ABS(wsize_signed);

  tsize = xsize + ysize;
  MPZ_REALLOC (w, MAX (wsize, tsize) + 1);
  wp = PTR(w);

  if (wsize_signed == 0)
    {
      /* Nothing to add to, just set w=x*y.  No w==x or w==y overlap here,
         since we know x,y!=0 but w==0.  */
      high = mpn_mul (wp, PTR(x),xsize, PTR(y),ysize);
      tsize -= (high == 0);
      SIZ(w) = (sub >= 0 ? tsize : -tsize);
      return;
    }

  TMP_MARK (marker);
  tp = TMP_ALLOC_LIMBS (tsize);

  high = mpn_mul (tp, PTR(x),xsize, PTR(y),ysize);
  tsize -= (high == 0);
  ASSERT (tp[tsize-1] != 0);
  if (sub >= 0)
    {
      mp_srcptr up    = wp;
      mp_size_t usize = wsize;

      if (usize < tsize)
        {
          up    = tp;
          usize = tsize;
          tp    = wp;
          tsize = wsize;

          wsize = usize;
        }

      c = mpn_add (wp, up,usize, tp,tsize);
      wp[wsize] = c;
      wsize += (c != 0);
    }
  else
    {
      mp_srcptr up    = wp;
      mp_size_t usize = wsize;

      if (mpn_cmp_twosizes_lt (up,usize, tp,tsize))
        {
          up    = tp;
          usize = tsize;
          tp    = wp;
          tsize = wsize;

          wsize = usize;
          wsize_signed = -wsize_signed;
        }

      ASSERT_NOCARRY (mpn_sub (wp, up,usize, tp,tsize));
      wsize = usize;
      MPN_NORMALIZE (wp, wsize);
    }

  SIZ(w) = (wsize_signed >= 0 ? wsize : -wsize);

  TMP_FREE (marker);
}
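
mpz_aorsmul is the common backend of the public mpz_addmul and mpz_submul entry points (w += x*y and w -= x*y respectively), with the sub argument encoding which of the two is wanted. Below is a minimal usage sketch of those two documented functions, assuming a standard GMP installation; the operand values are arbitrary.

#include <stdio.h>
#include <gmp.h>

int
main (void)
{
  mpz_t w, x, y;

  mpz_init_set_ui (w, 1000);
  mpz_init_set_ui (x, 123);
  mpz_init_set_ui (y, 456);

  mpz_addmul (w, x, y);             /* w = 1000 + 123*456 = 57088 */
  gmp_printf ("after addmul: %Zd\n", w);

  mpz_submul (w, x, y);             /* back to 1000 */
  gmp_printf ("after submul: %Zd\n", w);

  mpz_clear (w);
  mpz_clear (x);
  mpz_clear (y);
  return 0;
}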
Example No. 28
/* returns 0 if result exact, non-zero otherwise */
int
mpfr_div_ui (mpfr_ptr y, mpfr_srcptr x, unsigned long int u, mp_rnd_t rnd_mode)
{
  long int xn, yn, dif, sh, i;
  mp_limb_t *xp, *yp, *tmp, c, d;
  mp_exp_t exp;
  int inexact, middle = 1;
  TMP_DECL(marker);

  if (MPFR_UNLIKELY( MPFR_IS_SINGULAR(x) ))
    {
      if (MPFR_IS_NAN(x))
	{
	  MPFR_SET_NAN(y);
	  MPFR_RET_NAN;
	}
      else if (MPFR_IS_INF(x))
	{
	  MPFR_SET_INF(y);
	  MPFR_SET_SAME_SIGN(y, x);
	  MPFR_RET(0);
	}
      else
	{
          MPFR_ASSERTD(MPFR_IS_ZERO(x));
	  if (u == 0)/* 0/0 is NaN */
	    {
	      MPFR_SET_NAN(y);
	      MPFR_RET_NAN;
	    }
	  else
	    {
	      MPFR_SET_ZERO(y);
	      MPFR_RET(0);
	    }
	}
    }

  if (MPFR_UNLIKELY(u == 0))
    {
      /* x/0 is Inf */
      MPFR_SET_INF(y);
      MPFR_SET_SAME_SIGN(y, x);
      MPFR_RET(0);
    }

  MPFR_CLEAR_FLAGS(y);

  MPFR_SET_SAME_SIGN(y, x);

  TMP_MARK(marker);
  xn = MPFR_LIMB_SIZE(x);
  yn = MPFR_LIMB_SIZE(y);

  xp = MPFR_MANT(x);
  yp = MPFR_MANT(y);
  exp = MPFR_GET_EXP (x);

  dif = yn + 1 - xn;

  /* we need to store yn+1 = xn + dif limbs of the quotient */
  /* don't use tmp=yp since the mpn_lshift call below requires yp >= tmp+1 */
  tmp = (mp_limb_t*) TMP_ALLOC((yn + 1) * BYTES_PER_MP_LIMB);

  c = (mp_limb_t) u;
  MPFR_ASSERTN(u == c);
  if (dif >= 0)
    c = mpn_divrem_1 (tmp, dif, xp, xn, c); /* used all the dividend */
  else /* dif < 0 i.e. xn > yn, don't use the (-dif) low limbs from x */
    c = mpn_divrem_1 (tmp, 0, xp - dif, yn + 1, c);

  inexact = (c != 0);

  /* First pass at estimating the next bit of the quotient, in case of RNDN.
     In case we have just the right number of bits (postpone this?), we
     need to check whether the remainder is more or less than half the
     divisor.  The test must be performed with a subtraction, so as to
     prevent carries. */

  if (rnd_mode == GMP_RNDN)
    {
      if (c < (mp_limb_t) u - c) /* i.e. 2*c < u: remainder below half the divisor */
	middle = -1;
      else if (c > (mp_limb_t) u - c)
	middle = 1;
      else
	middle = 0; /* exactly in the middle */
    }

  /* If we believe that we are right in the middle or exact, we should check
     that we did not neglect any word of x (division large / 1 -> small). */

  for (i=0; ((inexact == 0) || (middle == 0)) && (i < -dif); i++)
    if (xp[i])
      inexact = middle = 1; /* larger than middle */

  /*
     If the high limb of the result is 0 (xp[xn-1] < u), remove it.
     Otherwise, compute the left shift to be performed to normalize.
     In the latter case, we discard some low bits computed. They
     contain information useful for the rounding, hence the updating
     of middle and inexact.
  */

  if (tmp[yn] == 0)
    {
      MPN_COPY(yp, tmp, yn);
      exp -= BITS_PER_MP_LIMB;
      sh = 0;
    }
  else
    {
      count_leading_zeros (sh, tmp[yn]);

      /* shift left to normalize */
      if (sh)
        {
          mp_limb_t w = tmp[0] << sh;

          mpn_lshift (yp, tmp + 1, yn, sh);
          yp[0] += tmp[0] >> (BITS_PER_MP_LIMB - sh);

          if (w > (MPFR_LIMB_ONE << (BITS_PER_MP_LIMB - 1)))
            { middle = 1; }
          else if (w < (MPFR_LIMB_ONE << (BITS_PER_MP_LIMB - 1)))
            { middle = -1; }
          else
            { middle = (c != 0); }

          inexact = inexact || (w != 0);
          exp -= sh;
        }
      else
        { /* this happens only if u == 1 and xp[xn-1] >=
             1<<(BITS_PER_MP_LIMB-1). It might be better to handle the
             u == 1 case separately?
          */

          MPN_COPY (yp, tmp + 1, yn);
        }
    }
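
The excerpt is truncated before the final rounding of the shifted quotient, but the public behaviour of mpfr_div_ui is easy to exercise: it returns a ternary value that is zero exactly when the quotient is representable at the target precision. Below is a minimal sketch, assuming a standard MPFR installation; the operands and the 53-bit precision are arbitrary, and the legacy GMP_RNDN name matches the code above.

#include <stdio.h>
#include <mpfr.h>

int
main (void)
{
  mpfr_t x, y;
  int inexact;

  mpfr_init2 (x, 53);
  mpfr_init2 (y, 53);
  mpfr_set_ui (x, 10, GMP_RNDN);

  /* 10/8 = 1.25 is exactly representable: the ternary value is 0 */
  inexact = mpfr_div_ui (y, x, 8, GMP_RNDN);
  printf ("10/8 -> %g, inexact = %d\n", mpfr_get_d (y, GMP_RNDN), inexact);

  /* 10/7 is not representable in 53 bits: the ternary value is non-zero */
  inexact = mpfr_div_ui (y, x, 7, GMP_RNDN);
  printf ("10/7 -> %.17g, inexact = %d\n", mpfr_get_d (y, GMP_RNDN), inexact);

  mpfr_clear (x);
  mpfr_clear (y);
  return 0;
}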
Example No. 29
size_t
mpf_out_str (FILE *stream, int base, size_t n_digits, mpf_srcptr op)
{
    char *str;
    mp_exp_t exp;
    size_t written;
    TMP_DECL (marker);

    TMP_MARK (marker);

    if (base == 0)
        base = 10;
    if (n_digits == 0)
        MPF_SIGNIFICANT_DIGITS (n_digits, base, op->_mp_prec);

    if (stream == 0)
        stream = stdout;

    str = (char *) TMP_ALLOC (n_digits + 2); /* extra for minus sign and \0 */

    mpf_get_str (str, &exp, base, n_digits, op);
    n_digits = strlen (str);

    written = 0;

    /* Write sign */
    if (str[0] == '-')
    {
        str++;
        fputc ('-', stream);
        written = 1;
        n_digits--;
    }

#if HAVE_LOCALECONV
    {
        const char  *point = localeconv()->decimal_point;
        size_t      pointlen = strlen (point);
        putc ('0', stream);
        fwrite (point, 1, pointlen, stream);
        written += pointlen + 1;
    }
#else
    fwrite ("0.", 1, 2, stream);
    written += 2;
#endif

    /* Write mantissa */
    {
        size_t fwret;
        fwret = fwrite (str, 1, n_digits, stream);
        written += fwret;
    }

    /* Write exponent */
    {
        int fpret;
        fpret = fprintf (stream, (base <= 10 ? "e%ld" : "@%ld"), exp);
        written += fpret;
    }

    TMP_FREE (marker);
    return ferror (stream) ? 0 : written;
}
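
Below is a minimal usage sketch of mpf_out_str, assuming a standard GMP installation. Per the code above, passing n_digits = 0 selects a digit count derived from the operand's precision, the output is sign, "0." (or the locale's decimal point), mantissa and exponent, and the return value is the number of bytes written, or 0 on error.

#include <stdio.h>
#include <gmp.h>

int
main (void)
{
  mpf_t x;
  size_t written;

  mpf_init2 (x, 128);               /* 128 bits of precision */
  mpf_set_d (x, -3.14159265358979);

  /* base 10, digit count chosen from the precision (n_digits = 0) */
  written = mpf_out_str (stdout, 10, 0, x);
  putchar ('\n');
  printf ("%u bytes written\n", (unsigned) written);

  mpf_clear (x);
  return 0;
}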