Example #1
static int
hgcd_appr_valid_p (mpz_t a, mpz_t b, mp_size_t res0,
		   struct hgcd_ref *ref, mpz_t ref_r0, mpz_t ref_r1,
		   mp_size_t res1, struct hgcd_matrix *hgcd)
{
  mp_size_t n = MAX (mpz_size (a), mpz_size (b));
  mp_size_t s = n/2 + 1;

  mp_bitcnt_t dbits, abits, margin;
  mpz_t appr_r0, appr_r1, t, q;
  struct hgcd_ref appr;

  if (!res0)
    {
      if (!res1)
	return 1;

      fprintf (stderr, "mpn_hgcd_appr returned 1 when no reduction possible.\n");
      return 0;
    }

  /* NOTE: No *_clear calls on error return, since we're going to
     abort anyway. */
  mpz_init (t);
  mpz_init (q);
  hgcd_ref_init (&appr);
  mpz_init (appr_r0);
  mpz_init (appr_r1);

  if (mpz_size (ref_r0) <= s)
    {
      fprintf (stderr, "ref_r0 too small!!!: "); debug_mp (ref_r0, 16);
      return 0;
    }
  if (mpz_size (ref_r1) <= s)
    {
      fprintf (stderr, "ref_r1 too small!!!: "); debug_mp (ref_r1, 16);
      return 0;
    }

  mpz_sub (t, ref_r0, ref_r1);
  dbits = mpz_sizeinbase (t, 2);
  if (dbits > s*GMP_NUMB_BITS)
    {
      fprintf (stderr, "ref |r0 - r1| too large!!!: "); debug_mp (t, 16);
      return 0;
    }

  if (!res1)
    {
      mpz_set (appr_r0, a);
      mpz_set (appr_r1, b);
    }
  else
    {
      unsigned i;

      for (i = 0; i<2; i++)
	{
	  unsigned j;

	  for (j = 0; j<2; j++)
	    {
	      mp_size_t mn = hgcd->n;
	      MPN_NORMALIZE (hgcd->p[i][j], mn);
	      mpz_realloc (appr.m[i][j], mn);
	      MPN_COPY (PTR (appr.m[i][j]), hgcd->p[i][j], mn);
	      SIZ (appr.m[i][j]) = mn;
	    }
	}
      mpz_mul (appr_r0, appr.m[1][1], a);
      mpz_mul (t, appr.m[0][1], b);
      mpz_sub (appr_r0, appr_r0, t);
      if (mpz_sgn (appr_r0) <= 0
	  || mpz_size (appr_r0) <= s)
	{
	  fprintf (stderr, "appr_r0 too small: "); debug_mp (appr_r0, 16);
	  return 0;
	}

      mpz_mul (appr_r1, appr.m[1][0], a);
      mpz_mul (t, appr.m[0][0], b);
      mpz_sub (appr_r1, t, appr_r1);
      if (mpz_sgn (appr_r1) <= 0
	  || mpz_size (appr_r1) <= s)
	{
	  fprintf (stderr, "appr_r1 too small: "); debug_mp (appr_r1, 16);
	  return 0;
	}
    }

  mpz_sub (t, appr_r0, appr_r1);
  abits = mpz_sizeinbase (t, 2);
  if (abits < dbits)
    {
      fprintf (stderr, "|r0 - r1| too small: "); debug_mp (t, 16);
      return 0;
    }

  /* We lose one bit each time we discard the least significant limbs.
     For the lehmer code, that can happen at most s * (GMP_NUMB_BITS)
     / (GMP_NUMB_BITS - 1) times. For the dc code, we lose an entire
     limb (or more?) for each level of recursion. */

  margin = (n/2+1) * GMP_NUMB_BITS / (GMP_NUMB_BITS - 1);
  {
    mp_size_t rn;
    for (rn = n; ABOVE_THRESHOLD (rn, HGCD_APPR_THRESHOLD); rn = (rn + 1)/2)
      margin += GMP_NUMB_BITS;
  }

  if (verbose_flag && abits > dbits)
    fprintf (stderr, "n = %u: sbits = %u: ref #(r0-r1): %u, appr #(r0-r1): %u excess: %d, margin: %u\n",
	     (unsigned) n, (unsigned) s*GMP_NUMB_BITS,
	     (unsigned) dbits, (unsigned) abits,
	     (int) abits - s * GMP_NUMB_BITS, (unsigned) margin);

  if (abits > s*GMP_NUMB_BITS + margin)
    {
      fprintf (stderr, "appr |r0 - r1| much larger than minimal (by %u bits, margin = %u bits)\n",
	       (unsigned) (abits - s*GMP_NUMB_BITS), (unsigned) margin);
      return 0;
    }

  while (mpz_cmp (appr_r0, ref_r0) > 0 || mpz_cmp (appr_r1, ref_r1) > 0)
    {
      ASSERT (mpz_size (appr_r0) > s);
      ASSERT (mpz_size (appr_r1) > s);

      if (mpz_cmp (appr_r0, appr_r1) > 0)
	{
	  if (!sdiv_qr (q, appr_r0, s, appr_r0, appr_r1))
	    break;
	  mpz_addmul (appr.m[0][1], q, appr.m[0][0]);
	  mpz_addmul (appr.m[1][1], q, appr.m[1][0]);
	}
      else
	{
	  if (!sdiv_qr (q, appr_r1, s, appr_r1, appr_r0))
	    break;
	  mpz_addmul (appr.m[0][0], q, appr.m[0][1]);
	  mpz_addmul (appr.m[1][0], q, appr.m[1][1]);
	}
    }

  if (mpz_cmp (appr_r0, ref_r0) != 0
      || mpz_cmp (appr_r1, ref_r1) != 0
      || !hgcd_ref_equal (ref, &appr))
    {
      fprintf (stderr, "appr_r0: "); debug_mp (appr_r0, 16);
      fprintf (stderr, "ref_r0: "); debug_mp (ref_r0, 16);

      fprintf (stderr, "appr_r1: "); debug_mp (appr_r1, 16);
      fprintf (stderr, "ref_r1: "); debug_mp (ref_r1, 16);

      return 0;
    }
  mpz_clear (t);
  mpz_clear (q);
  hgcd_ref_clear (&appr);
  mpz_clear (appr_r0);
  mpz_clear (appr_r1);

  return 1;
}
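
The checker above relies on the HGCD matrix convention (a;b) = M (r0;r1) with det(M) = 1, so that r0 = m11*a - m01*b and r1 = m00*b - m10*a. The following standalone sketch is not part of the listing: using only the public mpz_* API and arbitrary operand values, it rebuilds such a matrix from a few division steps and re-checks the same identity the validator uses.

#include <stdio.h>
#include <gmp.h>

int
main (void)
{
  mpz_t a, b, r0, r1, q, t, m[2][2];
  int i, j;

  mpz_init_set_str (a, "123456789012345678901234567890", 10);
  mpz_init_set_str (b, "98765432109876543210987654321", 10);
  mpz_init_set (r0, a);
  mpz_init_set (r1, b);
  mpz_init (q);
  mpz_init (t);
  for (i = 0; i < 2; i++)
    for (j = 0; j < 2; j++)
      mpz_init_set_ui (m[i][j], i == j);	/* M = identity */

  /* A few division steps; each multiplies M on the right by an elementary
     matrix of determinant 1, mirroring the update loop in
     hgcd_appr_valid_p above. */
  for (i = 0; i < 8 && mpz_sgn (r0) > 0 && mpz_sgn (r1) > 0; i++)
    {
      if (mpz_cmp (r0, r1) >= 0)
	{
	  mpz_fdiv_qr (q, r0, r0, r1);	/* r0 -= q*r1 */
	  mpz_addmul (m[0][1], q, m[0][0]);
	  mpz_addmul (m[1][1], q, m[1][0]);
	}
      else
	{
	  mpz_fdiv_qr (q, r1, r1, r0);	/* r1 -= q*r0 */
	  mpz_addmul (m[0][0], q, m[0][1]);
	  mpz_addmul (m[1][0], q, m[1][1]);
	}
    }

  /* Check r0 == m11*a - m01*b, the identity used for appr_r0 above. */
  mpz_mul (t, m[1][1], a);
  mpz_submul (t, m[0][1], b);
  gmp_printf ("r0 reconstructed ok: %d\n", mpz_cmp (t, r0) == 0);

  mpz_clears (a, b, r0, r1, q, t, NULL);
  for (i = 0; i < 2; i++)
    for (j = 0; j < 2; j++)
      mpz_clear (m[i][j]);
  return 0;
}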
Example #2
mp_size_t
mpn_gcdext (mp_ptr gp, mp_ptr up, mp_size_t *usizep,
	    mp_ptr ap, mp_size_t an, mp_ptr bp, mp_size_t n)
{
  mp_size_t talloc;
  mp_size_t scratch;
  mp_size_t matrix_scratch;
  mp_size_t ualloc = n + 1;

  mp_size_t un;
  mp_ptr u0;
  mp_ptr u1;

  mp_ptr tp;

  TMP_DECL;

  ASSERT (an >= n);
  ASSERT (n > 0);

  TMP_MARK;

  /* FIXME: Check for small sizes first, before setting up temporary
     storage etc. */
  talloc = MPN_GCDEXT_LEHMER_N_ITCH(n);

  /* For initial division */
  scratch = an - n + 1;
  if (scratch > talloc)
    talloc = scratch;

  if (ABOVE_THRESHOLD (n, GCDEXT_DC_THRESHOLD))
    {
      /* For hgcd loop. */
      mp_size_t hgcd_scratch;
      mp_size_t update_scratch;
      mp_size_t p1 = CHOOSE_P_1 (n);
      mp_size_t p2 = CHOOSE_P_2 (n);
      mp_size_t min_p = MIN(p1, p2);
      mp_size_t max_p = MAX(p1, p2);
      matrix_scratch = MPN_HGCD_MATRIX_INIT_ITCH (n - min_p);
      hgcd_scratch = mpn_hgcd_itch (n - min_p);
      update_scratch = max_p + n - 1;

      scratch = matrix_scratch + MAX(hgcd_scratch, update_scratch);
      if (scratch > talloc)
	talloc = scratch;

      /* Final mpn_gcdext_lehmer_n call. Need space for u and for
	 copies of a and b. */
      scratch = MPN_GCDEXT_LEHMER_N_ITCH (GCDEXT_DC_THRESHOLD)
	+ 3*GCDEXT_DC_THRESHOLD;

      if (scratch > talloc)
	talloc = scratch;

      /* Cofactors u0 and u1 */
      talloc += 2*(n+1);
    }

  tp = TMP_ALLOC_LIMBS(talloc);

  if (an > n)
    {
      mpn_tdiv_qr (tp, ap, 0, ap, an, bp, n);

      if (mpn_zero_p (ap, n))
	{
	  MPN_COPY (gp, bp, n);
	  *usizep = 0;
	  TMP_FREE;
	  return n;
	}
    }

  if (BELOW_THRESHOLD (n, GCDEXT_DC_THRESHOLD))
    {
      mp_size_t gn = mpn_gcdext_lehmer_n(gp, up, usizep, ap, bp, n, tp);

      TMP_FREE;
      return gn;
    }

  MPN_ZERO (tp, 2*ualloc);
  u0 = tp; tp += ualloc;
  u1 = tp; tp += ualloc;

  {
    /* For the first hgcd call, there are no u updates, and it makes
       some sense to use a different choice for p. */

    /* FIXME: We could trim use of temporary storage, since u0 and u1
       are not used yet. For the hgcd call, we could swap in the u0
       and u1 pointers for the relevant matrix elements. */

    struct hgcd_matrix M;
    mp_size_t p = CHOOSE_P_1 (n);
    mp_size_t nn;

    mpn_hgcd_matrix_init (&M, n - p, tp);
    nn = mpn_hgcd (ap + p, bp + p, n - p, &M, tp + matrix_scratch);
    if (nn > 0)
      {
	ASSERT (M.n <= (n - p - 1)/2);
	ASSERT (M.n + p <= (p + n - 1) / 2);

	/* Temporary storage 2 (p + M->n) <= p + n - 1 */
	n = mpn_hgcd_matrix_adjust (&M, p + nn, ap, bp, p, tp + matrix_scratch);

	MPN_COPY (u0, M.p[1][0], M.n);
	MPN_COPY (u1, M.p[1][1], M.n);
	un = M.n;
	while ( (u0[un-1] | u1[un-1] ) == 0)
	  un--;
      }
    else
      {
	/* mpn_hgcd has failed. Then either one of a or b is very
	   small, or the difference is very small. Perform one
	   subtraction followed by one division. */
	mp_size_t gn;
	mp_size_t updated_un = 1;

	u1[0] = 1;

	/* Temporary storage 2n + 1 */
	n = mpn_gcdext_subdiv_step (gp, &gn, up, usizep, ap, bp, n,
				    u0, u1, &updated_un, tp, tp + n);
	if (n == 0)
	  {
	    TMP_FREE;
	    return gn;
	  }

	un = updated_un;
	ASSERT (un < ualloc);
      }
  }

  while (ABOVE_THRESHOLD (n, GCDEXT_DC_THRESHOLD))
    {
      struct hgcd_matrix M;
      mp_size_t p = CHOOSE_P_2 (n);
      mp_size_t nn;

      mpn_hgcd_matrix_init (&M, n - p, tp);
      nn = mpn_hgcd (ap + p, bp + p, n - p, &M, tp + matrix_scratch);
      if (nn > 0)
	{
	  mp_ptr t0;

	  t0 = tp + matrix_scratch;
	  ASSERT (M.n <= (n - p - 1)/2);
	  ASSERT (M.n + p <= (p + n - 1) / 2);

	  /* Temporary storage 2 (p + M->n) <= p + n - 1 */
	  n = mpn_hgcd_matrix_adjust (&M, p + nn, ap, bp, p, t0);

	  /* By the same analysis as for mpn_hgcd_matrix_mul */
	  ASSERT (M.n + un <= ualloc);

	  /* FIXME: This copying could be avoided by some swapping of
	   * pointers. May need more temporary storage, though. */
	  MPN_COPY (t0, u0, un);

	  /* Temporary storage ualloc */
	  un = hgcd_mul_matrix_vector (&M, u0, t0, u1, un, t0 + un);

	  ASSERT (un < ualloc);
	  ASSERT ( (u0[un-1] | u1[un-1]) > 0);
	}
      else
	{
	  /* mpn_hgcd has failed. Then either one of a or b is very
	     small, or the difference is very small. Perform one
	     subtraction followed by one division. */
	  mp_size_t gn;
	  mp_size_t updated_un = un;

	  /* Temporary storage 2n + 1 */
	  n = mpn_gcdext_subdiv_step (gp, &gn, up, usizep, ap, bp, n,
				      u0, u1, &updated_un, tp, tp + n);
	  if (n == 0)
	    {
	      TMP_FREE;
	      return gn;
	    }

	  un = updated_un;
	  ASSERT (un < ualloc);
	}
    }

  if (UNLIKELY (mpn_cmp (ap, bp, n) == 0))
    {
      /* Must return the smallest cofactor, +u1 or -u0 */
      int c;

      MPN_COPY (gp, ap, n);

      MPN_CMP (c, u0, u1, un);
      ASSERT (c != 0);
      if (c < 0)
	{
	  MPN_NORMALIZE (u0, un);
	  MPN_COPY (up, u0, un);
	  *usizep = -un;
	}
      else
	{
	  MPN_NORMALIZE_NOT_ZERO (u1, un);
	  MPN_COPY (up, u1, un);
	  *usizep = un;
	}

      TMP_FREE;
      return n;
    }
  else if (mpn_zero_p (u0, un))
    {
      mp_size_t gn;
      ASSERT (un == 1);
      ASSERT (u1[0] == 1);

      /* g = u a + v b = (u u1 - v u0) A + (...) B = u A + (...) B */
      gn = mpn_gcdext_lehmer_n (gp, up, usizep, ap, bp, n, tp);

      TMP_FREE;
      return gn;
    }
  else
    {
      /* We have A = ... a + ... b
		 B =  u0 a +  u1 b

		 a = u1  A + ... B
		 b = -u0 A + ... B

	 with bounds

	   |u0|, |u1| <= B / min(a, b)

	 Compute g = u a + v b = (u u1 - v u0) A + (...) B
	 Here, u, v are bounded by

	 |u| <= b,
	 |v| <= a
      */

      mp_size_t u0n;
      mp_size_t u1n;
      mp_size_t lehmer_un;
      mp_size_t lehmer_vn;
      mp_size_t gn;

      mp_ptr lehmer_up;
      mp_ptr lehmer_vp;
      int negate;

      lehmer_up = tp; tp += n;

      /* Call mpn_gcdext_lehmer_n with copies of a and b. */
      MPN_COPY (tp, ap, n);
      MPN_COPY (tp + n, bp, n);
      gn = mpn_gcdext_lehmer_n (gp, lehmer_up, &lehmer_un, tp, tp + n, n, tp + 2*n);

      u0n = un;
      MPN_NORMALIZE (u0, u0n);
      if (lehmer_un == 0)
	{
	  /* u == 0  ==>  v = g / b == 1  ==> g = - u0 A + (...) B */
	  MPN_COPY (up, u0, u0n);
	  *usizep = -u0n;

	  TMP_FREE;
	  return gn;
	}

      lehmer_vp = tp;
      /* Compute v = (g - u a) / b */
      lehmer_vn = compute_v (lehmer_vp,
			     ap, bp, n, gp, gn, lehmer_up, lehmer_un, tp + n + 1);

      if (lehmer_un > 0)
	negate = 0;
      else
	{
	  lehmer_un = -lehmer_un;
	  negate = 1;
	}

      u1n = un;
      MPN_NORMALIZE (u1, u1n);

      /* It's possible that u0 = 1, u1 = 0 */
      if (u1n == 0)
	{
	  ASSERT (un == 1);
	  ASSERT (u0[0] == 1);

	  /* u1 == 0 ==> u u1 + v u0 = v */
	  MPN_COPY (up, lehmer_vp, lehmer_vn);
	  *usizep = negate ? lehmer_vn : - lehmer_vn;

	  TMP_FREE;
	  return gn;
	}

      ASSERT (lehmer_un + u1n <= ualloc);
      ASSERT (lehmer_vn + u0n <= ualloc);

      /* Now u0, u1, u are non-zero. We may still have v == 0 */

      /* Compute u u1 */
      if (lehmer_un <= u1n)
	/* Should be the common case */
	mpn_mul (up, u1, u1n, lehmer_up, lehmer_un);
      else
	mpn_mul (up, lehmer_up, lehmer_un, u1, u1n);

      un = u1n + lehmer_un;
      un -= (up[un - 1] == 0);

      if (lehmer_vn > 0)
	{
	  mp_limb_t cy;

	  /* Overwrites old u1 value */
	  if (lehmer_vn <= u0n)
	    /* Should be the common case */
	    mpn_mul (u1, u0, u0n, lehmer_vp, lehmer_vn);
	  else
	    mpn_mul (u1, lehmer_vp, lehmer_vn, u0, u0n);

	  u1n = u0n + lehmer_vn;
	  u1n -= (u1[u1n - 1] == 0);

	  if (u1n <= un)
	    {
	      cy = mpn_add (up, up, un, u1, u1n);
	    }
	  else
	    {
	      cy = mpn_add (up, u1, u1n, up, un);
	      un = u1n;
	    }
	  up[un] = cy;
	  un += (cy != 0);

	  ASSERT (un < ualloc);
	}
      *usizep = negate ? -un : un;

      TMP_FREE;
      return gn;
    }
}
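
At the application level the documented entry point for the extended gcd is mpz_gcdext, which for large operands ends up in limb-level code like mpn_gcdext above. A minimal standalone usage sketch with arbitrary operand values, re-checking the defining identity s*a + t*b = g:

#include <stdio.h>
#include <gmp.h>

int
main (void)
{
  mpz_t a, b, g, s, t, check;

  mpz_init_set_str (a, "123456789123456789123456789", 10);
  mpz_init_set_str (b, "987654321987654321", 10);
  mpz_inits (g, s, t, check, NULL);

  mpz_gcdext (g, s, t, a, b);		/* g = gcd(a,b) = s*a + t*b */

  mpz_mul (check, s, a);
  mpz_addmul (check, t, b);		/* check = s*a + t*b */
  gmp_printf ("g = %Zd, identity holds: %d\n", g, mpz_cmp (check, g) == 0);

  mpz_clears (a, b, g, s, t, check, NULL);
  return 0;
}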
Example #3
mp_limb_t
mpn_mul (mp_ptr prodp,
	 mp_srcptr up, mp_size_t un,
	 mp_srcptr vp, mp_size_t vn)
{
  mp_size_t l, k;
  mp_limb_t c;

  ASSERT (un >= vn);
  ASSERT (vn >= 1);
  ASSERT (! MPN_OVERLAP_P (prodp, un+vn, up, un));
  ASSERT (! MPN_OVERLAP_P (prodp, un+vn, vp, vn));

  if (un == vn)
   {
    if (up == vp)
    {
      mpn_sqr (prodp, up, un);
      return prodp[2 * un - 1];
    }
    else
    {
      mpn_mul_n (prodp, up, vp, un);
      return prodp[2 * un - 1];
    }
   }

  if (vn < MUL_KARATSUBA_THRESHOLD)
    { /* plain schoolbook multiplication */
      if (un <= MUL_BASECASE_MAX_UN)
	mpn_mul_basecase (prodp, up, un, vp, vn);
      else
	{
	  /* We have un >> MUL_BASECASE_MAX_UN > vn.  For better memory
	     locality, split up[] into MUL_BASECASE_MAX_UN pieces and multiply
	     these pieces with the vp[] operand.  After each such partial
	     multiplication (but the last) we copy the most significant vn
	     limbs into a temporary buffer since that part would otherwise be
	     overwritten by the next multiplication.  After the next
	     multiplication, we add it back.  This illustrates the situation:

                                                    -->vn<--
                                                      |  |<------- un ------->|
                                                         _____________________|
                                                        X                    /|
                                                      /XX__________________/  |
                                    _____________________                     |
                                   X                    /                     |
                                 /XX__________________/                       |
               _____________________                                          |
              /                    /                                          |
            /____________________/                                            |
	    ==================================================================

	    The parts marked with X are the parts whose sums are copied into
	    the temporary buffer.  */

	  mp_limb_t tp[MUL_KARATSUBA_THRESHOLD_LIMIT];
	  mp_limb_t cy;
          ASSERT (MUL_KARATSUBA_THRESHOLD <= MUL_KARATSUBA_THRESHOLD_LIMIT);

	  mpn_mul_basecase (prodp, up, MUL_BASECASE_MAX_UN, vp, vn);
	  prodp += MUL_BASECASE_MAX_UN;
	  MPN_COPY (tp, prodp, vn);		/* preserve high triangle */
	  up += MUL_BASECASE_MAX_UN;
	  un -= MUL_BASECASE_MAX_UN;
	  while (un > MUL_BASECASE_MAX_UN)
	    {
	      mpn_mul_basecase (prodp, up, MUL_BASECASE_MAX_UN, vp, vn);
	      cy = mpn_add_n (prodp, prodp, tp, vn); /* add back preserved triangle */
	      mpn_incr_u (prodp + vn, cy);		/* safe? */
	      prodp += MUL_BASECASE_MAX_UN;
	      MPN_COPY (tp, prodp, vn);		/* preserve high triangle */
	      up += MUL_BASECASE_MAX_UN;
	      un -= MUL_BASECASE_MAX_UN;
	    }
	  if (un > vn)
	    {
	      mpn_mul_basecase (prodp, up, un, vp, vn);
	    }
	  else
	    {
	      ASSERT_ALWAYS (un > 0);
	      mpn_mul_basecase (prodp, vp, vn, up, un);
	    }
	  cy = mpn_add_n (prodp, prodp, tp, vn); /* add back preserved triangle */
	  mpn_incr_u (prodp + vn, cy);		/* safe? */
	}
      return prodp[un + vn - 1];
    }

  if (ABOVE_THRESHOLD (un + vn, 2*MUL_FFT_FULL_THRESHOLD)
      && ABOVE_THRESHOLD (3*vn, MUL_FFT_FULL_THRESHOLD))
    {
      mpn_mul_fft_main (prodp, up, un, vp, vn);
      return prodp[un + vn - 1];
    }

  k = (un + 3)/4; // ceil(un/4)

#if GMP_NUMB_BITS == 32
  if ((ABOVE_THRESHOLD (un + vn, 2*MUL_TOOM8H_THRESHOLD)) && (vn>=86) && (5*un <= 11*vn))
#else
  if ((ABOVE_THRESHOLD (un + vn, 2*MUL_TOOM8H_THRESHOLD)) && (vn>=86) && (4*un <= 13*vn))
#endif
  {
      mpn_toom8h_mul(prodp, up, un, vp, vn);
      return prodp[un + vn - 1];
  }
  
  if (ABOVE_THRESHOLD (un + vn, 2*MUL_TOOM4_THRESHOLD))
  {
          if (vn > 3*k)
          {
             mpn_toom4_mul(prodp, up, un, vp, vn);
             return prodp[un + vn - 1];
          } else
          {
             l = (un + 4)/5; // ceil(un/5)
             if ((((vn > 9*k/4) && (un+vn <= 6*MUL_TOOM4_THRESHOLD)) 
                 || ((vn > 2*l) && (un+vn > 6*MUL_TOOM4_THRESHOLD)))
                 && (vn <= 3*l))
             {
                mpn_toom53_mul(prodp, up, un, vp, vn);
                return prodp[un + vn - 1];
             }
          }
  } 
  
  if (ABOVE_THRESHOLD (un + vn, 2*MUL_TOOM3_THRESHOLD) && (vn > k))
  {
          mp_ptr ws;
          TMP_DECL;
          TMP_MARK;

          if (vn < 2*k) // un/2 >= vn > un/4
          {
                  ws = TMP_ALLOC_LIMBS (MPN_TOOM3_MUL_TSIZE(un));
                  mpn_toom42_mul(prodp, up, un, vp, vn, ws);
                  TMP_FREE;
                  return prodp[un + vn - 1];
          }

          l = (un+2)/3; // ceil(un/3)
          if (vn > 2*l) // un >= vn > 2un/3
          {
                  ws = TMP_ALLOC_LIMBS (MPN_TOOM3_MUL_TSIZE(un));
                  mpn_toom3_mul(prodp, up, un, vp, vn, ws);
                  TMP_FREE;
                  return prodp[un + vn - 1];
          } else // 2un/3 >= vn > un/3
          {
                  ws = TMP_ALLOC_LIMBS (MPN_TOOM3_MUL_TSIZE(un));
                  mpn_toom32_mul(prodp, up, un, vp, vn, ws);
                  TMP_FREE;
                  return prodp[un + vn - 1];
          }
  }

  mpn_mul_n (prodp, up, vp, vn);

  if (un != vn)
    { mp_limb_t t;
      mp_ptr ws;
      TMP_DECL;
      TMP_MARK;

      prodp += vn;
      l = vn;
      up += vn;
      un -= vn;

      if (un < vn)
	{
	  /* Swap u's and v's. */
	  MPN_SRCPTR_SWAP (up,un, vp,vn);
	}

      ws = TMP_ALLOC_LIMBS ((vn >= MUL_KARATSUBA_THRESHOLD ? vn : un) + vn);

      t = 0;
      while (vn >= MUL_KARATSUBA_THRESHOLD)
	{
	  mpn_mul_n (ws, up, vp, vn);
	  if (l <= 2*vn)
	    {
	      t += mpn_add_n (prodp, prodp, ws, l);
	      if (l != 2*vn)
		{
		  t = mpn_add_1 (prodp + l, ws + l, 2*vn - l, t);
		  l = 2*vn;
		}
	    }
	  else
	    {
	      c = mpn_add_n (prodp, prodp, ws, 2*vn);
	      t += mpn_add_1 (prodp + 2*vn, prodp + 2*vn, l - 2*vn, c);
	    }
	  prodp += vn;
	  l -= vn;
	  up += vn;
	  un -= vn;
	  if (un < vn)
	    {
	      /* Swap u's and v's. */
	      MPN_SRCPTR_SWAP (up,un, vp,vn);
	    }
	}

      if (vn != 0)
	{
	  mpn_mul_basecase (ws, up, un, vp, vn);
	  if (l <= un + vn)
	    {
	      t += mpn_add_n (prodp, prodp, ws, l);
	      if (l != un + vn)
		t = mpn_add_1 (prodp + l, ws + l, un + vn - l, t);
	    }
	  else
	    {
	      c = mpn_add_n (prodp, prodp, ws, un + vn);
	      t += mpn_add_1 (prodp + un + vn, prodp + un + vn, l - un - vn, c);
	    }
	}

      TMP_FREE;
    }

  return prodp[un + vn - 1];
}
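
mpn_mul is part of the documented low-level mpn interface: the operands are little-endian limb arrays with un >= vn >= 1, the destination needs room for un + vn limbs and must not overlap the inputs, and the return value is the most significant product limb (which may be zero). A small standalone usage sketch with arbitrary limb values:

#include <stdio.h>
#include <gmp.h>

int
main (void)
{
  mp_limb_t up[3] = { 1, 2, 3 };	/* 3*B^2 + 2*B + 1, B = 2^GMP_NUMB_BITS */
  mp_limb_t vp[2] = { 4, 5 };		/* 5*B + 4 */
  mp_limb_t prodp[3 + 2];		/* product needs un + vn limbs */
  mp_limb_t msl;
  int i;

  msl = mpn_mul (prodp, up, 3, vp, 2);	/* un >= vn >= 1, as asserted above */

  gmp_printf ("most significant limb (may be 0): %Mu\n", msl);
  for (i = 3 + 2 - 1; i >= 0; i--)
    gmp_printf ("prodp[%d] = %Mu\n", i, prodp[i]);
  return 0;
}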
Example #4
int
mpz_congruent_ui_p (mpz_srcptr a, unsigned long cu, unsigned long du)
{
  mp_srcptr  ap;
  mp_size_t  asize;
  mp_limb_t  c, d, r;

  if (UNLIKELY (du == 0))
    return (mpz_cmp_ui (a, cu) == 0);

  asize = SIZ(a);
  if (asize == 0)
    {
      if (cu < du)
        return cu == 0;
      else
        return (cu % du) == 0;
    }

  /* For nails don't try to be clever if c or d is bigger than a limb, just
     fake up some mpz_t's and go to the main mpz_congruent_p.  */
  if (du > GMP_NUMB_MAX || cu > GMP_NUMB_MAX)
    {
      mp_limb_t  climbs[2], dlimbs[2];
      mpz_t      cz, dz;

      ALLOC(cz) = 2;
      PTR(cz) = climbs;
      ALLOC(dz) = 2;
      PTR(dz) = dlimbs;

      mpz_set_ui (cz, cu);
      mpz_set_ui (dz, du);
      return mpz_congruent_p (a, cz, dz);
    }

  /* NEG_MOD works on limbs, so convert ulong to limb */
  c = cu;
  d = du;

  if (asize < 0)
    {
      asize = -asize;
      NEG_MOD (c, c, d);
    }

  ap = PTR (a);

  if (ABOVE_THRESHOLD (asize, BMOD_1_TO_MOD_1_THRESHOLD))
    {
      r = mpn_mod_1 (ap, asize, d);
      if (c < d)
        return r == c;
      else
        return r == (c % d);
    }

  if ((d & 1) == 0)
    {
      /* Strip low zero bits to get odd d required by modexact.  If
         d==e*2^n then a==c mod d if and only if both a==c mod 2^n
         and a==c mod e.  */

      unsigned  twos;

      if ((ap[0]-c) & LOW_ZEROS_MASK (d))
        return 0;

      count_trailing_zeros (twos, d);
      d >>= twos;
    }
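
The routine above implements the documented predicate mpz_congruent_ui_p (n, c, d), which returns non-zero iff n == c (mod d), with d == 0 treated as plain equality. A short standalone usage sketch with arbitrary values:

#include <stdio.h>
#include <gmp.h>

int
main (void)
{
  mpz_t n;

  /* Arbitrary value ending in ...003, so it is 3 mod 10 and 3 mod 1000. */
  mpz_init_set_str (n, "123456789123456789003", 10);

  printf ("n == 3 (mod 10):   %d\n", mpz_congruent_ui_p (n, 3, 10) != 0);
  printf ("n == 3 (mod 1000): %d\n", mpz_congruent_ui_p (n, 3, 1000) != 0);
  printf ("n == 7 (mod 10):   %d\n", mpz_congruent_ui_p (n, 7, 10) != 0);

  mpz_clear (n);
  return 0;
}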
Example #5
/* Same scratch requirements as for mpn_hgcd. */
mp_size_t
mpn_hgcd_jacobi (mp_ptr ap, mp_ptr bp, mp_size_t n,
                 struct hgcd_matrix *M, unsigned *bitsp, mp_ptr tp)
{
    mp_size_t s = n/2 + 1;

    mp_size_t nn;
    int success = 0;

    if (n <= s)
        /* Happens when n <= 2, a fairly uninteresting case but exercised
           by the random inputs of the testsuite. */
        return 0;

    ASSERT ((ap[n-1] | bp[n-1]) > 0);

    ASSERT ((n+1)/2 - 1 < M->alloc);

    if (ABOVE_THRESHOLD (n, HGCD_THRESHOLD))
    {
        mp_size_t n2 = (3*n)/4 + 1;
        mp_size_t p = n/2;

        nn = mpn_hgcd_jacobi (ap + p, bp + p, n - p, M, bitsp, tp);
        if (nn > 0)
        {
            /* Needs 2*(p + M->n) <= 2*(floor(n/2) + ceil(n/2) - 1)
               = 2 (n - 1) */
            n = mpn_hgcd_matrix_adjust (M, p + nn, ap, bp, p, tp);
            success = 1;
        }
        while (n > n2)
        {
            /* Needs n + 1 storage */
            nn = hgcd_jacobi_step (n, ap, bp, s, M, bitsp, tp);
            if (!nn)
                return success ? n : 0;
            n = nn;
            success = 1;
        }

        if (n > s + 2)
        {
            struct hgcd_matrix M1;
            mp_size_t scratch;

            p = 2*s - n + 1;
            scratch = MPN_HGCD_MATRIX_INIT_ITCH (n-p);

            mpn_hgcd_matrix_init(&M1, n - p, tp);
            nn = mpn_hgcd_jacobi (ap + p, bp + p, n - p, &M1, bitsp, tp + scratch);
            if (nn > 0)
            {
                /* We always have max(M) > 2^{-(GMP_NUMB_BITS + 1)} max(M1) */
                ASSERT (M->n + 2 >= M1.n);

                /* Furthermore, assume M ends with a quotient (1, q; 0, 1),
                then either q or q + 1 is a correct quotient, and M1 will
                 start with either (1, 0; 1, 1) or (2, 1; 1, 1). This
                 rules out the case that the size of M * M1 is much
                 smaller than the expected M->n + M1->n. */

                ASSERT (M->n + M1.n < M->alloc);

                /* Needs 2 (p + M->n) <= 2 (2*s - n2 + 1 + n2 - s - 1)
                = 2*s <= 2*(floor(n/2) + 1) <= n + 2. */
                n = mpn_hgcd_matrix_adjust (&M1, p + nn, ap, bp, p, tp + scratch);

                /* We need a bound for M->n + M1.n. Let n be the original
                input size. Then

                 ceil(n/2) - 1 >= size of product >= M.n + M1.n - 2

                 and it follows that

                 M.n + M1.n <= ceil(n/2) + 1

                 Then 3*(M.n + M1.n) + 5 <= 3 * ceil(n/2) + 8 is the
                 amount of needed scratch space. */
                mpn_hgcd_matrix_mul (M, &M1, tp + scratch);
                success = 1;
            }
        }
    }

    for (;;)
    {
        /* Needs s+3 < n */
        nn = hgcd_jacobi_step (n, ap, bp, s, M, bitsp, tp);
        if (!nn)
            return success ? n : 0;

        n = nn;
        success = 1;
    }
}
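
mpn_hgcd_jacobi is internal: it performs the same reduction as mpn_hgcd while threading Jacobi-symbol state through *bitsp. The documented user-level entry point for the Jacobi symbol is mpz_jacobi (a, b), defined for odd b. A minimal standalone sketch with arbitrary values:

#include <stdio.h>
#include <gmp.h>

int
main (void)
{
  mpz_t a, b;

  mpz_init_set_ui (a, 1001);
  mpz_init_set_ui (b, 9907);	/* mpz_jacobi is defined for odd b */

  printf ("jacobi(1001, 9907) = %d\n", mpz_jacobi (a, b));

  mpz_clears (a, b, NULL);
  return 0;
}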
Example #6
int
mpz_congruent_p (mpz_srcptr a, mpz_srcptr c, mpz_srcptr d)
{
  mp_size_t  asize, csize, dsize, sign;
  mp_srcptr  ap, cp, dp;
  mp_ptr     xp;
  mp_limb_t  alow, clow, dlow, dmask, r;
  int        result;
  TMP_DECL;

  dsize = SIZ(d);
  if (UNLIKELY (dsize == 0))
    return (mpz_cmp (a, c) == 0);

  dsize = ABS(dsize);
  dp = PTR(d);

  if (ABSIZ(a) < ABSIZ(c))
    MPZ_SRCPTR_SWAP (a, c);

  asize = SIZ(a);
  csize = SIZ(c);
  sign = (asize ^ csize);

  asize = ABS(asize);
  ap = PTR(a);

  if (csize == 0)
    return mpn_divisible_p (ap, asize, dp, dsize);

  csize = ABS(csize);
  cp = PTR(c);

  alow = ap[0];
  clow = cp[0];
  dlow = dp[0];

  /* Check a==c mod low zero bits of dlow.  This might catch a few cases of
     a!=c quickly, and it helps the csize==1 special cases below.  */
  dmask = LOW_ZEROS_MASK (dlow) & GMP_NUMB_MASK;
  alow = (sign >= 0 ? alow : -alow);
  if (((alow-clow) & dmask) != 0)
    return 0;

  if (csize == 1)
    {
      if (dsize == 1)
	{
	cong_1:
	  if (sign < 0)
	    NEG_MOD (clow, clow, dlow);

	  if (ABOVE_THRESHOLD (asize, BMOD_1_TO_MOD_1_THRESHOLD))
	    {
	      r = mpn_mod_1 (ap, asize, dlow);
	      if (clow < dlow)
		return r == clow;
	      else
		return r == (clow % dlow);
	    }

	  if ((dlow & 1) == 0)
	    {
	      /* Strip low zero bits to get odd d required by modexact.  If
		 d==e*2^n then a==c mod d if and only if both a==c mod e and
		 a==c mod 2^n, the latter having been done above.  */
	      unsigned	twos;
	      count_trailing_zeros (twos, dlow);
	      dlow >>= twos;
	    }

	  r = mpn_modexact_1c_odd (ap, asize, dlow, clow);
	  return r == 0 || r == dlow;
	}
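
The general predicate shown above is reached through the documented mpz_congruent_p (a, c, d), non-zero iff a == c (mod d), with d == 0 treated as plain equality. A short standalone usage sketch with arbitrary values:

#include <stdio.h>
#include <gmp.h>

int
main (void)
{
  mpz_t a, c, d;

  mpz_inits (a, c, d, NULL);
  mpz_set_str (a, "123456789123456789", 10);	/* ends in ...89 */
  mpz_set_ui (c, 89);
  mpz_set_ui (d, 100);

  printf ("a == c (mod d): %d\n", mpz_congruent_p (a, c, d) != 0);

  mpz_clears (a, c, d, NULL);
  return 0;
}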
Example #7
mp_size_t
mpn_gcdext (mp_ptr gp, mp_ptr s0p, mp_size_t *s0size,
	    mp_ptr ap, mp_size_t an, mp_ptr bp, mp_size_t n)
{
  mp_size_t init_scratch, orig_n = n;
  mp_size_t scratch, un, u0n, u1n;
  mp_limb_t t;
  mp_ptr tp, u0, u1;
  int swapped = 0;
    struct ngcd_matrix M;
    mp_size_t p;
    mp_size_t nn;
  mp_limb_signed_t a;
  int c;
  TMP_DECL;
  
  ASSERT (an >= n);
  
  if (an == 1)
  {
    if (!n)
    {
       /* shouldn't ever occur, but we include for completeness */
		gp[0] = ap[0];
       s0p[0] = 1;
       *s0size = 1;
       
	   return 1;
    }
    
	gp[0] = mpn_gcdinv_1(&a, ap[0], bp[0]);
    if (a < (mp_limb_signed_t) 0)
	{
	   s0p[0] = -a;
       (*s0size) = -1;
	} else
    {
	   s0p[0] = a;
       (*s0size) = 1 - (s0p[0] == 0);
	}
	
	return 1;
  }

  init_scratch = MPN_NGCD_MATRIX_INIT_ITCH (n-P_SIZE(n));
  scratch = mpn_nhgcd_itch ((n+1)/2);

  /* Space needed for mpn_ngcd_matrix_adjust */
  if (scratch < 2*n)
    scratch = 2*n;
  if (scratch < an - n + 1) /* the first division can sometimes be selfish!! */
	 scratch = an - n + 1;

 /* Space needed for cofactor adjust */
  scratch = MAX(scratch, 2*(n+1) + P_SIZE(n) + 1);

  TMP_MARK;
  
  if (5*n + 2 + MPN_GCD_LEHMER_N_ITCH(n) > init_scratch + scratch) 
    tp = TMP_ALLOC_LIMBS (7*n+4+MPN_GCD_LEHMER_N_ITCH(n)); /* 2n+2 for u0, u1, 5*n+2 + MPN_GCD_LEHMER_N_ITCH(n) for Lehmer
                                                              and copies of ap and bp and s (and finally 3*n+1 for t and get_t) */
  else
    tp = TMP_ALLOC_LIMBS (2*(n+1) + init_scratch + scratch);
    
  if (an > n)
    {
      mp_ptr qp = tp;

      mpn_tdiv_qr (qp, ap, 0, ap, an, bp, n);
      
      an = n;
      MPN_NORMALIZE (ap, an);
      if (an == 0)
	{	  
	  MPN_COPY (gp, bp, n);
	  TMP_FREE;
	  (*s0size) = 0;
	  
	  return n;
	}
    }
    
    if (BELOW_THRESHOLD (n, GCDEXT_THRESHOLD))
    {
      n = mpn_ngcdext_lehmer (gp, s0p, s0size, ap, bp, n, tp);
      TMP_FREE;
      
	  return n;
    }
  
    u0 = tp; /* Cofactor space */
    u1 = tp + n + 1;

    MPN_ZERO(tp, 2*(n+1));

    tp += 2*(n+1);
  
    /* First iteration, setup u0 and u1 */

    p = P_SIZE(n);
  
    mpn_ngcd_matrix_init (&M, n - p, tp);
	 ASSERT(tp + init_scratch > M.p[1][1] + M.n);
	 nn = mpn_nhgcd (ap + p, bp + p, n - p, &M, tp + init_scratch);
  if (nn > 0)
	 {
		 n = mpn_ngcd_matrix_adjust (&M, p + nn, ap, bp, p, tp + init_scratch);
		 
		 /* 
            (ap'', bp'')^T = M^-1(ap', bp')^T 
		    and (ap', bp') = (1*ap + ?*bp, 0*ap + ?*bp) 
		    We let u0 be minus the factor of ap appearing 
            in the expression for bp'' and u1 be the 
            factor of ap appearing in the expression for ap''
        */

       MPN_COPY(u0, M.p[1][0], M.n);
	    MPN_COPY(u1, M.p[1][1], M.n);

	    un = M.n;
	    while ((u0[un-1] == 0) && (u1[un-1] == 0)) un--; /* normalise u0, u1, both cannot be zero as det = 1 */
     }
  else	
	 {
	   mp_size_t gn;

		un = 1;
	   u0[0] = 0; /* bp = 0*ap + ?*bp, thus u0 = -0 */
	   u1[0] = 1; /* ap = 1*ap + ?*bp, thus u1 = 1 */
   
	   n = mpn_ngcdext_subdiv_step (gp, &gn, s0p, u0, u1, &un, ap, bp, n, tp);
	 if (n == 0)
	   {
	      /* never observed to occur */
		   (*s0size) = un;
			ASSERT(s0p[*s0size - 1] != 0);
		   TMP_FREE;
	       
		   return gn;
	   }
	 } 

  while (ABOVE_THRESHOLD (n, GCDEXT_THRESHOLD))
    {
      struct ngcd_matrix M;
      mp_size_t p = P_SIZE(n);
      mp_size_t nn;
      
      mpn_ngcd_matrix_init (&M, n - p, tp);
      nn = mpn_nhgcd (ap + p, bp + p, n - p, &M, tp + init_scratch);
		if (nn > 0)
	{
	   n = mpn_ngcd_matrix_adjust (&M, p + nn, ap, bp, p, tp + init_scratch);

		ngcdext_cofactor_adjust(u0, u1, &un, &M, tp + init_scratch);
		
		/* 
            (ap'', bp'')^T = M^-1(ap', bp')^T 
		    and (ap', bp') = (u1*ap + ?*bp, -u0*ap + ?*bp) 
		    So we need u0' = -(-c*u1 + a*-u0) = a*u0 + c*u1
            and we need u1' = (d*u1 -b*-u0) = b*u0 + d*u1 
        */

     
		ASSERT(un <= orig_n + 1);

	}  else	
	{
	  mp_size_t gn;
	  n = mpn_ngcdext_subdiv_step (gp, &gn, s0p, u0, u1, &un, ap, bp, n, tp);
	  ASSERT(un <= orig_n + 1);
	  if (n == 0)
	    {
	      (*s0size) = un;
			ASSERT(((*s0size) == 0) || (s0p[ABS(*s0size) - 1] != 0));
		   TMP_FREE;
		   
		   return gn;
	    }
	}
    }

  ASSERT (ap[n-1] > 0 || bp[n-1] > 0);
  ASSERT (u0[un-1] > 0 || u1[un-1] > 0);

  if (ap[n-1] < bp[n-1])
  {
	  MP_PTR_SWAP (ap, bp);
	  MP_PTR_SWAP (u0, u1);
	  swapped = 1;
  }
   
  an = n; /* {ap, an} and {bp, bn} are normalised, {ap, an} >= {bp, bn} */
  MPN_NORMALIZE (bp, n);

  if (n == 0)
    {
      /* If bp == 0 then gp = ap
		   with cofactor u1
			If we swapped then cofactor is -u1
			This case never seems to happen
		*/
		MPN_COPY (gp, ap, an);
		MPN_NORMALIZE(u1, un);
		MPN_COPY(s0p, u1, un);
      (*s0size) = un;
		if (swapped) (*s0size) = -(*s0size);
      TMP_FREE;
      
	  return an;
    }

  /* 
     If at this point we have s*ap' + t*bp' = gp where gp is the gcd
	  and (ap', bp') = (u1*ap + ?*bp, -u0*ap + ?*bp)
	  then gp = s*u1*ap - t*u0*ap + ?*bp
	  and the cofactor we want is (s*u1-t*u0).

	  First there is the special case u0 = 0, u1 = 1 in which case we do not need 
	  to compute t...
  */
    
  ASSERT(u1 + un <= tp);
  u0n = un;
  MPN_NORMALIZE(u0, u0n);  /* {u0, u0n} is now normalised */

  if (u0n == 0) /* u1 = 1 case is rare */
  {
	  mp_size_t gn;
	 
	  gn = mpn_ngcdext_lehmer (gp, s0p, s0size, ap, bp, n, tp);
	  if (swapped) (*s0size) = -(*s0size);
	  TMP_FREE;
	  
	  return gn;
  }
  else
  {
	  /* Compute final gcd. */
  
	  mp_size_t gn, sn, tn;
	  mp_ptr s, t;
	  mp_limb_t cy;
	  int negate = 0;
	  
      /* Save an, bn first as gcdext destroys inputs */
	  s = tp;
	  tp += an;
	  
     MPN_COPY(tp, ap, an);
	  MPN_COPY(tp + an, bp, an);
	  
	  if (mpn_cmp(tp, tp + an, an) == 0) 
	  {
	     /* gcd is tp or tp + an 
		    return smallest cofactor, either -u0 or u1
		 */
	     gn = an;
		 MPN_NORMALIZE(tp, gn);
		 MPN_COPY(gp, tp, gn);
		 
		 MPN_CMP(c, u0, u1, un);
		 if (c < (mp_limb_signed_t) 0)
		 {
		    MPN_COPY(s0p, u0, u0n);
			(*s0size) = -u0n;
		 } else
		 {
		    MPN_NORMALIZE(u1, un);
			MPN_COPY(s0p, u1, un);
			(*s0size) = un;
		 }
		 TMP_FREE;
		  
		 return gn;
	  }

      gn = mpn_ngcdext_lehmer (gp, s, &sn, tp, tp + an, an, tp + 2*an);
      
	  /* Special case, s == 0, t == 1, cofactor = -u0 case is rare*/

	  if (sn == 0)
	  {
		  MPN_COPY(s0p, u0, u0n);
		  (*s0size) = -u0n;
		  if (swapped) (*s0size) = -(*s0size);
		  TMP_FREE;
		  
		  return gn;
	  }

	  /* We'll need the other cofactor t = (gp - s*ap)/bp 
		*/

	  t = tp;
	  tp += (an + 1);
		 
	  gcdext_get_t(t, &tn, gp, gn, ap, an, bp, n, s, sn, tp);

	  ASSERT((tn == 0) || (t[tn - 1] > 0)); /* {t, tn} is normalised */

	  ASSERT(tn <= an + 1);

	  /* We want to compute s*u1 - t*u0, so if s is negative
	     t will be positive, so we'd be dealing with negative
		  numbers. We fix that here.
	  */

	  if (sn < 0)
	  {
		  sn = -sn;
		  negate = 1;
	  }

	  /* Now we can deal with the special case u1 = 0 */

	  u1n = un; 
	  MPN_NORMALIZE(u1, u1n); /* {u1, u1n} is now normalised */
     
	  if (u1n == 0) /* case is rare */
	  {
		  MPN_COPY(s0p, t, tn);
		  (*s0size) = -tn;
		  if (swapped ^ negate) (*s0size) = -(*s0size);
		  TMP_FREE;
		  
		  return gn;
	  }

	  /* t may be zero, but we need to compute s*u1 anyway */
	  if (sn >= u1n)
		  mpn_mul(s0p, s, sn, u1, u1n);
	  else
		  mpn_mul(s0p, u1, u1n, s, sn);

	  (*s0size) = sn + u1n;
	  (*s0size) -= (s0p[sn + u1n - 1] == 0);

	  ASSERT(s0p[*s0size - 1] > 0); /* {s0p, *s0size} is normalised now */

	  if (tn == 0) /* case is rare */
	  {
		  if (swapped ^ negate) (*s0size) = -(*s0size);
        TMP_FREE;
	    
		return gn;
	  }

	  /* Now compute the rest of the cofactor, t*u0
	     and subtract it
		  We're done with u1 and s which happen to be
		  consecutive, so use that space
	  */

	  ASSERT(u1 + tn + u0n <= t);

     if (tn > u0n)
		  mpn_mul(u1, t, tn, u0, u0n);
	  else
		  mpn_mul(u1, u0, u0n, t, tn);

	  u1n = tn + u0n;
	  u1n -= (u1[tn + u0n - 1] == 0);

	  ASSERT(u1[u1n - 1] > 0);

	  /* Recall t is now negated so s*u1 - t*u0 
	     involves an *addition* 
	  */

	  if ((*s0size) >= u1n)
	  {
		  cy = mpn_add(s0p, s0p, *s0size, u1, u1n);
		  if (cy) s0p[(*s0size)++] = cy;
	  }
	  else
	  {
		  cy = mpn_add(s0p, u1, u1n, s0p, *s0size);
        (*s0size) = u1n;
	     if (cy) s0p[(*s0size)++] = cy;
	  }

	  if (swapped ^ negate) (*s0size) = -(*s0size);
     TMP_FREE;  
     
	 return gn;
  }
}
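
The cofactor comes back in the signed-size convention used throughout the routine above: ABS(*s0size) limbs at s0p, with a negative *s0size meaning a negative value. The helper below is a hedged sketch (the name limbs_to_mpz is made up here, not part of the library) showing one way to lift such a pair into an mpz_t with the public mpz_import, assuming a build without nail bits:

#include <gmp.h>

/* Hypothetical helper, not part of GMP/MPIR: convert a {limbs, signed size}
   pair such as (s0p, *s0size) into an already-initialized mpz_t.  Assumes a
   build without nail bits (the usual case); with nails the sixth mpz_import
   argument would have to be GMP_NAIL_BITS. */
static void
limbs_to_mpz (mpz_t r, const mp_limb_t *p, mp_size_t signed_n)
{
  mp_size_t n = signed_n >= 0 ? signed_n : -signed_n;

  /* count = n limbs, order = -1 (least significant limb first),
     native limb size and endianness, 0 nail bits. */
  mpz_import (r, n, -1, sizeof (mp_limb_t), 0, 0, p);
  if (signed_n < 0)
    mpz_neg (r, r);
}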
Example #8
int
mpn_jacobi_n (mp_ptr ap, mp_ptr bp, mp_size_t n, unsigned bits)
{
  mp_size_t scratch;
  mp_size_t matrix_scratch;
  mp_ptr tp;

  TMP_DECL;

  ASSERT (n > 0);
  ASSERT ( (ap[n-1] | bp[n-1]) > 0);
  ASSERT ( (bp[0] | ap[0]) & 1);

  /* FIXME: Check for small sizes first, before setting up temporary
     storage etc. */
  scratch = MPN_GCD_SUBDIV_STEP_ITCH(n);

  if (ABOVE_THRESHOLD (n, GCD_DC_THRESHOLD))
    {
      mp_size_t hgcd_scratch;
      mp_size_t update_scratch;
      mp_size_t p = CHOOSE_P (n);
      mp_size_t dc_scratch;

      matrix_scratch = MPN_HGCD_MATRIX_INIT_ITCH (n - p);
      hgcd_scratch = mpn_hgcd_itch (n - p);
      update_scratch = p + n - 1;

      dc_scratch = matrix_scratch + MAX(hgcd_scratch, update_scratch);
      if (dc_scratch > scratch)
	scratch = dc_scratch;
    }

  TMP_MARK;
  tp = TMP_ALLOC_LIMBS(scratch);

  while (ABOVE_THRESHOLD (n, JACOBI_DC_THRESHOLD))
    {
      struct hgcd_matrix M;
      mp_size_t p = 2*n/3;
      mp_size_t matrix_scratch = MPN_HGCD_MATRIX_INIT_ITCH (n - p);
      mp_size_t nn;
      mpn_hgcd_matrix_init (&M, n - p, tp);

      nn = mpn_hgcd_jacobi (ap + p, bp + p, n - p, &M, &bits,
			    tp + matrix_scratch);
      if (nn > 0)
	{
	  ASSERT (M.n <= (n - p - 1)/2);
	  ASSERT (M.n + p <= (p + n - 1) / 2);
	  /* Temporary storage 2 (p + M->n) <= p + n - 1. */
	  n = mpn_hgcd_matrix_adjust (&M, p + nn, ap, bp, p, tp + matrix_scratch);
	}
      else
	{
	  /* Temporary storage n */
	  n = mpn_gcd_subdiv_step (ap, bp, n, 0, jacobi_hook, &bits, tp);
	  if (!n)
	    {
	      TMP_FREE;
	      return bits == BITS_FAIL ? 0 : mpn_jacobi_finish (bits);
	    }
	}
    }

  while (n > 2)
    {
      struct hgcd_matrix1 M;
      mp_limb_t ah, al, bh, bl;
      mp_limb_t mask;

      mask = ap[n-1] | bp[n-1];
      ASSERT (mask > 0);

      if (mask & GMP_NUMB_HIGHBIT)
	{
	  ah = ap[n-1]; al = ap[n-2];
	  bh = bp[n-1]; bl = bp[n-2];
	}
      else
	{
	  int shift;

	  count_leading_zeros (shift, mask);
	  ah = MPN_EXTRACT_NUMB (shift, ap[n-1], ap[n-2]);
	  al = MPN_EXTRACT_NUMB (shift, ap[n-2], ap[n-3]);
	  bh = MPN_EXTRACT_NUMB (shift, bp[n-1], bp[n-2]);
	  bl = MPN_EXTRACT_NUMB (shift, bp[n-2], bp[n-3]);
	}

      /* Try an mpn_hgcd2_jacobi step */
      if (mpn_hgcd2_jacobi (ah, al, bh, bl, &M, &bits))
	{
	  n = mpn_matrix22_mul1_inverse_vector (&M, tp, ap, bp, n);
	  MP_PTR_SWAP (ap, tp);
	}
      else
	{
	  /* mpn_hgcd2 has failed. Then either one of a or b is very
	     small, or the difference is very small. Perform one
	     subtraction followed by one division. */
	  n = mpn_gcd_subdiv_step (ap, bp, n, 0, &jacobi_hook, &bits, tp);
	  if (!n)
	    {
	      TMP_FREE;
	      return bits == BITS_FAIL ? 0 : mpn_jacobi_finish (bits);
	    }
	}
    }

  if (bits >= 16)
    MP_PTR_SWAP (ap, bp);

  ASSERT (bp[0] & 1);

  if (n == 1)
    {
      mp_limb_t al, bl;
      al = ap[0];
      bl = bp[0];

      TMP_FREE;
      if (bl == 1)
	return 1 - 2*(bits & 1);
      else
	return mpn_jacobi_base (al, bl, bits << 1);
    }

  else
    {
      int res = mpn_jacobi_2 (ap, bp, bits & 1);
      TMP_FREE;
      return res;
    }
}
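
This limb-level Jacobi code is reached from the documented mpz_jacobi / mpz_kronecker functions. The standalone sketch below, with arbitrary odd moduli, checks multiplicativity in the denominator, jacobi(a, m*n) == jacobi(a, m) * jacobi(a, n) for odd m and n, which is part of the symbol's definition rather than anything specific to this implementation:

#include <stdio.h>
#include <gmp.h>

int
main (void)
{
  mpz_t a, m, n, mn;

  mpz_init_set_ui (a, 123457);
  mpz_init_set_ui (m, 10007);	/* odd */
  mpz_init_set_ui (n, 30031);	/* odd */
  mpz_init (mn);
  mpz_mul (mn, m, n);

  printf ("jacobi(a,m)*jacobi(a,n) = %d\n", mpz_jacobi (a, m) * mpz_jacobi (a, n));
  printf ("jacobi(a,m*n)           = %d\n", mpz_jacobi (a, mn));

  mpz_clears (a, m, n, mn, NULL);
  return 0;
}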
Example #9
mp_limb_t
mpn_sb_divrem_mn (mp_ptr qp,
		  mp_ptr np, mp_size_t nn,
		  mp_srcptr dp, mp_size_t dn)
{
  mp_limb_t most_significant_q_limb = 0;
  mp_size_t qn = nn - dn;
  mp_size_t i;
  mp_limb_t dx, d1, n0;
  mp_limb_t dxinv;
  int use_preinv;

  ASSERT (dn > 2);
  ASSERT (nn >= dn);
  ASSERT (dp[dn-1] & GMP_NUMB_HIGHBIT);
  ASSERT (! MPN_OVERLAP_P (np, nn, dp, dn));
  ASSERT (! MPN_OVERLAP_P (qp, nn-dn, dp, dn));
  ASSERT (! MPN_OVERLAP_P (qp, nn-dn, np, nn) || qp+dn >= np);
  ASSERT_MPN (np, nn);
  ASSERT_MPN (dp, dn);

  np += qn;
  dx = dp[dn - 1];
  d1 = dp[dn - 2];
  n0 = np[dn - 1];

  if (n0 >= dx)
    {
      if (n0 > dx || mpn_cmp (np, dp, dn - 1) >= 0)
	{
	  mpn_sub_n (np, np, dp, dn);
	  most_significant_q_limb = 1;
	}
    }

  use_preinv = ABOVE_THRESHOLD (qn, DIV_SB_PREINV_THRESHOLD);
  if (use_preinv)
    invert_limb (dxinv, dx);

  for (i = qn - 1; i >= 0; i--)
    {
      mp_limb_t q;
      mp_limb_t nx;
      mp_limb_t cy_limb;

      nx = np[dn - 1];		/* FIXME: could get value from r1 */
      np--;

      if (nx == dx)
	{
	  /* This might over-estimate q, but it's probably not worth
	     the extra code here to find out.  */
	  q = GMP_NUMB_MASK;

#if 1
	  cy_limb = mpn_submul_1 (np, dp, dn, q);
#else
	  /* This should be faster on many machines */
	  cy_limb = mpn_sub_n (np + 1, np + 1, dp, dn);
	  cy = mpn_add_n (np, np, dp, dn);
	  np[dn] += cy;
#endif

	  if (nx != cy_limb)
	    {
	      mpn_add_n (np, np, dp, dn);
	      q--;
	    }

	  qp[i] = q;
	}
      else
	{
	  mp_limb_t rx, r1, r0, p1, p0;

	  /* "workaround" avoids a problem with gcc 2.7.2.3 i386 register usage
	     when np[dn-1] is used in an asm statement like umul_ppmm in
	     udiv_qrnnd_preinv.  The symptom is seg faults due to registers
	     being clobbered.  gcc 2.95 i386 doesn't have the problem. */
	  {
	    mp_limb_t  workaround = np[dn - 1];
	    if (CACHED_ABOVE_THRESHOLD (use_preinv, DIV_SB_PREINV_THRESHOLD))
	      udiv_qrnnd_preinv (q, r1, nx, workaround, dx, dxinv);
	    else
	      {
		udiv_qrnnd (q, r1, nx, workaround << GMP_NAIL_BITS,
			    dx << GMP_NAIL_BITS);
		r1 >>= GMP_NAIL_BITS;
	      }
	  }
	  umul_ppmm (p1, p0, d1, q << GMP_NAIL_BITS);
	  p0 >>= GMP_NAIL_BITS;

	  r0 = np[dn - 2];
	  rx = 0;
	  if (r1 < p1 || (r1 == p1 && r0 < p0))
	    {
	      p1 -= p0 < d1;
	      p0 = (p0 - d1) & GMP_NUMB_MASK;
	      q--;
	      r1 = (r1 + dx) & GMP_NUMB_MASK;
	      rx = r1 < dx;
	    }

	  p1 += r0 < p0;	/* cannot carry! */
	  rx -= r1 < p1;	/* may become 11..1 if q is still too large */
	  r1 = (r1 - p1) & GMP_NUMB_MASK;
	  r0 = (r0 - p0) & GMP_NUMB_MASK;

	  cy_limb = mpn_submul_1 (np, dp, dn - 2, q);

	  /* Check if we've over-estimated q, and adjust as needed.  */
	  {
	    mp_limb_t cy1, cy2;
	    cy1 = r0 < cy_limb;
	    r0 = (r0 - cy_limb) & GMP_NUMB_MASK;
	    cy2 = r1 < cy1;
	    r1 -= cy1;
	    np[dn - 1] = r1;
	    np[dn - 2] = r0;
	    if (cy2 != rx)
	      {
		mpn_add_n (np, np, dp, dn);
		q--;
	      }
	  }
	  qp[i] = q;
	}
    }

  /* ______ ______ ______
    |__rx__|__r1__|__r0__|		partial remainder
	    ______ ______
	 - |__p1__|__p0__|		partial product to subtract
	    ______ ______
	 - |______|cylimb|

     rx is -1, 0 or 1.  If rx=1, then q is correct (it should match
     carry out).  If rx=-1 then q is too large.  If rx=0, then q might
     be too large, but it is most likely correct.
  */

  return most_significant_q_limb;
}
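
mpn_sb_divrem_mn is an internal schoolbook kernel with strict preconditions (dn > 2, divisor with the high bit of its top limb set). The documented general-purpose division entry point is mpn_tdiv_qr, which imposes no normalization on the divisor. A minimal standalone usage sketch with arbitrary limb values:

#include <stdio.h>
#include <gmp.h>

int
main (void)
{
  /* {np, 4} divided by {dp, 2}: the quotient needs 4 - 2 + 1 limbs and the
     remainder 2 limbs; dp[1] != 0 is required, but no normalization. */
  mp_limb_t np[4] = { 7, 0, 0, 9 };
  mp_limb_t dp[2] = { 5, 3 };
  mp_limb_t qp[3], rp[2];
  int i;

  mpn_tdiv_qr (qp, rp, 0, np, 4, dp, 2);

  for (i = 2; i >= 0; i--)
    gmp_printf ("qp[%d] = %Mu\n", i, qp[i]);
  for (i = 1; i >= 0; i--)
    gmp_printf ("rp[%d] = %Mu\n", i, rp[i]);
  return 0;
}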