Example #1
/* FIXME: Document storage need. */
mp_size_t
mpn_hgcd_reduce (struct hgcd_matrix *M,
		 mp_ptr ap, mp_ptr bp, mp_size_t n, mp_size_t p,
		 mp_ptr tp)
{
  mp_size_t nn;
  if (BELOW_THRESHOLD (n, HGCD_REDUCE_THRESHOLD))
    {
      nn = mpn_hgcd (ap + p, bp + p, n - p, M, tp);
      if (nn > 0)
	/* Needs 2*(p + M->n) <= 2*(floor(n/2) + ceil(n/2) - 1)
	   = 2 (n - 1) */
	return mpn_hgcd_matrix_adjust (M, p + nn, ap, bp, p, tp);
    }
  else
    {
      MPN_COPY (tp, ap + p, n - p);
      MPN_COPY (tp + n - p, bp + p, n - p);
      if (mpn_hgcd_appr (tp, tp + n - p, n - p, M, tp + 2*(n-p)))
	return hgcd_matrix_apply (M, ap, bp, n);
    }
  return 0;
}
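mpn_hgcd_reduce either runs mpn_hgcd directly on the n - p most significant limbs and then folds the low limbs back in with mpn_hgcd_matrix_adjust, or (above HGCD_REDUCE_THRESHOLD) runs the approximate variant on a copy and applies the resulting matrix to the full operands. The following toy, word-sized sketch is illustrative only, not GMP/MPIR code; the name toy_hgcd, the 32-bit stopping rule and the exact matrix convention are assumptions. It shows the invariant such a matrix-based reduction relies on: Euclid steps accumulate quotients into a 2x2 matrix from which the original operands can be recovered from the reduced pair.

/* Toy word-sized sketch (illustrative only, not part of GMP/MPIR). */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <assert.h>

struct mat22 { uint64_t m[2][2]; };

/* Run Euclid steps on (a, b), accumulating the quotients in M, and
   stop once a has lost roughly half of its bits.  On return the
   original pair can be reconstructed as (a0; b0) = M * (a; b). */
static void
toy_hgcd (uint64_t a, uint64_t b, struct mat22 *M, uint64_t *ra, uint64_t *rb)
{
  uint64_t a0 = a, b0 = b;

  M->m[0][0] = M->m[1][1] = 1;
  M->m[0][1] = M->m[1][0] = 0;

  while (b > 0 && (a >> 32) != 0)
    {
      uint64_t q = a / b, t;

      t = a - q * b;  a = b;  b = t;	/* one Euclid step */

      /* Multiply M on the right by the quotient matrix (q 1; 1 0). */
      t = M->m[0][0] * q + M->m[0][1];  M->m[0][1] = M->m[0][0];  M->m[0][0] = t;
      t = M->m[1][0] * q + M->m[1][1];  M->m[1][1] = M->m[1][0];  M->m[1][0] = t;
    }

  /* The invariant a matrix-based reduction depends on. */
  assert (a0 == M->m[0][0] * a + M->m[0][1] * b);
  assert (b0 == M->m[1][0] * a + M->m[1][1] * b);

  *ra = a;
  *rb = b;
}

int
main (void)
{
  struct mat22 M;
  uint64_t ra, rb;

  toy_hgcd (987654321987654321u, 123456789123456789u, &M, &ra, &rb);
  printf ("reduced to a = %" PRIu64 ", b = %" PRIu64 "\n", ra, rb);
  return 0;
}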
Example #2
static mp_size_t
one_test (mpz_t a, mpz_t b, int i)
{
  struct hgcd_matrix hgcd;
  struct hgcd_ref ref;

  mpz_t ref_r0;
  mpz_t ref_r1;
  mpz_t hgcd_r0;
  mpz_t hgcd_r1;

  mp_size_t res[2];
  mp_size_t asize;
  mp_size_t bsize;

  mp_size_t hgcd_init_scratch;
  mp_size_t hgcd_scratch;

  mp_ptr hgcd_init_tp;
  mp_ptr hgcd_tp;

  asize = a->_mp_size;
  bsize = b->_mp_size;

  ASSERT (asize >= bsize);

  hgcd_init_scratch = MPN_HGCD_MATRIX_INIT_ITCH (asize);
  hgcd_init_tp = refmpn_malloc_limbs (hgcd_init_scratch);
  mpn_hgcd_matrix_init (&hgcd, asize, hgcd_init_tp);

  hgcd_scratch = mpn_hgcd_itch (asize);
  hgcd_tp = refmpn_malloc_limbs (hgcd_scratch);

#if 0
  fprintf (stderr,
	   "one_test: i = %d asize = %d, bsize = %d\n",
	   i, a->_mp_size, b->_mp_size);

  gmp_fprintf (stderr,
	       "one_test: i = %d\n"
	       "  a = %Zx\n"
	       "  b = %Zx\n",
	       i, a, b);
#endif
  hgcd_ref_init (&ref);

  mpz_init_set (ref_r0, a);
  mpz_init_set (ref_r1, b);
  res[0] = hgcd_ref (&ref, ref_r0, ref_r1);

  mpz_init_set (hgcd_r0, a);
  mpz_init_set (hgcd_r1, b);
  if (bsize < asize)
    {
      _mpz_realloc (hgcd_r1, asize);
      MPN_ZERO (hgcd_r1->_mp_d + bsize, asize - bsize);
    }
  res[1] = mpn_hgcd (hgcd_r0->_mp_d,
		     hgcd_r1->_mp_d,
		     asize,
		     &hgcd, hgcd_tp);

  if (res[0] != res[1])
    {
      fprintf (stderr, "ERROR in test %d\n", i);
      fprintf (stderr, "Different return value from hgcd and hgcd_ref\n");
      fprintf (stderr, "op1=");                 debug_mp (a, -16);
      fprintf (stderr, "op2=");                 debug_mp (b, -16);
      fprintf (stderr, "hgcd_ref: %ld\n", (long) res[0]);
      fprintf (stderr, "mpn_hgcd: %ld\n", (long) res[1]);
      abort ();
    }
  if (res[0] > 0)
    {
      if (!hgcd_ref_equal (&hgcd, &ref)
	  || !mpz_mpn_equal (ref_r0, hgcd_r0->_mp_d, res[1])
	  || !mpz_mpn_equal (ref_r1, hgcd_r1->_mp_d, res[1]))
	{
	  fprintf (stderr, "ERROR in test %d\n", i);
	  fprintf (stderr, "mpn_hgcd and hgcd_ref returned different values\n");
	  fprintf (stderr, "op1=");                 debug_mp (a, -16);
	  fprintf (stderr, "op2=");                 debug_mp (b, -16);
	  abort ();
	}
    }

  refmpn_free_limbs (hgcd_init_tp);
  refmpn_free_limbs (hgcd_tp);
  hgcd_ref_clear (&ref);
  mpz_clear (ref_r0);
  mpz_clear (ref_r1);
  mpz_clear (hgcd_r0);
  mpz_clear (hgcd_r1);

  return res[0];
}
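one_test compares mpn_hgcd against the reference implementation on a single operand pair. Below is a hedged sketch of a driver that feeds it random operands; the real loop lives in tests/mpn/t-hgcd.c and is more elaborate, and the name run_random_tests and the size handling here are illustrative assumptions (it would sit in the same file, since one_test is static).

#include <gmp.h>

static void
run_random_tests (int count, mp_bitcnt_t bits)
{
  gmp_randstate_t rands;
  mpz_t a, b;
  int i;

  gmp_randinit_default (rands);
  mpz_init (a);
  mpz_init (b);

  for (i = 0; i < count; i++)
    {
      /* Random operands, ordered so that a >= b, since one_test
	 asserts asize >= bsize. */
      mpz_urandomb (a, rands, bits);
      mpz_urandomb (b, rands, bits);
      if (mpz_cmp (a, b) < 0)
	mpz_swap (a, b);

      /* Skip the degenerate case of a zero operand. */
      if (mpz_sgn (b) == 0)
	continue;

      one_test (a, b, i);
    }

  mpz_clear (a);
  mpz_clear (b);
  gmp_randclear (rands);
}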
Example #3
File: gcdext.c Project: qsnake/mpir
mp_size_t
mpn_gcdext (mp_ptr gp, mp_ptr up, mp_size_t *usizep,
	    mp_ptr ap, mp_size_t an, mp_ptr bp, mp_size_t n)
{
  mp_size_t talloc;
  mp_size_t scratch;
  mp_size_t matrix_scratch;
  mp_size_t ualloc = n + 1;

  mp_size_t un;
  mp_ptr u0;
  mp_ptr u1;

  mp_ptr tp;

  TMP_DECL;

  ASSERT (an >= n);
  ASSERT (n > 0);

  TMP_MARK;

  /* FIXME: Check for small sizes first, before setting up temporary
     storage etc. */
  talloc = MPN_GCDEXT_LEHMER_N_ITCH(n);

  /* For initial division */
  scratch = an - n + 1;
  if (scratch > talloc)
    talloc = scratch;

  if (ABOVE_THRESHOLD (n, GCDEXT_DC_THRESHOLD))
    {
      /* For hgcd loop. */
      mp_size_t hgcd_scratch;
      mp_size_t update_scratch;
      mp_size_t p1 = CHOOSE_P_1 (n);
      mp_size_t p2 = CHOOSE_P_2 (n);
      mp_size_t min_p = MIN(p1, p2);
      mp_size_t max_p = MAX(p1, p2);
      matrix_scratch = MPN_HGCD_MATRIX_INIT_ITCH (n - min_p);
      hgcd_scratch = mpn_hgcd_itch (n - min_p);
      update_scratch = max_p + n - 1;

      scratch = matrix_scratch + MAX(hgcd_scratch, update_scratch);
      if (scratch > talloc)
	talloc = scratch;

      /* Final mpn_gcdext_lehmer_n call. Need space for u and for
	 copies of a and b. */
      scratch = MPN_GCDEXT_LEHMER_N_ITCH (GCDEXT_DC_THRESHOLD)
	+ 3*GCDEXT_DC_THRESHOLD;

      if (scratch > talloc)
	talloc = scratch;

      /* Cofactors u0 and u1 */
      talloc += 2*(n+1);
    }

  tp = TMP_ALLOC_LIMBS(talloc);

  if (an > n)
    {
      mpn_tdiv_qr (tp, ap, 0, ap, an, bp, n);

      if (mpn_zero_p (ap, n))
	{
	  MPN_COPY (gp, bp, n);
	  *usizep = 0;
	  TMP_FREE;
	  return n;
	}
    }

  if (BELOW_THRESHOLD (n, GCDEXT_DC_THRESHOLD))
    {
      mp_size_t gn = mpn_gcdext_lehmer_n(gp, up, usizep, ap, bp, n, tp);

      TMP_FREE;
      return gn;
    }

  MPN_ZERO (tp, 2*ualloc);
  u0 = tp; tp += ualloc;
  u1 = tp; tp += ualloc;

  {
    /* For the first hgcd call, there are no u updates, and it makes
       some sense to use a different choice for p. */

    /* FIXME: We could trim use of temporary storage, since u0 and u1
       are not used yet. For the hgcd call, we could swap in the u0
       and u1 pointers for the relevant matrix elements. */

    struct hgcd_matrix M;
    mp_size_t p = CHOOSE_P_1 (n);
    mp_size_t nn;

    mpn_hgcd_matrix_init (&M, n - p, tp);
    nn = mpn_hgcd (ap + p, bp + p, n - p, &M, tp + matrix_scratch);
    if (nn > 0)
      {
	ASSERT (M.n <= (n - p - 1)/2);
	ASSERT (M.n + p <= (p + n - 1) / 2);

	/* Temporary storage 2 (p + M->n) <= p + n - 1 */
	n = mpn_hgcd_matrix_adjust (&M, p + nn, ap, bp, p, tp + matrix_scratch);

	MPN_COPY (u0, M.p[1][0], M.n);
	MPN_COPY (u1, M.p[1][1], M.n);
	un = M.n;
	while ( (u0[un-1] | u1[un-1] ) == 0)
	  un--;
      }
    else
      {
	/* mpn_hgcd has failed. Then either one of a or b is very
	   small, or the difference is very small. Perform one
	   subtraction followed by one division. */
	mp_size_t gn;
	mp_size_t updated_un = 1;

	u1[0] = 1;

	/* Temporary storage 2n + 1 */
	n = mpn_gcdext_subdiv_step (gp, &gn, up, usizep, ap, bp, n,
				    u0, u1, &updated_un, tp, tp + n);
	if (n == 0)
	  {
	    TMP_FREE;
	    return gn;
	  }

	un = updated_un;
	ASSERT (un < ualloc);
      }
  }

  while (ABOVE_THRESHOLD (n, GCDEXT_DC_THRESHOLD))
    {
      struct hgcd_matrix M;
      mp_size_t p = CHOOSE_P_2 (n);
      mp_size_t nn;

      mpn_hgcd_matrix_init (&M, n - p, tp);
      nn = mpn_hgcd (ap + p, bp + p, n - p, &M, tp + matrix_scratch);
      if (nn > 0)
	{
	  mp_ptr t0;

	  t0 = tp + matrix_scratch;
	  ASSERT (M.n <= (n - p - 1)/2);
	  ASSERT (M.n + p <= (p + n - 1) / 2);

	  /* Temporary storage 2 (p + M->n) <= p + n - 1 */
	  n = mpn_hgcd_matrix_adjust (&M, p + nn, ap, bp, p, t0);

	  /* By the same analysis as for mpn_hgcd_matrix_mul */
	  ASSERT (M.n + un <= ualloc);

	  /* FIXME: This copying could be avoided by some swapping of
	   * pointers. May need more temporary storage, though. */
	  MPN_COPY (t0, u0, un);

	  /* Temporary storage ualloc */
	  un = hgcd_mul_matrix_vector (&M, u0, t0, u1, un, t0 + un);

	  ASSERT (un < ualloc);
	  ASSERT ( (u0[un-1] | u1[un-1]) > 0);
	}
      else
	{
	  /* mpn_hgcd has failed. Then either one of a or b is very
	     small, or the difference is very small. Perform one
	     subtraction followed by one division. */
	  mp_size_t gn;
	  mp_size_t updated_un = un;

	  /* Temporary storage 2n + 1 */
	  n = mpn_gcdext_subdiv_step (gp, &gn, up, usizep, ap, bp, n,
				      u0, u1, &updated_un, tp, tp + n);
	  if (n == 0)
	    {
	      TMP_FREE;
	      return gn;
	    }

	  un = updated_un;
	  ASSERT (un < ualloc);
	}
    }

  if (UNLIKELY (mpn_cmp (ap, bp, n) == 0))
    {
      /* Must return the smallest cofactor, +u1 or -u0 */
      int c;

      MPN_COPY (gp, ap, n);

      MPN_CMP (c, u0, u1, un);
      ASSERT (c != 0);
      if (c < 0)
	{
	  MPN_NORMALIZE (u0, un);
	  MPN_COPY (up, u0, un);
	  *usizep = -un;
	}
      else
	{
	  MPN_NORMALIZE_NOT_ZERO (u1, un);
	  MPN_COPY (up, u1, un);
	  *usizep = un;
	}

      TMP_FREE;
      return n;
    }
  else if (mpn_zero_p (u0, un))
    {
      mp_size_t gn;
      ASSERT (un == 1);
      ASSERT (u1[0] == 1);

      /* g = u a + v b = (u u1 - v u0) A + (...) B = u A + (...) B */
      gn = mpn_gcdext_lehmer_n (gp, up, usizep, ap, bp, n, tp);

      TMP_FREE;
      return gn;
    }
  else
    {
      /* We have A = ... a + ... b
		 B =  u0 a +  u1 b

		 a = u1  A + ... B
		 b = -u0 A + ... B

	 with bounds

	   |u0|, |u1| <= B / min(a, b)

	 Compute g = u a + v b = (u u1 - v u0) A + (...) B
	 Here, u, v are bounded by

	 |u| <= b,
	 |v| <= a
      */

      mp_size_t u0n;
      mp_size_t u1n;
      mp_size_t lehmer_un;
      mp_size_t lehmer_vn;
      mp_size_t gn;

      mp_ptr lehmer_up;
      mp_ptr lehmer_vp;
      int negate;

      lehmer_up = tp; tp += n;

      /* Call mpn_gcdext_lehmer_n with copies of a and b. */
      MPN_COPY (tp, ap, n);
      MPN_COPY (tp + n, bp, n);
      gn = mpn_gcdext_lehmer_n (gp, lehmer_up, &lehmer_un, tp, tp + n, n, tp + 2*n);

      u0n = un;
      MPN_NORMALIZE (u0, u0n);
      if (lehmer_un == 0)
	{
	  /* u == 0  ==>  v = g / b == 1  ==> g = - u0 A + (...) B */
	  MPN_COPY (up, u0, u0n);
	  *usizep = -u0n;

	  TMP_FREE;
	  return gn;
	}

      lehmer_vp = tp;
      /* Compute v = (g - u a) / b */
      lehmer_vn = compute_v (lehmer_vp,
			     ap, bp, n, gp, gn, lehmer_up, lehmer_un, tp + n + 1);

      if (lehmer_un > 0)
	negate = 0;
      else
	{
	  lehmer_un = -lehmer_un;
	  negate = 1;
	}

      u1n = un;
      MPN_NORMALIZE (u1, u1n);

      /* It's possible that u0 = 1, u1 = 0 */
      if (u1n == 0)
	{
	  ASSERT (un == 1);
	  ASSERT (u0[0] == 1);

	  /* u1 == 0 ==> u u1 + v u0 = v */
	  MPN_COPY (up, lehmer_vp, lehmer_vn);
	  *usizep = negate ? lehmer_vn : - lehmer_vn;

	  TMP_FREE;
	  return gn;
	}

      ASSERT (lehmer_un + u1n <= ualloc);
      ASSERT (lehmer_vn + u0n <= ualloc);

      /* Now u0, u1, u are non-zero. We may still have v == 0 */

      /* Compute u u1 */
      if (lehmer_un <= u1n)
	/* Should be the common case */
	mpn_mul (up, u1, u1n, lehmer_up, lehmer_un);
      else
	mpn_mul (up, lehmer_up, lehmer_un, u1, u1n);

      un = u1n + lehmer_un;
      un -= (up[un - 1] == 0);

      if (lehmer_vn > 0)
	{
	  mp_limb_t cy;

	  /* Overwrites old u1 value */
	  if (lehmer_vn <= u0n)
	    /* Should be the common case */
	    mpn_mul (u1, u0, u0n, lehmer_vp, lehmer_vn);
	  else
	    mpn_mul (u1, lehmer_vp, lehmer_vn, u0, u0n);

	  u1n = u0n + lehmer_vn;
	  u1n -= (u1[u1n - 1] == 0);

	  if (u1n <= un)
	    {
	      cy = mpn_add (up, up, un, u1, u1n);
	    }
	  else
	    {
	      cy = mpn_add (up, u1, u1n, up, un);
	      un = u1n;
	    }
	  up[un] = cy;
	  un += (cy != 0);

	  ASSERT (un < ualloc);
	}
      *usizep = negate ? -un : un;

      TMP_FREE;
      return gn;
    }
}
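mpn_gcdext is part of the documented low-level mpn interface, so a minimal usage sketch can follow. It assumes the conventions visible above (an >= n > 0, the divisor's high limb non-zero, cofactor storage of n + 1 limbs as suggested by ualloc, and both source operands clobbered); the buffers are sized generously, and the expected values in the comments come from the extended Euclidean algorithm worked by hand.

#include <stdio.h>
#include <gmp.h>

int
main (void)
{
  /* U = 240, V = 46; gcd is 2 = (-9)*240 + 47*46. */
  mp_limb_t a[1] = { 240 };
  mp_limb_t b[1] = { 46 };
  mp_limb_t g[2];		/* generously sized result buffers */
  mp_limb_t s[2];
  mp_size_t gn, sn;

  gn = mpn_gcdext (g, s, &sn, a, 1, b, 1);

  /* Expect gn = 1, g[0] = 2, and a cofactor of 240 stored at s with
     its (possibly negative) size in sn. */
  printf ("gcd: %lu (%ld limb), cofactor low limb: %lu (size %ld)\n",
	  (unsigned long) g[0], (long) gn, (unsigned long) s[0], (long) sn);
  return 0;
}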
Example #4
File: hgcd.c Project: qsnake/mpir
mp_size_t
mpn_hgcd (mp_ptr ap, mp_ptr bp, mp_size_t n,
          struct hgcd_matrix *M, mp_ptr tp)
{
  mp_size_t s = n/2 + 1;
  mp_size_t n2 = (3*n)/4 + 1;

  mp_size_t p, nn;
  int success = 0;

  if (n <= s)
    /* Happens when n <= 2, a fairly uninteresting case but exercised
       by the random inputs of the testsuite. */
    return 0;

  ASSERT ((ap[n-1] | bp[n-1]) > 0);

  ASSERT ((n+1)/2 - 1 < M->alloc);

  if (BELOW_THRESHOLD (n, HGCD_THRESHOLD))
    return mpn_hgcd_lehmer (ap, bp, n, M, tp);

  p = n/2;
  nn = mpn_hgcd (ap + p, bp + p, n - p, M, tp);
  if (nn > 0)
    {
      /* Needs 2*(p + M->n) <= 2*(floor(n/2) + ceil(n/2) - 1)
         = 2 (n - 1) */
      n = mpn_hgcd_matrix_adjust (M, p + nn, ap, bp, p, tp);
      success = 1;
    }
  while (n > n2)
    {
      /* Needs n + 1 storage */
      nn = hgcd_step (n, ap, bp, s, M, tp);
      if (!nn)
        return success ? n : 0;
      n = nn;
      success = 1;
    }

  if (n > s + 2)
    {
      struct hgcd_matrix M1;
      mp_size_t scratch;

      p = 2*s - n + 1;
      scratch = MPN_HGCD_MATRIX_INIT_ITCH (n-p);

      mpn_hgcd_matrix_init(&M1, n - p, tp);
      nn = mpn_hgcd (ap + p, bp + p, n - p, &M1, tp + scratch);
      if (nn > 0)
        {
          /* We always have max(M) > 2^{-(GMP_NUMB_BITS + 1)} max(M1) */
          ASSERT (M->n + 2 >= M1.n);

          /* Furthermore, assume M ends with a quotient (1, q; 0, 1),
             then either q or q + 1 is a correct quotient, and M1 will
             start with either (1, 0; 1, 1) or (2, 1; 1, 1). This
             rules out the case that the size of M * M1 is much
             smaller than the expected M->n + M1->n. */

          ASSERT (M->n + M1.n < M->alloc);

          /* Needs 2 (p + M->n) <= 2 (2*s - n2 + 1 + n2 - s - 1)
             = 2*s <= 2*(floor(n/2) + 1) <= n + 2. */
          n = mpn_hgcd_matrix_adjust (&M1, p + nn, ap, bp, p, tp + scratch);

          /* We need a bound on M->n + M1.n. Let n be the original
             input size. Then

               ceil(n/2) - 1 >= size of product >= M.n + M1.n - 2

             and it follows that

               M.n + M1.n <= ceil(n/2) + 1

             Then 3*(M.n + M1.n) + 5 <= 3 * ceil(n/2) + 8 is the
             amount of needed scratch space. */
          mpn_hgcd_matrix_mul (M, &M1, tp + scratch);
          success = 1;
        }
    }

  /* This really is the base case */
  for (;;)
    {
      /* Needs s+3 < n */
      nn = hgcd_step (n, ap, bp, s, M, tp);
      if (!nn)
        return success ? n : 0;

      n = nn;
      success = 1;
    }
}
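To make the split sizes concrete, take n = 100 limbs: s = n/2 + 1 = 51 and n2 = 3n/4 + 1 = 76. The first recursive call uses p = n/2 = 50 and so works on the top n - p = 50 limbs; mpn_hgcd_matrix_adjust then folds the low p limbs back in. The hgcd_step loop brings n down to at most n2 = 76, and if n is still above s + 2 = 53, the second recursive call picks p = 2*s - n + 1, so the top n - p = 2*(n - s) - 1 limbs get roughly halved and the result lands close to the target size s, leaving only a few hgcd_step iterations for the final loop.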