Example 1
/* Add LENGTH bytes of randomness from buffer to the pool.  ORIGIN is
   used to specify the randomness origin.  This is one of the
   RANDOM_ORIGIN_* values. */
static void
add_randomness (const void *buffer, size_t length, enum random_origins origin)
{
  const unsigned char *p = buffer;
  size_t count = 0;

  gcry_assert (pool_is_locked);

  rndstats.addbytes += length;
  rndstats.naddbytes++;
  while (length-- )
	{
	  rndpool[pool_writepos++] ^= *p++;
	  count++;
	  if (pool_writepos >= POOLSIZE )
		{
		  /* It is possible that we are invoked before the pool is
			 filled using an unreliable origin of entropy, for example
			 the fast random poll.  To avoid flagging the pool as
			 filled in this case, we track the initial filling state
			 separately.  See also the remarks about the seed file. */
		  if (origin >= RANDOM_ORIGIN_SLOWPOLL && !pool_filled)
			{
			  pool_filled_counter += count;
			  count = 0;
			  if (pool_filled_counter >= POOLSIZE)
				pool_filled = 1;
			}
		  pool_writepos = 0;
		  mix_pool(rndpool); rndstats.mixrnd++;
		  just_mixed = !length;
		}
	}
}
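The core pattern here, XORing incoming bytes into a fixed-size pool while a write position wraps around, can be shown in isolation. The following is a minimal sketch with hypothetical names (toy_pool, TOY_POOLSIZE), not the library's code; it omits the locking, statistics, and mixing that add_randomness() performs:

#include <stddef.h>

#define TOY_POOLSIZE 64                 /* hypothetical; far smaller than POOLSIZE */

static unsigned char toy_pool[TOY_POOLSIZE];
static size_t toy_writepos;

/* XOR LENGTH bytes from BUFFER into the pool, wrapping at the end. */
static void
toy_add_randomness (const void *buffer, size_t length)
{
  const unsigned char *p = buffer;

  while (length--)
    {
      toy_pool[toy_writepos++] ^= *p++;
      if (toy_writepos >= TOY_POOLSIZE)
        toy_writepos = 0;               /* wrap; the real code also mixes here */
    }
}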
Example 2
/* If we do not want to enforce the fips mode, we can set a flag so
   that the application may check whether it is still in fips mode.
   TEXT will be printed as part of a syslog message.  This function
   may only be called if in fips mode. */
void
_gcry_inactivate_fips_mode (const char *text)
{
  gcry_assert (_gcry_fips_mode ());

  if (_gcry_enforced_fips_mode () )
    {
      /* Get us into the error state. */
      fips_signal_error (text);
      return;
    }

  lock_fsm ();
  if (!inactive_fips_mode)
    {
      inactive_fips_mode = 1;
      unlock_fsm ();
#ifdef HAVE_SYSLOG
      syslog (LOG_USER|LOG_WARNING, "Libgcrypt warning: "
              "%s - FIPS mode inactivated", text);
#endif /*HAVE_SYSLOG*/
    }
  else
    unlock_fsm ();
}
Example 3
/* Basic initialization which is required to initialize mutexes and
   such.  It does not run a full initialization so that the filling of
   the random pool can be delayed until it is actually needed.  We
   assume that this function is used before any concurrent access
   happens. */
static void
initialize_basics(void)
{
  static int initialized;
  int err;

  if (!initialized)
	{
	  initialized = 1;
	  err = ath_mutex_init (&pool_lock);
	  if (err)
		log_fatal ("failed to create the pool lock: %s\n", strerror (err) );
	  
	  err = ath_mutex_init (&nonce_buffer_lock);
	  if (err)
		log_fatal ("failed to create the nonce buffer lock: %s\n",
				   strerror (err) );

#ifdef USE_RANDOM_DAEMON
	  _gcry_daemon_initialize_basics ();
#endif /*USE_RANDOM_DAEMON*/

	  /* Make sure that we are still using the values we have
		 traditionally used for the random levels.  */
	  gcry_assert (GCRY_WEAK_RANDOM == 0 
				   && GCRY_STRONG_RANDOM == 1
				   && GCRY_VERY_STRONG_RANDOM == 2);
	}
}
Example 4
/* Fill BUFFER with LENGTH bytes of random at quality LEVEL.  The
   function either succeeds or terminates the process in case of a
   fatal error. */
static void
get_random (void *buffer, size_t length, int level)
{
    int rc;

    gcry_assert (buffer);

    read_cb_buffer = buffer;
    read_cb_size   = length;
    read_cb_len    = 0;

#if USE_RNDLINUX
    rc = _gcry_rndlinux_gather_random (read_cb, 0, length, level);
#elif USE_RNDUNIX
    rc = _gcry_rndunix_gather_random (read_cb, 0, length, level);
#elif USE_RNDW32
    do
    {
        rc = _gcry_rndw32_gather_random (read_cb, 0, length, level);
    }
    while (rc >= 0 && read_cb_len < read_cb_size);
#else
    rc = -1;
#endif

    if (rc < 0 || read_cb_len != read_cb_size)
    {
        log_fatal ("error reading random from system RNG (rc=%d)\n", rc);
    }
}
Example 5
/* Callback for _gcry_rnd*_gather_random.  */
static void
read_cb (const void *buffer, size_t length, enum random_origins origin)
{
    const unsigned char *p = buffer;

    (void)origin;

    gcry_assert (system_rng_is_locked);
    gcry_assert (read_cb_buffer);

    /* Note that we need to protect against gatherers returning more
       than the requested bytes (e.g. rndw32).  */
    while (length-- && read_cb_len < read_cb_size)
    {
        read_cb_buffer[read_cb_len++] = *p++;
    }
}
Example 6
/* Note: This function requires LENGTH > 0.  */
static void
salsa20_do_encrypt_stream (SALSA20_context_t *ctx,
                           byte *outbuf, const byte *inbuf,
                           unsigned int length)
{
  if (ctx->unused)
    {
      unsigned char *p = (void*)ctx->pad;
      unsigned int n;

      gcry_assert (ctx->unused < SALSA20_BLOCK_SIZE);

      n = ctx->unused;
      if (n > length)
        n = length;
      buf_xor (outbuf, inbuf, p + SALSA20_BLOCK_SIZE - ctx->unused, n);
      length -= n;
      outbuf += n;
      inbuf  += n;
      ctx->unused -= n;
      if (!length)
        return;
      gcry_assert (!ctx->unused);
    }

  for (;;)
    {
      /* Create the next pad and bump the block counter.  Note that it
         is the user's duty to change to another nonce not later than
         after 2^70 processed bytes.  */
      salsa20_core (ctx->pad, ctx->input);
      if (!++ctx->input[8])
        ctx->input[9]++;

      if (length <= SALSA20_BLOCK_SIZE)
        {
          buf_xor (outbuf, inbuf, ctx->pad, length);
          ctx->unused = SALSA20_BLOCK_SIZE - length;
          return;
        }
      buf_xor (outbuf, inbuf, ctx->pad, SALSA20_BLOCK_SIZE);
      length -= SALSA20_BLOCK_SIZE;
      outbuf += SALSA20_BLOCK_SIZE;
      inbuf  += SALSA20_BLOCK_SIZE;
    }
}
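The counter update above (if (!++ctx->input[8]) ctx->input[9]++;) treats two 32-bit words of the Salsa20 input as one 64-bit block counter. A standalone sketch of the same carry propagation, using a hypothetical helper name:

#include <stdint.h>

/* Bump a 64-bit block counter stored as two 32-bit words; the carry
   moves into the high word only when the low word wraps to zero.  */
static void
toy_bump_block_counter (uint32_t ctr[2])
{
  if (!++ctr[0])
    ctr[1]++;
}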
Example 7
/* Basic initialization is required to initialize mutexes and
   do a few checks on the implementation.  */
static void
basic_initialization (void)
{
  static int initialized;

  if (initialized)
    return;
  initialized = 1;

  system_rng_is_locked = 0;

  /* Make sure that we are still using the values we traditionally
     used for the random levels.  */
  gcry_assert (GCRY_WEAK_RANDOM == 0
               && GCRY_STRONG_RANDOM == 1
               && GCRY_VERY_STRONG_RANDOM == 2);

}
Example 8
static void
mpi_set_secure( gcry_mpi_t a )
{
  mpi_ptr_t ap, bp;

  if ( (a->flags & 1) )
    return;
  a->flags |= 1;
  ap = a->d;
  if (!a->nlimbs)
    {
      gcry_assert (!ap);
      return;
    }
  bp = mpi_alloc_limb_space (a->nlimbs, 1);
  MPN_COPY( bp, ap, a->nlimbs );
  a->d = bp;
  _gcry_mpi_free_limb_space (ap, a->alloced);
}
Example 9
/* Basic initialization which is required to initialize mutexes and
   such.  It does not run a full initialization so that the filling of
   the random pool can be delayed until it is actually needed.  We
   assume that this function is used before any concurrent access
   happens. */
static void
initialize_basics(void)
{
  static int initialized;

  if (!initialized)
    {
      initialized = 1;

#ifdef USE_RANDOM_DAEMON
      _gcry_daemon_initialize_basics ();
#endif /*USE_RANDOM_DAEMON*/

      /* Make sure that we are still using the values we have
         traditionally used for the random levels.  */
      gcry_assert (GCRY_WEAK_RANDOM == 0
                   && GCRY_STRONG_RANDOM == 1
                   && GCRY_VERY_STRONG_RANDOM == 2);
    }
}
Example 10
/* Basic initialization is required to initialize mutexes and
   do a few checks on the implementation.  */
static void
basic_initialization (void)
{
    static int initialized;
    int my_errno;

    if (initialized)
        return;
    initialized = 1;

    my_errno = ath_mutex_init (&system_rng_lock);
    if (my_errno)
        log_fatal ("failed to create the System RNG lock: %s\n",
                   strerror (my_errno));
    system_rng_is_locked = 0;

    /* Make sure that we are still using the values we traditionally
       used for the random levels.  */
    gcry_assert (GCRY_WEAK_RANDOM == 0
                 && GCRY_STRONG_RANDOM == 1
                 && GCRY_VERY_STRONG_RANDOM == 2);

}
Example 11
/****************
 * RES = BASE ^ EXPO mod MOD
 */
void
gcry_mpi_powm (gcry_mpi_t res,
               gcry_mpi_t base, gcry_mpi_t expo, gcry_mpi_t mod)
{
  /* Pointer to the limbs of the arguments, their size and signs. */
  mpi_ptr_t  rp, ep, mp, bp;
  mpi_size_t esize, msize, bsize, rsize;
  int               msign, bsign, rsign;
  /* Flags telling the secure allocation status of the arguments.  */
  int        esec,  msec,  bsec;
  /* Size of the result including space for temporary values.  */
  mpi_size_t size;
  /* Helper.  */
  int mod_shift_cnt;
  int negative_result;
  mpi_ptr_t mp_marker = NULL;
  mpi_ptr_t bp_marker = NULL;
  mpi_ptr_t ep_marker = NULL;
  mpi_ptr_t xp_marker = NULL;
  unsigned int mp_nlimbs = 0;
  unsigned int bp_nlimbs = 0;
  unsigned int ep_nlimbs = 0;
  unsigned int xp_nlimbs = 0;
  mpi_ptr_t tspace = NULL;
  mpi_size_t tsize = 0;


  esize = expo->nlimbs;
  msize = mod->nlimbs;
  size = 2 * msize;
  msign = mod->sign;

  esec = mpi_is_secure(expo);
  msec = mpi_is_secure(mod);
  bsec = mpi_is_secure(base);

  rp = res->d;
  ep = expo->d;

  if (!msize)
    grub_fatal ("mpi division by zero");

  if (!esize)
    {
      /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0 depending
         on whether MOD equals 1.  */
      res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1;
      if (res->nlimbs)
        {
          RESIZE_IF_NEEDED (res, 1);
          rp = res->d;
          rp[0] = 1;
        }
      res->sign = 0;
      goto leave;
    }

  /* Normalize MOD (i.e. make its most significant bit set) as
     required by mpn_divrem.  This will make the intermediate values
     in the calculation slightly larger, but the correct result is
     obtained after a final reduction using the original MOD value. */
  mp_nlimbs = msec? msize:0;
  mp = mp_marker = mpi_alloc_limb_space(msize, msec);
  count_leading_zeros (mod_shift_cnt, mod->d[msize-1]);
  if (mod_shift_cnt)
    _gcry_mpih_lshift (mp, mod->d, msize, mod_shift_cnt);
  else
    MPN_COPY( mp, mod->d, msize );

  bsize = base->nlimbs;
  bsign = base->sign;
  if (bsize > msize)
    {
      /* The base is larger than the modulus.  Reduce it.

         Allocate (BSIZE + 1) with space for remainder and quotient.
         (The quotient is (bsize - msize + 1) limbs.)  */
      bp_nlimbs = bsec ? (bsize + 1):0;
      bp = bp_marker = mpi_alloc_limb_space( bsize + 1, bsec );
      MPN_COPY ( bp, base->d, bsize );
      /* We don't care about the quotient, store it above the
       * remainder, at BP + MSIZE.  */
      _gcry_mpih_divrem( bp + msize, 0, bp, bsize, mp, msize );
      bsize = msize;
      /* Canonicalize the base, since we are going to multiply with it
	 quite a few times.  */
      MPN_NORMALIZE( bp, bsize );
    }
  else
    bp = base->d;

  if (!bsize)
    {
      res->nlimbs = 0;
      res->sign = 0;
      goto leave;
    }


  /* Make BASE, EXPO and MOD not overlap with RES.  */
  if ( rp == bp )
    {
      /* RES and BASE are identical.  Allocate temp. space for BASE.  */
      gcry_assert (!bp_marker);
      bp_nlimbs = bsec? bsize:0;
      bp = bp_marker = mpi_alloc_limb_space( bsize, bsec );
      MPN_COPY(bp, rp, bsize);
    }
  if ( rp == ep )
    {
      /* RES and EXPO are identical.  Allocate temp. space for EXPO.  */
      ep_nlimbs = esec? esize:0;
      ep = ep_marker = mpi_alloc_limb_space( esize, esec );
      MPN_COPY(ep, rp, esize);
    }
  if ( rp == mp )
    {
      /* RES and MOD are identical.  Allocate temporary space for MOD.*/
      gcry_assert (!mp_marker);
      mp_nlimbs = msec?msize:0;
      mp = mp_marker = mpi_alloc_limb_space( msize, msec );
      MPN_COPY(mp, rp, msize);
    }

  /* Copy base to the result.  */
  if (res->alloced < size)
    {
      mpi_resize (res, size);
      rp = res->d;
    }
  MPN_COPY ( rp, bp, bsize );
  rsize = bsize;
  rsign = bsign;

  /* Main processing.  */
  {
    mpi_size_t i;
    mpi_ptr_t xp;
    int c;
    mpi_limb_t e;
    mpi_limb_t carry_limb;
    struct karatsuba_ctx karactx;

    xp_nlimbs = msec? (2 * (msize + 1)):0;
    xp = xp_marker = mpi_alloc_limb_space( 2 * (msize + 1), msec );

    memset( &karactx, 0, sizeof karactx );
    negative_result = (ep[0] & 1) && base->sign;

    i = esize - 1;
    e = ep[i];
    count_leading_zeros (c, e);
    e = (e << c) << 1;     /* Shift the expo bits to the left, lose msb.  */
    c = BITS_PER_MPI_LIMB - 1 - c;

    /* Main loop.

       Make the result be pointed to alternately by XP and RP.  This
       helps us avoid block copying, which would otherwise be
       necessary with the overlap restrictions of
       _gcry_mpih_divmod. With 50% probability the result after this
       loop will be in the area originally pointed by RP (==RES->d),
       and with 50% probability in the area originally pointed to by XP. */
    for (;;)
      {
        while (c)
          {
            mpi_ptr_t tp;
            mpi_size_t xsize;

            /*mpih_mul_n(xp, rp, rp, rsize);*/
            if ( rsize < KARATSUBA_THRESHOLD )
              _gcry_mpih_sqr_n_basecase( xp, rp, rsize );
            else
              {
                if ( !tspace )
                  {
                    tsize = 2 * rsize;
                    tspace = mpi_alloc_limb_space( tsize, 0 );
                  }
                else if ( tsize < (2*rsize) )
                  {
                    _gcry_mpi_free_limb_space (tspace, 0);
                    tsize = 2 * rsize;
                    tspace = mpi_alloc_limb_space (tsize, 0 );
                  }
                _gcry_mpih_sqr_n (xp, rp, rsize, tspace);
              }

            xsize = 2 * rsize;
            if ( xsize > msize )
              {
                _gcry_mpih_divrem(xp + msize, 0, xp, xsize, mp, msize);
                xsize = msize;
              }

            tp = rp; rp = xp; xp = tp;
            rsize = xsize;

            /* To mitigate the Yarom/Falkner flush+reload cache
             * side-channel attack on the RSA secret exponent, we do
             * the multiplication regardless of the value of the
             * high-bit of E.  But to avoid this performance penalty
             * we do it only if the exponent has been stored in secure
             * memory and we can thus assume it is a secret exponent.  */
            if (esec || (mpi_limb_signed_t)e < 0)
              {
                /*mpih_mul( xp, rp, rsize, bp, bsize );*/
                if( bsize < KARATSUBA_THRESHOLD )
                  _gcry_mpih_mul ( xp, rp, rsize, bp, bsize );
                else
                  _gcry_mpih_mul_karatsuba_case (xp, rp, rsize, bp, bsize,
                                                 &karactx);

                xsize = rsize + bsize;
                if ( xsize > msize )
                  {
                    _gcry_mpih_divrem(xp + msize, 0, xp, xsize, mp, msize);
                    xsize = msize;
                  }
              }
            if ( (mpi_limb_signed_t)e < 0 )
              {
                tp = rp; rp = xp; xp = tp;
                rsize = xsize;
              }
            e <<= 1;
            c--;
          }

        i--;
        if ( i < 0 )
          break;
        e = ep[i];
        c = BITS_PER_MPI_LIMB;
      }

    /* We shifted MOD, the modulo reduction argument, left
       MOD_SHIFT_CNT steps.  Adjust the result by reducing it with the
       original MOD.

       Also make sure the result is put in RES->d (where it already
       might be, see above).  */
    if ( mod_shift_cnt )
      {
        carry_limb = _gcry_mpih_lshift( res->d, rp, rsize, mod_shift_cnt);
        rp = res->d;
        if ( carry_limb )
          {
            rp[rsize] = carry_limb;
            rsize++;
          }
      }
    else if (res->d != rp)
      {
        MPN_COPY (res->d, rp, rsize);
        rp = res->d;
      }

    if ( rsize >= msize )
      {
        _gcry_mpih_divrem(rp + msize, 0, rp, rsize, mp, msize);
        rsize = msize;
      }

    /* Remove any leading zero words from the result.  */
    if ( mod_shift_cnt )
      _gcry_mpih_rshift( rp, rp, rsize, mod_shift_cnt);
    MPN_NORMALIZE (rp, rsize);

    _gcry_mpih_release_karatsuba_ctx (&karactx );
  }

  /* Fixup for negative results.  */
  if ( negative_result && rsize )
    {
      if ( mod_shift_cnt )
        _gcry_mpih_rshift( mp, mp, msize, mod_shift_cnt);
      _gcry_mpih_sub( rp, mp, msize, rp, rsize);
      rsize = msize;
      rsign = msign;
      MPN_NORMALIZE(rp, rsize);
    }
  gcry_assert (res->d == rp);
  res->nlimbs = rsize;
  res->sign = rsign;

 leave:
  if (mp_marker)
    _gcry_mpi_free_limb_space( mp_marker, mp_nlimbs );
  if (bp_marker)
    _gcry_mpi_free_limb_space( bp_marker, bp_nlimbs );
  if (ep_marker)
    _gcry_mpi_free_limb_space( ep_marker, ep_nlimbs );
  if (xp_marker)
    _gcry_mpi_free_limb_space( xp_marker, xp_nlimbs );
  if (tspace)
    _gcry_mpi_free_limb_space( tspace, 0 );
}
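Stripped of the limb arithmetic, normalization, and side-channel countermeasures, the main loop above is left-to-right binary exponentiation: square once per exponent bit and multiply by the base when the bit is set. A plain-integer sketch under the assumption of a 64-bit modulus greater than 1 (using the GCC/clang unsigned __int128 extension for the products; not constant time and purely illustrative):

#include <stdint.h>

static uint64_t
toy_powm (uint64_t base, uint64_t expo, uint64_t mod)
{
  uint64_t result = 1;
  int bit;

  base %= mod;
  for (bit = 63; bit >= 0; bit--)
    {
      /* Square ...  */
      result = (uint64_t)(((unsigned __int128)result * result) % mod);
      /* ... and multiply when the current exponent bit is set.  */
      if ((expo >> bit) & 1)
        result = (uint64_t)(((unsigned __int128)result * base) % mod);
    }
  return result;
}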
Example 12
gcry_err_code_t
_gcry_cipher_ctr_encrypt (gcry_cipher_hd_t c,
                          unsigned char *outbuf, size_t outbuflen,
                          const unsigned char *inbuf, size_t inbuflen)
{
  size_t n;
  int i;
  gcry_cipher_encrypt_t enc_fn = c->spec->encrypt;
  unsigned int blocksize = c->spec->blocksize;
  size_t nblocks;
  unsigned int burn, nburn;

  /* Tell the compiler that we require a cipher with a 64-bit or 128-bit
   * block length, to allow better optimization of this function.  */
  if (blocksize > 16 || blocksize < 8 || blocksize & (8 - 1))
    return GPG_ERR_INV_LENGTH;

  if (outbuflen < inbuflen)
    return GPG_ERR_BUFFER_TOO_SHORT;

  burn = 0;

  /* First process a left over encrypted counter.  */
  if (c->unused)
    {
      gcry_assert (c->unused < blocksize);
      i = blocksize - c->unused;
      n = c->unused > inbuflen ? inbuflen : c->unused;
      buf_xor(outbuf, inbuf, &c->lastiv[i], n);
      c->unused -= n;
      inbuf  += n;
      outbuf += n;
      inbuflen -= n;
    }

  /* Use a bulk method if available.  */
  nblocks = inbuflen / blocksize;
  if (nblocks && c->bulk.ctr_enc)
    {
      c->bulk.ctr_enc (&c->context.c, c->u_ctr.ctr, outbuf, inbuf, nblocks);
      inbuf  += nblocks * blocksize;
      outbuf += nblocks * blocksize;
      inbuflen -= nblocks * blocksize;
    }

  /* If we don't have a bulk method, use the standard method.  We also
     use this method for a remaining partial block.  */
  if (inbuflen)
    {
      unsigned char tmp[MAX_BLOCKSIZE];

      do {
        nburn = enc_fn (&c->context.c, tmp, c->u_ctr.ctr);
        burn = nburn > burn ? nburn : burn;

        for (i = blocksize; i > 0; i--)
          {
            c->u_ctr.ctr[i-1]++;
            if (c->u_ctr.ctr[i-1] != 0)
              break;
          }

        n = blocksize < inbuflen ? blocksize : inbuflen;
        buf_xor(outbuf, inbuf, tmp, n);

        inbuflen -= n;
        outbuf += n;
        inbuf += n;
      } while (inbuflen);

      /* Save the unused bytes of the counter.  */
      c->unused = blocksize - n;
      if (c->unused)
        buf_cpy (c->lastiv+n, tmp+n, c->unused);

      wipememory (tmp, sizeof tmp);
    }

  if (burn > 0)
    _gcry_burn_stack (burn + 4 * sizeof(void *));

  return 0;
}
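The inner loop over c->u_ctr.ctr increments the counter block as one big-endian integer, starting at the least significant byte and stopping as soon as a byte does not wrap. The same idea in isolation, as a sketch with a hypothetical name:

/* Increment an N-byte big-endian counter block in place.  */
static void
toy_ctr_increment (unsigned char *ctr, unsigned int blocksize)
{
  unsigned int i;

  for (i = blocksize; i > 0; i--)
    {
      ctr[i-1]++;
      if (ctr[i-1] != 0)     /* no wrap - the carry stops here */
        break;
    }
}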
Example 13
/* Read in a seed from the random_seed file and return true if this
   was successful.

   Note: Multiple instances of applications sharing the same random
   seed file can be started in parallel, in which case they will read
   out the same pool and then race for updating it (the last update
   overwrites earlier updates).  They will differentiate only by the
   weak entropy that is added in read_seed_file based on the PID and
   clock, and up to 16 bytes of weak random non-blockingly.  The
   consequence is that the output of these different instances is
   correlated to some extent.  In a perfect attack scenario, the
   attacker can control (or at least guess) the PID and clock of the
   application, and drain the system's entropy pool to reduce the "up
   to 16 bytes" above to 0.  Then the dependencies of the initial
   states of the pools are completely known.  */
static int
read_seed_file (void)
{
  int fd;
  struct stat sb;
  unsigned char buffer[POOLSIZE];
  int n;

  gcry_assert (pool_is_locked);

  if (!seed_file_name)
	return 0;
  
#ifdef HAVE_DOSISH_SYSTEM
  fd = open( seed_file_name, O_RDONLY | O_BINARY );
#else
  fd = open( seed_file_name, O_RDONLY );
#endif
  if( fd == -1 && errno == ENOENT)
	{
	  allow_seed_file_update = 1;
	  return 0;
	}

  if (fd == -1 )
	{
	  log_info(_("can't open `%s': %s\n"), seed_file_name, strerror(errno) );
	  return 0;
	}
  if (lock_seed_file (fd, seed_file_name, 0))
	{
	  close (fd);
	  return 0;
	}
  if (fstat( fd, &sb ) )
	{
	  log_info(_("can't stat `%s': %s\n"), seed_file_name, strerror(errno) );
	  close(fd);
	  return 0;
	}
  if (!S_ISREG(sb.st_mode) )
	{
	  log_info(_("`%s' is not a regular file - ignored\n"), seed_file_name );
	  close(fd);
	  return 0;
	}
  if (!sb.st_size )
	{
	  log_info(_("note: random_seed file is empty\n") );
	  close(fd);
	  allow_seed_file_update = 1;
	  return 0;
	}
  if (sb.st_size != POOLSIZE ) 
	{
	  log_info(_("warning: invalid size of random_seed file - not used\n") );
	  close(fd);
	  return 0;
	}

  do
	{
	  n = read( fd, buffer, POOLSIZE );
	} 
  while (n == -1 && errno == EINTR );

  if (n != POOLSIZE)
	{
	  log_fatal(_("can't read `%s': %s\n"), seed_file_name,strerror(errno) );
	  close(fd);/*NOTREACHED*/
	  return 0;
	}
  
  close(fd);

  add_randomness( buffer, POOLSIZE, RANDOM_ORIGIN_INIT );
  /* add some minor entropy to the pool now (this will also force a mixing) */
  {	
	pid_t x = getpid();
	add_randomness( &x, sizeof(x), RANDOM_ORIGIN_INIT );
  }
  {
	time_t x = time(NULL);
	add_randomness( &x, sizeof(x), RANDOM_ORIGIN_INIT );
  }
  {	
	clock_t x = clock();
	add_randomness( &x, sizeof(x), RANDOM_ORIGIN_INIT );
  }

  /* And read a few bytes from our entropy source.  By using a level
   * of 0 this will not block and might not return anything with some
   * entropy drivers, however the rndlinux driver will use
   * /dev/urandom and return some stuff - Do not read too much as we
   * want to be friendly to the scarce system entropy resource. */
  read_random_source ( RANDOM_ORIGIN_INIT, 16, GCRY_WEAK_RANDOM );

  allow_seed_file_update = 1;
  return 1;
}
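The read loop above retries only on EINTR and then insists on getting all POOLSIZE bytes in a single read(); anything else is treated as fatal. A more defensive variant (an illustrative sketch, not what read_seed_file does) would also accumulate short reads:

#include <stddef.h>
#include <errno.h>
#include <unistd.h>

/* Read exactly COUNT bytes from FD into BUF.  Returns 0 on success,
   -1 on error or premature end of file.  */
static int
toy_read_exact (int fd, void *buf, size_t count)
{
  unsigned char *p = buf;

  while (count)
    {
      ssize_t n = read (fd, p, count);

      if (n < 0)
        {
          if (errno == EINTR)
            continue;        /* interrupted by a signal - retry */
          return -1;         /* real error */
        }
      if (!n)
        return -1;           /* unexpected EOF */
      p += n;
      count -= n;
    }
  return 0;
}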
Example 14
/* Read random out of the pool.  This function is the core of the
   public random functions.  Note that level GCRY_WEAK_RANDOM is no
   longer handled specially; in fact it is an alias in the API for
   level GCRY_STRONG_RANDOM.  Must be called with the pool already
   locked.  */
static void
read_pool (byte *buffer, size_t length, int level)
{
  int i;
  unsigned long *sp, *dp;
  /* The volatile is there to make sure the compiler does not optimize
	 the code away in case the getpid function is badly attributed.
	 Note that we keep a pid in a static variable as well as in a
	 stack based one; the latter is to detect ill behaving thread
	 libraries, ignoring the pool mutexes. */
  static volatile pid_t my_pid = (pid_t)(-1); 
  volatile pid_t my_pid2;

  gcry_assert (pool_is_locked);

 retry:
  /* Get our own pid, so that we can detect a fork. */
  my_pid2 = getpid ();
  if (my_pid == (pid_t)(-1))                                
	my_pid = my_pid2;
  if ( my_pid != my_pid2 )
	{
	  /* We detected a plain fork; i.e. we are now the child.  Update
		 the static pid and add some randomness. */
	  pid_t x;

	  my_pid = my_pid2;
	  x = my_pid;
	  add_randomness (&x, sizeof(x), RANDOM_ORIGIN_INIT);
	  just_mixed = 0; /* Make sure it will get mixed. */
	}

  gcry_assert (pool_is_locked);

  /* Our code does not allow extracting more than POOLSIZE bytes.
     Better check it here. */
  if (length > POOLSIZE)
	{
	  log_bug("too many random bits requested\n");
	}

  if (!pool_filled)
	{
	  if (read_seed_file() )
		pool_filled = 1;
	}

  /* For level 2 quality (key generation) we always make sure that the
	 pool has been seeded enough initially. */
  if (level == GCRY_VERY_STRONG_RANDOM && !did_initial_extra_seeding)
	{
	  size_t needed;

	  pool_balance = 0;
	  needed = length - pool_balance;
	  if (needed < POOLSIZE/2)
		needed = POOLSIZE/2;
	  else if( needed > POOLSIZE )
		BUG ();
	  read_random_source (RANDOM_ORIGIN_EXTRAPOLL, needed,
						  GCRY_VERY_STRONG_RANDOM);
	  pool_balance += needed;
	  did_initial_extra_seeding = 1;
	}

  /* For level 2 make sure that there is enough random in the pool. */
  if (level == GCRY_VERY_STRONG_RANDOM && pool_balance < length)
	{
	  size_t needed;
	  
	  if (pool_balance < 0)
		pool_balance = 0;
	  needed = length - pool_balance;
	  if (needed > POOLSIZE)
		BUG ();
	  read_random_source (RANDOM_ORIGIN_EXTRAPOLL, needed,
						  GCRY_VERY_STRONG_RANDOM);
	  pool_balance += needed;
	}

  /* Make sure the pool is filled. */
  while (!pool_filled)
	random_poll();

  /* Always do a fast random poll (we have to use the unlocked version). */
  do_fast_random_poll();
  
  /* Mix the pid in so that we are sure not to deliver the same random
     after a fork. */
  {
	pid_t apid = my_pid;
	add_randomness (&apid, sizeof (apid), RANDOM_ORIGIN_INIT);
  }

  /* Mix the pool (if add_randomness() didn't do so). */
  if (!just_mixed)
	{
	  mix_pool(rndpool);
	  rndstats.mixrnd++;
	}

  /* Create a new pool. */
  for(i=0,dp=(unsigned long*)keypool, sp=(unsigned long*)rndpool;
	  i < POOLWORDS; i++, dp++, sp++ )
	*dp = *sp + ADD_VALUE;

  /* Mix both pools. */
  mix_pool(rndpool); rndstats.mixrnd++;
  mix_pool(keypool); rndstats.mixkey++;

  /* Read the requested data.  We use a read pointer to read from a
	 different position each time.  */
  while (length--)
	{
	  *buffer++ = keypool[pool_readpos++];
	  if (pool_readpos >= POOLSIZE)
		pool_readpos = 0;
	  pool_balance--;
	}
 
  if (pool_balance < 0)
	pool_balance = 0;

  /* Clear the keypool. */
  memset (keypool, 0, POOLSIZE);

  /* We need to detect whether a fork has happened.  A fork might have
	 an identical pool and thus the child and the parent could emit
	 the very same random number.  This test here is to detect forks
	 in a multi-threaded process.  It does not work with all thread
	 implementations in particular not with pthreads.  However it is
	 good enough for GNU Pth. */
  if ( getpid () != my_pid2 )
	{
	  pid_t x = getpid();
	  add_randomness (&x, sizeof(x), RANDOM_ORIGIN_INIT);
	  just_mixed = 0; /* Make sure it will get mixed. */
	  my_pid = x;     /* Also update the static pid. */
	  goto retry;
	}
}
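The MY_PID/MY_PID2 bookkeeping above exists only to notice that a fork happened between or during calls. Reduced to its essence (a sketch with hypothetical names, without the volatile qualifiers and the re-mixing the real code does):

#include <sys/types.h>
#include <unistd.h>

/* Return 1 if the process id changed since the last call, i.e. we are
   running in a freshly forked child.  */
static int
toy_fork_detected (void)
{
  static pid_t cached_pid = (pid_t)(-1);
  pid_t now = getpid ();

  if (cached_pid == (pid_t)(-1))
    cached_pid = now;        /* first call: remember our pid */
  if (cached_pid != now)
    {
      cached_pid = now;      /* we are the child of a fork */
      return 1;
    }
  return 0;
}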
Example 15
gcry_error_t
gcry_cipher_ctl( gcry_cipher_hd_t h, int cmd, void *buffer, size_t buflen)
{
  gcry_err_code_t rc = GPG_ERR_NO_ERROR;

  switch (cmd)
    {
    case GCRYCTL_SET_KEY:  /* Deprecated; use gcry_cipher_setkey.  */
      rc = cipher_setkey( h, buffer, buflen );
      break;

    case GCRYCTL_SET_IV:   /* Deprecated; use gcry_cipher_setiv.  */
      cipher_setiv( h, buffer, buflen );
      break;

    case GCRYCTL_RESET:
      cipher_reset (h);
      break;

    case GCRYCTL_CFB_SYNC:
      cipher_sync( h );
      break;

    case GCRYCTL_SET_CBC_CTS:
      if (buflen)
        {
          if (h->flags & GCRY_CIPHER_CBC_MAC)
            rc = GPG_ERR_INV_FLAG;
          else
            h->flags |= GCRY_CIPHER_CBC_CTS;
        }
      else
        h->flags &= ~GCRY_CIPHER_CBC_CTS;
      break;

    case GCRYCTL_SET_CBC_MAC:
      if (buflen)
        {
          if (h->flags & GCRY_CIPHER_CBC_CTS)
            rc = GPG_ERR_INV_FLAG;
          else
            h->flags |= GCRY_CIPHER_CBC_MAC;
        }
      else
        h->flags &= ~GCRY_CIPHER_CBC_MAC;
      break;

    case GCRYCTL_DISABLE_ALGO:
      /* This command expects NULL for H and BUFFER to point to an
         integer with the algo number.  */
      if( h || !buffer || buflen != sizeof(int) )
	return gcry_error (GPG_ERR_CIPHER_ALGO);
      disable_cipher_algo( *(int*)buffer );
      break;

    case GCRYCTL_SET_CTR: /* Deprecated; use gcry_cipher_setctr.  */
      rc = gpg_err_code (_gcry_cipher_setctr (h, buffer, buflen));
      break;

    case 61:  /* Disable weak key detection (private).  */
      if (h->extraspec->set_extra_info)
        rc = h->extraspec->set_extra_info
          (&h->context.c, CIPHER_INFO_NO_WEAK_KEY, NULL, 0);
      else
        rc = GPG_ERR_NOT_SUPPORTED;
      break;

    case 62: /* Return current input vector (private).  */
      /* This is the input block as used in CFB and OFB mode which has
         initially been set as IV.  The returned format is:
           1 byte  Actual length of the block in bytes.
           n byte  The block.
         If the provided buffer is too short, an error is returned. */
      if (buflen < (1 + h->cipher->blocksize))
        rc = GPG_ERR_TOO_SHORT;
      else
        {
          unsigned char *ivp;
          unsigned char *dst = buffer;
          int n = h->unused;

          if (!n)
            n = h->cipher->blocksize;
          gcry_assert (n <= h->cipher->blocksize);
          *dst++ = n;
          ivp = h->u_iv.iv + h->cipher->blocksize - n;
          while (n--)
            *dst++ = *ivp++;
        }
      break;

    default:
      rc = GPG_ERR_INV_OP;
    }

  return gcry_error (rc);
}
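For the GCRYCTL_SET_CBC_CTS branch above, applications normally go through the gcry_cipher_cts() convenience macro from gcrypt.h, which expands to the gcry_cipher_ctl() call shown. A hypothetical usage sketch, assuming a handle already opened in CBC mode:

#include <gcrypt.h>

/* Enable ciphertext stealing on HD; fails with GPG_ERR_INV_FLAG if
   CBC-MAC is already enabled on the same handle.  */
static gcry_error_t
toy_enable_cts (gcry_cipher_hd_t hd)
{
  return gcry_cipher_cts (hd, 1);
}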
Example 16
/*
   Mix the pool:

   |........blocks*20byte........|20byte|..44byte..|
   <..44byte..>           <20byte> 
		|                    |
		|                    +------+
		+---------------------------|----------+
									v          v
   |........blocks*20byte........|20byte|..44byte..|
								 <.....64bytes.....>   
										 |
	  +----------------------------------+
	 Hash
	  v
   |.............................|20byte|..44byte..|
   <20byte><20byte><..44byte..>
	  |                |
	  |                +---------------------+
	  +-----------------------------+        |
									v        v
   |.............................|20byte|..44byte..|
								 <.....64byte......>
										|
			  +-------------------------+
			 Hash
			  v
   |.............................|20byte|..44byte..|
   <20byte><20byte><..44byte..>

   and so on until we did this for all blocks. 

   To better protect against implementation errors in this code, we
   xor a digest of the entire pool into the pool before mixing.

   Note: this function must only be called with a locked pool.
 */
static void
mix_pool(unsigned char *pool)
{
  static unsigned char failsafe_digest[DIGESTLEN];
  static int failsafe_digest_valid;

  unsigned char *hashbuf = pool + POOLSIZE;
  unsigned char *p, *pend;
  int i, n;
  RMD160_CONTEXT md;

#if DIGESTLEN != 20
#error must have a digest length of 20 for ripe-md-160
#endif

  gcry_assert (pool_is_locked);
  _gcry_rmd160_init( &md );

  /* Loop over the pool.  */
  pend = pool + POOLSIZE;
  memcpy(hashbuf, pend - DIGESTLEN, DIGESTLEN );
  memcpy(hashbuf+DIGESTLEN, pool, BLOCKLEN-DIGESTLEN);
  _gcry_rmd160_mixblock( &md, hashbuf);
  memcpy(pool, hashbuf, 20 );

  if (failsafe_digest_valid && pool == rndpool)
	{
	  for (i=0; i < 20; i++)
		pool[i] ^= failsafe_digest[i];
	}
  
  p = pool;
  for (n=1; n < POOLBLOCKS; n++)
	{
	  memcpy (hashbuf, p, DIGESTLEN);

	  p += DIGESTLEN;
	  if (p+DIGESTLEN+BLOCKLEN < pend)
		memcpy (hashbuf+DIGESTLEN, p+DIGESTLEN, BLOCKLEN-DIGESTLEN);
	  else 
		{
		  unsigned char *pp = p + DIGESTLEN;
		  
		  for (i=DIGESTLEN; i < BLOCKLEN; i++ )
			{
			  if ( pp >= pend )
				pp = pool;
			  hashbuf[i] = *pp++;
			}
		}
	  
	  _gcry_rmd160_mixblock ( &md, hashbuf);
	  memcpy(p, hashbuf, 20 );
	}

  /* Our hash implementation only leaves small parts (64 bytes) of the
     pool on the stack, so it is okay not to require secure memory
     here.  Before we use this pool, it will be copied to the help
     buffer anyway. */
  if ( pool == rndpool)
    {
      _gcry_rmd160_hash_buffer (failsafe_digest, pool, POOLSIZE);
      failsafe_digest_valid = 1;
    }

  _gcry_burn_stack (384); /* for the rmd160_mixblock(), rmd160_hash_buffer */
}
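The only delicate step in the loop above is assembling a BLOCKLEN-byte hash window when it straddles the end of the pool, which is what the pp/pend bookkeeping handles. That circular copy looks like this in isolation (a sketch with hypothetical sizes and names):

#include <stddef.h>

#define TOY_POOLSZ 600       /* hypothetical pool size */

/* Copy LEN bytes starting at OFFSET out of a circular pool into DST,
   wrapping back to the start of the pool when the end is reached.  */
static void
toy_copy_wrapping (unsigned char *dst, const unsigned char *pool,
                   size_t offset, size_t len)
{
  size_t i;

  for (i = 0; i < len; i++)
    dst[i] = pool[(offset + i) % TOY_POOLSZ];
}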
Example 17
static void
do_fast_random_poll (void)
{
  gcry_assert (pool_is_locked);

  rndstats.fastpolls++;

  if (fast_gather_fnc)
	fast_gather_fnc (add_randomness, RANDOM_ORIGIN_FASTPOLL);

  /* Continue with the generic functions. */
#if HAVE_GETHRTIME
  {	
	hrtime_t tv;
	tv = gethrtime();
	add_randomness( &tv, sizeof(tv), RANDOM_ORIGIN_FASTPOLL );
  }
#elif HAVE_GETTIMEOFDAY
  {	
	struct timeval tv;
	if( gettimeofday( &tv, NULL ) )
	  BUG();
	add_randomness( &tv.tv_sec, sizeof(tv.tv_sec), RANDOM_ORIGIN_FASTPOLL );
	add_randomness( &tv.tv_usec, sizeof(tv.tv_usec), RANDOM_ORIGIN_FASTPOLL );
  }
#elif HAVE_CLOCK_GETTIME
  {
	struct timespec tv;
	if( clock_gettime( CLOCK_REALTIME, &tv ) == -1 )
	  BUG();
	add_randomness( &tv.tv_sec, sizeof(tv.tv_sec), RANDOM_ORIGIN_FASTPOLL );
	add_randomness( &tv.tv_nsec, sizeof(tv.tv_nsec), RANDOM_ORIGIN_FASTPOLL );
  }
#else /* use times */
# ifndef HAVE_DOSISH_SYSTEM
  {
	struct tms buf;
	times( &buf );
	add_randomness( &buf, sizeof buf, RANDOM_ORIGIN_FASTPOLL );
  }
# endif
#endif

#ifdef HAVE_GETRUSAGE
# ifdef RUSAGE_SELF
  {	
	struct rusage buf;
	/* QNX/Neutrino does return ENOSYS - so we just ignore it and add
	   whatever is in buf.  In a chroot environment it might not work
	   at all (i.e. because /proc/ is not accessible), so we better
	   ignore all error codes and hope for the best. */
	getrusage (RUSAGE_SELF, &buf );
	add_randomness( &buf, sizeof buf, RANDOM_ORIGIN_FASTPOLL );
	memset( &buf, 0, sizeof buf );
  }
# else /*!RUSAGE_SELF*/
#  ifdef __GCC__
#   warning There is no RUSAGE_SELF on this system
#  endif
# endif /*!RUSAGE_SELF*/
#endif /*HAVE_GETRUSAGE*/

  /* Time and clock are available on all systems - so we better do it
	 just in case one of the above functions didn't work.  */
  {
	time_t x = time(NULL);
	add_randomness( &x, sizeof(x), RANDOM_ORIGIN_FASTPOLL );
  }
  {	
	clock_t x = clock();
	add_randomness( &x, sizeof(x), RANDOM_ORIGIN_FASTPOLL );
  }

  /* If the system features a fast hardware RNG, read some bytes from
	 there.  */
  _gcry_rndhw_poll_fast (add_randomness, RANDOM_ORIGIN_FASTPOLL);
}
Example 18
/****************
 * RES = (BASE[0] ^ EXP[0]) * (BASE[1] ^ EXP[1]) * ... * (BASE[k-1] ^ EXP[k-1]) mod M
 */
void
_gcry_mpi_mulpowm( gcry_mpi_t res, gcry_mpi_t *basearray, gcry_mpi_t *exparray, gcry_mpi_t m)
{
    int k;	/* number of elements */
    int t;	/* bit size of largest exponent */
    int i, j, idx;
    gcry_mpi_t *G;	/* table with precomputed values of size 2^k */
    gcry_mpi_t tmp;
#ifdef USE_BARRETT
    gcry_mpi_t barrett_y, barrett_r1, barrett_r2;
    int barrett_k;
#endif

    for(k=0; basearray[k]; k++ )
	;
    gcry_assert(k);
    for(t=0, i=0; (tmp=exparray[i]); i++ ) {
	/*log_mpidump("exp: ", tmp );*/
	j = mpi_get_nbits(tmp);
	if( j > t )
	    t = j;
    }
    /*log_mpidump("mod: ", m );*/
    gcry_assert (i==k);
    gcry_assert (t);
    gcry_assert (k < 10);

    G = xcalloc( (1<<k) , sizeof *G );
#ifdef USE_BARRETT
    barrett_y = init_barrett( m, &barrett_k, &barrett_r1, &barrett_r2 );
#endif
    /* and calculate */
    tmp =  mpi_alloc( mpi_get_nlimbs(m)+1 );
    mpi_set_ui( res, 1 );
    for(i = 1; i <= t; i++ ) {
	barrett_mulm(tmp, res, res, m, barrett_y, barrett_k,
				       barrett_r1, barrett_r2 );
	idx = build_index( exparray, k, i, t );
	gcry_assert (idx >= 0 && idx < (1<<k));
	if( !G[idx] ) {
	    if( !idx )
		 G[0] = mpi_alloc_set_ui( 1 );
	    else {
		for(j=0; j < k; j++ ) {
		    if( (idx & (1<<j) ) ) {
			if( !G[idx] )
			    G[idx] = mpi_copy( basearray[j] );
			else
			    barrett_mulm( G[idx], G[idx], basearray[j],
					       m, barrett_y, barrett_k, barrett_r1, barrett_r2	);
		    }
		}
		if( !G[idx] )
		    G[idx] = mpi_alloc(0);
	    }
	}
	barrett_mulm(res, tmp, G[idx], m, barrett_y, barrett_k, barrett_r1, barrett_r2	);
    }

    /* cleanup */
    mpi_free(tmp);
#ifdef USE_BARRETT
    mpi_free(barrett_y);
    mpi_free(barrett_r1);
    mpi_free(barrett_r2);
#endif
    for(i=0; i < (1<<k); i++ )
	mpi_free(G[i]);
    xfree(G);
}
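The G[] table and build_index() implement simultaneous exponentiation: G[idx] holds the product of all bases whose index bit is set in idx, and at every bit position the result is squared once and then multiplied by the single table entry selected by the current bits of all exponents. A plain 64-bit sketch of the same idea (hypothetical names, GCC/clang unsigned __int128 for the products, K assumed small as the k < 10 assertion above requires, modulus greater than 1):

#include <stdint.h>

static uint64_t
toy_mulm (uint64_t a, uint64_t b, uint64_t m)
{
  return (uint64_t)(((unsigned __int128)a * b) % m);
}

/* Compute base[0]^exp[0] * ... * base[k-1]^exp[k-1] mod m.  */
static uint64_t
toy_mulpowm (const uint64_t *base, const uint64_t *exp, int k, uint64_t m)
{
  uint64_t g[1 << 10];       /* 2^k precomputed products; k < 10 assumed */
  uint64_t res = 1;
  int i, j, bit;

  /* g[i] is the product of all base[j] whose bit j is set in i.  */
  for (i = 0; i < (1 << k); i++)
    {
      g[i] = 1;
      for (j = 0; j < k; j++)
        if (i & (1 << j))
          g[i] = toy_mulm (g[i], base[j] % m, m);
    }

  for (bit = 63; bit >= 0; bit--)
    {
      int idx = 0;

      res = toy_mulm (res, res, m);            /* one squaring per bit */
      for (j = 0; j < k; j++)                  /* the build_index() analogue */
        idx |= (int)((exp[j] >> bit) & 1) << j;
      if (idx)
        res = toy_mulm (res, g[idx], m);       /* one table multiply */
    }
  return res;
}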
Example 19
/* Run a selftest for hash algorithm ALGO.  If the resulting digest
   matches EXPECT/EXPECTLEN and everything else is fine as well,
   return NULL.  If an error occurs, return a static text string
   describing the error.

   DATAMODE controls what will be hashed according to this table:

     0 - Hash the supplied DATA of DATALEN.
     1 - Hash one million times an 'a'.  DATA and DATALEN are ignored.

*/
const char *
_gcry_hash_selftest_check_one (int algo,
                               int datamode, const void *data, size_t datalen,
                               const void *expect, size_t expectlen)
{
  const char *result = NULL;
  gcry_error_t err = 0;
  gcry_md_hd_t hd;
  unsigned char *digest;
  char aaa[1000];
  int xof = 0;

  if (_gcry_md_get_algo_dlen (algo) == 0)
    xof = 1;
  else if (_gcry_md_get_algo_dlen (algo) != expectlen)
    return "digest size does not match expected size";

  err = _gcry_md_open (&hd, algo, 0);
  if (err)
    return "gcry_md_open failed";

  switch (datamode)
    {
    case 0:
      _gcry_md_write (hd, data, datalen);
      break;

    case 1: /* Hash one million times an "a". */
      {
        int i;

        /* Write in odd size chunks so that we test the buffering.  */
        memset (aaa, 'a', 1000);
        for (i = 0; i < 1000; i++)
          _gcry_md_write (hd, aaa, 1000);
      }
      break;

    default:
      result = "invalid DATAMODE";
    }

  if (!result)
    {
      if (!xof)
	{
	  digest = _gcry_md_read (hd, algo);

	  if ( memcmp (digest, expect, expectlen) )
	    result = "digest mismatch";
	}
      else
	{
	  gcry_assert(expectlen <= sizeof(aaa));

	  err = _gcry_md_extract (hd, algo, aaa, expectlen);
	  if (err)
	    result = "error extracting output from XOF";
	  else if ( memcmp (aaa, expect, expectlen) )
	    result = "digest mismatch";
	}
    }

  _gcry_md_close (hd);

  return result;
}
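A hypothetical usage sketch of this helper, checking SHA-1 against the published "abc" test vector (digest a9993e364706816aba3e25717850c26c9cd0d89d); a NULL result means the selftest passed:

/* GCRY_MD_SHA1 comes from gcrypt.h.  */
static const char *
toy_run_sha1_selftest (void)
{
  static const unsigned char expect[20] =
    { 0xa9, 0x99, 0x3e, 0x36, 0x47, 0x06, 0x81, 0x6a, 0xba, 0x3e,
      0x25, 0x71, 0x78, 0x50, 0xc2, 0x6c, 0x9c, 0xd0, 0xd8, 0x9d };

  return _gcry_hash_selftest_check_one (GCRY_MD_SHA1, 0, "abc", 3,
                                        expect, sizeof expect);
}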
Example 20
static void
slow_gatherer ( void (*add)(const void*, size_t, enum random_origins),
                enum random_origins requester )
{
  static int is_initialized = 0;
  static int is_workstation = 1;
  HANDLE hDevice;
  DWORD dwType, dwSize, dwResult;
  ULONG ulSize;
  int drive_no, status;
  int no_results = 0;
  void *buffer;

  if ( !is_initialized )
    {
      HKEY hKey;

      if ( debug_me )
        log_debug ("rndw32#slow_gatherer: init toolkit\n" );
      /* Find out whether this is an NT server or workstation if necessary */
      if (RegOpenKeyEx (HKEY_LOCAL_MACHINE,
                        "SYSTEM\\CurrentControlSet\\Control\\ProductOptions",
                        0, KEY_READ, &hKey) == ERROR_SUCCESS)
        {
          BYTE szValue[32 + 8];
          dwSize = 32;

          if ( debug_me )
            log_debug ("rndw32#slow_gatherer: check product options\n" );

          status = RegQueryValueEx (hKey, "ProductType", 0, NULL,
                                    szValue, &dwSize);
          if (status == ERROR_SUCCESS && stricmp (szValue, "WinNT"))
            {
              /* Note: There are (at least) three cases for ProductType:
                 WinNT = NT Workstation, ServerNT = NT Server, LanmanNT =
                 NT Server acting as a Domain Controller.  */
              is_workstation = 0;
              if ( debug_me )
                log_debug ("rndw32: this is a NT server\n");
            }
          RegCloseKey (hKey);
        }

      /* The following are fixed for the lifetime of the process so we
         only add them once */
      /* readPnPData ();  - we have not implemented that.  */

      /* Initialize the NetAPI32 function pointers if necessary */
      hNetAPI32 = LoadLibrary ("NETAPI32.DLL");
      if (hNetAPI32)
        {
          if (debug_me)
            log_debug ("rndw32#slow_gatherer: netapi32 loaded\n" );
          pNetStatisticsGet = (NETSTATISTICSGET)
            GetProcAddress (hNetAPI32, "NetStatisticsGet");
          pNetApiBufferSize = (NETAPIBUFFERSIZE)
            GetProcAddress (hNetAPI32, "NetApiBufferSize");
          pNetApiBufferFree = (NETAPIBUFFERFREE)
            GetProcAddress (hNetAPI32, "NetApiBufferFree");

          if (!pNetStatisticsGet || !pNetApiBufferSize || !pNetApiBufferFree)
            {
              FreeLibrary (hNetAPI32);
              hNetAPI32 = NULL;
              log_debug ("rndw32: No NETAPI found\n" );
            }
        }

      /* Initialize the NT kernel native API function pointers if necessary */
      hNTAPI = GetModuleHandle ("NTDll.dll");
      if (hNTAPI)
        {
          /* Get a pointer to the NT native information query functions */
          pNtQuerySystemInformation = (NTQUERYSYSTEMINFORMATION)
            GetProcAddress (hNTAPI, "NtQuerySystemInformation");
          pNtQueryInformationProcess = (NTQUERYINFORMATIONPROCESS)
            GetProcAddress (hNTAPI, "NtQueryInformationProcess");
          pNtPowerInformation = (NTPOWERINFORMATION)
            GetProcAddress(hNTAPI, "NtPowerInformation");

          if (!pNtQuerySystemInformation || !pNtQueryInformationProcess)
            hNTAPI = NULL;
        }


      is_initialized = 1;
    }

  read_system_rng ( add, requester );
  read_mbm_data ( add, requester );

  /* Get network statistics.    Note: Both NT Workstation and NT Server by
     default will be running both the workstation and server services.  The
     heuristic below is probably useful though on the assumption that the
     majority of the network traffic will be via the appropriate service.
     In any case the network statistics return almost no randomness.  */
  {
    LPBYTE lpBuffer;

    if (hNetAPI32
        && !pNetStatisticsGet (NULL,
                               is_workstation ? L"LanmanWorkstation" :
                               L"LanmanServer", 0, 0, &lpBuffer))
      {
        if ( debug_me )
          log_debug ("rndw32#slow_gatherer: get netstats\n" );
        pNetApiBufferSize (lpBuffer, &dwSize);
        (*add) ( lpBuffer, dwSize, requester );
        pNetApiBufferFree (lpBuffer);
      }
  }

  /* Get disk I/O statistics for all the hard drives.  100 is an
     arbitrary failsafe limit.  */
  for (drive_no = 0; drive_no < 100 ; drive_no++)
    {
      char diskPerformance[SIZEOF_DISK_PERFORMANCE_STRUCT + 8];
      char szDevice[50];

      /* Check whether we can access this device.  */
      snprintf (szDevice, sizeof szDevice, "\\\\.\\PhysicalDrive%d",
                drive_no);
      hDevice = CreateFile (szDevice, 0, FILE_SHARE_READ | FILE_SHARE_WRITE,
                            NULL, OPEN_EXISTING, 0, NULL);
      if (hDevice == INVALID_HANDLE_VALUE)
        break; /* No more drives.  */

      /* Note: This only works if you have turned on the disk performance
         counters with 'diskperf -y'.  These counters are off by default. */
      dwSize = sizeof diskPerformance;
      if (DeviceIoControl (hDevice, IOCTL_DISK_PERFORMANCE, NULL, 0,
                           diskPerformance, SIZEOF_DISK_PERFORMANCE_STRUCT,
                           &dwSize, NULL))
        {
          if ( debug_me )
            log_debug ("rndw32#slow_gatherer: iostat drive %d\n",
                       drive_no);
          (*add) (diskPerformance, dwSize, requester);
        }
      else
        {
          log_info ("NOTE: you should run 'diskperf -y' "
                    "to enable the disk statistics\n");
        }
      CloseHandle (hDevice);
    }

  /* In theory we should be using the Win32 performance query API to obtain
     unpredictable data from the system, however this is so unreliable (see
     the multiple sets of comments in registryPoll()) that it's too risky
     to rely on it except as a fallback in emergencies.  Instead, we rely
     mostly on the NT native API function NtQuerySystemInformation(), which
     has the dual advantages that it doesn't have as many (known) problems
     as the Win32 equivalent and that it doesn't access the data indirectly
     via pseudo-registry keys, which means that it's much faster.  Note
     that the Win32 equivalent actually works almost all of the time, the
     problem is that on one or two systems it can fail in strange ways that
     are never the same and can't be reproduced on any other system, which
     is why we use the native API here.  Microsoft officially documented
     this function in early 2003, so it'll be fairly safe to use.  */
  if ( !hNTAPI )
    {
      registry_poll (add, requester);
      return;
    }


  /* Scan the first 64 possible information types (we don't bother with
     increasing the buffer size as we do with the Win32 version of the
     performance data read, we may miss a few classes but it's no big deal).
     This scan typically yields around 20 pieces of data, there's nothing
     in the range 65...128 so chances are there won't be anything above
     there either.  */
  buffer = gcry_xmalloc (PERFORMANCE_BUFFER_SIZE);
  for (dwType = 0; dwType < 64; dwType++)
    {
      switch (dwType)
        {
          /* ID 17 = SystemObjectInformation hangs on some win2k systems.  */
        case 17:
          if (system_is_w2000)
            continue;
          break;

          /* Some information types are write-only (the IDs are shared with
             a set-information call), we skip these.  */
        case 26: case 27: case 38: case 46: case 47: case 48: case 52:
          continue;

          /* ID 53 = SystemSessionProcessInformation reads input from the
             output buffer, which has to contain a session ID and pointer
             to the actual buffer in which to store the session information.
             Because this isn't a standard query, we skip this.  */
        case  53:
          continue;
        }

      /* Query the info for this ID.  Some results (for example for
         ID = 6, SystemCallCounts) are only available in checked builds
         of the kernel.  A smaller subclass of results requires that
         certain system config flags be set, for example
         SystemObjectInformation requires that the
         FLG_MAINTAIN_OBJECT_TYPELIST be set in NtGlobalFlags.  To avoid
         having to special-case all of these, we try reading each one and
         only use those for which we get a success status.  */
      dwResult = pNtQuerySystemInformation (dwType, buffer,
                                            PERFORMANCE_BUFFER_SIZE - 2048,
                                            &ulSize);
      if (dwResult != ERROR_SUCCESS)
        continue;

      /* Some calls (e.g. ID = 23, SystemProcessorStatistics, and ID = 24,
         SystemDpcInformation) incorrectly return a length of zero, so we
         manually adjust the length to the correct value.  */
      if ( !ulSize )
        {
          if (dwType == 23)
            ulSize = 6 * sizeof (ULONG);
          else if (dwType == 24)
            ulSize = 5 * sizeof (ULONG);
        }

      /* If we got some data back, add it to the entropy pool.  */
      if (ulSize > 0 && ulSize <= PERFORMANCE_BUFFER_SIZE - 2048)
        {
          if (debug_me)
            log_debug ("rndw32#slow_gatherer: %lu bytes from sysinfo %ld\n",
                       ulSize, dwType);
          (*add) (buffer, ulSize, requester);
          no_results++;
        }
    }

  /* Now we would do the same for the process information.  This
     call is rather ugly in that it requires an exact length
     match for the data returned, failing with a
     STATUS_INFO_LENGTH_MISMATCH error code (0xC0000004) if the
     length isn't an exact match.  It requires a compiler to handle
     complex nested structs, alignment issues, and so on, and
     without the headers in which the entries are declared it's
     almost impossible to do.  Thus we don't.  */


  /* Finally, do the same for the system power status information.  There
     are only a limited number of useful information types available so we
     restrict ourselves to the useful types.  In addition since this
     function doesn't return length information, we have to hardcode in
     length data.  */
  if (pNtPowerInformation)
    {
      static const struct { int type; int size; } powerInfo[] = {
        { 0, 128 },     /* SystemPowerPolicyAc */
        { 1, 128 },     /* SystemPowerPolicyDc */
        { 4, 64 },      /* SystemPowerCapabilities */
        { 5, 48 },      /* SystemBatteryState */
        { 11, 48 },     /* ProcessorInformation */
        { 12, 24 },     /* SystemPowerInformation */
        { -1, -1 }
      };
      int i;

      /* The 100 is a failsafe limit.  */
      for (i = 0; powerInfo[i].type != -1 && i < 100; i++ )
        {
          /* Query the info for this ID */
          dwResult = pNtPowerInformation (powerInfo[i].type, NULL, 0, buffer,
                                          PERFORMANCE_BUFFER_SIZE - 2048);
          if (dwResult != ERROR_SUCCESS)
            continue;
          if (debug_me)
            log_debug ("rndw32#slow_gatherer: %u bytes from powerinfo %d\n",
                       powerInfo[i].size, i);
          (*add) (buffer, powerInfo[i].size, requester);
          no_results++;
        }
      gcry_assert (i < 100);
    }
  gcry_free (buffer);

  /* We couldn't get enough results from the kernel, fall back to the
     somewhat troublesome registry poll.  */
  if (no_results < 15)
    registry_poll (add, requester);
}
Example 21
void
_gcry_rndw32_gather_random_fast (void (*add)(const void*, size_t,
                                             enum random_origins),
                                 enum random_origins origin)
{
  static int addedFixedItems = 0;

  if ( debug_me )
    log_debug ("rndw32#gather_random_fast: ori=%d\n", origin );

  /* Get various basic pieces of system information: Handle of active
     window, handle of window with mouse capture, handle of clipboard
     owner, handle of start of clipboard viewer list, pseudohandle of
     current process, current process ID, pseudohandle of current
     thread, current thread ID, handle of desktop window, handle of
     window with keyboard focus, whether system queue has any events,
     cursor position for last message, 1 ms time for last message,
     handle of window with clipboard open, handle of process heap,
     handle of procs window station, types of events in input queue,
     and milliseconds since Windows was started.  */

  {
    byte buffer[20*sizeof(ulong)], *bufptr;

    bufptr = buffer;
#define ADD(f)  do { ulong along = (ulong)(f);                  \
                     memcpy (bufptr, &along, sizeof (along) );  \
                     bufptr += sizeof (along);                  \
                   } while (0)

    ADD ( GetActiveWindow ());
    ADD ( GetCapture ());
    ADD ( GetClipboardOwner ());
    ADD ( GetClipboardViewer ());
    ADD ( GetCurrentProcess ());
    ADD ( GetCurrentProcessId ());
    ADD ( GetCurrentThread ());
    ADD ( GetCurrentThreadId ());
    ADD ( GetDesktopWindow ());
    ADD ( GetFocus ());
    ADD ( GetInputState ());
    ADD ( GetMessagePos ());
    ADD ( GetMessageTime ());
    ADD ( GetOpenClipboardWindow ());
    ADD ( GetProcessHeap ());
    ADD ( GetProcessWindowStation ());
    ADD ( GetQueueStatus (QS_ALLEVENTS));
    ADD ( GetTickCount ());

    gcry_assert ( bufptr-buffer < sizeof (buffer) );
    (*add) ( buffer, bufptr-buffer, origin );
#undef ADD
  }

  /* Get multiword system information: Current caret position, current
     mouse cursor position.  */
  {
    POINT point;

    GetCaretPos (&point);
    (*add) ( &point, sizeof (point), origin );
    GetCursorPos (&point);
    (*add) ( &point, sizeof (point), origin );
  }

  /* Get percent of memory in use, bytes of physical memory, bytes of
     free physical memory, bytes in paging file, free bytes in paging
     file, user bytes of address space, and free user bytes.  */
  {
    MEMORYSTATUS memoryStatus;

    memoryStatus.dwLength = sizeof (MEMORYSTATUS);
    GlobalMemoryStatus (&memoryStatus);
    (*add) ( &memoryStatus, sizeof (memoryStatus), origin );
  }

  /* Get thread and process creation time, exit time, time in kernel
     mode, and time in user mode in 100ns intervals.  */
  {
    HANDLE handle;
    FILETIME creationTime, exitTime, kernelTime, userTime;
    DWORD minimumWorkingSetSize, maximumWorkingSetSize;

    handle = GetCurrentThread ();
    GetThreadTimes (handle, &creationTime, &exitTime,
                    &kernelTime, &userTime);
    (*add) ( &creationTime, sizeof (creationTime), origin );
    (*add) ( &exitTime, sizeof (exitTime), origin );
    (*add) ( &kernelTime, sizeof (kernelTime), origin );
    (*add) ( &userTime, sizeof (userTime), origin );

    handle = GetCurrentProcess ();
    GetProcessTimes (handle, &creationTime, &exitTime,
                     &kernelTime, &userTime);
    (*add) ( &creationTime, sizeof (creationTime), origin );
    (*add) ( &exitTime, sizeof (exitTime), origin );
    (*add) ( &kernelTime, sizeof (kernelTime), origin );
    (*add) ( &userTime, sizeof (userTime), origin );

    /* Get the minimum and maximum working set size for the current
       process.  */
    GetProcessWorkingSetSize (handle, &minimumWorkingSetSize,
                              &maximumWorkingSetSize);
    (*add) ( &minimumWorkingSetSize,
             sizeof (minimumWorkingSetSize), origin );
    (*add) ( &maximumWorkingSetSize,
             sizeof (maximumWorkingSetSize), origin );
  }


  /* The following are fixed for the lifetime of the process so we only
   * add them once */
  if (!addedFixedItems)
    {
      STARTUPINFO startupInfo;

      /* Get name of desktop, console window title, new window
         position and size, window flags, and handles for stdin,
         stdout, and stderr.  */
      startupInfo.cb = sizeof (STARTUPINFO);
      GetStartupInfo (&startupInfo);
      (*add) ( &startupInfo, sizeof (STARTUPINFO), origin );
      addedFixedItems = 1;
    }

  /* The performance of QPC varies depending on the architecture it's
     running on and on the OS, the MS documentation is vague about the
     details because it varies so much.  Under Win9x/ME it reads the
     1.193180 MHz PIC timer.  Under NT/Win2K/XP it may or may not read the
     64-bit TSC depending on the HAL and assorted other circumstances,
     generally on machines with a uniprocessor HAL
     KeQueryPerformanceCounter() uses a 3.579545MHz timer and on machines
     with a multiprocessor or APIC HAL it uses the TSC (the exact time
     source is controlled by the HalpUse8254 flag in the kernel).  That
     choice of time sources is somewhat peculiar because on a
     multiprocessor machine it's theoretically possible to get completely
     different TSC readings depending on which CPU you're currently
     running on, while for uniprocessor machines it's not a problem.
     However, the kernel appears to synchronise the TSCs across CPUs at
     boot time (it resets the TSC as part of its system init), so this
     shouldn't really be a problem.  Under WinCE it's completely platform-
     dependent; if there's no hardware performance counter available, it
     uses the 1ms system timer.

     Another feature of the TSC (although it doesn't really affect us here)
     is that mobile CPUs will turn off the TSC when they idle, Pentiums
     will change the rate of the counter when they clock-throttle (to
     match the current CPU speed), and hyperthreading Pentiums will turn
     it off when both threads are idle (this more or less makes sense,
     since the CPU will be in the halted state and not executing any
     instructions to count).

     To make things unambiguous, we detect a CPU new enough to call RDTSC
     directly by checking for CPUID capabilities, and fall back to QPC if
     this isn't present.  */
#ifdef __GNUC__
/*   FIXME: We would need to implement the CPU feature tests first.  */
/*   if (cpu_has_feature_rdtsc) */
/*     { */
/*       uint32_t lo, hi; */
      /* We cannot use "=A", since this would use %rax on x86_64. */
/*       __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); */
      /* Ignore high 32 bits, which are >1s res.  */
/*       (*add) (&lo, 4, origin ); */
/*     } */
/*   else */
#endif /*!__GNUC__*/
    {
      LARGE_INTEGER performanceCount;

      if (QueryPerformanceCounter (&performanceCount))
        {
          if ( debug_me )
            log_debug ("rndw32#gather_random_fast: perf data\n");
          (*add) (&performanceCount, sizeof (performanceCount), origin);
        }
      else
        {
          /* Millisecond accuracy at best... */
          DWORD aword = GetTickCount ();
          (*add) (&aword, sizeof (aword), origin );
        }
    }


}
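Filled in, the commented-out RDTSC path above would look roughly like the sketch below, guarded only by compiler and architecture since the CPUID feature test mentioned in the FIXME is not implemented here (hypothetical helper name):

#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
/* Return the low 32 bits of the time stamp counter; the high half is
   discarded because it changes slower than once per second.  */
static unsigned int
toy_read_tsc_low (void)
{
  unsigned int lo, hi;

  /* "=A" cannot be used here since it would mean %rax on x86_64.  */
  __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
  (void)hi;
  return lo;
}
#endif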
Example 22
/* Check whether the OS is in FIPS mode and record that in a module
   local variable.  If FORCE is passed as true, fips mode will be
   enabled anyway. Note: This function is not thread-safe and should
   be called before any threads are created.  This function may only
   be called once.  */
void
_gcry_initialize_fips_mode (int force)
{
  static int done;
  gpg_error_t err;

  /* Make sure we are not accidentally called twice.  */
  if (done)
    {
      if ( fips_mode () )
        {
          fips_new_state (STATE_FATALERROR);
          fips_noreturn ();
        }
      /* If not in fips mode an assert is sufficient.  */
      gcry_assert (!done);
    }
  done = 1;

  /* If the calling application explicitly requested fipsmode, do so.  */
  if (force)
    {
      gcry_assert (!no_fips_mode_required);
      goto leave;
    }

  /* For testing the system it is useful to override the system
     provided detection of the FIPS mode and force FIPS mode using a
     file.  The filename is hardwired so that there won't be any
     confusion on whether /etc/gcrypt/ or /usr/local/etc/gcrypt/ is
     actually used.  The file itself may be empty.  */
  if ( !access (FIPS_FORCE_FILE, F_OK) )
    {
      gcry_assert (!no_fips_mode_required);
      goto leave;
    }

  /* Checking based on /proc file properties.  */
  {
    static const char procfname[] = "/proc/sys/crypto/fips_enabled";
    FILE *fp;
    int saved_errno;

    fp = fopen (procfname, "r");
    if (fp)
      {
        char line[256];

        if (fgets (line, sizeof line, fp) && atoi (line))
          {
            /* System is in fips mode.  */
            fclose (fp);
            gcry_assert (!no_fips_mode_required);
            goto leave;
          }
        fclose (fp);
      }
    else if ((saved_errno = errno) != ENOENT
             && saved_errno != EACCES
             && !access ("/proc/version", F_OK) )
      {
        /* Problem reading the fips file even though the proc file
           system is available.  We had better stop right away. */
        log_info ("FATAL: error reading `%s' in libgcrypt: %s\n",
                  procfname, strerror (saved_errno));
#ifdef HAVE_SYSLOG
        syslog (LOG_USER|LOG_ERR, "Libgcrypt error: "
                "reading `%s' failed: %s - abort",
                procfname, strerror (saved_errno));
#endif /*HAVE_SYSLOG*/
        abort ();
      }
  }

  /* FIPS mode was not requested, so set the flag.  */
  no_fips_mode_required = 1;

 leave:
  if (!no_fips_mode_required)
    {
      /* Yes, we are in FIPS mode.  */
      FILE *fp;

      /* Initialize the lock used to protect the FSM.  */
      err = gpgrt_lock_init (&fsm_lock);
      if (err)
        {
          /* If that fails we can't do anything but abort the
             process. We need to use log_info so that the FSM won't
             get involved.  */
          log_info ("FATAL: failed to create the FSM lock in libgcrypt: %s\n",
                    gpg_strerror (err));
#ifdef HAVE_SYSLOG
          syslog (LOG_USER|LOG_ERR, "Libgcrypt error: "
                  "creating FSM lock failed: %s - abort",
                  gpg_strerror (err));
#endif /*HAVE_SYSLOG*/
          abort ();
        }


      /* If the FIPS force files exists, is readable and has a number
         != 0 on its first line, we enable the enforced fips mode.  */
      fp = fopen (FIPS_FORCE_FILE, "r");
      if (fp)
        {
          char line[256];

          if (fgets (line, sizeof line, fp) && atoi (line))
            enforced_fips_mode = 1;
          fclose (fp);
        }

      /* Now get us into the INIT state.  */
      fips_new_state (STATE_INIT);

    }
  return;
}
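
For context, the FORCE parameter above is normally driven from the public
API.  The following is a hedged sketch of how an application would request
forced FIPS mode; it assumes the documented GCRYCTL_FORCE_FIPS_MODE control
and does not show the internal plumbing between gcry_control and
_gcry_initialize_fips_mode.

#include <gcrypt.h>

int
main (void)
{
  /* Request FIPS mode before any other libgcrypt call.  */
  gcry_control (GCRYCTL_FORCE_FIPS_MODE, 0);

  if (!gcry_check_version (GCRYPT_VERSION))
    return 1;
  gcry_control (GCRYCTL_INITIALIZATION_FINISHED, 0);

  /* From here on, only FIPS-approved algorithms are available.  */
  return 0;
}

Alternatively, as the code above shows, creating the force file named by
FIPS_FORCE_FILE or enabling /proc/sys/crypto/fips_enabled puts the library
into FIPS mode without any application cooperation.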
Ejemplo n.º 23
0
/*
 * Use the NBYTES at BUFFER_ARG to update A.  Set the sign of a to
 * SIGN.
 */
void
_gcry_mpi_set_buffer (gcry_mpi_t a, const void *buffer_arg,
                      unsigned int nbytes, int sign)
{
  const unsigned char *buffer = (const unsigned char*)buffer_arg;
  const unsigned char *p;
  mpi_limb_t alimb;
  int nlimbs;
  int i;

  nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB;
  RESIZE_IF_NEEDED(a, nlimbs);
  a->sign = sign;

  for (i=0, p = buffer+nbytes-1; p >= buffer+BYTES_PER_MPI_LIMB; )
    {
#if BYTES_PER_MPI_LIMB == 4
      alimb  = *p--;
      alimb |= *p-- <<  8;
      alimb |= *p-- << 16;
      alimb |= *p-- << 24;
#elif BYTES_PER_MPI_LIMB == 8
      alimb  = (mpi_limb_t)*p--;
      alimb |= (mpi_limb_t)*p-- <<  8;
      alimb |= (mpi_limb_t)*p-- << 16;
      alimb |= (mpi_limb_t)*p-- << 24;
      alimb |= (mpi_limb_t)*p-- << 32;
      alimb |= (mpi_limb_t)*p-- << 40;
      alimb |= (mpi_limb_t)*p-- << 48;
      alimb |= (mpi_limb_t)*p-- << 56;
#else
#       error please implement for this limb size.
#endif
      a->d[i++] = alimb;
    }
  if ( p >= buffer )
    {
#if BYTES_PER_MPI_LIMB == 4
      alimb  = *p--;
      if (p >= buffer)
        alimb |= *p-- <<  8;
      if (p >= buffer)
        alimb |= *p-- << 16;
      if (p >= buffer)
        alimb |= *p-- << 24;
#elif BYTES_PER_MPI_LIMB == 8
      alimb  = (mpi_limb_t)*p--;
      if (p >= buffer)
        alimb |= (mpi_limb_t)*p-- << 8;
      if (p >= buffer)
        alimb |= (mpi_limb_t)*p-- << 16;
      if (p >= buffer)
        alimb |= (mpi_limb_t)*p-- << 24;
      if (p >= buffer)
        alimb |= (mpi_limb_t)*p-- << 32;
      if (p >= buffer)
        alimb |= (mpi_limb_t)*p-- << 40;
      if (p >= buffer)
        alimb |= (mpi_limb_t)*p-- << 48;
      if (p >= buffer)
        alimb |= (mpi_limb_t)*p-- << 56;
#else
#     error please implement for this limb size.
#endif
      a->d[i++] = alimb;
    }
  a->nlimbs = i;
  gcry_assert (i == nlimbs);
}
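
To make the packing above concrete, here is a small self-contained sketch of
the same least-significant-limb-first conversion for a 32-bit limb size.  It
uses plain uint32_t instead of mpi_limb_t and an index instead of the
decrementing pointer, and is illustrative only.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int
main (void)
{
  /* Big-endian input 0x0102030405060708 packed into two 32-bit limbs,
     mirroring what _gcry_mpi_set_buffer does when BYTES_PER_MPI_LIMB is 4.  */
  const unsigned char buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
  uint32_t limb[2];
  size_t pos = sizeof buf;      /* Consume the buffer from the end.  */
  int i = 0;

  while (pos > 0)
    {
      uint32_t l = 0;
      int shift;

      for (shift = 0; shift < 32 && pos > 0; shift += 8)
        l |= (uint32_t)buf[--pos] << shift;
      limb[i++] = l;            /* limb[0] is the least significant limb.  */
    }

  printf ("limb[0]=%08" PRIx32 " limb[1]=%08" PRIx32 "\n", limb[0], limb[1]);
  /* Prints: limb[0]=05060708 limb[1]=01020304 */
  return 0;
}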
Ejemplo n.º 24
0
gcry_err_code_t
_gcry_cipher_ctl (gcry_cipher_hd_t h, int cmd, void *buffer, size_t buflen)
{
  gcry_err_code_t rc = 0;

  switch (cmd)
    {
    case GCRYCTL_RESET:
      cipher_reset (h);
      break;

    case GCRYCTL_FINALIZE:
      if (!h || buffer || buflen)
	return GPG_ERR_INV_ARG;
      h->marks.finalize = 1;
      break;

    case GCRYCTL_CFB_SYNC:
      cipher_sync( h );
      break;

    case GCRYCTL_SET_CBC_CTS:
      if (buflen)
	if (h->flags & GCRY_CIPHER_CBC_MAC)
	  rc = GPG_ERR_INV_FLAG;
	else
	  h->flags |= GCRY_CIPHER_CBC_CTS;
      else
	h->flags &= ~GCRY_CIPHER_CBC_CTS;
      break;

    case GCRYCTL_SET_CBC_MAC:
      if (buflen)
	if (h->flags & GCRY_CIPHER_CBC_CTS)
	  rc = GPG_ERR_INV_FLAG;
	else
	  h->flags |= GCRY_CIPHER_CBC_MAC;
      else
	h->flags &= ~GCRY_CIPHER_CBC_MAC;
      break;

    case GCRYCTL_SET_CCM_LENGTHS:
#ifdef HAVE_U64_TYPEDEF
      {
        u64 params[3];
        size_t encryptedlen;
        size_t aadlen;
        size_t authtaglen;

        if (h->mode != GCRY_CIPHER_MODE_CCM)
          return GPG_ERR_INV_CIPHER_MODE;

        if (!buffer || buflen != 3 * sizeof(u64))
          return GPG_ERR_INV_ARG;

        /* This command is used to pass additional length parameters needed
           by CCM mode to initialize CBC-MAC.  */
        memcpy (params, buffer, sizeof(params));
        encryptedlen = params[0];
        aadlen = params[1];
        authtaglen = params[2];

        rc = _gcry_cipher_ccm_set_lengths (h, encryptedlen, aadlen, authtaglen);
      }
#else
      rc = GPG_ERR_NOT_SUPPORTED;
#endif
      break;

    case GCRYCTL_SET_TAGLEN:
      if (!h || !buffer || buflen != sizeof(int) )
	return GPG_ERR_INV_ARG;
      switch (h->mode)
        {
        case GCRY_CIPHER_MODE_OCB:
          switch (*(int*)buffer)
            {
            case 8: case 12: case 16:
              h->u_mode.ocb.taglen = *(int*)buffer;
              break;
            default:
              rc = GPG_ERR_INV_LENGTH; /* Invalid tag length. */
              break;
            }
          break;

        default:
          rc = GPG_ERR_INV_CIPHER_MODE;
          break;
        }
      break;

    case GCRYCTL_DISABLE_ALGO:
      /* This command expects NULL for H and BUFFER to point to an
         integer with the algo number.  */
      if( h || !buffer || buflen != sizeof(int) )
	return GPG_ERR_CIPHER_ALGO;
      disable_cipher_algo( *(int*)buffer );
      break;

    case PRIV_CIPHERCTL_DISABLE_WEAK_KEY:  /* (private)  */
      if (h->spec->set_extra_info)
        rc = h->spec->set_extra_info
          (&h->context.c, CIPHER_INFO_NO_WEAK_KEY, NULL, 0);
      else
        rc = GPG_ERR_NOT_SUPPORTED;
      break;

    case PRIV_CIPHERCTL_GET_INPUT_VECTOR: /* (private)  */
      /* This is the input block as used in CFB and OFB mode which has
         initially been set as IV.  The returned format is:
           1 byte  Actual length of the block in bytes.
           n byte  The block.
         If the provided buffer is too short, an error is returned. */
      if (buflen < (1 + h->spec->blocksize))
        rc = GPG_ERR_TOO_SHORT;
      else
        {
          unsigned char *ivp;
          unsigned char *dst = buffer;
          int n = h->unused;

          if (!n)
            n = h->spec->blocksize;
          gcry_assert (n <= h->spec->blocksize);
          *dst++ = n;
          ivp = h->u_iv.iv + h->spec->blocksize - n;
          while (n--)
            *dst++ = *ivp++;
        }
      break;

    case GCRYCTL_SET_SBOX:
      if (h->spec->set_extra_info)
        rc = h->spec->set_extra_info
          (&h->context.c, GCRYCTL_SET_SBOX, buffer, buflen);
      else
        rc = GPG_ERR_NOT_SUPPORTED;
      break;

    default:
      rc = GPG_ERR_INV_OP;
    }

  return rc;
}
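
In normal use the commands above are reached through the public
gcry_cipher_ctl entry point.  The following hedged sketch enables
ciphertext stealing on a CBC handle, exercising the GCRYCTL_SET_CBC_CTS
branch; error handling is shortened and key/data handling is omitted.

#include <gcrypt.h>

int
main (void)
{
  gcry_cipher_hd_t hd;
  gcry_error_t err;

  if (!gcry_check_version (GCRYPT_VERSION))
    return 1;
  gcry_control (GCRYCTL_INITIALIZATION_FINISHED, 0);

  err = gcry_cipher_open (&hd, GCRY_CIPHER_AES256, GCRY_CIPHER_MODE_CBC, 0);
  if (err)
    return 1;

  /* A non-zero BUFLEN selects the enable path in the switch above; the
     request is rejected with GPG_ERR_INV_FLAG if CBC-MAC is already set.  */
  err = gcry_cipher_ctl (hd, GCRYCTL_SET_CBC_CTS, NULL, 1);
  if (err)
    return 1;

  gcry_cipher_close (hd);
  return 0;
}

gcrypt.h also provides a gcry_cipher_cts convenience macro that wraps this
control command.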
Ejemplo n.º 25
0
/* Note: This function requires LENGTH > 0.  */
static void
salsa20_do_encrypt_stream (SALSA20_context_t *ctx,
                           byte *outbuf, const byte *inbuf,
                           unsigned int length, unsigned rounds)
{
  unsigned int nburn, burn = 0;

  if (ctx->unused)
    {
      unsigned char *p = (void*)ctx->pad;
      unsigned int n;

      gcry_assert (ctx->unused < SALSA20_BLOCK_SIZE);

      n = ctx->unused;
      if (n > length)
        n = length;
      buf_xor (outbuf, inbuf, p + SALSA20_BLOCK_SIZE - ctx->unused, n);
      length -= n;
      outbuf += n;
      inbuf  += n;
      ctx->unused -= n;
      if (!length)
        return;
      gcry_assert (!ctx->unused);
    }

#ifdef USE_AMD64
  if (length >= SALSA20_BLOCK_SIZE)
    {
      unsigned int nblocks = length / SALSA20_BLOCK_SIZE;
      burn = _gcry_salsa20_amd64_encrypt_blocks(ctx->input, inbuf, outbuf,
                                                nblocks, rounds);
      length -= SALSA20_BLOCK_SIZE * nblocks;
      outbuf += SALSA20_BLOCK_SIZE * nblocks;
      inbuf  += SALSA20_BLOCK_SIZE * nblocks;
    }
#endif

#ifdef USE_ARM_NEON_ASM
  if (ctx->use_neon && length >= SALSA20_BLOCK_SIZE)
    {
      unsigned int nblocks = length / SALSA20_BLOCK_SIZE;
      _gcry_arm_neon_salsa20_encrypt (outbuf, inbuf, nblocks, ctx->input,
                                      rounds);
      length -= SALSA20_BLOCK_SIZE * nblocks;
      outbuf += SALSA20_BLOCK_SIZE * nblocks;
      inbuf  += SALSA20_BLOCK_SIZE * nblocks;
    }
#endif

  while (length > 0)
    {
      /* Create the next pad and bump the block counter.  Note that it
         is the user's duty to change to another nonce not later than
         after 2^70 processed bytes.  */
      nburn = ctx->core (ctx->pad, ctx, rounds);
      burn = nburn > burn ? nburn : burn;

      if (length <= SALSA20_BLOCK_SIZE)
	{
	  buf_xor (outbuf, inbuf, ctx->pad, length);
          ctx->unused = SALSA20_BLOCK_SIZE - length;
	  break;
	}
      buf_xor (outbuf, inbuf, ctx->pad, SALSA20_BLOCK_SIZE);
      length -= SALSA20_BLOCK_SIZE;
      outbuf += SALSA20_BLOCK_SIZE;
      inbuf  += SALSA20_BLOCK_SIZE;
    }

  _gcry_burn_stack (burn);
}
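
Callers never see the ctx->unused bookkeeping above; they drive it through
the generic stream-cipher interface.  A hedged usage sketch with an all-zero
key and nonce follows (for illustration only; real code must use a secret
key and a fresh nonce per message).

#include <gcrypt.h>

int
main (void)
{
  gcry_cipher_hd_t hd;
  unsigned char key[32]  = { 0 };   /* Dummy key, illustration only.  */
  unsigned char nonce[8] = { 0 };   /* 64-bit Salsa20 nonce.          */
  unsigned char buf[13]  = "hello, salsa";

  if (!gcry_check_version (GCRYPT_VERSION))
    return 1;
  gcry_control (GCRYCTL_INITIALIZATION_FINISHED, 0);

  if (gcry_cipher_open (&hd, GCRY_CIPHER_SALSA20, GCRY_CIPHER_MODE_STREAM, 0)
      || gcry_cipher_setkey (hd, key, sizeof key)
      || gcry_cipher_setiv (hd, nonce, sizeof nonce))
    return 1;

  /* In-place encryption.  A 13-byte request consumes 13 bytes of the
     64-byte pad, so ctx->unused is left at 51 for the next call.  */
  if (gcry_cipher_encrypt (hd, buf, sizeof buf, NULL, 0))
    return 1;

  gcry_cipher_close (hd);
  return 0;
}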