Example #1
static gpg_error_t
new_data (const char *string, struct secret_data_s **r_data)
{
  gpg_error_t err;
  struct secret_data_s *d, *d_enc;
  size_t length;
  int total;
  int res;

  *r_data = NULL;

  err = init_encryption ();
  if (err)
    return err;

  length = strlen (string) + 1;

  /* We pad the data to a multiple of 32 bytes so that it gets harder
     to learn something by watching allocation patterns.  This is
     usually not possible, but we had better assume nothing about our
     secure storage provider.  To support the AESWRAP mode we need to
     add 8 extra bytes as well. */
  total = (length + 8) + 32 - ((length+8) % 32);

  d = xtrymalloc_secure (sizeof *d + total - 1);
  if (!d)
    return gpg_error_from_syserror ();
  memcpy (d->data, string, length);

  d_enc = xtrymalloc (sizeof *d_enc + total - 1);
  if (!d_enc)
    {
      err = gpg_error_from_syserror ();
      xfree (d);
      return err;
    }

  d_enc->totallen = total;
  res = npth_mutex_lock (&encryption_lock);
  if (res)
    log_fatal ("failed to acquire cache encryption mutex: %s\n",
               strerror (res));

  err = gcry_cipher_encrypt (encryption_handle, d_enc->data, total,
                             d->data, total - 8);
  xfree (d);
  res = npth_mutex_unlock (&encryption_lock);
  if (res)
    log_fatal ("failed to release cache encryption mutex: %s\n", strerror (res));
  if (err)
    {
      xfree (d_enc);
      return err;
    }
  *r_data = d_enc;
  return 0;
}
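A note on the padding arithmetic above: the cleartext length (including the terminating NUL) plus the 8 bytes required by AESWRAP is rounded up to the next multiple of 32, and a full extra block is added when the sum is already a multiple of 32.  The following stand-alone sketch, using the hypothetical helper padded_total (not part of GnuPG), reproduces that computation:

#include <stdio.h>
#include <string.h>

/* Hypothetical helper mirroring the computation of TOTAL above.  */
static size_t
padded_total (size_t length)
{
  return (length + 8) + 32 - ((length + 8) % 32);
}

int
main (void)
{
  /* strlen ("hello") + 1 == 6; 6 + 8 == 14 is rounded up to 32.  */
  printf ("%zu\n", padded_total (strlen ("hello") + 1));  /* prints 32 */
  /* 24 + 8 == 32 is already a multiple of 32, so a full extra block
     is added and the result is 64.  */
  printf ("%zu\n", padded_total (24));                     /* prints 64 */
  return 0;
}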
Example #2
File: app.c Project: Juul/gnupg
/* Release a lock on the reader.  See lock_reader(). */
static void
unlock_reader (int slot)
{
  int res;

  if (slot < 0 || slot >= DIM (lock_table)
      || !lock_table[slot].initialized)
    log_bug ("unlock_reader called for invalid slot %d\n", slot);

  apdu_set_progress_cb (slot, NULL, NULL);

  res = npth_mutex_unlock (&lock_table[slot].lock);
  if (res)
    log_error ("failed to release APP lock for slot %d: %s\n",
               slot, strerror (res));
}
Example #3
/* Unlock the pinentry so that another thread can start one and
   disconnect that pinentry - we do this after the unlock so that a
   stalled pinentry does not block other threads.  Fixme: We should
   have a timeout in Assuan for the disconnect operation. */
static int
unlock_pinentry (int rc)
{
  assuan_context_t ctx = entry_ctx;
  int err;

  entry_ctx = NULL;
  err = npth_mutex_unlock (&entry_lock);
  if (err)
    {
      log_error ("failed to release the entry lock: %s\n", strerror (err));
      if (!rc)
        rc = gpg_error_from_errno (err);
    }
  assuan_release (ctx);
  return rc;
}
Example #4
File: t-mutex.c Project: 1587/npth
int
main (int argc, char *argv[])
{
  int rc;
  npth_mutex_t mutex;

  rc = npth_init ();
  fail_if_err (rc);

  rc = npth_mutex_init (&mutex, NULL);
  fail_if_err (rc);
  rc = npth_mutex_lock (&mutex);
  fail_if_err (rc);
  rc = npth_mutex_unlock (&mutex);
  fail_if_err (rc);

  return 0;
}
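The test above exercises only the blocking calls.  As a minimal sketch under the same assumptions (npth's t-support.h providing fail_if_err, plus <errno.h> for EBUSY), and assuming that npth also maps npth_mutex_trylock and npth_mutex_destroy onto their pthread counterparts (trylock is used in Example #8 below), the non-blocking and cleanup calls could be checked like this:

int
main (int argc, char *argv[])
{
  int rc;
  npth_mutex_t mutex;

  rc = npth_init ();
  fail_if_err (rc);

  rc = npth_mutex_init (&mutex, NULL);
  fail_if_err (rc);

  rc = npth_mutex_lock (&mutex);
  fail_if_err (rc);

  /* A non-blocking attempt on a mutex that is already held should
     report EBUSY instead of blocking.  */
  rc = npth_mutex_trylock (&mutex);
  if (rc != EBUSY)
    return 1;

  rc = npth_mutex_unlock (&mutex);
  fail_if_err (rc);

  rc = npth_mutex_destroy (&mutex);
  fail_if_err (rc);

  return 0;
}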
Example #5
/* We do the encryption init on the fly.  We can't do it in the module
   init code because that runs before we listen for connections, and
   if we are started on demand by gpg etc., the caller will only wait
   a few seconds to decide whether the agent now accepts connections.
   Thus we should get into the listen state as soon as possible.  */
static gpg_error_t
init_encryption (void)
{
  gpg_error_t err;
  void *key;
  int res;

  if (encryption_handle)
    return 0; /* Shortcut - Already initialized.  */

  res = npth_mutex_lock (&encryption_lock);
  if (res)
    log_fatal ("failed to acquire cache encryption mutex: %s\n", strerror (res));

  err = gcry_cipher_open (&encryption_handle, GCRY_CIPHER_AES128,
                          GCRY_CIPHER_MODE_AESWRAP, GCRY_CIPHER_SECURE);
  if (!err)
    {
      key = gcry_random_bytes (ENCRYPTION_KEYSIZE, GCRY_STRONG_RANDOM);
      if (!key)
        err = gpg_error_from_syserror ();
      else
        {
          err = gcry_cipher_setkey (encryption_handle, key, ENCRYPTION_KEYSIZE);
          xfree (key);
        }
      if (err)
        {
          gcry_cipher_close (encryption_handle);
          encryption_handle = NULL;
        }
    }
  if (err)
    log_error ("error initializing cache encryption context: %s\n",
               gpg_strerror (err));

  res = npth_mutex_unlock (&encryption_lock);
  if (res)
    log_fatal ("failed to release cache encryption mutex: %s\n", strerror (res));

  return err? gpg_error (GPG_ERR_NOT_INITIALIZED) : 0;
}
Example #6
File: t-thread.c Project: 1587/npth
static void *
thread_one (void *arg)
{
  int rc, i;

  info_msg ("thread-one started");
  npth_usleep (10);  /* Give the other thread some time to start.  */
  for (i=0; i < 10; i++)
    {
      /* We would not need the mutex here, but we use it to allow the
         system to switch to another thread.  */
      rc = npth_mutex_lock (&counter_mutex);
      fail_if_err (rc);

      counter++;

      rc = npth_mutex_unlock (&counter_mutex);
      fail_if_err (rc);
    }
  info_msg ("thread-one terminated");

  return (void*)4711;
}
Example #7
File: t-thread.c Project: 1587/npth
static void *
thread_two (void *arg)
{
  int rc, i;

  info_msg ("thread-two started");

  for (i=0; i < 10; i++)
    {
      rc = npth_mutex_lock (&counter_mutex);
      fail_if_err (rc);

      counter--;

      if (i == 5)
        {
          npth_t tid;

          info_msg ("creating thread-twoone");
          rc = npth_create (&tid, NULL, thread_twoone, NULL);
          fail_if_err (rc);
          npth_usleep (10);  /* Give new thread some time to start.  */
        }

      rc = npth_mutex_unlock (&counter_mutex);
      fail_if_err (rc);
    }

  info_msg ("busy waiting for thread twoone");
  while (!thread_twoone_ready)
    npth_sleep (0);

  info_msg ("thread-two terminated");

  return (void*)4722;
}
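thread_one and thread_two above rely on the shared counter, counter_mutex, thread_twoone and fail_if_err from the same test file.  A minimal sketch of how such workers could be driven, assuming those globals are defined as in t-thread.c (the real test's main differs in detail):

int
main (int argc, char *argv[])
{
  int rc;
  npth_t tid1, tid2;
  void *retval;

  rc = npth_init ();
  fail_if_err (rc);

  rc = npth_mutex_init (&counter_mutex, NULL);
  fail_if_err (rc);

  rc = npth_create (&tid1, NULL, thread_one, NULL);
  fail_if_err (rc);
  rc = npth_create (&tid2, NULL, thread_two, NULL);
  fail_if_err (rc);

  /* Wait for both workers; their increments and decrements cancel
     each other out, so COUNTER should be back at zero.  */
  rc = npth_join (tid1, &retval);
  fail_if_err (rc);
  rc = npth_join (tid2, &retval);
  fail_if_err (rc);

  return counter ? 1 : 0;
}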
Example #8
/* Returns true if the pinentry is currently active.  If WAITSECONDS
   is greater than zero the function will wait for this many seconds
   before returning.  */
int
pinentry_active_p (ctrl_t ctrl, int waitseconds)
{
  int err;
  (void)ctrl;

  if (waitseconds > 0)
    {
      struct timespec abstime;
      int rc;

      npth_clock_gettime (&abstime);
      abstime.tv_sec += waitseconds;
      err = npth_mutex_timedlock (&entry_lock, &abstime);
      if (err)
        {
          if (err == ETIMEDOUT)
            rc = gpg_error (GPG_ERR_TIMEOUT);
          else
            rc = gpg_error (GPG_ERR_INTERNAL);
          return rc;
        }
    }
  else
    {
      err = npth_mutex_trylock (&entry_lock);
      if (err)
        return gpg_error (GPG_ERR_LOCKED);
    }

  err = npth_mutex_unlock (&entry_lock);
  if (err)
    log_error ("failed to release the entry lock at %d: %s\n", __LINE__,
	       strerror (errno));
  return 0;
}
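The return convention above is worth spelling out: the function returns 0 when no pinentry is active (the lock could be taken and was released again) and a non-zero gpg error code (GPG_ERR_LOCKED, GPG_ERR_TIMEOUT or GPG_ERR_INTERNAL) when one is, or when the check itself failed.  A hypothetical caller sketch, assuming a valid CTRL object and GnuPG's log_info helper:

  /* Wait up to two seconds for the pinentry lock before giving up.  */
  if (pinentry_active_p (ctrl, 2))
    log_info ("a pinentry is currently active (or the check timed out)\n");
  else
    log_info ("no pinentry is currently active\n");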
Example #9
/* Try to find an item in the cache.  Note that we currently don't
   make use of CACHE_MODE except for CACHE_MODE_NONCE and
   CACHE_MODE_USER.  */
char *
agent_get_cache (const char *key, cache_mode_t cache_mode)
{
  gpg_error_t err;
  ITEM r;
  char *value = NULL;
  int res;
  int last_stored = 0;

  if (cache_mode == CACHE_MODE_IGNORE)
    return NULL;

  if (!key)
    {
      key = last_stored_cache_key;
      if (!key)
        return NULL;
      last_stored = 1;
    }


  if (DBG_CACHE)
    log_debug ("agent_get_cache '%s' (mode %d)%s ...\n",
               key, cache_mode,
               last_stored? " (stored cache key)":"");
  housekeeping ();

  for (r=thecache; r; r = r->next)
    {
      if (r->pw
          && ((cache_mode != CACHE_MODE_USER
               && cache_mode != CACHE_MODE_NONCE)
              || r->cache_mode == cache_mode)
          && !strcmp (r->key, key))
        {
          /* Note: To avoid races KEY may not be accessed anymore below.  */
          r->accessed = gnupg_get_time ();
          if (DBG_CACHE)
            log_debug ("... hit\n");
          if (r->pw->totallen < 32)
            err = gpg_error (GPG_ERR_INV_LENGTH);
          else if ((err = init_encryption ()))
            ;
          else if (!(value = xtrymalloc_secure (r->pw->totallen - 8)))
            err = gpg_error_from_syserror ();
          else
            {
              res = npth_mutex_lock (&encryption_lock);
              if (res)
                log_fatal ("failed to acquire cache encryption mutex: %s\n",
			   strerror (res));
              err = gcry_cipher_decrypt (encryption_handle,
                                         value, r->pw->totallen - 8,
                                         r->pw->data, r->pw->totallen);
              res = npth_mutex_unlock (&encryption_lock);
              if (res)
                log_fatal ("failed to release cache encryption mutex: %s\n",
			   strerror (res));
            }
          if (err)
            {
              xfree (value);
              value = NULL;
              log_error ("retrieving cache entry '%s' failed: %s\n",
                         key, gpg_strerror (err));
            }
          return value;
        }
    }
  if (DBG_CACHE)
    log_debug ("... miss\n");

  return NULL;
}
Example #10
/* Check whether the Scdaemon is still alive and clean it up if not. */
void
agent_scd_check_aliveness (void)
{
  pid_t pid;
#ifdef HAVE_W32_SYSTEM
  DWORD rc;
#else
  int rc;
#endif
  struct timespec abstime;
  int err;

  if (!primary_scd_ctx)
    return; /* No scdaemon running. */

  /* This is not a critical function so we use a short timeout while
     acquiring the lock.  */
  npth_clock_gettime (&abstime);
  abstime.tv_sec += 1;
  err = npth_mutex_timedlock (&start_scd_lock, &abstime);
  if (err)
    {
      if (err == ETIMEDOUT)
        {
          if (opt.verbose > 1)
            log_info ("failed to acquire the start_scd lock while"
                      " doing an aliveness check: %s\n", strerror (err));
        }
      else
        log_error ("failed to acquire the start_scd lock while"
                   " doing an aliveness check: %s\n", strerror (err));
      return;
    }

  if (primary_scd_ctx)
    {
      pid = assuan_get_pid (primary_scd_ctx);
#ifdef HAVE_W32_SYSTEM
      /* If we have a PID we disconnect if either GetExitCodeProcess
         fails or if it returns an exit code other than 259, which is
         the value of STILL_ACTIVE.  */
      if (pid != (pid_t)(void*)(-1) && pid
          && (!GetExitCodeProcess ((HANDLE)pid, &rc) || rc != 259))
#else
      if (pid != (pid_t)(-1) && pid
          && ((rc=waitpid (pid, NULL, WNOHANG))==-1 || (rc == pid)) )
#endif
        {
          /* Okay, scdaemon died.  Disconnect the primary connection
             now but take care that it won't do another wait. Also
             clean up all other connections and release their
             resources.  The next use will start a new daemon then.
             Due to the use of the START_SCD_LOCAL we are sure that
             none of these contexts are actually in use. */
          struct scd_local_s *sl;

          assuan_set_flag (primary_scd_ctx, ASSUAN_NO_WAITPID, 1);
          assuan_release (primary_scd_ctx);

          for (sl=scd_local_list; sl; sl = sl->next_local)
            {
              if (sl->ctx)
                {
                  if (sl->ctx != primary_scd_ctx)
                    assuan_release (sl->ctx);
                  sl->ctx = NULL;
                }
            }

          primary_scd_ctx = NULL;
          primary_scd_ctx_reusable = 0;

          xfree (socket_name);
          socket_name = NULL;
        }
    }

  err = npth_mutex_unlock (&start_scd_lock);
  if (err)
    log_error ("failed to release the start_scd lock while"
               " doing the aliveness check: %s\n", strerror (err));
}
Example #11
/* Fork off the SCdaemon if this has not already been done.  Lock the
   daemon and make sure that a proper context has been setup in CTRL.
   This function might also lock the daemon, which means that the
   caller must call unlock_scd after this function has returned
   success and the actual Assuan transaction been done. */
static int
start_scd (ctrl_t ctrl)
{
  gpg_error_t err = 0;
  const char *pgmname;
  assuan_context_t ctx = NULL;
  const char *argv[3];
  assuan_fd_t no_close_list[3];
  int i;
  int rc;

  if (opt.disable_scdaemon)
    return gpg_error (GPG_ERR_NOT_SUPPORTED);

  /* If this is the first call for this session, setup the local data
     structure. */
  if (!ctrl->scd_local)
    {
      ctrl->scd_local = xtrycalloc (1, sizeof *ctrl->scd_local);
      if (!ctrl->scd_local)
        return gpg_error_from_syserror ();
      ctrl->scd_local->ctrl_backlink = ctrl;
      ctrl->scd_local->next_local = scd_local_list;
      scd_local_list = ctrl->scd_local;
    }


  /* Assert that the lock count is as expected. */
  if (ctrl->scd_local->locked)
    {
      log_error ("start_scd: invalid lock count (%d)\n",
                 ctrl->scd_local->locked);
      return gpg_error (GPG_ERR_INTERNAL);
    }
  ctrl->scd_local->locked++;

  if (ctrl->scd_local->ctx)
    return 0; /* Okay, the context is fine.  We used to test for an
                 alive context here and do a disconnect.  Now that we
                 have a ticker function to check for it, it is easier
                 not to check here but to let the connection run into
                 an error instead. */


  /* We need to protect the following code. */
  rc = npth_mutex_lock (&start_scd_lock);
  if (rc)
    {
      log_error ("failed to acquire the start_scd lock: %s\n",
                 strerror (rc));
      return gpg_error (GPG_ERR_INTERNAL);
    }

  /* Check whether the pipe server has already been started and in
     this case either reuse a lingering pipe connection or establish a
     new socket based one. */
  if (primary_scd_ctx && primary_scd_ctx_reusable)
    {
      ctx = primary_scd_ctx;
      primary_scd_ctx_reusable = 0;
      if (opt.verbose)
        log_info ("new connection to SCdaemon established (reusing)\n");
      goto leave;
    }

  rc = assuan_new (&ctx);
  if (rc)
    {
      log_error ("can't allocate assuan context: %s\n", gpg_strerror (rc));
      err = rc;
      goto leave;
    }

  if (socket_name)
    {
      rc = assuan_socket_connect (ctx, socket_name, 0, 0);
      if (rc)
        {
          log_error ("can't connect to socket '%s': %s\n",
                     socket_name, gpg_strerror (rc));
          err = gpg_error (GPG_ERR_NO_SCDAEMON);
          goto leave;
        }

      if (opt.verbose)
        log_info ("new connection to SCdaemon established\n");
      goto leave;
    }

  if (primary_scd_ctx)
    {
      log_info ("SCdaemon is running but won't accept further connections\n");
      err = gpg_error (GPG_ERR_NO_SCDAEMON);
      goto leave;
    }

  /* Nope, it has not been started.  Fire it up now. */
  if (opt.verbose)
    log_info ("no running SCdaemon - starting it\n");

  if (fflush (NULL))
    {
#ifndef HAVE_W32_SYSTEM
      err = gpg_error_from_syserror ();
#endif
      log_error ("error flushing pending output: %s\n", strerror (errno));
      /* At least Windows XP fails here with EBADF.  According to docs
         and Wine an fflush(NULL) is the same as _flushall.  However
         the Wine implementation does not flush stdin, stdout and
         stderr - see above.  Let's try to ignore the error. */
#ifndef HAVE_W32_SYSTEM
      goto leave;
#endif
    }

  if (!opt.scdaemon_program || !*opt.scdaemon_program)
    opt.scdaemon_program = gnupg_module_name (GNUPG_MODULE_NAME_SCDAEMON);
  if ( !(pgmname = strrchr (opt.scdaemon_program, '/')))
    pgmname = opt.scdaemon_program;
  else
    pgmname++;

  argv[0] = pgmname;
  argv[1] = "--multi-server";
  argv[2] = NULL;

  i=0;
  if (!opt.running_detached)
    {
      if (log_get_fd () != -1)
        no_close_list[i++] = assuan_fd_from_posix_fd (log_get_fd ());
      no_close_list[i++] = assuan_fd_from_posix_fd (fileno (stderr));
    }
  no_close_list[i] = ASSUAN_INVALID_FD;

  /* Connect to the scdaemon and perform initial handshaking.  Use
     the detached flag so that under Windows SCDAEMON does not pop up
     a new window.  */
  rc = assuan_pipe_connect (ctx, opt.scdaemon_program, argv,
			    no_close_list, atfork_cb, NULL,
                            ASSUAN_PIPE_CONNECT_DETACHED);
  if (rc)
    {
      log_error ("can't connect to the SCdaemon: %s\n",
                 gpg_strerror (rc));
      err = gpg_error (GPG_ERR_NO_SCDAEMON);
      goto leave;
    }

  if (opt.verbose)
    log_debug ("first connection to SCdaemon established\n");


  /* Get the name of the additional socket opened by scdaemon. */
  {
    membuf_t data;
    unsigned char *databuf;
    size_t datalen;

    xfree (socket_name);
    socket_name = NULL;
    init_membuf (&data, 256);
    assuan_transact (ctx, "GETINFO socket_name",
                     membuf_data_cb, &data, NULL, NULL, NULL, NULL);

    databuf = get_membuf (&data, &datalen);
    if (databuf && datalen)
      {
        socket_name = xtrymalloc (datalen + 1);
        if (!socket_name)
          log_error ("warning: can't store socket name: %s\n",
                     strerror (errno));
        else
          {
            memcpy (socket_name, databuf, datalen);
            socket_name[datalen] = 0;
            if (DBG_IPC)
              log_debug ("additional connections at '%s'\n", socket_name);
          }
      }
    xfree (databuf);
  }

  /* Tell the scdaemon that we want it to send us an event signal.  We
     don't support this for W32CE.  */
#ifndef HAVE_W32CE_SYSTEM
  if (opt.sigusr2_enabled)
    {
      char buf[100];

#ifdef HAVE_W32_SYSTEM
      snprintf (buf, sizeof buf, "OPTION event-signal=%lx",
                (unsigned long)get_agent_scd_notify_event ());
#else
      snprintf (buf, sizeof buf, "OPTION event-signal=%d", SIGUSR2);
#endif
      assuan_transact (ctx, buf, NULL, NULL, NULL, NULL, NULL, NULL);
    }
#endif /*HAVE_W32CE_SYSTEM*/

  primary_scd_ctx = ctx;
  primary_scd_ctx_reusable = 0;

 leave:
  if (err)
    {
      unlock_scd (ctrl, err);
      if (ctx)
	assuan_release (ctx);
    }
  else
    {
      ctrl->scd_local->ctx = ctx;
    }
  rc = npth_mutex_unlock (&start_scd_lock);
  if (rc)
    log_error ("failed to release the start_scd lock: %s\n", strerror (rc));
  return err;
}
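The lock handling in start_scd follows a pattern common to the examples above: acquire the mutex once, funnel every failure through a single leave label, and release the mutex exactly once on the way out.  A generic sketch of that pattern, not actual GnuPG code: some_lock and do_work are hypothetical names, and log_error is GnuPG's logging helper from common/logging.h.

#include <string.h>
#include <npth.h>
#include <gpg-error.h>

static npth_mutex_t some_lock;   /* Assumed to be initialized at startup.  */

/* Hypothetical worker standing in for the real protected steps.  */
static gpg_error_t
do_work (void)
{
  return 0;
}

static gpg_error_t
with_some_lock (void)
{
  gpg_error_t err = 0;
  int rc;

  rc = npth_mutex_lock (&some_lock);
  if (rc)
    {
      log_error ("failed to acquire the lock: %s\n", strerror (rc));
      return gpg_error (GPG_ERR_INTERNAL);
    }

  err = do_work ();
  if (err)
    goto leave;

  /* Further protected steps would go here; each failing step jumps
     to leave so that the unlock below is never skipped.  */

 leave:
  rc = npth_mutex_unlock (&some_lock);
  if (rc)
    log_error ("failed to release the lock: %s\n", strerror (rc));
  return err;
}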