Example no. 1
/* Search the cache for a matching entry and return it when found.  Both
   positive and negative entries are returned; the caller can tell them
   apart through the datahead's `notfound' flag.  If no usable entry is
   found return NULL.

   This function must be called with the read-lock held.  */
struct datahead *
cache_search (request_type type, void *key, size_t len,
	      struct database_dyn *table, uid_t owner)
{
  unsigned long int hash = __nis_hash (key, len) % table->head->module;

  unsigned long int nsearched = 0;
  struct datahead *result = NULL;

  ref_t work = table->head->array[hash];
  while (work != ENDREF)
    {
      ++nsearched;

      struct hashentry *here = (struct hashentry *) (table->data + work);

      if (type == here->type && len == here->len
	  && memcmp (key, table->data + here->key, len) == 0
	  && here->owner == owner)
	{
	  /* We found the entry.  Increment the appropriate counter.  */
	  struct datahead *dh
	    = (struct datahead *) (table->data + here->packet);

	  /* See whether we must ignore the entry.  */
	  if (dh->usable)
	    {
	      /* We do not synchronize the memory here.  The statistics
		 data is not crucial, we synchronize only once in a while
		 in the cleanup threads.  */
	      if (dh->notfound)
		++table->head->neghit;
	      else
		{
		  ++table->head->poshit;

		  /* The entry was requested again; reset the reload
		     counter so the pruning thread keeps refreshing it
		     instead of letting it expire.  */
		  if (dh->nreloads != 0)
		    dh->nreloads = 0;
		}

	      result = dh;
	      break;
	    }
	}

      work = here->next;
    }

  if (nsearched > table->head->maxnsearched)
    table->head->maxnsearched = nsearched;

  return result;
}
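A minimal, self-contained sketch of the offset-based chaining that cache_search walks above: entries refer to each other by byte offsets into one flat data block rather than by pointers, so the block stays valid wherever it is mapped. All names here (toy_entry, toy_search, END_OFF) are illustrative and not part of nscd.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define END_OFF ((uint32_t) -1)		/* Marks the end of a chain.  */

struct toy_entry
{
  uint32_t next;	/* Byte offset of the next entry, or END_OFF.  */
  uint32_t key_off;	/* Byte offset of the NUL-terminated key.  */
};

/* Walk the chain starting at offset FIRST inside DATA and return the
   offset of the entry whose key equals KEY, or END_OFF if none matches.  */
static uint32_t
toy_search (const char *data, uint32_t first, const char *key)
{
  for (uint32_t off = first; off != END_OFF; )
    {
      struct toy_entry e;
      memcpy (&e, data + off, sizeof e);	/* Sidestep alignment issues.  */
      if (strcmp (data + e.key_off, key) == 0)
	return off;
      off = e.next;
    }
  return END_OFF;
}

int
main (void)
{
  /* Lay out two entries and their keys by hand in one flat block.  */
  char data[64];
  struct toy_entry e0 = { .next = 24, .key_off = 8 };
  struct toy_entry e1 = { .next = END_OFF, .key_off = 32 };
  memcpy (data, &e0, sizeof e0);
  strcpy (data + 8, "alpha");
  memcpy (data + 24, &e1, sizeof e1);
  strcpy (data + 32, "beta");

  printf ("\"beta\" found at offset %u\n",
	  (unsigned int) toy_search (data, 0, "beta"));
  return 0;
}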
Example no. 2
/* Add a new entry to the cache.  The return value is zero if the function
   call was successful.

   This function must be called with the read-lock held.

   We modify the table but we nevertheless only acquire a read-lock.
   This is OK since the operations we use would be safe even without
   locking, given that the `prune_cache' function never runs
   concurrently.  Using the read-lock reduces the chance of conflicts.  */
int
cache_add (int type, const void *key, size_t len, struct datahead *packet,
	   bool first, struct database_dyn *table,
	   uid_t owner, bool prune_wakeup)
{
  if (__builtin_expect (debug_level >= 2, 0))
    {
      const char *str;
      char buf[INET6_ADDRSTRLEN + 1];
      if (type == GETHOSTBYADDR || type == GETHOSTBYADDRv6)
	str = inet_ntop (type == GETHOSTBYADDR ? AF_INET : AF_INET6,
			 key, buf, sizeof (buf));
      else
	str = key;

      dbg_log (_("add new entry \"%s\" of type %s for %s to cache%s"),
	       str, serv2str[type], dbnames[table - dbs],
	       first ? _(" (first)") : "");
    }

  unsigned long int hash = __nis_hash (key, len) % table->head->module;
  struct hashentry *newp;

  newp = mempool_alloc (table, sizeof (struct hashentry), 0);
  /* If we cannot allocate memory, just do not do anything.  */
  if (newp == NULL)
    {
      /* If necessary, mark the entry as unusable so that lookups will
	 not use it.  */
      if (first)
	packet->usable = false;

      return -1;
    }

  newp->type = type;
  newp->first = first;
  newp->len = len;
  newp->key = (char *) key - table->data;
  assert (newp->key + newp->len <= table->head->first_free);
  newp->owner = owner;
  newp->packet = (char *) packet - table->data;
  assert ((newp->packet & BLOCK_ALIGN_M1) == 0);

  /* Put the new entry in the first position.  */
  do
    newp->next = table->head->array[hash];
  while (atomic_compare_and_exchange_bool_acq (&table->head->array[hash],
					       (ref_t) ((char *) newp
							- table->data),
					       (ref_t) newp->next));

  /* Update the statistics.  */
  if (packet->notfound)
    ++table->head->negmiss;
  else if (first)
    ++table->head->posmiss;

  /* We depend on this value being correct and at least as high as the
     real number of entries.  */
  atomic_increment (&table->head->nentries);

  /* It does not matter that we are not loading the just-incremented
     value; this is just for statistics.  */
  unsigned long int nentries = table->head->nentries;
  if (nentries > table->head->maxnentries)
    table->head->maxnentries = nentries;

  if (table->persistent)
    // XXX async OK?
    msync ((void *) table->head,
	   (char *) &table->head->array[hash] - (char *) table->head
	   + sizeof (ref_t), MS_ASYNC);

  /* We do not have to worry about the pruning thread if we are
     re-adding the data, since re-adding is itself done by the pruning
     thread.  Nor do we have to do anything if this is not the first
     time the data is entered, since the different data heads all share
     the same timeout.  */
  if (first && prune_wakeup)
    {
      /* Perhaps the prune thread for this table has not run in a long
	 time.  Wake it if necessary.  */
      pthread_mutex_lock (&table->prune_lock);
      time_t next_wakeup = table->wakeup_time;
      bool do_wakeup = false;
      if (next_wakeup > packet->timeout + CACHE_PRUNE_INTERVAL)
	{
	  table->wakeup_time = packet->timeout;
	  do_wakeup = true;
	}
      pthread_mutex_unlock (&table->prune_lock);
      if (do_wakeup)
	pthread_cond_signal (&table->prune_cond);
    }

  return 0;
}
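The CAS loop above ("Put the new entry in the first position") is the classic lock-free push onto the head of a singly linked list. Below is a self-contained sketch of the same pattern using C11 atomics; nscd itself works on ref_t offsets with glibc's internal atomic_compare_and_exchange_bool_acq, and the names node and push_front here are illustrative only.

#include <stdatomic.h>
#include <stdio.h>

struct node
{
  int value;
  struct node *next;
};

/* Atomically make NEWP the first element of the list whose head pointer
   is *HEAD.  If another thread changes *HEAD between our read and the
   compare-and-exchange, the exchange fails, OLD is refreshed, and we
   retry with the new head.  */
static void
push_front (struct node *_Atomic *head, struct node *newp)
{
  struct node *old = atomic_load (head);
  do
    newp->next = old;
  while (!atomic_compare_exchange_weak (head, &old, newp));
}

int
main (void)
{
  struct node *_Atomic head = NULL;
  struct node a = { .value = 1 }, b = { .value = 2 };

  push_front (&head, &a);
  push_front (&head, &b);

  for (struct node *p = atomic_load (&head); p != NULL; p = p->next)
    printf ("%d\n", p->value);		/* Prints 2, then 1.  */
  return 0;
}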
/* Try to get a file descriptor for the shared memory segment
   containing the database.  */
static struct mapped_database *
get_mapping (request_type type, const char *key,
	     struct mapped_database **mappedp)
{
  struct mapped_database *result = NO_MAPPING;
#ifdef SCM_RIGHTS
  const size_t keylen = strlen (key) + 1;
  char resdata[keylen];
  int saved_errno = errno;

  int mapfd = -1;

  /* Send the request.  */
  struct iovec iov[2];
  request_header req;

  int sock = open_socket ();
  if (sock < 0)
    goto out;

  req.version = NSCD_VERSION;
  req.type = type;
  req.key_len = keylen;

  iov[0].iov_base = &req;
  iov[0].iov_len = sizeof (req);
  iov[1].iov_base = (void *) key;
  iov[1].iov_len = keylen;

  if (__builtin_expect (TEMP_FAILURE_RETRY (__writev (sock, iov, 2))
			!= iov[0].iov_len + iov[1].iov_len, 0))
    /* We cannot even write the request.  */
    goto out_close2;

  /* Room for the data sent along with the file descriptor.  We expect
     the key name back.  */
  iov[0].iov_base = resdata;
  iov[0].iov_len = keylen;

  union
  {
    struct cmsghdr hdr;
    char bytes[CMSG_SPACE (sizeof (int))];
  } buf;
  struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 1,
			.msg_control = buf.bytes,
			.msg_controllen = sizeof (buf) };
  struct cmsghdr *cmsg = CMSG_FIRSTHDR (&msg);

  cmsg->cmsg_level = SOL_SOCKET;
  cmsg->cmsg_type = SCM_RIGHTS;
  cmsg->cmsg_len = CMSG_LEN (sizeof (int));

  /* This access is well-aligned since BUF is correctly aligned for an
     int and CMSG_DATA preserves this alignment.  */
  *(int *) CMSG_DATA (cmsg) = -1;

  msg.msg_controllen = cmsg->cmsg_len;

  if (wait_on_socket (sock) <= 0)
    goto out_close2;

# ifndef MSG_NOSIGNAL
#  define MSG_NOSIGNAL 0
# endif
  if (__builtin_expect (TEMP_FAILURE_RETRY (__recvmsg (sock, &msg,
						       MSG_NOSIGNAL))
			!= keylen, 0))
    goto out_close2;

  mapfd = *(int *) CMSG_DATA (cmsg);

  if (__builtin_expect (CMSG_FIRSTHDR (&msg)->cmsg_len
			!= CMSG_LEN (sizeof (int)), 0))
    goto out_close;

  struct stat64 st;
  if (__builtin_expect (strcmp (resdata, key) != 0, 0)
      || __builtin_expect (fstat64 (mapfd, &st) != 0, 0)
      || __builtin_expect (st.st_size < sizeof (struct database_pers_head), 0))
    goto out_close;

  struct database_pers_head head;
  if (__builtin_expect (TEMP_FAILURE_RETRY (__pread (mapfd, &head,
						     sizeof (head), 0))
			!= sizeof (head), 0))
    goto out_close;

  if (__builtin_expect (head.version != DB_VERSION, 0)
      || __builtin_expect (head.header_size != sizeof (head), 0)
      /* This really should not happen but who knows, maybe the update
	 thread got stuck.  */
      || __builtin_expect (! head.nscd_certainly_running
			   && head.timestamp + MAPPING_TIMEOUT < time (NULL),
			   0))
    goto out_close;

  size_t size = (sizeof (head) + roundup (head.module * sizeof (ref_t), ALIGN)
		 + head.data_size);

  if (__builtin_expect (st.st_size < size, 0))
    goto out_close;

  /* The file is large enough, map it now.  */
  void *mapping = __mmap (NULL, size, PROT_READ, MAP_SHARED, mapfd, 0);
  if (__builtin_expect (mapping != MAP_FAILED, 1))
    {
      /* Allocate a record for the mapping.  */
      struct mapped_database *newp = malloc (sizeof (*newp));
      if (newp == NULL)
	{
	  /* Ugh, after all we went through the memory allocation failed.  */
	  __munmap (mapping, size);
	  goto out_close;
	}

      newp->head = mapping;
      newp->data = ((char *) mapping + head.header_size
		    + roundup (head.module * sizeof (ref_t), ALIGN));
      newp->mapsize = size;
      /* Set counter to 1 to show it is usable.  */
      newp->counter = 1;

      result = newp;
    }

 out_close:
  __close (mapfd);
 out_close2:
  __close (sock);
 out:
  __set_errno (saved_errno);
#endif	/* SCM_RIGHTS */

  struct mapped_database *oldval = *mappedp;
  *mappedp = result;

  if (oldval != NULL && atomic_decrement_val (&oldval->counter) == 0)
    __nscd_unmap (oldval);

  return result;
}
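get_mapping above is the receiving half of descriptor passing over a UNIX domain socket. For contrast, here is a hedged sketch of what the sending half looks like in general, roughly what a daemon does when it answers such a request; send_fd and the one-byte payload are illustrative, not nscd's actual code (nscd sends the key name back as the ordinary data, which is why get_mapping compares resdata against key).

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Send FD over the connected UNIX domain socket SOCK together with one
   byte of ordinary data; at least one byte of real data must accompany
   the SCM_RIGHTS ancillary message.  Returns 0 on success, -1 on error.  */
static int
send_fd (int sock, int fd)
{
  char byte = 0;
  struct iovec iov = { .iov_base = &byte, .iov_len = 1 };

  union
  {
    struct cmsghdr hdr;
    char bytes[CMSG_SPACE (sizeof (int))];
  } buf;

  struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = buf.bytes,
			.msg_controllen = sizeof (buf) };

  struct cmsghdr *cmsg = CMSG_FIRSTHDR (&msg);
  cmsg->cmsg_level = SOL_SOCKET;
  cmsg->cmsg_type = SCM_RIGHTS;
  cmsg->cmsg_len = CMSG_LEN (sizeof (int));
  memcpy (CMSG_DATA (cmsg), &fd, sizeof (int));

  msg.msg_controllen = cmsg->cmsg_len;

  return sendmsg (sock, &msg, 0) == 1 ? 0 : -1;
}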


struct mapped_database *
__nscd_get_map_ref (request_type type, const char *name,
		    struct locked_map_ptr *mapptr, int *gc_cyclep)
{
  struct mapped_database *cur = mapptr->mapped;
  if (cur == NO_MAPPING)
    return cur;

  int cnt = 0;
  while (atomic_compare_and_exchange_val_acq (&mapptr->lock, 1, 0) != 0)
    {
      // XXX Best number of rounds?
      if (++cnt > 5)
	return NO_MAPPING;

      atomic_delay ();
    }

  cur = mapptr->mapped;

  if (__builtin_expect (cur != NO_MAPPING, 1))
    {
      /* If not mapped or timestamp not updated, request new map.  */
      if (cur == NULL
	  || (cur->head->nscd_certainly_running == 0
	      && cur->head->timestamp + MAPPING_TIMEOUT < time (NULL)))
	cur = get_mapping (type, name, &mapptr->mapped);

      if (__builtin_expect (cur != NO_MAPPING, 1))
	{
	  if (__builtin_expect (((*gc_cyclep = cur->head->gc_cycle) & 1) != 0,
				0))
	    cur = NO_MAPPING;
	  else
	    atomic_increment (&cur->counter);
	}
    }

  mapptr->lock = 0;

  return cur;
}
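The loop at the top of __nscd_get_map_ref is a bounded spin lock: instead of blocking, the caller gives up after a few rounds and falls back to NO_MAPPING, i.e. it bypasses the cache. A small sketch of the same pattern with C11 atomics; try_lock_bounded, MAX_SPINS, and the sched_yield stand-in for atomic_delay are all illustrative.

#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>

#define MAX_SPINS 5

/* Try to take the lock at *LOCK (0 = free, 1 = held).  Rather than
   blocking, give up and return false after MAX_SPINS failed attempts,
   mirroring how __nscd_get_map_ref returns NO_MAPPING.  */
static bool
try_lock_bounded (atomic_int *lock)
{
  for (int cnt = 0; cnt < MAX_SPINS; ++cnt)
    {
      int expected = 0;
      if (atomic_compare_exchange_strong (lock, &expected, 1))
	return true;
      sched_yield ();
    }
  return false;
}

/* Release a lock taken by a successful try_lock_bounded.  */
static void
spin_unlock (atomic_int *lock)
{
  atomic_store (lock, 0);
}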


const struct datahead *
__nscd_cache_search (request_type type, const char *key, size_t keylen,
		     const struct mapped_database *mapped)
{
  unsigned long int hash = __nis_hash (key, keylen) % mapped->head->module;

  ref_t work = mapped->head->array[hash];
  while (work != ENDREF)
    {
      struct hashentry *here = (struct hashentry *) (mapped->data + work);

      if (type == here->type && keylen == here->len
	  && memcmp (key, mapped->data + here->key, keylen) == 0)
	{
	  /* We found the entry.  */
	  const struct datahead *dh
	    = (struct datahead *) (mapped->data + here->packet);

	  /* See whether we must ignore the entry or whether something
	     is wrong because garbage collection is in progress.  */
	  if (dh->usable && ((char *) dh + dh->allocsize
			     <= (char *) mapped->head + mapped->mapsize))
	    return dh;
	}

      work = here->next;
    }

  return NULL;
}
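The usability check at the end of __nscd_cache_search (dh->usable together with the allocsize comparison against mapsize) keeps the client from reading past the shared mapping while the daemon's garbage collection is rearranging it. A trivial sketch of the same bounds test over plain offsets, written so the test itself cannot overflow; record_in_bounds is an illustrative helper, not part of nscd.

#include <stdbool.h>
#include <stddef.h>

/* Return true only if the SIZE bytes starting OFFSET bytes into a
   mapping of MAPSIZE bytes lie entirely inside it.  Both comparisons
   stay in range, so no out-of-range sum is ever formed.  */
static bool
record_in_bounds (size_t mapsize, size_t offset, size_t size)
{
  return offset <= mapsize && size <= mapsize - offset;
}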