Example No. 1
/** Return an allocated memory item to its memory pool. */
void
mp_pool_release(void *item)
{
  mp_allocated_t *allocated = (void*) M2A(item);
  mp_chunk_t *chunk = allocated->in_chunk;

  ASSERT(chunk);
  ASSERT(chunk->magic == MP_CHUNK_MAGIC);
  ASSERT(chunk->n_allocated > 0);

  allocated->u.next_free = chunk->first_free;
  chunk->first_free = allocated;

  if (PREDICT_UNLIKELY(chunk->n_allocated == chunk->capacity)) {
    /* This chunk was full and is about to be used. */
    mp_pool_t *pool = chunk->pool;
    /* unlink from the full list  */
    if (chunk->prev)
      chunk->prev->next = chunk->next;
    if (chunk->next)
      chunk->next->prev = chunk->prev;
    if (chunk == pool->full_chunks)
      pool->full_chunks = chunk->next;

    /* link to the used list. */
    chunk->next = pool->used_chunks;
    chunk->prev = NULL;
    if (chunk->next)
      chunk->next->prev = chunk;
    pool->used_chunks = chunk;
  } else if (PREDICT_UNLIKELY(chunk->n_allocated == 1)) {
    /* This was used and is about to be empty. */
    mp_pool_t *pool = chunk->pool;

    /* Unlink from the used list */
    if (chunk->prev)
      chunk->prev->next = chunk->next;
    if (chunk->next)
      chunk->next->prev = chunk->prev;
    if (chunk == pool->used_chunks)
      pool->used_chunks = chunk->next;

    /* Link to the empty list */
    chunk->next = pool->empty_chunks;
    chunk->prev = NULL;
    if (chunk->next)
      chunk->next->prev = chunk;
    pool->empty_chunks = chunk;

    /* Reset the guts of this chunk to defragment it, in case it gets
     * used again. */
    chunk->first_free = NULL;
    chunk->next_mem = chunk->mem;

    ++pool->n_empty_chunks;
  }
  --chunk->n_allocated;
}
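
Note: every example in this list hinges on a branch-prediction hint macro. As a point of reference, here is a minimal sketch of how PREDICT_LIKELY/PREDICT_UNLIKELY are commonly defined (assuming a GCC/Clang-style __builtin_expect; the exact definition and fallback used by the codebase may differ):

/* Hint the compiler about which branch is expected (sketch only, not the
 * codebase's exact definition). */
#ifdef __GNUC__
#define PREDICT_LIKELY(exp)   __builtin_expect(!!(exp), 1)
#define PREDICT_UNLIKELY(exp) __builtin_expect(!!(exp), 0)
#else
#define PREDICT_LIKELY(exp)   (exp)
#define PREDICT_UNLIKELY(exp) (exp)
#endif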
Example No. 2
/** Allocate a chunk of <b>size</b> bytes of memory, and return a pointer to
 * the result.  On error, log and terminate the process.  (Same as
 * malloc(size), but never returns NULL.)
 */
void *
tor_malloc_(size_t size)
{
  void *result;

  raw_assert(size < SIZE_T_CEILING);

#ifndef MALLOC_ZERO_WORKS
  /* Some libc mallocs don't work when size==0. Override them. */
  if (size==0) {
    size=1;
  }
#endif /* !defined(MALLOC_ZERO_WORKS) */

  result = raw_malloc(size);

  if (PREDICT_UNLIKELY(result == NULL)) {
    /* LCOV_EXCL_START */
    /* If these functions die within a worker process, they won't call
     * spawn_exit, but that's ok, since the parent will run out of memory soon
     * anyway. */
    raw_assert_unreached_msg("Out of memory on malloc(). Dying.");
    /* LCOV_EXCL_STOP */
  }
  return result;
}
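
A minimal usage sketch (assuming the conventional tor_malloc()/tor_free() wrapper macros around tor_malloc_(); the wrapper names are an assumption here, not shown above):

/* Sketch: the tor_malloc()/tor_free() wrappers are assumed for illustration. */
char *buf = tor_malloc(64);   /* never NULL; the process dies on OOM instead */
memset(buf, 0, 64);
tor_free(buf);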
Example No. 3
/** Sort the used chunks in <b>pool</b> into descending order of fullness,
 * so that we preferentially fill up mostly full chunks before we make
 * nearly empty chunks less nearly empty. */
static void
mp_pool_sort_used_chunks(mp_pool_t *pool)
{
  int i, n=0, inverted=0;
  mp_chunk_t **chunks, *chunk;
  for (chunk = pool->used_chunks; chunk; chunk = chunk->next) {
    ++n;
    if (chunk->next && chunk->next->n_allocated > chunk->n_allocated)
      ++inverted;
  }
  if (!inverted)
    return;
  //printf("Sort %d/%d\n",inverted,n);
  chunks = ALLOC(sizeof(mp_chunk_t *)*n);
#ifdef ALLOC_CAN_RETURN_NULL
  if (PREDICT_UNLIKELY(!chunks)) return;
#endif
  for (i=0,chunk = pool->used_chunks; chunk; chunk = chunk->next)
    chunks[i++] = chunk;
  qsort(chunks, n, sizeof(mp_chunk_t *), mp_pool_sort_used_chunks_helper);
  pool->used_chunks = chunks[0];
  chunks[0]->prev = NULL;
  for (i=1;i<n;++i) {
    chunks[i-1]->next = chunks[i];
    chunks[i]->prev = chunks[i-1];
  }
  chunks[n-1]->next = NULL;
  FREE(chunks);
  mp_pool_assert_ok(pool);
}
Example No. 4
static inline const ed25519_impl_t *
get_ed_impl(void)
{
  if (PREDICT_UNLIKELY(ed25519_impl == NULL)) {
    pick_ed25519_impl();
  }
  return ed25519_impl;
}
Example No. 5
/** Return a pointer to the microdescriptor cache, loading it if necessary. */
microdesc_cache_t *
get_microdesc_cache(void)
{
  microdesc_cache_t *cache = get_microdesc_cache_noload();
  if (PREDICT_UNLIKELY(cache->is_loaded == 0)) {
    microdesc_cache_reload(cache);
  }
  return cache;
}
Example No. 6
/** Create an empty nodelist if we haven't done so already. */
static void
init_nodelist(void)
{
  if (PREDICT_UNLIKELY(the_nodelist == NULL)) {
    the_nodelist = tor_malloc_zero(sizeof(nodelist_t));
    HT_INIT(nodelist_map, &the_nodelist->nodes_by_id);
    the_nodelist->nodes = smartlist_new();
  }
}
Example No. 7
/** As node_get_by_id, but returns a non-const pointer */
node_t *
node_get_mutable_by_id(const char *identity_digest)
{
  node_t search, *node;
  if (PREDICT_UNLIKELY(the_nodelist == NULL))
    return NULL;

  memcpy(&search.identity, identity_digest, DIGEST_LEN);
  node = HT_FIND(nodelist_map, &the_nodelist->nodes_by_id, &search);
  return node;
}
Example No. 8
/** Return a pointer to the microdescriptor cache, creating (but not loading)
 * it if necessary. */
static microdesc_cache_t *
get_microdesc_cache_noload(void)
{
  if (PREDICT_UNLIKELY(the_microdesc_cache==NULL)) {
    microdesc_cache_t *cache = tor_malloc_zero(sizeof(*cache));
    HT_INIT(microdesc_map, &cache->map);
    cache->cache_fname = get_datadir_fname("cached-microdescs");
    cache->journal_fname = get_datadir_fname("cached-microdescs.new");
    the_microdesc_cache = cache;
  }
  return the_microdesc_cache;
}
Example No. 9
/** Find the first token in <b>s</b> whose keyword is <b>keyword</b>; fail
 * with an assert if no such keyword is found.
 */
directory_token_t *
find_by_keyword_(smartlist_t *s, directory_keyword keyword,
                 const char *keyword_as_string)
{
  directory_token_t *tok = find_opt_by_keyword(s, keyword);
  if (PREDICT_UNLIKELY(!tok)) {
    log_err(LD_BUG, "Missing %s [%d] in directory object that should have "
         "been validated. Internal error.", keyword_as_string, (int)keyword);
    tor_assert(tok);
  }
  return tok;
}
Example No. 10
/**
 * Return a per-thread fast RNG, initializing it if necessary.
 *
 * You do not need to free this yourself.
 *
 * It is NOT safe to share this value across threads.
 **/
crypto_fast_rng_t *
get_thread_fast_rng(void)
{
  crypto_fast_rng_t *rng = tor_threadlocal_get(&thread_rng);

  if (PREDICT_UNLIKELY(rng == NULL)) {
    rng = crypto_fast_rng_new();
    tor_threadlocal_set(&thread_rng, rng);
  }

  return rng;
}
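
A usage sketch that combines this accessor with crypto_fast_rng_getbytes() from Example No. 13 (illustrative only):

uint8_t nonce[16];
/* The RNG is per-thread, so no locking is needed and nothing is freed here. */
crypto_fast_rng_getbytes(get_thread_fast_rng(), nonce, sizeof(nonce));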
Example No. 11
/** Return a newly allocated copy of the NUL-terminated string s. On
 * error, log and terminate.  (Like strdup(s), but never returns
 * NULL.)
 */
char *
tor_strdup_(const char *s)
{
  char *duplicate;
  raw_assert(s);

  duplicate = raw_strdup(s);

  if (PREDICT_UNLIKELY(duplicate == NULL)) {
    /* LCOV_EXCL_START */
    raw_assert_unreached_msg("Out of memory on strdup(). Dying.");
    /* LCOV_EXCL_STOP */
  }
  return duplicate;
}
Example No. 12
/** Return an estimate of how many microseconds we will need for a single
 * cpuworker to process <b>n_requests</b> onionskins of type
 * <b>onionskin_type</b>. */
uint64_t
estimated_usec_for_onionskins(uint32_t n_requests, uint16_t onionskin_type)
{
  if (onionskin_type > MAX_ONION_HANDSHAKE_TYPE) /* should be impossible */
    return 1000 * (uint64_t)n_requests;
  if (PREDICT_UNLIKELY(onionskins_n_processed[onionskin_type] < 100)) {
    /* Until we have 100 data points, just assume everything takes 1 msec. */
    return 1000 * (uint64_t)n_requests;
  } else {
    /* This can't overflow: we'll never have more than 500000 onionskins
     * measured in onionskin_usec_internal, and they won't take anything near
     * 1 sec each, and we won't have anything like 1 million queued
     * onionskins.  But that's 5e5 * 1e6 * 1e6, which is still less than
     * UINT64_MAX. */
    return (onionskins_usec_internal[onionskin_type] * n_requests) /
      onionskins_n_processed[onionskin_type];
  }
}
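
As a worked example (numbers illustrative): if 250 onionskins of a given type have been measured at a total of 2,000,000 usec, then 10 new requests are estimated at 2,000,000 * 10 / 250 = 80,000 usec; with fewer than 100 data points the function instead falls back to the flat 1000 usec (1 msec) per request.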
Example No. 13
/**
 * Extract <b>n</b> bytes from <b>rng</b> into the buffer at <b>out</b>.
 **/
void
crypto_fast_rng_getbytes(crypto_fast_rng_t *rng, uint8_t *out, size_t n)
{
  if (PREDICT_UNLIKELY(n > BUFLEN)) {
    /* The user has asked for a lot of output; generate it from a stream
     * cipher seeded by the PRNG rather than by pulling it out of the PRNG
     * directly.
     */
    uint8_t seed[SEED_LEN];
    crypto_fast_rng_getbytes_impl(rng, seed, SEED_LEN);
    crypto_cipher_t *c = cipher_from_seed(seed);
    memset(out, 0, n);
    crypto_cipher_crypt_inplace(c, (char*)out, n);
    crypto_cipher_free(c);
    memwipe(seed, 0, sizeof(seed));
    return;
  }

  crypto_fast_rng_getbytes_impl(rng, out, n);
}
Example No. 14
/** Make sure that <b>sl</b> can hold at least <b>size</b> entries. */
static INLINE void
smartlist_ensure_capacity(smartlist_t *sl, int size)
{
#if SIZEOF_SIZE_T > SIZEOF_INT
#define MAX_CAPACITY (INT_MAX)
#else
#define MAX_CAPACITY (int)((SIZE_MAX / (sizeof(void*))))
#endif
  if (size > sl->capacity) {
    int higher = sl->capacity;
    if (PREDICT_UNLIKELY(size > MAX_CAPACITY/2)) {
      tor_assert(size <= MAX_CAPACITY);
      higher = MAX_CAPACITY;
    } else {
      while (size > higher)
        higher *= 2;
    }
    sl->capacity = higher;
    sl->list = tor_realloc(sl->list, sizeof(void*)*((size_t)sl->capacity));
  }
}
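
A sketch of how a hypothetical append helper might use this function (assuming the smartlist also tracks a num_used count, which is not shown above):

/* Illustrative append: ensure room for one more entry, then store it. */
static inline void
smartlist_append_sketch(smartlist_t *sl, void *element)
{
  smartlist_ensure_capacity(sl, sl->num_used + 1);
  sl->list[sl->num_used++] = element;
}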
Example No. 15
/** Make sure that <b>sl</b> can hold at least <b>size</b> entries. */
static inline void
smartlist_ensure_capacity(smartlist_t *sl, int size)
{
/*#if SIZEOF_SIZE_T > SIZEOF_INT*/
#define MAX_CAPACITY (INT_MAX)
/*#else*/
/*#define MAX_CAPACITY (uint64_t)((SIZE_MAX / (sizeof(void*))))*/
/*#endif*/
  if (size > sl->capacity) {
    int higher = sl->capacity;
    if (PREDICT_UNLIKELY((uint64_t)size > MAX_CAPACITY/2)) {
      assert(size <= MAX_CAPACITY);
      higher = MAX_CAPACITY;
    } else {
      while (size > higher)
        higher *= 2;
    }
    sl->capacity = higher;
    sl->list = (void **) realloc(sl->list, sizeof(void*)*((size_t)sl->capacity));
  }
}
Example No. 16
STATIC int
curve25519_basepoint_impl(uint8_t *output, const uint8_t *secret)
{
  int r = 0;
  if (PREDICT_UNLIKELY(curve25519_use_ed == -1)) {
    pick_curve25519_basepoint_impl();
  }

  /* TODO: Someone should benchmark curved25519_scalarmult_basepoint versus
   * an optimized NaCl build to see which should be used when compiled with
   * NaCl available.  I suspect the ed25519 optimization always wins.
   */
  if (PREDICT_LIKELY(curve25519_use_ed == 1)) {
    curved25519_scalarmult_basepoint_donna(output, secret);
    r = 0;
  } else {
    static const uint8_t basepoint[32] = {9};
    r = curve25519_impl(output, secret, basepoint);
  }
  return r;
}
Example No. 17
/** Change the size of the memory block pointed to by <b>ptr</b> to be
 * <b>size</b> bytes long; return the new memory block.  On error, log and
 * terminate. (Like realloc(ptr,size), but never returns NULL.)
 */
void *
tor_realloc_(void *ptr, size_t size)
{
  void *result;

  raw_assert(size < SIZE_T_CEILING);

#ifndef MALLOC_ZERO_WORKS
  /* Some libc mallocs don't work when size==0. Override them. */
  if (size==0) {
    size=1;
  }
#endif /* !defined(MALLOC_ZERO_WORKS) */

  result = raw_realloc(ptr, size);

  if (PREDICT_UNLIKELY(result == NULL)) {
    /* LCOV_EXCL_START */
    raw_assert_unreached_msg("Out of memory on realloc(). Dying.");
    /* LCOV_EXCL_STOP */
  }
  return result;
}
Example No. 18
/** Return a newly allocated item from <b>pool</b>. */
void *
mp_pool_get(mp_pool_t *pool)
{
  mp_chunk_t *chunk;
  mp_allocated_t *allocated;

  if (PREDICT_LIKELY(pool->used_chunks != NULL)) {
    /* Common case: there is some chunk that is neither full nor empty.  Use
     * that one. (We can't use the full ones, obviously, and we should fill
     * up the used ones before we start on any empty ones.) */
    chunk = pool->used_chunks;

  } else if (pool->empty_chunks) {
    /* We have no used chunks, but we have an empty chunk that we haven't
     * freed yet: use that.  (We pull from the front of the list, which should
     * get us the most recently emptied chunk.) */
    chunk = pool->empty_chunks;

    /* Remove the chunk from the empty list. */
    pool->empty_chunks = chunk->next;
    if (chunk->next)
      chunk->next->prev = NULL;

    /* Put the chunk on the 'used' list. */
    add_newly_used_chunk_to_used_list(pool, chunk);

    ASSERT(!chunk->prev);
    --pool->n_empty_chunks;
    if (pool->n_empty_chunks < pool->min_empty_chunks)
      pool->min_empty_chunks = pool->n_empty_chunks;
  } else {
    /* We have no used or empty chunks: allocate a new chunk. */
    chunk = mp_chunk_new(pool);
    CHECK_ALLOC(chunk);

    /* Add the new chunk to the used list. */
    add_newly_used_chunk_to_used_list(pool, chunk);
  }

  ASSERT(chunk->n_allocated < chunk->capacity);

  if (chunk->first_free) {
    /* If there's anything on the chunk's freelist, unlink it and use it. */
    allocated = chunk->first_free;
    chunk->first_free = allocated->u.next_free;
    allocated->u.next_free = NULL; /* For debugging; not really needed. */
    ASSERT(allocated->in_chunk == chunk);
  } else {
    /* Otherwise, the chunk had better have some free space left on it. */
    ASSERT(chunk->next_mem + pool->item_alloc_size <=
           chunk->mem + chunk->mem_size);

    /* Good, it did.  Let's carve off a bit of that free space, and use
     * that. */
    allocated = (void*)chunk->next_mem;
    chunk->next_mem += pool->item_alloc_size;
    allocated->in_chunk = chunk;
    allocated->u.next_free = NULL; /* For debugging; not really needed. */
  }

  ++chunk->n_allocated;
#ifdef MEMPOOL_STATS
  ++pool->total_items_allocated;
#endif

  if (PREDICT_UNLIKELY(chunk->n_allocated == chunk->capacity)) {
    /* This chunk just became full. */
    ASSERT(chunk == pool->used_chunks);
    ASSERT(chunk->prev == NULL);

    /* Take it off the used list. */
    pool->used_chunks = chunk->next;
    if (chunk->next)
      chunk->next->prev = NULL;

    /* Put it on the full list. */
    chunk->next = pool->full_chunks;
    if (chunk->next)
      chunk->next->prev = chunk;
    pool->full_chunks = chunk;
  }
  /* And return the memory portion of the mp_allocated_t. */
  return A2M(allocated);
}