Example #1
File: weak-set.c  Project: AtomicKity/guile
static int
is_acceptable_size_index (scm_t_weak_set *set, int size_index)
{
  int computed = compute_size_index (set);

  if (size_index == computed)
    /* We were going to grow or shrink, and allocating the new vector
       didn't change the target size.  */
    return 1;

  if (size_index == computed + 1)
    {
      /* We were going to enlarge the set, but allocating the new
         vector finalized some objects, making an enlargement
         unnecessary.  It might still be a good idea to use the larger
         set, though.  (This branch also gets hit if, while allocating
         the vector, some other thread was actively removing items from
         the set.  That is less likely, though.)  */
      unsigned long new_lower = hashset_size[size_index] / 5;

      return set->size > new_lower;
    }

  if (size_index == computed - 1)
    {
      /* We were going to shrink the set, but when we dropped the lock
         to allocate the new vector, some other thread added elements to
         the set.  */
      return 0;
    }

  /* The computed size differs from our newly allocated size by more
     than one size index -- recalculate.  */
  return 0;
}
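The helper compute_size_index is not shown in these listings. The standalone sketch below (pick_size_index and the sizes table are hypothetical stand-ins, not Guile's actual code) illustrates the decision that compute_size_index and is_acceptable_size_index encode: shrink when occupancy falls below size / 5, grow when it exceeds 9 * size / 10, otherwise keep the current size index.

#include <stdio.h>

/* Hypothetical table of prime-ish sizes, smallest to largest. */
static const unsigned long sizes[] = { 31, 61, 113, 223, 443 };

static int
pick_size_index (int current, unsigned long n_items)
{
  unsigned long size = sizes[current];

  if (n_items > 9 * size / 10
      && current + 1 < (int) (sizeof sizes / sizeof sizes[0]))
    return current + 1;                 /* too full: grow */
  if (n_items < size / 5 && current > 0)
    return current - 1;                 /* too empty: shrink */
  return current;                       /* occupancy acceptable: keep */
}

int
main (void)
{
  printf ("%d\n", pick_size_index (2, 110));  /* 110 > 9*113/10 = 101: grow to 3 */
  printf ("%d\n", pick_size_index (2, 20));   /* 20 < 113/5 = 22: shrink to 1 */
  printf ("%d\n", pick_size_index (2, 60));   /* within bounds: stay at 2 */
  return 0;
}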
Example #2
File: weak-set.c  Project: AtomicKity/guile
static void
resize_set (scm_t_weak_set *set)
{
  scm_t_weak_entry *old_entries, *new_entries;
  int new_size_index;
  unsigned long old_size, new_size, old_k;

  do 
    {
      new_size_index = compute_size_index (set);
      if (new_size_index == set->size_index)
        return;
      new_size = hashset_size[new_size_index];
      new_entries = scm_gc_malloc_pointerless (new_size * sizeof(scm_t_weak_entry),
                                               "weak set");
    }
  while (!is_acceptable_size_index (set, new_size_index));

  old_entries = set->entries;
  old_size = set->size;

  memset (new_entries, 0, new_size * sizeof(scm_t_weak_entry));

  set->size_index = new_size_index;
  set->size = new_size;
  if (new_size_index <= set->min_size_index)
    set->lower = 0;
  else
    set->lower = new_size / 5;
  set->upper = 9 * new_size / 10;
  set->n_items = 0;
  set->entries = new_entries;

  for (old_k = 0; old_k < old_size; old_k++)
    {
      scm_t_weak_entry copy;
      unsigned long new_k, distance;

      if (!old_entries[old_k].hash)
        continue;
      
      copy_weak_entry (&old_entries[old_k], &copy);
      
      if (!copy.key)
        continue;
      
      new_k = hash_to_index (copy.hash, new_size);

      for (distance = 0; ; distance++, new_k = (new_k + 1) % new_size)
        {
          unsigned long other_hash = new_entries[new_k].hash;

          if (!other_hash)
            /* Found an empty entry. */
            break;

          /* Displace the entry if our distance is less, otherwise keep
             looking. */
          if (entry_distance (other_hash, new_k, new_size) < distance)
            {
              rob_from_rich (set, new_k);
              break;
            }
        }
          
      set->n_items++;
      new_entries[new_k].hash = copy.hash;
      new_entries[new_k].key = copy.key;

      if (SCM_HEAP_OBJECT_P (SCM_PACK (copy.key)))
        SCM_I_REGISTER_DISAPPEARING_LINK ((void **) &new_entries[new_k].key,
                                          (void *) new_entries[new_k].key);
    }
}
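The insertion loop above is Robin Hood hashing: a new entry displaces a resident entry whose probe distance is shorter than its own (rob_from_rich then shifts the rest of the run along). The standalone sketch below shows the probe-distance calculation that drives the comparison; probe_distance is a hypothetical stand-in for Guile's entry_distance, which is not shown in these listings.

#include <stdio.h>

/* Distance, with wrap-around, between the slot an entry occupies (k)
   and the slot its hash ideally maps to in a table of `size' slots. */
static unsigned long
probe_distance (unsigned long hash, unsigned long k, unsigned long size)
{
  unsigned long ideal = hash % size;

  return (k >= ideal) ? (k - ideal) : (size - ideal + k);
}

int
main (void)
{
  /* An entry hashing to slot 7 but stored in slot 1 of an 8-slot table
     has been displaced 2 slots (7 -> 0 -> 1). */
  printf ("%lu\n", probe_distance (7, 1, 8));  /* prints 2 */
  return 0;
}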
Example #3
File: weak-table.c  Project: Card1nal/guile
static void
resize_table (scm_t_weak_table *table)
{
  scm_t_weak_entry *old_entries, *new_entries;
  int new_size_index;
  unsigned long old_size, new_size, old_k;

  do 
    {
      new_size_index = compute_size_index (table);
      if (new_size_index == table->size_index)
        return;
      new_size = hashtable_size[new_size_index];
      /* Allocating memory might cause finalizers to run, which could
         run anything, so drop our lock to avoid deadlocks.  */
      scm_i_pthread_mutex_unlock (&table->lock);
      new_entries = allocate_entries (new_size, table->kind);
      scm_i_pthread_mutex_lock (&table->lock);
    }
  while (!is_acceptable_size_index (table, new_size_index));

  old_entries = table->entries;
  old_size = table->size;
  
  table->size_index = new_size_index;
  table->size = new_size;
  if (new_size_index <= table->min_size_index)
    table->lower = 0;
  else
    table->lower = new_size / 5;
  table->upper = 9 * new_size / 10;
  table->n_items = 0;
  table->entries = new_entries;

  for (old_k = 0; old_k < old_size; old_k++)
    {
      scm_t_weak_entry copy;
      unsigned long new_k, distance;

      if (!old_entries[old_k].hash)
        continue;
      
      copy_weak_entry (&old_entries[old_k], &copy);
      
      if (!copy.key || !copy.value)
        continue;
      
      new_k = hash_to_index (copy.hash, new_size);

      for (distance = 0; ; distance++, new_k = (new_k + 1) % new_size)
        {
          unsigned long other_hash = new_entries[new_k].hash;

          if (!other_hash)
            /* Found an empty entry. */
            break;

          /* Displace the entry if our distance is less, otherwise keep
             looking. */
          if (entry_distance (other_hash, new_k, new_size) < distance)
            {
              rob_from_rich (table, new_k);
              break;
            }
        }
          
      table->n_items++;
      new_entries[new_k].hash = copy.hash;
      new_entries[new_k].key = copy.key;
      new_entries[new_k].value = copy.value;

      register_disappearing_links (&new_entries[new_k],
                                   SCM_PACK (copy.key), SCM_PACK (copy.value),
                                   table->kind);
    }
}
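The do/while loop above drops table->lock around the allocation and then re-checks the computed size with is_acceptable_size_index, because finalizers run during allocation can mutate the table. A minimal sketch of that unlock-allocate-relock pattern, using plain pthreads and hypothetical names rather than Guile's scm_i_pthread wrappers:

#include <pthread.h>
#include <stdlib.h>

struct table { pthread_mutex_t lock; void **entries; size_t size; };

/* Allocate a new entries array while `t->lock' is held by the caller. */
static void **
alloc_entries_unlocked (struct table *t, size_t new_size)
{
  void **entries;

  /* Drop the lock: allocation may block or run arbitrary callbacks. */
  pthread_mutex_unlock (&t->lock);
  entries = calloc (new_size, sizeof *entries);
  /* Re-acquire before touching the table again; the caller must then
     re-validate its size decision, since other threads may have run. */
  pthread_mutex_lock (&t->lock);

  return entries;
}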