int allocate(int size) {

    /* Every block carries a one-word size header, so a request that,
       together with its header, exceeds 512 words cannot be served. */
    if(size + 1 > 512) {
        return -1;
    }

    int temp, ceil_size, req_free_list, req_free_list_size, start_addr;

    /* Round the request plus header up to the block size actually handed
       out, then locate the free list that tracks blocks of that size. */
    ceil_size = ceil2(size + 1);
    req_free_list = get_free_list(ceil_size);
    req_free_list_size = find_free_list_size(ceil_size);

    /* Free list is empty: split a larger block to populate it. */
    if(heap[req_free_list] == -1) {
        temp = split_rec(req_free_list, req_free_list_size, ceil_size, ceil_size);
        if(temp == 0) {
            return -1;      /* no larger block could be split either */
        }
    }

    /* Walk past the occupied slots and pop the last one. */
    temp = req_free_list;
    while(temp < req_free_list + req_free_list_size && heap[temp] != -1) {
        temp = temp + 1;
    }
    temp--;

    start_addr = heap[temp];
    heap[temp] = -1;           /* the slot is free again */
    heap[start_addr] = size;   /* record the requested size in the header */
    return start_addr;
}
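
ceil2 is not shown in this listing. A minimal sketch, assuming it simply rounds its argument up to the next power of two (the usual block granularity in a buddy allocator); the code below is an illustration, not the original project's implementation:

static int ceil2(int n) {
    int p = 1;
    while (p < n)
        p <<= 1;          /* double until p >= n */
    return p;
}
/* Example: ceil2(100 + 1) == 128, so allocate(100) would hand out a
   128-word block whose first word stores the requested size. */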

void merge_buddies(int start_addr, int ceil_size) {
    /* Blocks never exceed 512 words, so there is nothing larger to merge into. */
    if(ceil_size > 512) {
        return;
    }
    int buddy_free_index = find_free_buddy(start_addr, ceil_size);
    int free_list = get_free_list(ceil_size);
    int free_list_size = find_free_list_size(ceil_size);
    if(buddy_free_index == -1) {
        /* Buddy is still in use: just put this block back on its free list. */
        add_to_list(free_list, start_addr);
        return;
    }
    /* Buddy is free as well: take it off the current free list, merge the
       pair into one block of twice the size, then try to merge that in turn. */
    int new_start_addr = minimum(heap[buddy_free_index], start_addr);
    remove_from_list(free_list, free_list_size, heap[buddy_free_index]);
    merge_buddies(new_start_addr, 2 * ceil_size);
}
Example No. 3
static mstate
internal_function
arena_get2 (mstate a_tsd, size_t size, mstate avoid_arena)
{
  mstate a;

  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n = 2; /*__get_nprocs (); */

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger or equal to arena_test
         narenas_limit is 0.  There is no possibility for narenas to
         be too big for the test to always fail since there is not
         enough address space to create that many arenas.  */
      if (__glibc_unlikely (n <= narenas_limit - 1))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__glibc_unlikely (a == NULL))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
  return a;
}
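
The NB comment above leans on unsigned wrap-around: while narenas_limit is still 0, narenas_limit - 1 evaluates to SIZE_MAX, so the n <= narenas_limit - 1 test cannot fail. A small stand-alone check of that arithmetic (illustration only, not glibc code):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int main(void)
{
  size_t narenas_limit = 0;
  size_t n = 1234;                 /* any plausible arena count */

  /* (size_t)0 - 1 wraps to SIZE_MAX, so an unset limit means "no limit". */
  assert(narenas_limit - 1 == SIZE_MAX);
  assert(n <= narenas_limit - 1);
  return 0;
}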
int find_free_buddy(int start_addr, int ceil_size) {
    int buddy;
    /* A block aligned to twice its own size is the left half of its pair,
       so its buddy sits just above it; otherwise the buddy sits just below. */
    if(start_addr % (2 * ceil_size) == 0) {
        buddy = start_addr + ceil_size;
    }
    else {
        buddy = start_addr - ceil_size;
    }
    /* Scan the free list for this block size; return the slot index that
       holds the buddy's address, or -1 if the buddy is not free. */
    int free_list = get_free_list(ceil_size);
    int free_list_size = find_free_list_size(ceil_size);
    int i = free_list;
    while(i < free_list + free_list_size) {
        if(heap[i] == buddy) {
            return i;
        }
        i++;
    }
    return -1;
}
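
find_free_buddy derives the buddy address purely from alignment: a block aligned to twice its own size is the left half of a pair, anything else is the right half. A worked instance (illustration only), assuming the same word-addressed heap as above:

/* ceil_size = 16:
     block at 32: 32 % 32 == 0  -> buddy = 32 + 16 = 48
     block at 48: 48 % 32 == 16 -> buddy = 48 - 16 = 32
   The two blocks name each other, which is what lets merge_buddies
   coalesce them into a single 32-word block at address 32. */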
Example No. 5
static mstate
internal_function
arena_get2(mstate a_tsd, size_t size, mstate avoid_arena)
{
  mstate a;

#ifdef PER_THREAD
  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
	{
	  if (mp_.arena_max != 0)
	    narenas_limit = mp_.arena_max;
	  else if (narenas > mp_.arena_test)
	    {
	      int n  = __get_nprocs ();

	      if (n >= 1)
		narenas_limit = NARENAS_FROM_NCORES (n);
	      else
		/* We have no information about the system.  Assume two
		   cores.  */
		narenas_limit = NARENAS_FROM_NCORES (2);
	    }
	}
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
	 very large number and that the underflow is OK.  If arena_max
	 is set the value of arena_test is irrelevant.  If arena_test
	 is set but narenas is not yet larger or equal to arena_test
	 narenas_limit is 0.  There is no possibility for narenas to
	 be too big for the test to always fail since there is not
	 enough address space to create that many arenas.  */
      if (__builtin_expect (n <= narenas_limit - 1, 0))
	{
	  if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
	    goto repeat;
	  a = _int_new_arena (size);
	  if (__builtin_expect (a == NULL, 0))
	    catomic_decrement (&narenas);
	}
      else
	a = reused_arena (avoid_arena);
    }
#else
  if(!a_tsd)
    a = a_tsd = &main_arena;
  else {
    a = a_tsd->next;
    if(!a) {
      /* This can only happen while initializing the new arena. */
      (void)mutex_lock(&main_arena.mutex);
      THREAD_STAT(++(main_arena.stat_lock_wait));
      return &main_arena;
    }
  }

  /* Check the global, circularly linked list for available arenas. */
  bool retried = false;
 repeat:
  do {
    if(!mutex_trylock(&a->mutex)) {
      if (retried)
	(void)mutex_unlock(&list_lock);
      THREAD_STAT(++(a->stat_lock_loop));
      tsd_setspecific(arena_key, (void *)a);
      return a;
    }
    a = a->next;
  } while(a != a_tsd);

  /* If not even the list_lock can be obtained, try again.  This can
     happen during `atfork', or for example on systems where thread
     creation makes it temporarily impossible to obtain _any_
     locks. */
  if(!retried && mutex_trylock(&list_lock)) {
    /* We will block to not run in a busy loop.  */
    (void)mutex_lock(&list_lock);

    /* Since we blocked there might be an arena available now.  */
    retried = true;
    a = a_tsd;
    goto repeat;
  }

  /* Nothing immediately available, so generate a new arena.  */
  a = _int_new_arena(size);
  (void)mutex_unlock(&list_lock);
#endif

  return a;
}
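
In the non-PER_THREAD branch above, the code first tries every arena mutex without blocking and only falls back to a blocking lock on list_lock when nothing could be acquired. Stripped of the arena bookkeeping, that "try first, then block" shape looks roughly like this with plain POSIX mutexes (a sketch, not glibc code):

#include <pthread.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void enter_arena_list(void)
{
  if (pthread_mutex_trylock(&list_lock) != 0)
    /* Could not get the lock immediately; block rather than spin, since a
       free arena may well appear while we sleep (cf. the retried flag above). */
    pthread_mutex_lock(&list_lock);

  /* ... scan the list of arenas here ... */

  pthread_mutex_unlock(&list_lock);
}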
Example No. 6
/*
Get an Ndb object.
Input:
hint_id: 0 = no hint, otherwise a hint of which Ndb object the thread
         used the last time.
a_catalog_name: catalog name to set on the returned Ndb object.
a_schema_name: NULL = don't check for a database-specific Ndb object,
               otherwise a hint of which database is preferred.
Output:
hint_id: Returns the id of the Ndb object returned.
Return value: Ndb object pointer, or NULL if no object could be obtained.
*/
Ndb*
NdbPool::get_ndb_object(Uint32 &hint_id,
                        const char* a_catalog_name,
                        const char* a_schema_name)
{
  Ndb* ret_ndb = NULL;
  Uint32 hash_entry = compute_hash(a_schema_name);
  NdbMutex_Lock(pool_mutex);
  while (1) {
    /*
    We start by checking if we can use the hinted Ndb object.
    */
    if ((ret_ndb = get_hint_ndb(hint_id, hash_entry)) != NULL) {
      break;
    }
    /*
    The hinted Ndb object was not free. We need to allocate another object.
    We start by checking for a free Ndb object connected to the same database.
    */
    if (a_schema_name && (ret_ndb = get_db_hash(hint_id,
                                                hash_entry,
                                                a_catalog_name,
                                                a_schema_name))) {
      break;
    }
    /*
    No Ndb object connected to the preferred database was found.
    We look for a free Ndb object in general.
    */
    if ((ret_ndb = get_free_list(hint_id, hash_entry)) != NULL) {
      break;
    }
    /*
    No free Ndb object was found. If we haven't allocated objects up until the
    maximum number yet then we can allocate a new Ndb object here.
    */
    if (m_no_of_objects < m_max_ndb_objects) {
      if (allocate_ndb(hint_id, a_catalog_name, a_schema_name)) {
        /* Keep the assignment outside assert() so it is not compiled away
           in NDEBUG builds. */
        ret_ndb = get_hint_ndb(hint_id, hash_entry);
        assert(ret_ndb != NULL);
        break;
      }
    }
    /*
    We need to wait until an Ndb object becomes
    available.
    */
    if ((ret_ndb = wait_free_ndb(hint_id)) != NULL) {
      break;
    }
    /*
    Not even after waiting were we able to get hold of an Ndb object. We 
    return NULL to indicate this problem.
    */
    ret_ndb = NULL;
    break;
  }
  NdbMutex_Unlock(pool_mutex);
  if (ret_ndb != NULL) {
    /*
    We need to set the catalog and schema name of the Ndb object before
    returning it to the caller.
    */
    ret_ndb->setCatalogName(a_catalog_name);
    ret_ndb->setSchemaName(a_schema_name);
  }
  return ret_ndb;
}
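
A hypothetical caller of the API documented above; the pool pointer, the catalog/schema strings and the surrounding setup are assumptions made only for illustration:

void run_with_pooled_ndb(NdbPool* pool)
{
  Uint32 hint_id = 0;   /* 0 = no hint on the first call */
  Ndb* ndb = pool->get_ndb_object(hint_id, "my_catalog", "my_schema");
  if (ndb != NULL) {
    /* The object already has the catalog and schema names set, so it can be
       used for transactions right away.  hint_id now identifies this object
       and can be passed again later to favour getting the same one back.
       How the object is eventually handed back to the pool is not shown
       in this listing. */
  }
}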