/* __ompc_expand_task_pool_default
 * Grow an existing task pool to cover new_team_size threads. Each thread
 * owns two queues (tied + untied), so expansion reallocates the per-thread
 * queue array and initializes only the queues of the newly added threads.
 */
omp_task_pool_t * __ompc_expand_task_pool_default(omp_task_pool_t *pool,
                                                      int new_team_size)
{
  omp_task_queue_level_t *per_thread;
  int old_team_size;
  int thread_id;

  /* No pool yet: nothing to expand, build one from scratch. */
  if (pool == NULL)
    return __ompc_create_task_pool(new_team_size);

  old_team_size = pool->team_size;
  per_thread = &pool->level[PER_THREAD];

  /* Two queues (tied and untied) per thread in the team. */
  per_thread->num_queues = new_team_size * 2;
  per_thread->task_queue = aligned_realloc(
      (void *) per_thread->task_queue,
      sizeof(omp_queue_t) * old_team_size * 2,
      sizeof(omp_queue_t) * new_team_size * 2,
      CACHE_LINE_SIZE);
  Is_True(per_thread->task_queue != NULL,
      ("__ompc_expand_task_pool: couldn't expand the task pool"));

  /* Existing queues were preserved by the realloc; initialize only the
   * queues belonging to the newly added threads. */
  for (thread_id = old_team_size; thread_id < new_team_size; thread_id++) {
    __ompc_queue_init(&per_thread->task_queue[TIED_IDX(thread_id)],
                      __omp_task_queue_num_slots);
    __ompc_queue_init(&per_thread->task_queue[UNTIED_IDX(thread_id)],
                      __omp_task_queue_num_slots);
  }

  /* NOTE(review): pool->team_size is not updated here; presumably the
   * caller keeps it in sync with new_team_size — verify. */
  return pool;
}
/* ---- Example #2 ---- */
// Initialize asynchronous-I/O state for the requested mode.
// In buffered mode no staging buffers are needed; in unbuffered (direct)
// mode three sector-aligned staging buffers are (re)allocated, each large
// enough to hold the largest request plus alignment slack.
// Always returns true; allocation failure aborts with a diagnostic.
bool aio_init(size_t largest_request, bool buffered_io)
{
    buffering = buffered_io;
    if (buffering)
        return true;

    // Over-allocate by one sector minus one byte so any request of
    // largest_request bytes can be placed sector-aligned inside the buffer.
    const size_t allocate = largest_request + AIO_MAX_SECTOR_SIZE - 1;
    largest_request_pub = largest_request;

    read_buffer  = (char *)aligned_realloc(read_buffer, allocate, AIO_MAX_SECTOR_SIZE);
    io_commit    = (char *)aligned_realloc(io_commit, allocate, AIO_MAX_SECTOR_SIZE);
    write_buffer = (char *)aligned_realloc(write_buffer, allocate, AIO_MAX_SECTOR_SIZE);

    if (!read_buffer || !io_commit || !write_buffer)
    {
        abort("Error allocating memory - decrease -T and -K flags");
    }
    return true;
}
/* Resize a queue's slot array to new_num_slots, preserving its contents.
 * The queue is circular: if the live region wraps around the end of the
 * old array (tail index behind head index), the wrapped prefix is copied
 * past the old region so the occupied span becomes contiguous again.
 * NOTE(review): the wrap-fixup memcpy assumes
 * new_num_slots >= old_num_slots + old_tail_index + 1 — confirm callers
 * always grow by at least that much. */
static inline void
__ompc_dyn_array_resize(omp_queue_t *q, int new_num_slots)
{
  unsigned int old_tail_index = q->tail_index;
  unsigned int head_index = q->head_index;
  int old_num_slots = q->num_slots;

  /* Reallocate the backing store; head/tail base pointers are rebased to
   * the (possibly moved) new allocation. */
  q->head = q->tail = q->slots = aligned_realloc((void *) q->slots,
      sizeof(omp_queue_slot_t) * old_num_slots,
      sizeof(omp_queue_slot_t) * new_num_slots,
      CACHE_LINE_SIZE);
  Is_True(q->slots != NULL, ("couldn't resize the queue"));

  /* Wrapped case: slots [0 .. old_tail_index] logically follow the old
   * end; move them to just past the old region and advance tail_index so
   * indices are monotone again. */
  if (old_tail_index < head_index) {
    memcpy(&q->slots[old_num_slots], &q->slots[0],
           (old_tail_index+1)*sizeof(omp_queue_slot_t));
    q->tail_index = old_tail_index + old_num_slots;
  }

  q->num_slots = new_num_slots;
}
/* ---- Example #4 ---- */
  /// @brief Reallocate @p ptr to @p size bytes with the given @p alignment.
  /// Passing size == 0 frees the block and returns 0; returns 0 on failure.
  /// Three implementations are selected at preprocess time: MSVC's aligned
  /// CRT builtins, a POSIX/glibc realloc-based path, or a generic fallback.
  inline void* aligned_realloc(void* ptr, std::size_t size, std::size_t alignment)
  {
    // Do we want to use built-ins special aligned free/alloc ?
    #if defined( _MSC_VER ) && !defined(BOOST_SIMD_MEMORY_NO_BUILTINS)

    // The alignment used at allocation time is stashed in a std::size_t
    // header slot just before the user pointer (written below via
    // *fresh_ptr++ = alignment).
    // NOTE(review): this is computed even when ptr is null; it is only
    // dereferenced under an `if(ptr ...)` guard, but forming the
    // out-of-bounds pointer from null is technically UB.
    std::size_t* const oldptr = static_cast<std::size_t*>(ptr)-1;

    // Shrinking to zero frees the block and returns null.
    if(ptr && !size)
    {
      ::_aligned_free(oldptr);
      return 0;
    }

    // Fast path: same alignment as recorded in the header — let the CRT
    // reallocate in place, keeping the hidden header slot at offset 0.
    if(ptr && alignment == *oldptr)
    {
      std::size_t* fresh_ptr = static_cast<std::size_t*>(::_aligned_offset_realloc(oldptr, size+sizeof(std::size_t), alignment, sizeof(std::size_t)));
      if(!fresh_ptr)
        return 0;
      return fresh_ptr+1;
    }

    // Alignment changed (or fresh allocation): allocate a new block with a
    // leading header slot to record the alignment.
    std::size_t* fresh_ptr = static_cast<std::size_t*>(::_aligned_offset_malloc(size+sizeof(std::size_t), alignment, sizeof(std::size_t)));
    if(!fresh_ptr)
      return 0;

    *fresh_ptr++ = alignment;   // record alignment, then step to user memory

    if(ptr)
    {
      // Copy the old contents (truncated to the smaller of old/new sizes)
      // into the new block, then release the old one.
      std::size_t const oldSize( ::_aligned_msize( oldptr, *oldptr, sizeof(std::size_t) ) );
      std::memcpy( fresh_ptr, ptr, std::min( size, oldSize ) );
      ::_aligned_free(oldptr);
    }
    return fresh_ptr;

    #elif (     defined( BOOST_SIMD_CONFIG_SUPPORT_POSIX_MEMALIGN )                                \
            ||  (defined( _GNU_SOURCE ) && defined(__linux) && !defined( __ANDROID__ ))            \
          )                                                                                        \
        && !defined(BOOST_SIMD_MEMORY_NO_BUILTINS)

    // Resizing to 0 free the pointer data and return
    if(size == 0)
    {
      ::free(ptr);
      return 0;
    }

    // Query the usable size of the existing block via the platform's malloc
    // introspection hook. NOTE(review): behavior for a null ptr is
    // platform-dependent (glibc returns 0) — confirm for the Android and
    // Apple variants.
    #if defined(__ANDROID__)
    // https://groups.google.com/forum/?fromgroups=#!topic/android-ndk/VCEUpMfSh_o
    std::size_t const oldSize( ::dlmalloc_usable_size( ptr ) );
    #elif defined(__APPLE__)
    std::size_t const oldSize( ::malloc_size( ptr ) );
    #else
    std::size_t const oldSize( ::malloc_usable_size( ptr ) );
    #endif

    if( simd::is_aligned(ptr, alignment) )
    {
      // NOTE(review): when size > oldSize the unsigned subtraction wraps to
      // a huge value, so growth always falls through to the realloc below;
      // only a small shrink (within the threshold) reuses the block as-is.
      if( ( oldSize - size ) < BOOST_SIMD_REALLOC_SHRINK_THRESHOLD )
      {
        return ptr;
      }
      else
      {
        // FIXME: realloc will free the old memory if it moves.
        // if it moves to a non-aligned memory segment and the subsequent
        // memory allocation fails, we break the invariant
        ptr = ::realloc(ptr, size);
        if( simd::is_aligned(ptr, alignment) )
          return ptr;
      }
    }

    // Slow path: obtain a fresh aligned block and move the data across.
    // NOTE(review): if the realloc above moved the data to a misaligned
    // block, ptr now refers to that moved block while oldSize still
    // describes the original allocation — verify min(size, oldSize) cannot
    // over-read here.
    void* const fresh_ptr = aligned_malloc(size, alignment);
    if(!fresh_ptr)
      return 0;

    std::memcpy(fresh_ptr, ptr, std::min(size, oldSize));
    ::free(ptr);

    return fresh_ptr;

    #else

    // No platform builtin available: delegate to the generic overload that
    // is driven by a user-supplied reallocation callback.
    return aligned_realloc(ptr, size, alignment, custom_realloc_fn);

    #endif
  }