Example #1
0
  cudaError_t malloc(void** devPtr, size_t size, cudaStream_t stream)
  {
    std::lock_guard<std::mutex> lock(mutex);

    int device;
    cudaError_t err = cudaGetDevice(&device);
    if (err != cudaSuccess) {
      return err;
    }

    size = round_size(size);
    bool small = size <= kSmallAlloc;

    Block search_key(device, stream, size);
    // Small requests are served from the small-block pool, larger ones from the large-block pool.
    auto& free_blocks = small ? small_blocks : large_blocks;

    Block* block = NULL;
    Block* remaining = NULL;

    // Try to reuse a cached free block that fits this request on the same device and stream.
    auto it = free_blocks.lower_bound(&search_key);
    if (it != free_blocks.end() && (*it)->device == device && (*it)->stream == stream) {
      block = *it;
      free_blocks.erase(it);
    } else {
      void* ptr;
      size_t alloc_size = small ? kSmallAlloc : size;
      cudaError_t err = cuda_malloc_retry(device, &ptr, alloc_size);
      if (err != cudaSuccess) {
        return err;
      }
      block = new Block(device, stream, alloc_size, (char*)ptr);
    }

    // Split the block if the unused tail is large enough to be reused later.
    if (block->size - size >= (small ? kRoundSmall : kSmallAlloc + 1)) {
      remaining = block;

      block = new Block(device, stream, size, block->ptr);
      block->prev = remaining->prev;
      if (block->prev) {
        block->prev->next = block;
      }
      block->next = remaining;

      remaining->prev = block;
      remaining->ptr += size;
      remaining->size -= size;
      free_blocks.insert(remaining);
    }

    block->allocated = true;
    allocated_blocks[block->ptr] = block;

    *devPtr = (void*)block->ptr;
    return cudaSuccess;
  }
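Example #1 depends on round_size and the constants kSmallAlloc and kRoundSmall, which are not shown in the snippet. A minimal sketch of what such a helper could look like, assuming small requests are rounded to multiples of kRoundSmall and larger ones to a coarser granularity kRoundLarge (the name kRoundLarge and all numeric values below are assumptions, not taken from the snippet):

  // Assumed granularities; illustrative values, not from the snippet above.
  static const size_t kRoundSmall = 512;        // 512 B granularity for small requests
  static const size_t kRoundLarge = 131072;     // 128 KiB granularity for large requests
  static const size_t kSmallAlloc = 1048576;    // requests at or below 1 MiB use the small pool

  // Round a request up to the pool's allocation granularity.
  static size_t round_size(size_t size)
  {
    if (size < kRoundSmall) {
      return kRoundSmall;
    } else if (size < kSmallAlloc) {
      return kRoundSmall * ((size + kRoundSmall - 1) / kRoundSmall);
    } else {
      return kRoundLarge * ((size + kRoundLarge - 1) / kRoundLarge);
    }
  }

With rounding like this, the check "small = size <= kSmallAlloc" cleanly separates requests that are carved out of cached kSmallAlloc-sized blocks from requests that get a dedicated allocation of their own size.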
Example #2
0
static int parse_event(uint8_t *rptr, MSFilter **f, unsigned int *id, void **data, int *argsize){
	int evsize;
	int header_size = sizeof(MSEventHeader);

	if (((intptr_t)rptr % 4) != 0) ms_fatal("Unaligned access");
	*f = ((MSEventHeader *)rptr)->filter;
	*id = ((MSEventHeader *)rptr)->ev_id;

	/* the low byte of the event id encodes the size of the argument payload */
	*argsize = (*id) & 0xff;
	evsize = round_size((*argsize)) + header_size;
	*data = rptr + header_size;
	return evsize;
}
Example #3
0
_Py_hashtable_t *
_Py_hashtable_new_full(size_t data_size, size_t init_size,
                       _Py_hashtable_hash_func hash_func,
                       _Py_hashtable_compare_func compare_func,
                       _Py_hashtable_copy_data_func copy_data_func,
                       _Py_hashtable_free_data_func free_data_func,
                       _Py_hashtable_get_data_size_func get_data_size_func,
                       _Py_hashtable_allocator_t *allocator)
{
    _Py_hashtable_t *ht;
    size_t buckets_size;
    _Py_hashtable_allocator_t alloc;

    if (allocator == NULL) {
        alloc.malloc = PyMem_RawMalloc;
        alloc.free = PyMem_RawFree;
    }
    else
        alloc = *allocator;

    ht = (_Py_hashtable_t *)alloc.malloc(sizeof(_Py_hashtable_t));
    if (ht == NULL)
        return ht;

    ht->num_buckets = round_size(init_size);
    ht->entries = 0;
    ht->data_size = data_size;

    buckets_size = ht->num_buckets * sizeof(ht->buckets[0]);
    ht->buckets = alloc.malloc(buckets_size);
    if (ht->buckets == NULL) {
        alloc.free(ht);
        return NULL;
    }
    memset(ht->buckets, 0, buckets_size);

    ht->hash_func = hash_func;
    ht->compare_func = compare_func;
    ht->copy_data_func = copy_data_func;
    ht->free_data_func = free_data_func;
    ht->get_data_size_func = get_data_size_func;
    ht->alloc = alloc;
    return ht;
}
Example #4
0
static void
hashtable_rehash(_Py_hashtable_t *ht)
{
    size_t buckets_size, new_size, bucket;
    _Py_slist_t *old_buckets = NULL;
    size_t old_num_buckets;

    new_size = round_size((size_t)(ht->entries * HASHTABLE_REHASH_FACTOR));
    if (new_size == ht->num_buckets)
        return;

    old_num_buckets = ht->num_buckets;

    buckets_size = new_size * sizeof(ht->buckets[0]);
    old_buckets = ht->buckets;
    ht->buckets = ht->alloc.malloc(buckets_size);
    if (ht->buckets == NULL) {
        /* allocation failed: keep the old bucket array and cancel the rehash */
        ht->buckets = old_buckets;
        return;
    }
    memset(ht->buckets, 0, buckets_size);

    ht->num_buckets = new_size;

    /* relink every entry from the old buckets into the resized table */
    for (bucket = 0; bucket < old_num_buckets; bucket++) {
        _Py_hashtable_entry_t *entry, *next;
        for (entry = BUCKETS_HEAD(old_buckets[bucket]); entry != NULL; entry = next) {
            size_t entry_index;

            assert(ht->hash_func(entry->key) == entry->key_hash);
            next = ENTRY_NEXT(entry);
            entry_index = entry->key_hash & (new_size - 1);

            _Py_slist_prepend(&ht->buckets[entry_index], (_Py_slist_item_t*)entry);
        }
    }

    ht->alloc.free(old_buckets);
}
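round_size is not defined in Examples #3 and #4 either, but the bucket index is computed as entry->key_hash & (new_size - 1), which only yields a valid index when the bucket count is a power of two, so the helper presumably rounds up to one. A minimal sketch under that assumption (the minimum size of 16 is also an assumption):

static size_t
round_size(size_t s)
{
    size_t i;
    /* assumed lower bound on the table size; illustrative value */
    if (s < 16)
        return 16;
    /* round up to the next power of two so that
       key_hash & (num_buckets - 1) is always a valid bucket index */
    i = 1;
    while (i < s)
        i <<= 1;
    return i;
}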
Example #5
0
static void write_event(MSEventQueue *q, MSFilter *f, unsigned int ev_id, void *arg){
	int argsize=ev_id & 0xff;
	int size=round_size(argsize);
	uint8_t *nextpos;
	int header_size = sizeof(MSEventHeader);
	size += header_size;
	ms_mutex_lock(&q->mutex);
	nextpos=q->wptr+size;

	if (q->freeroom<size){
		ms_mutex_unlock(&q->mutex);
		ms_error("Dropped event, no more free space in event buffer !");
		return;
	}

	if (nextpos>q->lim){
		/* need to wrap around */
		q->endptr=q->wptr;
		q->wptr=q->buffer;
		nextpos=q->wptr+size;
	}

	if (((intptr_t)q->wptr % 4) != 0) ms_fatal("Unaligned access");
	((MSEventHeader *)q->wptr)->filter = f;
	((MSEventHeader *)q->wptr)->ev_id = ev_id;

	if (argsize > 0) memcpy(q->wptr + header_size, arg, argsize);
	q->wptr=nextpos;

	/* the buffer's effective end (q->endptr) may have grown within the limit q->lim; update it so that reading is not reset to the beginning prematurely */
	if (nextpos>q->endptr) {
		q->endptr=nextpos;
	}

	q->freeroom-=size;
	ms_mutex_unlock(&q->mutex);
}
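Examples #2 and #5 also rely on a round_size helper that is not shown. Both functions insist that each MSEventHeader is 4-byte aligned (the "% 4" checks), so the payload length presumably gets rounded up to a multiple of 4 before being added to the header size; a minimal sketch under that assumption (and assuming sizeof(MSEventHeader) is itself a multiple of 4):

/* Assumed helper: round the argument size up to a multiple of 4 so that the
   header written after the payload stays 4-byte aligned. Illustrative only. */
static int round_size(int size){
	return (size + 3) & ~3;
}

write_event then reserves round_size(argsize) + sizeof(MSEventHeader) bytes per event, and parse_event recomputes the same length from the low byte of the event id to find where the next event starts.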