Example 1
/* Take a reference on the vector: atomic when built against Torch,
   a plain increment otherwise. */
void tds_vec_retain(tds_vec *vec)
{
#if HAS_TORCH
  THAtomicIncrementRef(&vec->refcount);
#else
  vec->refcount++;
#endif
}
Example 2
TensorWrapper::TensorWrapper(GpuMatT & matT, THCState *state) {

    if (matT.tensor != nullptr) {
        // Mat is already constructed on another Tensor, so wrap that one
        this->tensorPtr = reinterpret_cast<THByteTensor *>(matT.tensor);
        this->definedInLua = true;
        this->typeCode = static_cast<char>(matT.mat.depth());
        // The storage is now shared with the Lua-side tensor, so take a reference on it
        THAtomicIncrementRef(&this->tensorPtr->storage->refcount);
    } else {
        // No backing tensor yet: delegate to the GpuMat constructor via placement new
        new (this) TensorWrapper(matT.mat, state);
    }
}
Example 3
void transfer_tensor_CUDA(THCState *state, THCudaTensor *dst, struct TensorWrapper srcWrapper) {

    THCudaTensor *src = reinterpret_cast<THCudaTensor *>(srcWrapper.tensorPtr);

    dst->nDimension = src->nDimension;
    dst->refcount = src->refcount;

    // dst now aliases src's storage
    dst->storage = src->storage;

    if (!srcWrapper.definedInLua) {
        // Don't let Torch deallocate size and stride arrays: steal them from src
        dst->size = src->size;
        dst->stride = src->stride;
        src->size = nullptr;
        src->stride = nullptr;
        // Freeing src below will release its storage reference; take one first
        // so the storage stays alive for dst
        THAtomicIncrementRef(&src->storage->refcount);
        THCudaTensor_free(state, src);
    } else {
        // The tensor is owned on the Lua side: give dst its own size/stride copies
        dst->size   = static_cast<long *>(THAlloc(sizeof(long) * dst->nDimension));
        dst->stride = static_cast<long *>(THAlloc(sizeof(long) * dst->nDimension));
        memcpy(dst->size,   src->size,   src->nDimension * sizeof(long));
        memcpy(dst->stride, src->stride, src->nDimension * sizeof(long));
    }
}
Example 4
static void * THRefcountedMapAllocator_alloc(void *_ctx, ptrdiff_t size) {
  THMapAllocatorContext *ctx = _ctx;

  if (ctx->flags & TH_ALLOCATOR_MAPPED_FROMFD)
    THError("THRefcountedMapAllocator doesn't support TH_ALLOCATOR_MAPPED_FROMFD flag");
  if (ctx->flags & TH_ALLOCATOR_MAPPED_KEEPFD)
    THError("THRefcountedMapAllocator doesn't support TH_ALLOCATOR_MAPPED_KEEPFD flag");
  if (ctx->flags & TH_ALLOCATOR_MAPPED_UNLINK)
    THError("THRefcountedMapAllocator doesn't support TH_ALLOCATOR_MAPPED_UNLINK flag");
  if (!(ctx->flags & TH_ALLOCATOR_MAPPED_SHAREDMEM))
    THError("THRefcountedMapAllocator requires TH_ALLOCATOR_MAPPED_SHAREDMEM flag");

  /* Reserve TH_ALLOC_ALIGNMENT bytes at the start of the mapping for the
     THMapInfo header that holds the shared refcount; the caller gets the
     pointer just past that header. */
  size = size + TH_ALLOC_ALIGNMENT;
  void *ptr = _map_alloc(ctx, size);
  char *data = ((char*)ptr) + TH_ALLOC_ALIGNMENT;
  THMapInfo *map_info = (THMapInfo*)ptr;

  /* The exclusive creator initializes the count; everyone else attaches to
     an existing mapping and increments it. */
  if (ctx->flags & TH_ALLOCATOR_MAPPED_EXCLUSIVE)
    map_info->refcount = 1;
  else
    THAtomicIncrementRef(&map_info->refcount);

  return (void*)data;
}
Example 5
/* Lua binding: fetch the threads.Queue userdata from the stack and take a
   reference on it. */
static int queue_retain(lua_State *L)
{
  THQueue *queue = luaTHRD_checkudata(L, 1, "threads.Queue");
  THAtomicIncrementRef(&queue->refcount);
  return 0;
}
Example 6
void THCudaTensor_retain(THCState *state, THCudaTensor *self)
{
  /* Only tensors flagged as refcounted track a reference count */
  if(self->flag & TH_TENSOR_REFCOUNTED)
    THAtomicIncrementRef(&self->refcount);
}
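Every retain in these examples must eventually be balanced by the matching release, or the object (and, for a tensor, its storage) is leaked. A hedged usage sketch for Example 6, assuming the usual cutorch pairing of THCudaTensor_retain with THCudaTensor_free; the variable t and the surrounding scenario are hypothetical:

/* A second owner keeps the tensor alive across asynchronous work. */
THCudaTensor_retain(state, t);   /* take an extra reference */
/* ... enqueue work that reads t ... */
THCudaTensor_free(state, t);     /* release it once the work is done */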
Example 7
void THRefcountedMapAllocator_incref(THMapAllocatorContext *ctx, void *data)
{
  /* The THMapInfo header holding the shared refcount sits TH_ALLOC_ALIGNMENT
     bytes before the user data pointer (see the matching alloc in Example 4). */
  THMapInfo *map_info = (THMapInfo*)(((char*)data) - TH_ALLOC_ALIGNMENT);
  THAtomicIncrementRef(&map_info->refcount);
}
Example 8
void THCStream_retain(THCStream* self)
{
  /* Only streams backed by an actual CUDA stream are refcounted; the
     default (NULL) stream is left alone. */
  if (self->stream) {
    THAtomicIncrementRef(&self->refcount);
  }
}
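All of the snippets above show the acquire side of the pattern. For contrast, here is a minimal sketch of the matching release path for Example 1, assuming the same tds_vec layout; the function name is hypothetical, and the real tds_vec_free also destroys the stored elements and uses the library's own allocator, both omitted here:

/* Sketch only: drop one reference and destroy the vector when the count
   reaches zero. THAtomicDecrementRef decrements atomically and returns
   non-zero once the count has dropped to zero. */
void tds_vec_release_sketch(tds_vec *vec)
{
#if HAS_TORCH
  if(THAtomicDecrementRef(&vec->refcount))
#else
  if(--vec->refcount == 0)
#endif
  {
    /* element cleanup would go here */
    free(vec);
  }
}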