void tds_vec_retain(tds_vec *vec)
{
#if HAS_TORCH
  THAtomicIncrementRef(&vec->refcount);
#else
  vec->refcount++;
#endif
}
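The release side would mirror this: decrement the same counter and tear the vector down once the last reference is gone. The sketch below is illustrative only; tds_vec_release and tds_vec_destroy are hypothetical names, and it assumes THAtomicDecrementRef returns nonzero once the count reaches zero, as TH's atomic helpers do.

/* Hypothetical counterpart to tds_vec_retain. */
void tds_vec_release(tds_vec *vec)
{
#if HAS_TORCH
  if (THAtomicDecrementRef(&vec->refcount))   /* nonzero once the count hits 0 */
    tds_vec_destroy(vec);                     /* hypothetical destructor */
#else
  if (--vec->refcount == 0)
    tds_vec_destroy(vec);
#endif
}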
TensorWrapper::TensorWrapper(GpuMatT & matT, THCState *state) {
    if (matT.tensor != nullptr) {
        // Mat is already constructed on another Tensor, so return that
        this->tensorPtr = reinterpret_cast<THByteTensor *>(matT.tensor);
        this->definedInLua = true;
        this->typeCode = static_cast<char>(matT.mat.depth());
        THAtomicIncrementRef(&this->tensorPtr->storage->refcount);
    } else {
        new (this) TensorWrapper(matT.mat, state);
    }
}
// Copy src's metadata into dst; if src was created on the C++ side, steal its size/stride
// arrays, retain its storage and free src, otherwise duplicate the metadata and share the
// Lua-owned storage.
void transfer_tensor_CUDA(THCState *state, THCudaTensor *dst, struct TensorWrapper srcWrapper) {
    THCudaTensor *src = reinterpret_cast<THCudaTensor *>(srcWrapper.tensorPtr);

    dst->nDimension = src->nDimension;
    dst->refcount = src->refcount;
    dst->storage = src->storage;

    if (!srcWrapper.definedInLua) {
        // Don't let Torch deallocate size and stride arrays
        dst->size = src->size;
        dst->stride = src->stride;
        src->size = nullptr;
        src->stride = nullptr;
        THAtomicIncrementRef(&src->storage->refcount);
        THCudaTensor_free(state, src);
    } else {
        dst->size = static_cast<long *>(THAlloc(sizeof(long) * dst->nDimension));
        dst->stride = static_cast<long *>(THAlloc(sizeof(long) * dst->nDimension));
        memcpy(dst->size, src->size, src->nDimension * sizeof(long));
        memcpy(dst->stride, src->stride, src->nDimension * sizeof(long));
    }
}
static void * THRefcountedMapAllocator_alloc(void *_ctx, ptrdiff_t size) {
  THMapAllocatorContext *ctx = _ctx;
  if (ctx->flags & TH_ALLOCATOR_MAPPED_FROMFD)
    THError("THRefcountedMapAllocator doesn't support TH_ALLOCATOR_MAPPED_FROMFD flag");
  if (ctx->flags & TH_ALLOCATOR_MAPPED_KEEPFD)
    THError("THRefcountedMapAllocator doesn't support TH_ALLOCATOR_MAPPED_KEEPFD flag");
  if (ctx->flags & TH_ALLOCATOR_MAPPED_UNLINK)
    THError("THRefcountedMapAllocator doesn't support TH_ALLOCATOR_MAPPED_UNLINK flag");
  if (!(ctx->flags & TH_ALLOCATOR_MAPPED_SHAREDMEM))
    THError("THRefcountedMapAllocator requires TH_ALLOCATOR_MAPPED_SHAREDMEM flag");

  // Reserve room for a THMapInfo header (holding the refcount) in front of the user data
  size = size + TH_ALLOC_ALIGNMENT;
  void *ptr = _map_alloc(ctx, size);
  char *data = ((char*)ptr) + TH_ALLOC_ALIGNMENT;
  THMapInfo *map_info = (THMapInfo*)ptr;

  // The exclusive creator initializes the refcount; other processes attaching to the
  // shared mapping just increment it
  if (ctx->flags & TH_ALLOCATOR_MAPPED_EXCLUSIVE)
    map_info->refcount = 1;
  else
    THAtomicIncrementRef(&map_info->refcount);

  return (void*)data;
}
static int queue_retain(lua_State *L)
{
  THQueue *queue = luaTHRD_checkudata(L, 1, "threads.Queue");
  THAtomicIncrementRef(&queue->refcount);
  return 0;
}
void THCudaTensor_retain(THCState *state, THCudaTensor *self)
{
  if(self->flag & TH_TENSOR_REFCOUNTED)
    THAtomicIncrementRef(&self->refcount);
}
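A typical caller pairs this with THCudaTensor_free once its reference is no longer needed. The fragment below is an assumed usage sketch, not cutorch code; cache_tensor and slot are made-up names.

/* Take shared ownership of a tensor before stashing it in a longer-lived slot. */
void cache_tensor(THCState *state, THCudaTensor *t, THCudaTensor **slot)
{
  THCudaTensor_retain(state, t);        /* one more owner */
  if (*slot)
    THCudaTensor_free(state, *slot);    /* drop the reference held by the old entry */
  *slot = t;
}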
void THRefcountedMapAllocator_incref(THMapAllocatorContext *ctx, void *data)
{
  // The THMapInfo header (holding the refcount) sits immediately before the user data
  THMapInfo *map_info = (THMapInfo*)(((char*)data) - TH_ALLOC_ALIGNMENT);
  THAtomicIncrementRef(&map_info->refcount);
}
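The decrement side only needs the same pointer arithmetic. A minimal sketch, assuming THAtomicDecrementRef returns nonzero once the count reaches zero; the name and the decision to leave unmapping to the caller are this sketch's own, not TH's actual decref implementation.

/* Drop one reference on the shared mapping and report whether it was the last. */
static int THRefcountedMapAllocator_decref_sketch(THMapAllocatorContext *ctx, void *data)
{
  THMapInfo *map_info = (THMapInfo*)(((char*)data) - TH_ALLOC_ALIGNMENT);
  return THAtomicDecrementRef(&map_info->refcount);
}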
void THCStream_retain(THCStream* self)
{
  if (self->stream) {
    THAtomicIncrementRef(&self->refcount);
  }
}
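A matching release would skip the default stream the same way the retain does. This is a hedged sketch under assumptions: THCStream_release is a made-up name, the struct is assumed to be heap-allocated, and THAtomicDecrementRef is assumed to return nonzero once the count reaches zero.

/* Hypothetical counterpart to THCStream_retain. */
void THCStream_release(THCStream* self)
{
  if (self && self->stream && THAtomicDecrementRef(&self->refcount)) {
    THCudaCheck(cudaStreamDestroy(self->stream));  /* destroy the underlying CUDA stream */
    free(self);                                    /* assumes the handle was malloc'd */
  }
}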