/* helpful functions */

/* Read a size specification from the Lua stack at `index`: either a single
 * torch.LongStorage userdata (retained and returned), or up to 4 numeric
 * arguments starting at `index` (missing trailing slots become 0).
 * On a non-number argument, frees the partial storage and raises a Lua
 * error.  *size_ is owned by the caller. */
static void torch_(Tensor_c_readSize)(lua_State *L, int index, THLongStorage **size_)
{
  THLongStorage *size = luaT_toudata(L, index, torch_LongStorage_id);
  long d;

  if(size)
  {
    /* hand the caller its own reference */
    THLongStorage_retain(size);
    *size_ = size;
    return;
  }

  /* gather up to 4 numbers from the stack */
  size = THLongStorage_newWithSize(4);
  for(d = 0; d < 4; d++)
  {
    if(lua_isnone(L, index+d))
      THLongStorage_set(size, d, 0);
    else if(lua_isnumber(L, index+d))
      THLongStorage_set(size, d, lua_tonumber(L, index+d));
    else
    {
      /* free before luaL_error's longjmp so the storage is not leaked */
      THLongStorage_free(size);
      luaL_error(L, "invalid argument %d: number expected", index+d);
    }
  }
  *size_ = size;
}
/*
 * Build a 2-D torch.FloatTensor from an HTK feature array and push it onto
 * the Lua stack.  Rows are samples; columns are the per-sample floats
 * including the context window (samplesize is in bytes).
 *
 * Fixes vs. original: removed unused local `k`; replaced the magic
 * constant 4 with sizeof(float); freed the size/stride storages, which
 * were leaked (the tensor keeps its own copy of the values).
 */
static void load_array_to_lua(lua_State *L, chtk::htkarray& arr){
    const int ndims = 2; // based on code from mattorch with stride fix

    // feature width in floats: samplesize bytes / sizeof(float), times the
    // context window of (2*frm_ext+1) frames
    long width = arr.samplesize/sizeof(float)*(2*arr.frm_ext+1);

    THLongStorage *size = THLongStorage_newWithSize(ndims);
    THLongStorage *stride = THLongStorage_newWithSize(ndims);
    THLongStorage_set(size, 0, arr.nsamples);
    THLongStorage_set(size, 1, width);
    THLongStorage_set(stride, 0, width); // row-major: one row per sample
    THLongStorage_set(stride, 1, 1);

    THFloatTensor *tensor = THFloatTensor_newWithSize(size, stride);
    // the tensor copies the size/stride values; release our storages
    THLongStorage_free(size);
    THLongStorage_free(stride);

    void *tensorDataPtr = (void *)(THFloatTensor_data(tensor));
    size_t numBytes = THFloatTensor_nElement(tensor) * sizeof(float);
    luaT_pushudata(L, tensor, luaT_checktypename2id(L, "torch.FloatTensor"));

    // now copy the data
    assert(tensorDataPtr);
    memcpy(tensorDataPtr, (void *)(arr.data<void>()), numBytes);
}
/* THLab_(max): reduce `t` along `dimension`, writing the maximum values
 * into values_ and the corresponding (0-based) argmax indices into
 * indices_.  Both outputs are resized to t's shape with `dimension`
 * collapsed to 1.
 *
 * Fix vs. original: the function's closing brace was missing after the
 * TH_TENSOR_DIM_APPLY3 invocation, leaving the definition unterminated. */
void THLab_(max)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension)
{
  THLongStorage *dim;
  long i;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension out of range");

  /* output shape = input shape with the reduced dimension set to 1 */
  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(values_, dim);
  THLongTensor_resize(indices_, dim);
  THLongStorage_free(dim);

  TH_TENSOR_DIM_APPLY3(real, Real, t, real, Real, values_, long, Long, indices_, dimension,
                       long theIndex = 0;
                       real theMax = t_data[0];
                       /* linear scan over the slice; track first max seen */
                       for(i = 1; i < t_size; i++)
                       {
                         if(t_data[i*t_stride] > theMax)
                         {
                           theIndex = i;
                           theMax = t_data[i*t_stride];
                         }
                       }
                       *indices__data = theIndex;
                       *values__data = theMax;);
}
/* Collect size arguments beginning at stack slot `index`: either one
 * torch.LongStorage (deep-copied), or a run of numbers reaching the top
 * of the stack.  Raises a Lua argument error on a non-number.  The
 * returned THLongStorage is owned by the caller. */
THLongStorage* torch_checklongargs(lua_State *L, int index)
{
  int narg = lua_gettop(L)-index+1;
  THLongStorage *storage;

  if(narg == 1 && luaT_toudata(L, index, "torch.LongStorage"))
  {
    THLongStorage *given = luaT_toudata(L, index, "torch.LongStorage");
    storage = THLongStorage_newWithSize(given->size);
    THLongStorage_copy(storage, given);
  }
  else
  {
    int i;
    storage = THLongStorage_newWithSize(narg);
    for(i = 0; i < narg; i++)
    {
      if(!lua_isnumber(L, index+i))
      {
        /* free before luaL_argerror's longjmp to avoid a leak */
        THLongStorage_free(storage);
        luaL_argerror(L, index+i, "number expected");
      }
      THLongStorage_set(storage, i, lua_tonumber(L, index+i));
    }
  }

  return storage;
}
// Gated Linear Unit forward pass: split `input` in half along `dim`,
// then output = firstHalf * sigmoid(secondHalf).
void THNN_(GatedLinear_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          int dim)
{
  dim = dim - TH_INDEX_BASE;
  const int64_t nIn = THTensor_(size)(input, dim);
  THArgCheck(nIn % 2 == 0, 2, "Halving dimension must be even. Dim %d is size %ld",
             dim + TH_INDEX_BASE, nIn);

  // size output to half of input: input's shape with `dim` halved
  const int64_t halfSize = nIn / 2;
  THLongStorage *outSizes = THTensor_(newSizeOf)(input);
  THLongStorage_set(outSizes, dim, halfSize);
  THTensor_(resize)(output, outSizes, NULL);
  THLongStorage_free(outSizes);

  // narrow views over the two halves of the input
  THTensor *a = THTensor_(newNarrow)(input, dim, 0, halfSize);
  THTensor *b = THTensor_(newNarrow)(input, dim, halfSize, halfSize);

  // x = x1:cmul( sigmoid(x2) )
  THTensor_(sigmoid)(output, b);
  THTensor_(cmul)(output, output, a);

  THTensor_(free)(a);
  THTensor_(free)(b);
}
/* Parse tensor-constructor style arguments starting at `index`:
 *   - nothing (if allowNone): empty size, no storage;
 *   - a Tensor (if allowTensor): borrow its storage/offset, copy its size;
 *   - a Storage (if allowStorage) plus optional 1-based offset and size;
 *   - otherwise a plain size spec (numbers or a LongStorage).
 * On return *storage_/*storageOffset_/*size_ describe the request;
 * *size_ is a new storage owned by the caller.  Raises a Lua argument
 * error if nothing matches. */
static void torch_(Tensor_c_readTensorStorageSize)(lua_State *L, int index, int allowNone, int allowTensor, int allowStorage,
                                                   THStorage **storage_, long *storageOffset_, THLongStorage **size_)
{
  static char errMsg[64];
  THTensor *src = NULL;
  THStorage *storage = NULL;

  int arg1Type = lua_type(L, index);

  if( allowNone && (arg1Type == LUA_TNONE) )
  {
    *storage_ = NULL;
    *storageOffset_ = 0;
    *size_ = THLongStorage_new();
    return;
  }
  else if( allowTensor && (arg1Type == LUA_TUSERDATA) && (src = luaT_toudata(L, index, torch_(Tensor_id))) )
  {
    *storage_ = THTensor_(storage)(src);
    *storageOffset_ = THTensor_(storageOffset)(src);
    *size_ = THTensor_(newSizeOf)(src);
    return;
  }
  else if( allowStorage && (arg1Type == LUA_TUSERDATA) && (storage = luaT_toudata(L, index, torch_(Storage_id))) )
  {
    *storage_ = storage;
    if(lua_isnone(L, index+1))
    {
      *storageOffset_ = 0;
      *size_ = THLongStorage_newWithSize(1);
      /* BUGFIX: TH storages are 0-indexed; the original wrote slot 1 of a
       * 1-element storage (out of bounds) and left slot 0 unset. */
      THLongStorage_set(*size_, 0, THStorage_(size)(storage));
    }
    else
    {
      *storageOffset_ = luaL_checklong(L, index+1)-1; /* Lua is 1-based */
      torch_(Tensor_c_readSize)(L, index+2, size_);
    }
    return;
  }
  else if( (arg1Type == LUA_TNUMBER) || (luaT_toudata(L, index, torch_LongStorage_id)) )
  {
    *storage_ = NULL;
    *storageOffset_ = 0;
    torch_(Tensor_c_readSize)(L, index, size_);
    return;
  }

  /* snprintf: bounded write into the static message buffer */
  snprintf(errMsg, sizeof(errMsg), "expecting number%s%s",
           (allowTensor ? " or Tensor" : ""),
           (allowStorage ? " or Storage" : ""));
  luaL_argcheck(L, 0, index, errMsg);
}
/*
 * Build a torch tensor matching the dtype and shape of a NumPy array
 * (cnpy::NpyArray) and push it onto the Lua stack.  Strides are derived
 * for row-major (C-order) data, matching NumPy's default layout.
 * Supported dtypes: float32/64 and 1/2/4/8-byte integers (unsigned data
 * is loaded through the signed tensor of the same width — torch has no
 * unsigned tensor types beyond Byte).  Throws std::runtime_error on any
 * unsupported dtype or word size.
 *
 * Fixes vs. original: the size/stride storages were leaked on every path
 * (the tensor keeps its own copy of the values); an unsupported word
 * size fell through to assert(NULL) — UB under NDEBUG — instead of the
 * explicit error path.
 */
static void load_array_to_lua(lua_State *L, cnpy::NpyArray& arr){
    int ndims = arr.shape.size();

    // based on code from mattorch with stride fix
    THLongStorage *size = THLongStorage_newWithSize(ndims);
    THLongStorage *stride = THLongStorage_newWithSize(ndims);
    for (int k = 0; k < ndims; k++) {
        THLongStorage_set(size, k, arr.shape[k]);
        // row-major strides, computed innermost dimension first
        if (k > 0)
            THLongStorage_set(stride, ndims-k-1, arr.shape[ndims-k]*THLongStorage_get(stride, ndims-k));
        else
            THLongStorage_set(stride, ndims-k-1, 1);
    }

    void *tensorDataPtr = NULL;
    size_t numBytes = 0;

    if ( arr.arrayType == 'f' ){ // float32/64
        if ( arr.word_size == 4 ){ // float32
            THFloatTensor *tensor = THFloatTensor_newWithSize(size, stride);
            tensorDataPtr = (void *)(THFloatTensor_data(tensor));
            numBytes = THFloatTensor_nElement(tensor) * arr.word_size;
            luaT_pushudata(L, tensor, luaT_checktypename2id(L, "torch.FloatTensor"));
        }else if ( arr.word_size == 8 ){ // float64
            THDoubleTensor *tensor = THDoubleTensor_newWithSize(size, stride);
            tensorDataPtr = (void *)(THDoubleTensor_data(tensor));
            numBytes = THDoubleTensor_nElement(tensor) * arr.word_size;
            luaT_pushudata(L, tensor, luaT_checktypename2id(L, "torch.DoubleTensor"));
        }
    }else if ( arr.arrayType == 'i' || arr.arrayType == 'u' ){
        // unsigned data is reinterpreted through the signed tensor of the
        // same width (torch has no wider unsigned types)
        if ( arr.word_size == 1 ){ // int8
            THByteTensor *tensor = THByteTensor_newWithSize(size, stride);
            tensorDataPtr = (void *)(THByteTensor_data(tensor));
            numBytes = THByteTensor_nElement(tensor) * arr.word_size;
            luaT_pushudata(L, tensor, luaT_checktypename2id(L, "torch.ByteTensor"));
        }else if ( arr.word_size == 2 ){ // int16
            THShortTensor *tensor = THShortTensor_newWithSize(size, stride);
            tensorDataPtr = (void *)(THShortTensor_data(tensor));
            numBytes = THShortTensor_nElement(tensor) * arr.word_size;
            luaT_pushudata(L, tensor, luaT_checktypename2id(L, "torch.ShortTensor"));
        }else if ( arr.word_size == 4 ){ // int32
            THIntTensor *tensor = THIntTensor_newWithSize(size, stride);
            tensorDataPtr = (void *)(THIntTensor_data(tensor));
            numBytes = THIntTensor_nElement(tensor) * arr.word_size;
            luaT_pushudata(L, tensor, luaT_checktypename2id(L, "torch.IntTensor"));
        }else if ( arr.word_size == 8 ){ // int64
            THLongTensor *tensor = THLongTensor_newWithSize(size, stride);
            tensorDataPtr = (void *)(THLongTensor_data(tensor));
            numBytes = THLongTensor_nElement(tensor) * arr.word_size;
            luaT_pushudata(L, tensor, luaT_checktypename2id(L, "torch.LongTensor"));
        }
    }

    // the tensor (if created) copied the size/stride values; release them
    // on every path, including the error path below
    THLongStorage_free(size);
    THLongStorage_free(stride);

    if ( tensorDataPtr == NULL ){
        // unknown arrayType, or a supported type with an unhandled word size
        printf("array type unsupported");
        throw std::runtime_error("unsupported data type");
    }

    // now copy the data
    memcpy(tensorDataPtr, (void *)(arr.data<void>()), numBytes);
}