static int nn_(Min_updateOutput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_(Tensor_id));
  int dimension = luaT_getfieldcheckint(L, 1, "dimension")-1;
  THTensor *indices = luaT_getfieldcheckudata(L, 1, "indices", torch_(Tensor_id));
  THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_(Tensor_id));
  THLongStorage *dim;
  long i;

  luaL_argcheck(L, dimension >= 0 && dimension < input->nDimension, 2, "dimension out of range");

  /* the reduced dimension is kept, with size 1 */
  dim = THLongStorage_newWithSize(input->nDimension);
  for(i = 0; i < input->nDimension; i++)
    dim->data[i] = input->size[i];
  dim->data[dimension] = 1;
  THTensor_(resize)(output, dim, NULL);
  THTensor_(resize)(indices, dim, NULL);
  THLongStorage_free(dim);

  /* for each slice along 'dimension', record the minimum and its 1-based index */
  TH_TENSOR_DIM_APPLY3(real, output, real, input, real, indices, dimension,
                       long theIndex = 0;
                       real theMin = input_data[0];
                       for(i = 1; i < input_size; i++)
                       {
                         if(input_data[i*input_stride] < theMin)
                         {
                           theIndex = i;
                           theMin = input_data[i*input_stride];
                         }
                       }
                       *indices_data = theIndex+1;
                       *output_data = theMin;);

  return 1;
}
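/*
 * A minimal standalone sketch of the strided arg-min reduction performed by the
 * TH_TENSOR_DIM_APPLY3 body above: walk one slice of n elements spaced `stride`
 * apart, track the smallest value and its 1-based position (the +1 matches the
 * Lua-style indices written into the `indices` tensor).  The function name and
 * types here are illustrative only, not part of the TH API.
 */
static double slice_min(const double *data, long n, long stride, long *index_1based)
{
  long theIndex = 0;
  double theMin = data[0];
  long i;
  for(i = 1; i < n; i++)
  {
    if(data[i*stride] < theMin)
    {
      theIndex = i;
      theMin = data[i*stride];
    }
  }
  *index_1based = theIndex + 1;
  return theMin;
}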
static int torch_(Tensor_copy)(lua_State *L)
{
  THTensor *tensor = luaT_checkudata(L, 1, torch_(Tensor_id));
  void *src;
  if( (src = luaT_toudata(L, 2, torch_(Tensor_id))) )
    THTensor_(copy)(tensor, src);
  else if( (src = luaT_toudata(L, 2, torch_ByteTensor_id)) )
    THTensor_(copyByte)(tensor, src);
  else if( (src = luaT_toudata(L, 2, torch_CharTensor_id)) )
    THTensor_(copyChar)(tensor, src);
  else if( (src = luaT_toudata(L, 2, torch_ShortTensor_id)) )
    THTensor_(copyShort)(tensor, src);
  else if( (src = luaT_toudata(L, 2, torch_IntTensor_id)) )
    THTensor_(copyInt)(tensor, src);
  else if( (src = luaT_toudata(L, 2, torch_LongTensor_id)) )
    THTensor_(copyLong)(tensor, src);
  else if( (src = luaT_toudata(L, 2, torch_FloatTensor_id)) )
    THTensor_(copyFloat)(tensor, src);
  else if( (src = luaT_toudata(L, 2, torch_DoubleTensor_id)) )
    THTensor_(copyDouble)(tensor, src);
  else
    luaL_typerror(L, 2, "torch.*Tensor");
  lua_settop(L, 1);
  return 1;
}
static int torch_(Tensor___index__)(lua_State *L)
{
  THTensor *tensor = luaT_checkudata(L, 1, torch_(Tensor_id));

  if(lua_isnumber(L, 2))
  {
    long index = luaL_checklong(L,2)-1;
    luaL_argcheck(L, THTensor_(nDimension)(tensor) > 0, 1, "empty tensor");
    if(THTensor_(nDimension)(tensor) == 1)
      lua_pushnumber(L, THTensor_(get1d)(tensor, index));
    else
    {
      THTensor *tensor_ = THTensor_(newWithTensorSelect)(tensor, index);
      luaT_pushudata(L, tensor_, torch_(Tensor_id));
    }
    lua_pushboolean(L, 1);
    return 2;
  }
  else
  {
    lua_pushboolean(L, 0);
    return 1;
  }
}
static int nn_(SparseLinear_updateOutput)(lua_State *L)
{
  long i;
  THTensor * input = luaT_checkudata(L, 2, torch_(Tensor_id));
  THTensor * weight = luaT_getfieldcheckudata(L, 1, "weight", torch_(Tensor_id));
  THTensor * bias = luaT_getfieldcheckudata(L, 1, "bias", torch_(Tensor_id));
  THTensor * output = luaT_getfieldcheckudata(L, 1, "output", torch_(Tensor_id));
  long dim = weight->size[0]; /* number of weights.. */

  THTensor_(copy)(output, bias);
  for(i = 0; i < input->size[1]; i++)
  {
    long offset = (long)(THTensor_(get2d)(input, 0, i))-1;
    if(offset >= 0 && offset < dim) /* make sure indices are in bounds.. */
    {
      real val = THTensor_(get2d)(input, 1, i);
      THBlas_(axpy)(output->size[0],
                    val,
                    THTensor_(data)(weight)+offset*weight->stride[0], weight->stride[1],
                    THTensor_(data)(output), output->stride[0]);
    }
    else
      luaL_error(L, "index out of bound");
  }
  return 1;
}
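/*
 * A rough dense-array restatement (a sketch, not the module's code) of what
 * updateOutput above computes.  The sparse input is assumed to be a 2 x nnz
 * tensor whose first row holds 1-based indices and whose second row holds
 * values, so the result is output = bias + sum_k value_k * weight[index_k, :].
 * Plain row-major arrays stand in for tensors; the names are illustrative.
 */
static void sparse_linear_forward(const double *weight, /* nin x nout, row-major */
                                  const double *bias,   /* nout */
                                  const long *indices,  /* nnz, 1-based */
                                  const double *values, /* nnz */
                                  long nin, long nout, long nnz,
                                  double *output)       /* nout */
{
  long j, k;
  for(j = 0; j < nout; j++)
    output[j] = bias[j];
  for(k = 0; k < nnz; k++)
  {
    long row = indices[k] - 1;          /* convert to 0-based */
    if(row < 0 || row >= nin) continue; /* the C module raises a Lua error instead */
    for(j = 0; j < nout; j++)
      output[j] += values[k] * weight[row*nout + j];
  }
}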
static int torch_(Tensor_storage)(lua_State *L)
{
  THTensor *tensor = luaT_checkudata(L, 1, torch_(Tensor_id));
  THStorage_(retain)(THTensor_(storage)(tensor));
  luaT_pushudata(L, THTensor_(storage)(tensor), torch_(Storage_id));
  return 1;
}
/* Resize */
static int torch_(Tensor_resizeAs)(lua_State *L)
{
  THTensor *tensor = luaT_checkudata(L, 1, torch_(Tensor_id));
  THTensor *src = luaT_checkudata(L, 2, torch_(Tensor_id));
  THTensor_(resizeAs)(tensor, src);
  lua_settop(L, 1);
  return 1;
}
static int torch_(Tensor_select)(lua_State *L)
{
  THTensor *src = luaT_checkudata(L, 1, torch_(Tensor_id));
  long sliceIndex = luaL_checklong(L, 2)-1;
  THTensor *tensor = THTensor_(newWithTensorSelect)(src, sliceIndex);
  luaT_pushudata(L, tensor, torch_(Tensor_id));
  return 1;
}
static int nn_(Square_updateOutput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_(Tensor_id));
  THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_(Tensor_id));

  THTensor_(resizeAs)(output, input);

  TH_TENSOR_APPLY2(real, output, real, input, \
                   *output_data = *input_data * *input_data;);

  return 1;
}
static int torch_(Tensor_narrow)(lua_State *L)
{
  THTensor *src = luaT_checkudata(L, 1, torch_(Tensor_id));
  long firstIndex = luaL_checklong(L, 2)-1;
  long size = luaL_checklong(L, 3);
  THTensor *tensor = THTensor_(newWithTensorNarrow)(src, firstIndex, size);
  luaT_pushudata(L, tensor, torch_(Tensor_id));
  return 1;
}
static int nn_(Sqrt_updateOutput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_(Tensor_id));
  real bias = luaT_getfieldchecknumber(L,1,"eps");
  THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_(Tensor_id));

  THTensor_(resizeAs)(output, input);

  TH_TENSOR_APPLY2(real, output, real, input, \
                   *output_data = sqrt(*input_data + bias););

  return 1;
}
static int nnOmp_(SpatialMaxPooling_updateGradInputOmp)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_(Tensor_id));
  THTensor *gradOutput = luaT_checkudata(L, 3, torch_(Tensor_id));
  int kW = luaT_getfieldcheckint(L, 1, "kW");
  int kH = luaT_getfieldcheckint(L, 1, "kH");
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  setompnthread(L,1,"nThread");
  THTensor *indices = luaT_getfieldcheckudata(L, 1, "indices", torch_(Tensor_id));
  THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_(Tensor_id));
  THTensor *gradOutputPlane, *gradInputPlane, *unfoldedGradInputPlane, *gradLocalInput;
  int k,i,j;

  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  gradInputPlane = THTensor_(new)();
  gradOutputPlane = THTensor_(new)();
  gradLocalInput = THTensor_(new)();
  unfoldedGradInputPlane = THTensor_(new)();

  for (k = 0; k < input->size[0]; k++)
  {
    /* get input and output plane */
    THTensor_(select)(gradOutputPlane, gradOutput, 0, k);
    THTensor_(select)(gradInputPlane, gradInput, 0, k);

    /* Unfold input to get each local window */
    THTensor_(unfold)(unfoldedGradInputPlane, gradInputPlane, 0, kH, dH);
    THTensor_(unfold)(unfoldedGradInputPlane, NULL, 1, kW, dW);

    /* Calculate max points */
    for(i = 0; i < gradOutputPlane->size[0]; i++)
    {
      for(j = 0; j < gradOutputPlane->size[1]; j++)
      {
        THTensor_(select)(gradLocalInput, unfoldedGradInputPlane,0,i);
        THTensor_(select)(gradLocalInput, NULL, 0,j);
        long maxi = THTensor_(get4d)(indices,0,k,i,j)-1;
        long maxj = THTensor_(get4d)(indices,1,k,i,j)-1;
        double gi = THTensor_(get2d)(gradLocalInput,maxi,maxj)+THTensor_(get2d)(gradOutputPlane,i,j);
        THTensor_(set2d)(gradLocalInput,maxi,maxj,gi);
      }
    }
  }

  /* Cleanup */
  THTensor_(free)(gradInputPlane);
  THTensor_(free)(gradOutputPlane);
  THTensor_(free)(unfoldedGradInputPlane);
  THTensor_(free)(gradLocalInput);

  return 1;
}
static int nn_(HardShrink_updateOutput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_(Tensor_id));
  THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_(Tensor_id));

  THTensor_(resizeAs)(output, input);

  /* shrink towards zero: values in [-0.5, 0.5] are clamped to 0 */
  TH_TENSOR_APPLY2(real, output, real, input, \
                   if ((*input_data) > 0.5) *output_data = *input_data - 0.5;       \
                   else if ((*input_data) < -0.5) *output_data = *input_data + 0.5; \
                   else *output_data = 0;);

  return 1;
}
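/*
 * The piecewise shrink function applied element-wise above, written out as a
 * scalar helper with the threshold as a parameter instead of the hard-coded
 * 0.5 (a sketch for clarity, not part of the module):
 *   f(x) = x - lambda  if x >  lambda
 *        = x + lambda  if x < -lambda
 *        = 0           otherwise
 */
static double shrink(double x, double lambda)
{
  if(x > lambda)  return x - lambda;
  if(x < -lambda) return x + lambda;
  return 0;
}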
static int nnOmp_(Tanh_updateOutputOmp)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_(Tensor_id));
  setompnthread(L,1,"nThread");
  THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_(Tensor_id));

  THTensor_(resizeAs)(output, input);

  if (input->nDimension == 1 || !THTensor_(isContiguous)(input) || !THTensor_(isContiguous)(output))
  {
    /* generic (possibly strided) case: fall back to the element-wise macro */
    TH_TENSOR_APPLY2(real, output, real, input, \
                     *output_data = tanh(*input_data););
  }
  else
  {
    /* contiguous case: flat loop over the raw data, parallelized with OpenMP */
    real *output_data = THTensor_(data)(output);
    real *input_data = THTensor_(data)(input);
    long nelem = THTensor_(nElement)(input);
    long k;
#pragma omp parallel for private(k)
    for (k = 0; k < nelem; k++)
      output_data[k] = tanh(input_data[k]);
  }
  return 1;
}
static int torch_(Tensor_resize)(lua_State *L)
{
  THTensor *tensor = luaT_checkudata(L, 1, torch_(Tensor_id));
  THLongStorage *size;

  torch_(Tensor_c_readSize)(L, 2, &size);

  THTensor_(resize)(tensor, size);
  THLongStorage_free(size);

  lua_settop(L, 1);
  return 1;
}
static void torch_(Tensor_c_readTensorStorageSize)(lua_State *L, int index, int allowNone, int allowTensor, int allowStorage,
                                                   THStorage **storage_, long *storageOffset_, THLongStorage **size_)
{
  static char errMsg[64];
  THTensor *src = NULL;
  THStorage *storage = NULL;
  int arg1Type = lua_type(L, index);

  if( allowNone && (arg1Type == LUA_TNONE) )
  {
    *storage_ = NULL;
    *storageOffset_ = 0;
    *size_ = THLongStorage_new();
    return;
  }
  else if( allowTensor && (arg1Type == LUA_TUSERDATA) && (src = luaT_toudata(L, index, torch_(Tensor_id))) )
  {
    *storage_ = THTensor_(storage)(src);
    *storageOffset_ = THTensor_(storageOffset)(src);
    *size_ = THTensor_(newSizeOf)(src);
    return;
  }
  else if( allowStorage && (arg1Type == LUA_TUSERDATA) && (storage = luaT_toudata(L, index, torch_(Storage_id))) )
  {
    *storage_ = storage;
    if(lua_isnone(L, index+1))
    {
      *storageOffset_ = 0;
      *size_ = THLongStorage_newWithSize(1);
      THLongStorage_set(*size_, 0, THStorage_(size)(storage)); /* single dimension spanning the whole storage (C-level storages are 0-indexed) */
    }
    else
    {
      *storageOffset_ = luaL_checklong(L, index+1)-1;
      torch_(Tensor_c_readSize)(L, index+2, size_);
    }
    return;
  }
  else if( (arg1Type == LUA_TNUMBER) || (luaT_toudata(L, index, torch_LongStorage_id)) )
  {
    *storage_ = NULL;
    *storageOffset_ = 0;
    torch_(Tensor_c_readSize)(L, index, size_);
    return;
  }

  sprintf(errMsg, "expecting number%s%s", (allowTensor ? " or Tensor" : ""), (allowStorage ? " or Storage" : ""));
  luaL_argcheck(L, 0, index, errMsg);
}
static int torch_(Tensor_new)(lua_State *L)
{
  THTensor *tensor;
  THStorage *storage = NULL;
  long storageOffset = 0;
  THLongStorage *size = NULL;

  torch_(Tensor_c_readTensorStorageSize)(L, 1, 1, 1, 1,
                                         &storage, &storageOffset, &size);

  tensor = THTensor_(newWithStorage)(storage, storageOffset, size);
  THLongStorage_free(size);

  luaT_pushudata(L, tensor, torch_(Tensor_id));
  return 1;
}
/******************* grab the rgb frame *******************/
static int libkinect_(grab_rgb) (lua_State *L) {
  // Get Tensor's Info
  THTensor * tensor = luaT_checkudata(L, 1, torch_(Tensor_id));
  THTensor *contigTensor = THTensor_(newContiguous)(tensor);

  // Get device ID
  int index = 0;
  if (lua_isnumber(L, 2)) index = lua_tonumber(L, 2);

  THArgCheck(tensor->nDimension == 3 , 1, "RGB buffer: 3x480x640 Tensor expected");
  THArgCheck(tensor->size[0] == 3 , 1, "RGB buffer: 3x480x640 Tensor expected");
  THArgCheck(tensor->size[1] == 480 , 1, "RGB buffer: 3x480x640 Tensor expected");
  THArgCheck(tensor->size[2] == 640 , 1, "RGB buffer: 3x480x640 Tensor expected");

  unsigned int timestamp;
  unsigned char *data = 0;
  if (freenect_sync_get_video((void**)&data, &timestamp, index, FREENECT_VIDEO_RGB))
    luaL_error(L, "<libkinect.grabRGB> Error Kinect not connected?");

  // deinterleave the packed RGB bytes into the three tensor planes, scaled to [0,1]
  int z;
  for (z=0;z<3;z++){
    unsigned char *sourcep = data+z;
    THTensor *tslice = THTensor_(newSelect)(contigTensor,0,z);
    // copy
    TH_TENSOR_APPLY(real, tslice,
                    *tslice_data = ((real)(*sourcep)) / 255;
                    sourcep = sourcep + 3; );
    THTensor_(free)(tslice);
  }

  // if the destination tensor was not contiguous, push the converted data back into it
  if (contigTensor != tensor)
    THTensor_(copy)(tensor, contigTensor);
  THTensor_(free)(contigTensor);

  return 0;
}
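/*
 * The z-loop above turns the camera's packed, interleaved RGB bytes
 * (R G B R G B ...) into three separate planes scaled to [0,1].  A standalone
 * restatement over plain arrays, with illustrative names only:
 */
static void deinterleave_rgb(const unsigned char *packed, /* 3 * npixels bytes, interleaved */
                             float *planes,               /* 3 * npixels floats, plane-major */
                             long npixels)
{
  long z, i;
  for(z = 0; z < 3; z++)
  {
    const unsigned char *src = packed + z;
    float *dst = planes + z*npixels;
    for(i = 0; i < npixels; i++)
    {
      dst[i] = (float)(*src) / 255;
      src += 3; /* skip to the same channel of the next pixel */
    }
  }
}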
static int etherflow_(Api_send_tensor_lua)(lua_State *L) {
  /* get the arguments */
  THTensor *tensor = luaT_toudata(L, 1, torch_(Tensor_id));
  int size = THTensor_(nElement)(tensor);
  real *data = THTensor_(data)(tensor);
  etherflow_send_(Tensor_C)(data, size);
  return 0;
}
static int nn_(SpatialUpSampling_backward)(lua_State *L)
{
  // get all params
  THTensor *input = luaT_checkudata(L, 2, torch_(Tensor_id));
  THTensor *gradOutput = luaT_checkudata(L, 3, torch_(Tensor_id));
  THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_(Tensor_id));
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");

  // dims
  int iwidth = input->size[2];
  int iheight = input->size[1];
  int ichannels = input->size[0];
  int owidth = gradOutput->size[2];
  int oheight = gradOutput->size[1];
  int ochannels = gradOutput->size[0];

  // resize and zero gradInput
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  // get raw pointers
  real *gradInput_data = THTensor_(data)(gradInput);
  real *gradOutput_data = THTensor_(data)(gradOutput);

  // compute gradients for each plane
  int k;
  for (k=0; k<ochannels; k++) {
    // get planes
    real *gradInput_p = gradInput_data + k*iwidth*iheight;
    real *gradOutput_p = gradOutput_data + k*owidth*oheight;

    // for each plane, resample
    int x,y;
    for (y=0; y<oheight; y++) {
      for (x=0; x<owidth; x++) {
        // input positions (floored)
        int ix = x/dW;
        int iy = y/dH;

        // accumulate gradient
        gradInput_p[iy*iwidth + ix] += gradOutput_p[y*owidth + x];
      }
    }
  }
  return 1;
}
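/*
 * Equivalent way to read the accumulation above (a sketch, not the module's
 * code): because the forward pass copies input pixel (iy, ix) into the whole
 * dH x dW output block it covers, the gradient w.r.t. that input pixel is the
 * sum of gradOutput over that block.  Single plane, plain arrays, assuming
 * gradOutput is exactly (iheight*dH) x (iwidth*dW); names are illustrative.
 */
static void upsample_grad_plane(const float *gradOutput, float *gradInput,
                                int iheight, int iwidth, int dH, int dW)
{
  int x, y, bx, by;
  int owidth = iwidth*dW;
  for(y = 0; y < iheight; y++)
    for(x = 0; x < iwidth; x++)
    {
      float sum = 0;
      for(by = 0; by < dH; by++)
        for(bx = 0; bx < dW; bx++)
          sum += gradOutput[(y*dH + by)*owidth + (x*dW + bx)];
      gradInput[y*iwidth + x] = sum;
    }
}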
static int nn_(TemporalMaxPooling_updateGradInput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_(Tensor_id));
  THTensor *gradOutput = luaT_checkudata(L, 3, torch_(Tensor_id));
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  THTensor *indices = luaT_getfieldcheckudata(L, 1, "indices", torch_(Tensor_id));
  THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_(Tensor_id));

  // get contiguous gradOutput
  gradOutput = THTensor_(newContiguous)(gradOutput);

  // resize and zero
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  // sizes
  int noframe = gradOutput->size[0];
  long framesize = gradOutput->size[1];

  // get raw pointers
  real *gradInput_data = THTensor_(data)(gradInput);
  real *gradOutput_data = THTensor_(data)(gradOutput);
  real *indices_data = THTensor_(data)(indices);

  long t, y;
  for(t = 0; t < noframe; t++)
  {
    real *gip = gradInput_data + t*framesize*dW;
    real *gop = gradOutput_data + t*framesize;
    real *xp = indices_data + t*framesize;
#pragma omp parallel for private(y)
    for(y = 0; y < framesize; y++)
    {
      // route the gradient back to the input frame that held the max
      long maxindex = (long)xp[y];
      gip[maxindex*framesize+y] += gop[y];
    }
  }

  // cleanup
  THTensor_(free)(gradOutput);

  return 1;
}
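/*
 * A plain-array sketch (illustrative names, not the module's code) of the
 * scatter above: for output frame t and feature y, the stored index selects
 * which of the pooled input frames held the maximum, and the output gradient
 * is routed back to that frame only.  gradInput is assumed to be laid out
 * contiguously as (frames x framesize), with pooling windows starting every
 * dW frames.
 */
static void temporal_maxpool_grad(const float *gradOutput, const float *indices,
                                  float *gradInput,
                                  long noframe, long framesize, int dW)
{
  long t, y;
  for(t = 0; t < noframe; t++)
    for(y = 0; y < framesize; y++)
    {
      long maxindex = (long)indices[t*framesize + y]; /* 0-based offset inside the window */
      gradInput[(t*dW + maxindex)*framesize + y] += gradOutput[t*framesize + y];
    }
}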
static int nn_(SpatialUpSampling_forward)(lua_State *L)
{
  // get all params
  THTensor *input = luaT_checkudata(L, 2, torch_(Tensor_id));
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_(Tensor_id));

  // dims
  int iwidth = input->size[2];
  int iheight = input->size[1];
  int ochannels = input->size[0];
  int owidth = iwidth * dW;
  int oheight = iheight * dH;

  // get raw pointers
  real *input_data = THTensor_(data)(input);
  real *output_data = THTensor_(data)(output);

  // resample each plane
  int k;
  for (k=0; k<ochannels; k++) {
    // get planes
    real *input_p = input_data + k*iwidth*iheight;
    real *output_p = output_data + k*owidth*oheight;

    // for each plane, resample
    int x,y;
    for (y=0; y<oheight; y++) {
      for (x=0; x<owidth; x++) {
        // input positions (floored)
        int ix = x/dW;
        int iy = y/dH;

        // set output
        output_p[y*owidth + x] = input_p[iy*iwidth + ix];
      }
    }
  }
  return 1;
}
int nn_(SparseLinear_updateParameters)(lua_State *L)
{
  long i;
  real learningRate = luaL_checknumber(L, 2);
  THTensor * weight = luaT_getfieldcheckudata(L, 1, "weight", torch_(Tensor_id));
  THTensor * output = luaT_getfieldcheckudata(L, 1, "output", torch_(Tensor_id));
  THTensor * bias = luaT_getfieldcheckudata(L, 1, "bias", torch_(Tensor_id));
  THTensor * gradBias = luaT_getfieldcheckudata(L, 1, "gradBias", torch_(Tensor_id));
  THTensor * gradWeight = luaT_getfieldcheckudata(L, 1, "gradWeight", torch_(Tensor_id));
  THTensor * lastInput = luaT_getfieldcheckudata(L, 1, "lastInput", torch_(Tensor_id));
  real weightDecay = luaT_getfieldchecknumber(L, 1, "weightDecay");
  long dim = weight->size[0]; /* number of weights.. */

  THTensor_(cadd)(bias, bias, -learningRate, gradBias);

  for(i = 0; i < lastInput->size[1]; i++)
  {
    long offset = (long)(THTensor_(get2d)(lastInput, 0, i))-1;

    if(offset >= 0 && offset < dim) /* make sure indices are in bounds.. */
    {
      THBlas_(axpy)(bias->size[0],
                    -learningRate,
                    THTensor_(data)(gradWeight)+offset*gradWeight->stride[0], gradWeight->stride[1],
                    THTensor_(data)(weight)+offset*weight->stride[0], weight->stride[1]);
    }
    else
      luaL_error(L, "index out of bound");
  }
  return 0;
}
static int torch_(Tensor_size)(lua_State *L)
{
  THTensor *tensor = luaT_checkudata(L, 1, torch_(Tensor_id));

  if(lua_isnumber(L,2))
  {
    int dim = luaL_checkint(L, 2)-1;
    luaL_argcheck(L, dim >= 0 && dim < THTensor_(nDimension)(tensor), 2, "out of range");
    lua_pushnumber(L, THTensor_(size)(tensor, dim));
  }
  else
    luaT_pushudata(L, THTensor_(newSizeOf)(tensor), torch_LongStorage_id);

  return 1;
}
static int torch_(Tensor___newindex__)(lua_State *L)
{
  if(lua_isnumber(L, 2))
  {
    THTensor *tensor = luaT_checkudata(L, 1, torch_(Tensor_id));
    long index = luaL_checklong(L,2)-1;
    real value = (real)luaL_checknumber(L,3);
    luaL_argcheck(L, THTensor_(nDimension)(tensor) == 1, 1, "must be a one dimensional tensor");
    THTensor_(set1d)(tensor, index, value);
    lua_pushboolean(L, 1);
  }
  else
    lua_pushboolean(L, 0);

  return 1;
}
static int nn_(SparseLinear_accGradParameters)(lua_State *L)
{
  long i;
  THTensor * input = luaT_checkudata(L, 2, torch_(Tensor_id));
  THTensor * gradOutput = luaT_checkudata(L, 3, torch_(Tensor_id));
  real scale = luaL_optnumber(L, 4, 1);
  THTensor * weight = luaT_getfieldcheckudata(L, 1, "weight", torch_(Tensor_id));
  THTensor * output = luaT_getfieldcheckudata(L, 1, "output", torch_(Tensor_id));
  THTensor * gradBias = luaT_getfieldcheckudata(L, 1, "gradBias", torch_(Tensor_id));
  THTensor * gradWeight = luaT_getfieldcheckudata(L, 1, "gradWeight", torch_(Tensor_id));
  THTensor * lastInput = luaT_getfieldcheckudata(L, 1, "lastInput", torch_(Tensor_id));
  real weightDecay = luaT_getfieldchecknumber(L, 1, "weightDecay");
  long dim = gradWeight->size[0]; /* number of weights.. */

  for(i = 0; i < input->size[1]; i++)
  {
    long offset = (long)(THTensor_(get2d)(input, 0, i))-1;

    if(offset >= 0 && offset < dim) /* make sure indices are in bounds.. */
    {
      real val = scale*THTensor_(get2d)(input, 1, i);
      THBlas_(scal)(gradOutput->size[0],
                    0,
                    THTensor_(data)(gradWeight)+offset*gradWeight->stride[0], gradWeight->stride[1]); /* zero */

      THBlas_(axpy)(gradOutput->size[0],
                    val,
                    THTensor_(data)(gradOutput), gradOutput->stride[0],
                    THTensor_(data)(gradWeight)+offset*gradWeight->stride[0], gradWeight->stride[1]);
    }
    else
      luaL_error(L, "index out of bound");
  }

  THTensor_(cadd)(gradBias, gradBias, 1, gradOutput);

  if(weightDecay != 0)
    THTensor_(cadd)(gradWeight, gradWeight, weightDecay, weight);

  THTensor_(resizeAs)(lastInput, input);
  THTensor_(copy)(lastInput, input);

  return 0;
}
static int torch_(Tensor_apply)(lua_State *L)
{
  THTensor *tensor = luaT_checkudata(L, 1, torch_(Tensor_id));
  luaL_checktype(L, 2, LUA_TFUNCTION);
  lua_settop(L, 2);

  TH_TENSOR_APPLY(real, tensor,
                  lua_pushvalue(L, 2);
                  lua_pushnumber(L, *tensor_data);
                  lua_call(L, 1, 1);
                  if(lua_isnumber(L, 3))
                  {
                    *tensor_data = (real)lua_tonumber(L, 3);
                    lua_pop(L, 1);
                  }
                  else if(lua_isnil(L, 3))
                    lua_pop(L, 1);
                  else
                    luaL_error(L, "given function should return a number or nil"););

  lua_settop(L, 1);
  return 1;
}
void etherflow_(Api_init)(lua_State *L)
{
  luaT_pushmetaclass(L, torch_(Tensor_id));
  luaT_registeratname(L, etherflow_(Api__), "etherflow");
}
// Stitch takes args:
//   pano       - a torch tensor in RGB with dims (3 x height x width)
//   offset_map - a torch tensor with the same h and w as pano, storing offsets
//                and image indices.  The two feature dimensions are:
//                -- image number (starting at 1) and
//                -- flat offset into the image tensor
//   nimages    - the number of images used to make the panorama
//   image1, ... imagen - the images, each a torch tensor
static int Lstitch_(stitch)(lua_State *L)
{
  int nargs = lua_gettop(L);
  THTensor *pano = (THTensor *)luaT_checkudata(L, 1, torch_(Tensor_id));
  THLongTensor *offset_map = (THLongTensor *)luaT_checkudata(L, 2, luaT_checktypename2id(L, "torch.LongTensor"));
  THTensor *images[MAXIMAGES];
  int i = 0;
  long npixels = offset_map->size[1]*offset_map->size[2];
  real *pano_pt = THTensor_(data)(pano);
  long *offset_pt = THLongTensor_data(offset_map);
  real *images_pt[MAXIMAGES];
  long images_npixels[MAXIMAGES];
  long images_Goff[MAXIMAGES];
  long images_Boff[MAXIMAGES];
  real * panoR = pano_pt;
  real * panoG = pano_pt + pano->stride[0];
  real * panoB = pano_pt + (2*pano->stride[0]);
  real * curImg_pt = NULL;
  long unsigned int XYoffset = 0;
  long * offImg = offset_pt;
  long * offIndexXY = offset_pt + offset_map->stride[0];
  int nimages = 0;
  long cImgOff = 0;

  /* finish processing input image tensors */
  /* either you can pass a table */
  /* or a number and variable length of args */
  if (nargs == 3){
    if (lua_istable(L,3)){
      nimages = lua_objlen(L, 3);
      /* table is in the stack at index 3 */
      lua_pushnil(L);  /* first key */
      i = 0;
      while (lua_next(L, 3) != 0) {
        /* 'key' (at index -2) and 'value' (at index -1) */
        images[i] = (THTensor *)luaT_checkudata(L, -1, torch_(Tensor_id));
        images_npixels[i] = images[i]->size[1]*images[i]->size[2];
        images_Goff[i] = images[i]->stride[0];
        images_Boff[i] = 2*images[i]->stride[0];
        images_pt[i] = THTensor_(data)(images[i]);
        /* removes 'value'; keeps 'key' for next iteration */
        lua_pop(L, 1);
        i = i+1;
      }
    } else {
      lua_pushstring(L, "with 3 args last argument is a table");
      lua_error(L);
    }
  } else {
    nimages = lua_tonumber(L,3);
    for(i=0;i<nimages;i++){
      images[i] = (THTensor *)luaT_checkudata(L, i+4, torch_(Tensor_id));
      images_npixels[i] = images[i]->size[1]*images[i]->size[2];
      images_Goff[i] = images[i]->stride[0];
      images_Boff[i] = 2*images[i]->stride[0];
      images_pt[i] = THTensor_(data)(images[i]);
    }
  }

  /* copy each pano pixel from the source image and offset named in offset_map */
  for(i=0;i<npixels;i++){
    cImgOff = (long unsigned int)*offImg - 1;
    curImg_pt = images_pt[cImgOff];
    if ((*offIndexXY > 0) && (*offIndexXY < images_npixels[cImgOff])){
      XYoffset = (long unsigned int)*offIndexXY;
      *panoR = curImg_pt[XYoffset];
      *panoG = curImg_pt[XYoffset + images_Goff[cImgOff]];
      *panoB = curImg_pt[XYoffset + images_Boff[cImgOff]];
    }
    panoR++;
    panoG++;
    panoB++;
    offImg++;
    offIndexXY++;
  }
  return 0;
}
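/*
 * A standalone restatement (illustrative only, not the module's code) of the
 * per-pixel lookup the loop above performs: for pano pixel i, offset_map row 0
 * names the source image (1-based) and row 1 the flat pixel offset inside that
 * image; the R, G and B planes of the chosen image are read at that offset.
 * This sketch assumes all source images are contiguous and share one plane
 * stride, which the real code tracks per image.
 */
static void stitch_pixel(float *panoR, float *panoG, float *panoB,
                         const long *offImg, const long *offXY,
                         float **images, const long *img_npixels,
                         long plane_stride, long i)
{
  long img = offImg[i] - 1;   /* 1-based -> 0-based image index */
  long off = offXY[i];
  const float *src = images[img];
  if(off > 0 && off < img_npixels[img])
  {
    panoR[i] = src[off];
    panoG[i] = src[off + plane_stride];   /* G plane */
    panoB[i] = src[off + 2*plane_stride]; /* B plane */
  }
}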
void Lstitch_(Init)(lua_State *L)
{
  luaT_pushmetaclass(L, torch_(Tensor_id));
  luaT_registeratname(L, Lstitch_(Methods), "stitch");
}
static void nn_(SpatialUpSampling_init)(lua_State *L)
{
  luaT_pushmetaclass(L, torch_(Tensor_id));
  luaT_registeratname(L, nn_(SpatialUpSampling__), "nn");
  lua_pop(L,1);
}
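/*
 * The *_init functions above push the Tensor metaclass and attach a method
 * table under a namespace ("nn", "stitch", "etherflow").  The tables
 * themselves (e.g. nn_(SpatialUpSampling__), Lstitch_(Methods)) are defined
 * elsewhere in each package; they conventionally follow the luaL_Reg pattern
 * sketched below.  The table name and the Lua-visible method strings are
 * assumptions for illustration, not the package's actual definitions
 * (requires <lauxlib.h>, already pulled in by the luaL_* calls above).
 */
static const struct luaL_Reg nn_(SpatialUpSampling_methods_sketch__) [] = {
  {"SpatialUpSampling_forward",  nn_(SpatialUpSampling_forward)},
  {"SpatialUpSampling_backward", nn_(SpatialUpSampling_backward)},
  {NULL, NULL}  /* sentinel required by luaT_registeratname / luaL_register */
};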