/* qp:run(fn [, initialize, finalize])
   qp:run(data, kernel [, initialize, finalize])
   Runs the SVQP2 solver. Arg 2 is either a Lua kernel function or a 2D
   Tensor of training data (rows are examples, row length must equal qp->n).
   `initialize`/`finalize` default to true. Raises with qp->errmsg if the
   solver reports failure (result == -1). Always returns 0 values to Lua. */
static int QPSolver_run(lua_State *L)
{
  SVQP2 *qp = (SVQP2*)luaT_checkudata(L, 1, QPSolver_id);
  luaL_argcheck(L, lua_isfunction(L, 2) || luaT_isudata(L, 2, torch_Tensor_id), 2, "function or Tensor expected");
  if(lua_isfunction(L, 2))
  {
    /* Lua-function kernel: the closure payload is the Lua state itself */
    bool initialize = luaT_optboolean(L, 3, true);
    bool finalize = luaT_optboolean(L, 4, true);
    qp->Aclosure = L;
    qp->Afunction = QPSolver_luaClosure;
    int result = qp->run(initialize, finalize);
    if(result == -1)
      luaL_error(L, qp->errmsg);
    return 0;
  }
  else
  {
    /* Tensor + kernel-object form: arg 2 = data, arg 3 = kernel table,
       optional flags shift to args 4/5 */
    bool initialize = luaT_optboolean(L, 4, true);
    bool finalize = luaT_optboolean(L, 5, true);
    THTensor *data = (THTensor*)luaT_checkudata(L, 2, torch_Tensor_id);
    luaL_argcheck(L, data->nDimension == 2, 2, "2D Tensor expected");
    luaL_argcheck(L, data->size[1] == qp->n, 2, "invalid size");
    struct QPSolver_cClosureParams stuff;
    /* a kernel whose "__eval" field is a lightuserdata is a C kernel;
       otherwise it is a Lua kernel with an "eval" method */
    lua_getfield(L, 3, "__eval");
    bool isCKernel = lua_islightuserdata(L, -1);
    lua_pop(L, 1);
    if(isCKernel)
    {
      /* C kernel: fetch the raw function pointer and its opaque params */
      double (*func)(THTensor*, THTensor*, void *) = ( double (*)(THTensor*, THTensor*, void *))luaT_getfieldchecklightudata(L, 3, "__eval");
      void *params = luaT_getfieldchecklightudata(L, 3, "__params");
      stuff.func = func;
      stuff.params = params;
    }
    else
    {
      /* Lua kernel: deliberately leave kernel.eval and the kernel object
         on the stack for QPSolver_cLuaClosure to call later */
      lua_getfield(L, 3, "eval");
      lua_pushvalue(L, 3);
      stuff.func = NULL;
      stuff.params = NULL;
    }
    stuff.L = L;
    stuff.data = data;
    /* scratch row tensors; pushed as userdata so Lua GC frees them */
    stuff.x1 = THTensor_new();
    stuff.x2 = THTensor_new();
    luaT_pushudata(L, stuff.x1, torch_Tensor_id); /* auto dealloc */
    luaT_pushudata(L, stuff.x2, torch_Tensor_id); /* auto dealloc */
    qp->Aclosure = &stuff;
    qp->Afunction = (stuff.func ? QPSolver_cClosure : QPSolver_cLuaClosure);
    int result = qp->run(initialize, finalize);
    if(result == -1)
      luaL_error(L, qp->errmsg);
    return 0;
  }
}
/* __index metamethod: a numeric key selects along the first dimension
   (1-based on the Lua side). Returns (value, true) when it handled the
   key, or (false) so the default lookup proceeds. A 1D tensor yields a
   number, higher dimensions yield a selected sub-tensor. */
static int torch_(Tensor___index__)(lua_State *L)
{
  THTensor *self = luaT_checkudata(L, 1, torch_(Tensor_id));

  if(!lua_isnumber(L, 2))
  {
    lua_pushboolean(L, 0);
    return 1;
  }

  {
    long idx = luaL_checklong(L, 2) - 1;  /* convert to 0-based */
    luaL_argcheck(L, THTensor_(nDimension)(self) > 0, 1, "empty tensor");
    if(THTensor_(nDimension)(self) == 1)
      lua_pushnumber(L, THTensor_(get1d)(self, idx));
    else
      luaT_pushudata(L, THTensor_(newWithTensorSelect)(self, idx), torch_(Tensor_id));
  }
  lua_pushboolean(L, 1);
  return 2;
}
/* Push the HTK feature array `arr` onto the Lua stack as a 2D
   torch.FloatTensor of shape (nsamples, samplesize/4 * (2*frm_ext+1)),
   copying the sample bytes into the tensor's own storage.
   Based on code from mattorch, with the stride fixed for row-major data. */
static void load_array_to_lua(lua_State *L, chtk::htkarray& arr)
{
  int ndims = 2;
  /* floats per row: samplesize is in bytes, 4 bytes per float, and each
     frame is widened by its left/right context (frm_ext on each side) */
  long rowlen = arr.samplesize/4*(2*arr.frm_ext+1);

  THLongStorage *size = THLongStorage_newWithSize(ndims);
  THLongStorage *stride = THLongStorage_newWithSize(ndims);
  THLongStorage_set(size, 0, arr.nsamples);
  THLongStorage_set(size, 1, rowlen);
  THLongStorage_set(stride, 0, rowlen);  /* contiguous row-major layout */
  THLongStorage_set(stride, 1, 1);

  THFloatTensor *tensor = THFloatTensor_newWithSize(size, stride);
  /* the tensor takes its own copy of the descriptors; free ours —
     the original code leaked both storages on every call */
  THLongStorage_free(size);
  THLongStorage_free(stride);

  void *tensorDataPtr = (void *)(THFloatTensor_data(tensor));
  size_t numBytes = THFloatTensor_nElement(tensor) * sizeof(float);

  luaT_pushudata(L, tensor, luaT_checktypename2id(L, "torch.FloatTensor"));

  /* now copy the data */
  assert(tensorDataPtr);
  memcpy(tensorDataPtr, (void *)(arr.data<void>()), numBytes);
}
/* __sub__ metamethod: tensor-tensor, tensor-number or number-tensor.
   At least one operand must be a torch.CudaTensor; the result is a new
   CudaTensor pushed on the stack. */
static int cutorch_CudaTensorOperator___sub__(lua_State *L)
{
  THCudaTensor *a = luaT_toudata(L, 1, "torch.CudaTensor");
  THCudaTensor *b = luaT_toudata(L, 2, "torch.CudaTensor");

  if(!a && !b)
    luaL_error(L, "expecting two Tensors or one Tensor and one number");
  else
  {
    THCudaTensor *r = THCudaTensor_new();
    luaT_pushudata(L, r, "torch.CudaTensor");

    if(a && b)
    {
      /* tensor - tensor */
      THCudaTensor_resizeAs(r, a);
      THCudaTensor_copy(r, a);
      THCudaTensor_cadd(r, r, -1, b);
    }
    else if(a)
    {
      /* tensor - number */
      THCudaTensor_resizeAs(r, a);
      THCudaTensor_copy(r, a);
      THCudaTensor_add(r, r, -luaL_checknumber(L, 2));
    }
    else
    {
      /* number - tensor: fill with the scalar, then subtract */
      THCudaTensor_resizeAs(r, b);
      THCudaTensor_fill(r, luaL_checknumber(L, 1));
      THCudaTensor_cadd(r, r, -1, b);
    }
  }
  return 1;
}
/* file:storage() -> the torch.CharStorage backing this in-memory file.
   Retained before pushing so the Lua-side reference owns its own
   refcount independently of the file. */
static int torch_MemoryFile_storage(lua_State *L)
{
  THFile *self = luaT_checkudata(L, 1, "torch.MemoryFile");
  THCharStorage *storage = THMemoryFile_storage(self);
  THCharStorage_retain(storage);
  luaT_pushudata(L, storage, "torch.CharStorage");
  return 1;
}
/* tensor:storage() -> the tensor's underlying storage.
   Retained before pushing so the Lua-side reference owns its own
   refcount independently of the tensor. */
static int torch_(Tensor_storage)(lua_State *L)
{
  THTensor *self = luaT_checkudata(L, 1, torch_(Tensor_id));
  THStorage_(retain)(THTensor_(storage)(self));
  luaT_pushudata(L, THTensor_(storage)(self), torch_(Storage_id));
  return 1;
}
/* window(w, h): create a native window titled "Torch5.1" and push it. */
static int lcairo_create_window(lua_State *L)
{
  int width  = luaL_checkint(L, 1);
  int height = luaL_checkint(L, 2);
  tWindow *win = tWindow_create(L, width, height, "Torch5.1");
  luaT_pushudata(L, win, tWindow_id);
  return 1;
}
/* Module entry point: builds the cutorch table, allocates and initializes
   the global THCState (plus MAGMA when compiled in), registers the CUDA
   storage/tensor types, and stashes the state pointer in cutorch._state.
   Returns the cutorch table. */
int luaopen_libcutorch(lua_State *L)
{
  lua_newtable(L);
  luaL_setfuncs(L, cutorch_stuff__, 0);

  THCState* state = (THCState*)malloc(sizeof(THCState));
  /* malloc was previously unchecked: THCudaInit would dereference NULL
     on allocation failure */
  if(!state)
    luaL_error(L, "cutorch: not enough memory for THCState");
  THCudaInit(state);

  /* Register torch.CudaHostAllocator. */
  luaT_pushudata(L, state->cudaHostAllocator, "torch.Allocator");
  lua_setfield(L, -2, "CudaHostAllocator");

#ifdef USE_MAGMA
  THCMagma_init(state);
  lua_pushboolean(L, 1);
  lua_setfield(L, -2, "magma");
#endif

  cutorch_CudaStorage_init(L);
  cutorch_CudaTensor_init(L);
  cutorch_CudaTensorMath_init(L);
  cutorch_CudaTensorOperator_init(L);

  /* Store state in cutorch table. */
  lua_pushlightuserdata(L, state);
  lua_setfield(L, -2, "_state");

  return 1;
}
/* __index metamethod: a numeric key reads element idx of a 1D matrix
   (pushes a number) or row idx of a 2D matrix (pushes a row view).
   NOTE: indices are 0-based here, unlike the usual Lua convention.
   Returns (value, true) for numeric keys, or (false) so the default
   lookup proceeds. Raises on out-of-range indices.
   Fix: removed the unused `Status status;` local. */
static int nerv_matrix_(lua_index)(lua_State *L)
{
  Matrix *self = luaT_checkudata(L, 1, nerv_matrix_(tname));
  if (lua_isnumber(L, 2))
  {
    int idx = luaL_checkinteger(L, 2);
    if (self->dim == 1)
    {
      if (idx < 0 || idx >= self->ncol)
        nerv_error(L, "index must be within range [0, %d)", self->ncol);
      lua_pushnumber(L, MATRIX_DATA_READ(L, MATRIX_ELEM_PTR(self), idx));
    }
    else
    {
      if (idx < 0 || idx >= self->nrow)
        nerv_error(L, "index must be within range [0, %d)", self->nrow);
      luaT_pushudata(L, nerv_matrix_(getrow)(self, idx), nerv_matrix_(tname));
    }
    lua_pushboolean(L, 1);
    return 2;
  }
  else
  {
    lua_pushboolean(L, 0);
    return 1;
  }
}
/* QPSolver(n): allocate a solver for an n-variable QP and push it as
   userdata (ownership transfers to the Lua GC via the metatable). */
static int QPSolver_new(lua_State *L)
{
  int n = (int)luaL_checknumber(L, 1);
  luaT_pushudata(L, new SVQP2(n), QPSolver_id);
  return 1;
}
/* queue:arg(idx) -> torch.CharStorage | nothing      (getter)
   queue:arg(idx, storage)                            (setter)
   Reads or replaces the serialized-argument slot `idx` (0-based) of the
   queue. NOTE(review): no locking is visible here — presumably the
   caller holds the queue mutex; verify against call sites. */
static int queue_arg(lua_State *L)
{
  THQueue *queue = luaTHRD_checkudata(L, 1, "threads.Queue");
  int idx = luaL_checkint(L, 2);
  luaL_argcheck(L, idx >= 0 && idx < queue->size, 2, "out of range");
  if(lua_gettop(L) == 2)
  {
    /* getter: retain before pushing so the Lua reference owns its own
       refcount; returns nothing if the slot is empty */
    THCharStorage *storage = NULL;
    if((storage = queue->args[idx]))
    {
      THCharStorage_retain(storage);
      luaT_pushudata(L, storage, "torch.CharStorage");
      return 1;
    }
    else
      return 0;
  }
  else if(lua_gettop(L) == 3)
  {
    THCharStorage *storage = luaT_checkudata(L, 3, "torch.CharStorage"); /* DEBUG: might be luaT for torch objects */
    /* setter: release the previous occupant, then store and retain the
       new storage so the queue owns a reference */
    if(queue->args[idx])
      THCharStorage_free(queue->args[idx]);
    queue->args[idx] = storage;
    THCharStorage_retain(storage);
    return 0;
  }
  else
    luaL_error(L, "invalid arguments");
  return 0;
}
/* cutorch.getRNGState(): snapshot the CUDA RNG state into a fresh
   torch.ByteTensor and push it. */
static int cutorch_getRNGState(lua_State *L)
{
  THByteTensor *state = THByteTensor_new();
  THCRandom_getRNGState(cutorch_getstate(L), state);
  luaT_pushudata(L, state, "torch.ByteTensor");
  return 1;
}
/* __add__ metamethod: tensor+tensor, tensor+number or number+tensor.
   At least one operand must be a Tensor; the result is a new tensor
   pushed on the stack. */
static int torch_TensorOperator_(__add__)(lua_State *L)
{
  THTensor *a = luaT_toudata(L, 1, torch_Tensor);
  THTensor *b = luaT_toudata(L, 2, torch_Tensor);

  if(!a && !b)
    luaL_error(L, "expecting two Tensors or one Tensor and one number");
  else
  {
    THTensor *r = THTensor_(new)();
    luaT_pushudata(L, r, torch_Tensor);

    if(a && b)
    {
      /* tensor + tensor */
      THTensor_(resizeAs)(r, a);
      THTensor_(copy)(r, a);
      THTensor_(cadd)(r, r, 1, b);
    }
    else
    {
      /* exactly one operand is a tensor; the other is the scalar */
      THTensor *t = a ? a : b;
      double scalar = luaL_checknumber(L, a ? 2 : 1);
      THTensor_(resizeAs)(r, t);
      THTensor_(copy)(r, t);
      THTensor_(add)(r, r, scalar);
    }
  }
  return 1;
}
/* qp:permutation() -> torch.IntStorage of length qp->n, filled with the
   solver's example permutation. */
static int QPSolver_permutation(lua_State *L)
{
  SVQP2 *qp = (SVQP2*)luaT_checkudata(L, 1, QPSolver_id);
  THIntStorage *perm = THIntStorage_newWithSize(qp->n);
  qp->permutation(perm->data);
  luaT_pushudata(L, perm, torch_IntStorage_id);
  return 1;
}
/* tensor:select(i): 1-based slice along the first dimension, pushed as
   a new tensor view sharing the same storage. */
static int torch_(Tensor_select)(lua_State *L)
{
  THTensor *self = luaT_checkudata(L, 1, torch_(Tensor_id));
  long slice = luaL_checklong(L, 2) - 1;  /* convert to 0-based */
  luaT_pushudata(L, THTensor_(newWithTensorSelect)(self, slice), torch_(Tensor_id));
  return 1;
}
/* Matrix(nrow, ncol): allocate a new matrix, raising a Lua error if the
   underlying create reports a bad status, and push it as userdata. */
int nerv_matrix_(lua_new)(lua_State *L)
{
  Status status;
  Matrix *m = nerv_matrix_(create)(luaL_checkinteger(L, 1),
                                   luaL_checkinteger(L, 2),
                                   &status);
  NERV_LUA_CHECK_STATUS(L, status);
  luaT_pushudata(L, m, nerv_matrix_(tname));
  return 1;
}
/* tensor:narrow(first, size): 1-based narrow along the first dimension,
   pushed as a new tensor view sharing the same storage. */
static int torch_(Tensor_narrow)(lua_State *L)
{
  THTensor *self = luaT_checkudata(L, 1, torch_(Tensor_id));
  long first = luaL_checklong(L, 2) - 1;  /* convert to 0-based */
  long len = luaL_checklong(L, 3);
  luaT_pushudata(L, THTensor_(newWithTensorNarrow)(self, first, len), torch_(Tensor_id));
  return 1;
}
/* cairo(surface): create a drawing context on `surface` with a default
   10pt sans-serif font preselected, and push it. */
static int lcairo_cairo_create(lua_State *L)
{
  cairo_surface_t *surface = luaT_checkudata(L, 1, tSurface_id);
  cairo_t *cr = cairo_create(surface);
  cairo_select_font_face(cr, "sans-serif",
                         CAIRO_FONT_SLANT_NORMAL,
                         CAIRO_FONT_WEIGHT_NORMAL);
  cairo_set_font_size(cr, 10);
  luaT_pushudata(L, cr, tCairo_id);
  return 1;
}
/* image(w, h): allocate a w x h ARGB32 image whose pixel buffer we own,
   initialised to all-0xff bytes (opaque white), wrapped in a cairo
   image surface with a 4*w byte row stride. */
static int lcairo_create_image(lua_State *L)
{
  int w = luaL_checkint(L, 1);
  int h = luaL_checkint(L, 2);

  tImage *img = (tImage*)luaT_alloc(L, sizeof(tImage));
  img->data = luaT_alloc(L, w*h*4);   /* 4 bytes per ARGB32 pixel */
  memset(img->data, 0xff, w*h*4);
  img->surf = cairo_image_surface_create_for_data(img->data,
                                                  CAIRO_FORMAT_ARGB32,
                                                  w, h, 4*w);
  luaT_pushudata(L, img, tImage_id);
  return 1;
}
/* torch.PipeFile(name [, mode="r" [, quiet=false]]): open a pipe-backed
   file object and push it. */
static int torch_PipeFile_new(lua_State *L)
{
  const char *name = luaL_checkstring(L, 1);
  const char *mode = luaL_optstring(L, 2, "r");
  int quiet = luaT_optboolean(L, 3, 0);

  THFile *file = THPipeFile_new(name, mode, quiet);
  luaT_pushudata(L, file, "torch.PipeFile");
  return 1;
}
/* pa.openstream(inparams|nil, outparams|nil, samplerate, nbufframe, flags [, callback])
   Opens a PortAudio stream. inparams/outparams are parameter tables (nil
   disables that direction), and the optional callback makes the stream
   callback-driven. Pushes a pa.Stream userdata. Raises on bad arguments
   or PortAudio errors. */
static int pa_openstream(lua_State *L)
{
  double samplerate = 0;
  unsigned long nbufframe = 0;
  pa_Stream *stream = NULL;
  int narg = lua_gettop(L);
  int hascallback = 0;
  PaStreamParameters inparams;
  PaStreamParameters outparams;
  PaStreamFlags flags = 0;

  /* strict signature check: 5 args, or 6 with a trailing function */
  if((narg == 5 || (narg == 6 && lua_isfunction(L, 6))) && (lua_istable(L, 1) || lua_isnil(L, 1)) && (lua_istable(L, 2) || lua_isnil(L, 2)) && lua_isnumber(L, 3) && lua_isnumber(L, 4) && lua_isnumber(L, 5))
  {
    /* inparams/outparams stay uninitialized when nil is passed; every
       later read is guarded by the same lua_istable() test */
    if(lua_istable(L, 1))
      pa_readstreamparameters(L, 1, &inparams);
    if(lua_istable(L, 2))
      pa_readstreamparameters(L, 2, &outparams);
    samplerate = (double)lua_tonumber(L, 3);
    nbufframe = (unsigned long)lua_tonumber(L, 4);
    flags = (PaStreamFlags)lua_tonumber(L, 5);
    if(narg == 6)
      hascallback = 1;
  }
  else
    luaL_error(L, "expected arguments: (table | nil) (table | nil) number number number [function]");

  stream = luaT_alloc(L, sizeof(pa_Stream));
  stream->id = NULL;
  /* cache channel counts / sample formats; 0 for a disabled direction */
  stream->ninchannel = (lua_istable(L, 1) ? inparams.channelCount : 0);
  stream->noutchannel = (lua_istable(L, 2) ? outparams.channelCount : 0);
  stream->insampleformat = (lua_istable(L, 1) ? inparams.sampleFormat : 0);
  stream->outsampleformat = (lua_istable(L, 2) ? outparams.sampleFormat : 0);
  /* the stream gets its own Lua state so the audio callback can run
     outside the main interpreter */
  if(!(stream->pa_L = luaL_newstate()))
    luaL_error(L, "could not allocate new state");
  stream->callbackerror = NULL;
  /* push before Pa_OpenStream so Lua owns the userdata even on error */
  luaT_pushudata(L, stream, "pa.Stream");
  luaL_openlibs(stream->pa_L);
  if(hascallback)
    pa_setcallback__(L, 6, stream);
  pa_checkerror(L, Pa_OpenStream(&stream->id, (lua_istable(L, 1) ? &inparams : NULL), (lua_istable(L, 2) ? &outparams : NULL), samplerate, nbufframe, flags, (hascallback ? streamcallbackshort : NULL), (hascallback ? stream : NULL)));
  return 1;
}
/* cutorch.Event(): create a CUDA event, record it on the current stream
   immediately, and push it as userdata. */
static int cutorch_Event_new(lua_State *L)
{
  THCState *state = cutorch_getstate(L);
  cudaEvent_t *event = luaT_alloc(L, sizeof(cudaEvent_t));
  THCudaCheck(cudaEventCreate(event));
  THCudaCheck(cudaEventRecord(*event, THCState_getCurrentStream(state)));
  luaT_pushudata(L, event, "cutorch.Event");
  return 1;
}
/* __unm__ metamethod: push -tensor as a fresh torch.CudaTensor. */
static int cutorch_CudaTensorOperator___unm__(lua_State *L)
{
  THCudaTensor *self = luaT_checkudata(L, 1, "torch.CudaTensor");
  THCudaTensor *neg = THCudaTensor_new();

  luaT_pushudata(L, neg, "torch.CudaTensor");
  THCudaTensor_resizeAs(neg, self);
  THCudaTensor_copy(neg, self);
  THCudaTensor_mul(neg, neg, -1);
  return 1;
}
/* tensor:storage() -> the underlying storage (retained for the Lua
   reference), or nil when the tensor has no storage. */
static int torch_Tensor_(storage)(lua_State *L)
{
  THTensor *self = luaT_checkudata(L, 1, torch_Tensor);

  if(!self->storage)
  {
    lua_pushnil(L);
    return 1;
  }

  THStorage_(retain)(self->storage);
  luaT_pushudata(L, self->storage, torch_Storage);
  return 1;
}
/* pssurface(filename, w, h): create a PostScript surface that writes to
   `filename`; raises when cairo was built without PS support. */
static int lcairo_create_ps_surface(lua_State *L)
{
#ifdef CAIRO_HAS_PS_SURFACE
  const char *filename = luaL_checkstring(L, 1);
  double width = luaL_checknumber(L, 2);
  double height = luaL_checknumber(L, 3);
  cairo_surface_t *surface =
    (cairo_surface_t*)cairo_ps_surface_create(filename, width, height);
  luaT_pushudata(L, surface, tSurface_id);
  return 1;
#else
  luaL_error(L,"Installed Cairo does not support PS");
  return 0;
#endif
}
/* __unm__ metamethod: push -tensor as a fresh tensor. */
static int torch_TensorOperator_(__unm__)(lua_State *L)
{
  THTensor *self = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *neg = THTensor_(new)();

  luaT_pushudata(L, neg, torch_Tensor);
  THTensor_(resizeAs)(neg, self);
  THTensor_(copy)(neg, self);
  THTensor_(mul)(neg, neg, -1);
  return 1;
}
/* torch.DiskFile(name [, mode="r" [, quiet=false]])
   mode is 'r', 'w' or 'rw'. 'rw' opens read/write, creating the file
   first if it does not exist. On open failure: returns nothing when
   quiet, raises otherwise. Pushes a torch.DiskFile userdata. */
static int torch_DiskFile_new(lua_State *L)
{
  const char *name = luaL_checkstring(L, 1);
  const char *mode = luaL_optstring(L, 2, "r");
  int isQuiet = luaT_optboolean(L, 3, 0);
  int isReadable;
  int isWritable;
  FILE *handle;
  DiskFile *file;

  luaL_argcheck(L, torch_DiskFile_c_mode(mode, &isReadable, &isWritable), 2, "file mode should be 'r','w' or 'rw'");

  if( isReadable && isWritable )
  {
    /* "rw": try to open an existing file; if absent, create it and then
       reopen it in update mode */
    handle = fopen(name, "r+b");
    if(!handle)
    {
      handle = fopen(name, "wb");
      if(handle)
      {
        fclose(handle);
        handle = fopen(name, "r+b");
      }
    }
  }
  else
    handle = fopen(name, (isReadable ? "rb" : "wb"));

  if(!handle)
  {
    if(isQuiet)
      return 0;
    else
      luaL_error(L, "cannot open <%s> in mode %c%c", name, (isReadable ? 'r' : ' '), (isWritable ? 'w' : ' '));
  }

  file = luaT_alloc(L, sizeof(DiskFile));
  file->handle = handle;
  file->flags.isQuiet = isQuiet;
  file->flags.isReadable = isReadable;
  file->flags.isWritable = isWritable;
  /* defaults: native byte order, ASCII (non-binary) mode, auto-spacing on */
  file->isNativeEncoding = 1;
  file->flags.isBinary = 0;
  file->flags.isAutoSpacing = 1;
  file->flags.hasError = 0;
  file->name = luaT_alloc(L, strlen(name)+1);  /* +1 for the terminator */
  strcpy(file->name, name);
  luaT_pushudata(L, file, torch_DiskFile_id);
  return 1;
}
/* __mul__ metamethod: number*tensor, tensor*number or tensor*tensor.
   tensor*tensor dispatches on dimensionality: 1Dx1D -> dot product
   (pushed as a plain number), 2Dx1D -> matrix-vector product,
   2Dx2D -> matrix-matrix product; anything else raises. */
static int torch_TensorOperator_(__mul__)(lua_State *L)
{
  THTensor *tensor1 = luaT_toudata(L, 1, torch_Tensor);
  THTensor *tensor2 = luaT_toudata(L, 2, torch_Tensor);
  THTensor *r;

  if(!tensor1 && !tensor2)
    luaL_error(L, "expecting two Tensors or one Tensor and one number");
  else
  {
    r = THTensor_(new)();
    luaT_pushudata(L, r, torch_Tensor);

    if(!tensor1 && tensor2)
    {
      /* number * tensor: scale a copy */
      THTensor_(resizeAs)(r, tensor2);
      THTensor_(copy)(r, tensor2);
      THTensor_(mul)(r, r, luaL_checknumber(L, 1));
    }
    else if(tensor1 && !tensor2)
    {
      /* tensor * number: scale a copy */
      THTensor_(resizeAs)(r, tensor1);
      THTensor_(copy)(r, tensor1);
      THTensor_(mul)(r, r, luaL_checknumber(L, 2));
    }
    else
    {
      int dimt = tensor1->nDimension;
      int dims = tensor2->nDimension;

      if(dimt == 1 && dims == 1)
        /* the number lands on top of the stack above the unused r */
        lua_pushnumber(L, THTensor_(dot)(tensor1, tensor2)); /* ok, we wasted r, but who cares */
      else if(dimt == 2 && dims == 1)
      {
        THTensor_(resize1d)(r, tensor1->size[0]);
        THTensor_(zero)(r);
        THTensor_(addmv)(r, 1, r, 1, tensor1, tensor2);
      }
      else if(dimt == 2 && dims == 2)
      {
        THTensor_(resize2d)(r, tensor1->size[0], tensor2->size[1]);
        THTensor_(zero)(r);
        THTensor_(addmm)(r, 1, r, 1, tensor1, tensor2);
      }
      else
        luaL_error(L, "multiplication between %dD and %dD tensors not yet supported", tensor1->nDimension, tensor2->nDimension);
    }
  }
  return 1;
}
/* __mul__ metamethod for CudaTensor: number*tensor, tensor*number or
   tensor*tensor. tensor*tensor dispatches on dimensionality:
   1Dx1D -> dot product (pushed as a plain number), 2Dx1D -> matrix-vector,
   2Dx2D -> matrix-matrix; anything else raises. */
static int cutorch_CudaTensorOperator___mul__(lua_State *L)
{
  THCudaTensor *tensor1 = luaT_toudata(L, 1, "torch.CudaTensor");
  THCudaTensor *tensor2 = luaT_toudata(L, 2, "torch.CudaTensor");
  THCudaTensor *r;

  if(!tensor1 && !tensor2)
    luaL_error(L, "expecting two Tensors or one Tensor and one number");
  else
  {
    r = THCudaTensor_new();
    luaT_pushudata(L, r, "torch.CudaTensor");

    if(!tensor1 && tensor2)
    {
      /* number * tensor: scale a copy */
      THCudaTensor_resizeAs(r, tensor2);
      THCudaTensor_copy(r, tensor2);
      THCudaTensor_mul(r, r, luaL_checknumber(L, 1));
    }
    else if(tensor1 && !tensor2)
    {
      /* tensor * number: scale a copy */
      THCudaTensor_resizeAs(r, tensor1);
      THCudaTensor_copy(r, tensor1);
      THCudaTensor_mul(r, r, luaL_checknumber(L, 2));
    }
    else
    {
      int dimt = tensor1->nDimension;
      int dims = tensor2->nDimension;

      if(dimt == 1 && dims == 1)
        /* the number lands on top of the stack above the unused r */
        lua_pushnumber(L, THCudaTensor_dot(tensor1, tensor2)); /* ok, we wasted r, but who cares */
      else if(dimt == 2 && dims == 1)
      {
        THCudaTensor_resize1d(r, tensor1->size[0]);
        THCudaTensor_zero(r);
        THCudaTensor_addmv(r, 1, r, 1, tensor1, tensor2);
      }
      else if(dimt == 2 && dims == 2)
      {
        THCudaTensor_resize2d(r, tensor1->size[0], tensor2->size[1]);
        THCudaTensor_zero(r);
        THCudaTensor_addmm(r, 1, r, 1, tensor1, tensor2);
      }
      else
        luaL_error(L, "multiplication between %dD and %dD tensors not yet supported", tensor1->nDimension, tensor2->nDimension);
    }
  }
  return 1;
}
/* tensor:size([dim]): with a numeric dim (1-based), push that
   dimension's size as a number; without it, push a LongStorage holding
   all sizes. */
static int torch_(Tensor_size)(lua_State *L)
{
  THTensor *self = luaT_checkudata(L, 1, torch_(Tensor_id));

  if(!lua_isnumber(L, 2))
  {
    luaT_pushudata(L, THTensor_(newSizeOf)(self), torch_LongStorage_id);
    return 1;
  }

  {
    int dim = luaL_checkint(L, 2) - 1;  /* convert to 0-based */
    luaL_argcheck(L, dim >= 0 && dim < THTensor_(nDimension)(self), 2, "out of range");
    lua_pushnumber(L, THTensor_(size)(self, dim));
  }
  return 1;
}