else \ { \ if(lua_isfunction(L, -1)) \ { \ lua_insert(L, 1); /* insert function */ \ lua_pop(L, 1); /* remove metatable */ \ lua_call(L, lua_gettop(L)-1, LUA_MULTRET); /* we return the result of the call */ \ return lua_gettop(L); \ } \ /* we return the thing the user left in __tostring__ */ \ } \ return 0; \ } MT_DECLARE_OPERATOR(tostring, lua_pushstring(L, luaT_typename(L, 1)); return 1;) MT_DECLARE_OPERATOR(add, luaL_error(L, "%s has no addition operator", luaT_typename(L, 1))) MT_DECLARE_OPERATOR(sub, luaL_error(L, "%s has no substraction operator", luaT_typename(L, 1))) MT_DECLARE_OPERATOR(mul, luaL_error(L, "%s has no multiplication operator", luaT_typename(L, 1))) MT_DECLARE_OPERATOR(div, luaL_error(L, "%s has no division operator", luaT_typename(L, 1))) MT_DECLARE_OPERATOR(mod, luaL_error(L, "%s has no modulo operator", luaT_typename(L, 1))) MT_DECLARE_OPERATOR(pow, luaL_error(L, "%s has no power operator", luaT_typename(L, 1))) MT_DECLARE_OPERATOR(unm, luaL_error(L, "%s has no negation operator", luaT_typename(L, 1))) MT_DECLARE_OPERATOR(concat, luaL_error(L, "%s has no concat operator", luaT_typename(L, 1))) MT_DECLARE_OPERATOR(len, luaL_error(L, "%s has no length operator", luaT_typename(L, 1))) MT_DECLARE_OPERATOR(eq, lua_settop(L, 2); lua_pushcfunction(L, luaT_lua_isequal); lua_insert(L, 1); lua_call(L, 2, 1);
/* fastimage.load([tensor][, sizeindex]) -> tensor, {info...}
 *
 * Loads the next batch of up to `max` images (opened by fastimage.init),
 * rescales each to the selected size and packs them into a max x 3 x H x W
 * float tensor. A compatible tensor passed as argument 1 is reused as the
 * destination; otherwise a new one is allocated. Returns the tensor and a
 * Lua table with {filename, width, height} per image, or nothing when no
 * images are left. */
static int luafunc_load(lua_State *L)
{
   THFloatTensor *out = 0;
   const char *argtype = luaT_typename(L, 1);
   int k;
   int sel = lua_tointeger(L, 2);

   if(max == 0)
      luaL_error(L, "fastimage.init: call init first");
   if(sel > nsizes)
      luaL_error(L, "Invalid size index %d", sel);
   /* Convert the 1-based Lua index to 0-based; anything below 1 maps to 0. */
   sel--;
   if(sel < 0)
      sel = 0;

   /* If the caller handed us a FloatTensor, reuse it only when its layout
      matches what we are about to produce (max x 3 x H x W). */
   if(argtype && !strcmp(argtype, "torch.FloatTensor"))
   {
      out = luaT_toudata(L, 1, luaT_typenameid(L, "torch.FloatTensor"));
      if(out->nDimension == 4 && out->size[1] == 3)
      {
         if(nsizes == 1)
         {
            /* Single-size mode: adopt the tensor's geometry and batch size. */
            sizes[0].width = out->size[3];
            sizes[0].height = out->size[2];
            max = out->size[0];
         }
         else if(sizes[0].width != out->size[3] || sizes[0].height != out->size[2] || max != out->size[0])
            out = 0;
      }
      else
         out = 0;
   }

   /* Size index 0 (or none) advances to the next batch of files; higher
      indices re-emit the batch already in memory at another size. */
   if(!sel)
   {
      for(k = 0; k < max; k++)
      {
         if(images[k].bitmap)
         {
            free(images[k].bitmap);
            images[k].bitmap = 0;
         }
      }
      for(k = 0; k < max; k++)
         if(loadnextimage(images + k))
            break;
      if(k == 0)
      {
         lprintf("Nothing found\n");
         return 0;
      }
      if(k < max)
      {
         /* Short batch at end of input: shrink, narrowing a reused tensor. */
         max = k;
         if(out)
            out = THFloatTensor_newNarrow(out, 0, 0, k);
      }
   }

   for(k = 0; k < max; k++)
   {
      if(nsizes == 1 && (!sizes[0].width || !sizes[0].height))
      {
         /* No target size configured: take it from the first decoded image. */
         lprintf("Set width = %d, height = %d\n", images[k].width, images[k].height);
         sizes[0].width = images[k].width;
         sizes[0].height = images[k].height;
      }
      if(!out)
         out = THFloatTensor_newWithSize4d(max, 3, sizes[sel].height, sizes[sel].width);
      uint8_t *pix = scale(images + k, sizes[sel].width, sizes[sel].height);
      rgb_tofloat(THFloatTensor_data(out) + k * out->stride[0], out->stride[1], out->stride[2],
         pix, sizes[sel].width, sizes[sel].height);
      if(pix != images[k].bitmap)
         free(pix);
      if(nsizes == 1 && images[k].bitmap)
      {
         /* Only one output size: the decoded bitmap won't be needed again. */
         free(images[k].bitmap);
         images[k].bitmap = 0;
      }
   }
   lprintf("%d x 3 x %d x %d tensor returned\n", k, sizes[sel].height, sizes[sel].width);

   luaT_pushudata(L, out, "torch.FloatTensor");
   /* Second return value: array of per-image metadata tables. */
   lua_createtable(L, max, 0);
   for(k = 0; k < max; k++)
   {
      lua_createtable(L, 0, 3);
      lua_pushstring(L, images[k].filename);
      lua_setfield(L, -2, "filename");
      lua_pushinteger(L, images[k].width);
      lua_setfield(L, -2, "width");
      lua_pushinteger(L, images[k].height);
      lua_setfield(L, -2, "height");
      lua_rawseti(L, -2, k + 1);
   }
   return 2;
}
/* fastimage.init(path, max[, sizes-tensor | width, height][, greylevel])
 *
 * (Re)initializes the loader: remembers the source path (file or directory),
 * the batch size `max`, and the list of output sizes — either an n x 2
 * FloatTensor of {width, height} rows or a single width/height pair.
 * Frees any state left over from a previous init. Raises a Lua error on
 * invalid arguments or if `path` cannot be opened. */
static int luafunc_init(lua_State *L)
{
   struct stat st;
   const char *path = lua_tostring(L, 1);
   /* BUG FIX: remember the previous batch size before overwriting the global.
      The images[] array below was allocated with the OLD max; iterating the
      cleanup loop with the new value read out of bounds when the new max was
      larger and leaked bitmaps when it was smaller. */
   int oldmax = max;
   max = lua_tointeger(L, 2);
   if(!path)
      luaL_error(L, "fastimage.init: path has to be a string");
   if(max < 1)
      luaL_error(L, "fastimage.init: max has to be a positive number");
   /* NOTE(review): unbounded copy — assumes initpath is large enough for any
      caller-supplied path; consider a bounded snprintf once the declared size
      of initpath is confirmed. */
   strcpy(initpath, path);
   const char *tname = luaT_typename(L, 3);
   /* Release state from a previous init, if any. */
   if(images)
   {
      int i;
      for(i = 0; i < oldmax; i++)
         if(images[i].bitmap)
            free(images[i].bitmap);
      free(images);
      images = 0;
   }
   if(sizes)
   {
      free(sizes);
      sizes = 0;
   }
   nsizes = 0;
   if(tname && !strcmp(tname, "torch.FloatTensor"))
   {
      /* Argument 3 is an n x 2 tensor of {width, height} pairs. */
      THFloatTensor *t = luaT_toudata(L, 3, luaT_typenameid(L, "torch.FloatTensor"));
      if(t->nDimension == 2 && t->size[1] == 2)
      {
         int i;
         nsizes = t->size[0];
         sizes = (imgsize_t *)malloc(nsizes * sizeof(imgsize_t));
         float *data = THFloatTensor_data(t);
         for(i = 0; i < nsizes; i++)
         {
            /* NOTE(review): indexing assumes the second dimension is
               contiguous (stride[1] == 1) — confirm callers pass such tensors. */
            sizes[i].width = data[i * t->stride[0]];
            sizes[i].height = data[i * t->stride[0] + 1];
         }
         if(lua_isnumber(L, 4))
            greylevel = (int)(255 * lua_tonumber(L, 4));
         else greylevel = -1;
      }
      else
         /* BUG FIX: the old code silently continued with sizes == NULL here
            and crashed dereferencing sizes[0] in the lprintf below. */
         luaL_error(L, "fastimage.init: sizes tensor must be n x 2");
   } else {
      /* Single-size mode: width/height as plain numbers (possibly 0/0,
         meaning "adopt the first image's geometry" at load time). */
      nsizes = 1;
      sizes = (imgsize_t *)malloc(sizeof(imgsize_t));
      sizes[0].width = lua_tointeger(L, 3);
      sizes[0].height = lua_tointeger(L, 4);
      if(lua_isnumber(L, 5))
         greylevel = (int)(255 * lua_tonumber(L, 5));
      else greylevel = -1;
   }
   images = (img_t *)calloc(max, sizeof(img_t));
   lprintf("fastimage.init(%s, %d, %d, %d, %d)\n", path, max, sizes[0].width, sizes[0].height, greylevel);
   terminate = 0;
   if(dir)
   {
      closedir(dir);
      dir = 0;
   }
   if(!stat(path, &st))
   {
      if(S_ISREG(st.st_mode))
         return 0;
      else if(S_ISDIR(st.st_mode))
      {
         lprintf("opendir %s\n", path);
         dir = opendir(path);
         if(!dir)
            luaL_error(L, "fastimage.init: failed to open directory %s", path);
         return 0;
      }
      else luaL_error(L, "fastimage.init: %s is neither a file, nor a directory", path);
   }
   else luaL_error(L, "fastimage.init: Cannot stat %s", path);
   return 0;
}
NIL_BEHAVIOR; \ } \ else \ { \ if(lua_isfunction(L, -1)) \ { \ lua_insert(L, 1); /* insert function */ \ lua_pop(L, 2); /* remove metatable and metaclass */ \ lua_call(L, lua_gettop(L)-1, 1); /* we return the result of the call */ \ } \ /* we return the thing the user left in __tostring__ */ \ } \ return 1; \ } MT_DECLARE_OPERATOR(tostring, lua_pushstring(L, luaT_typename(L, 1))) MT_DECLARE_OPERATOR(add, luaL_error(L, "%s has no addition operator", luaT_typename(L, 1))) MT_DECLARE_OPERATOR(sub, luaL_error(L, "%s has no substraction operator", luaT_typename(L, 1))) MT_DECLARE_OPERATOR(mul, luaL_error(L, "%s has no multiplication operator", luaT_typename(L, 1))) MT_DECLARE_OPERATOR(div, luaL_error(L, "%s has no division operator", luaT_typename(L, 1))) MT_DECLARE_OPERATOR(mod, luaL_error(L, "%s has no modulo operator", luaT_typename(L, 1))) MT_DECLARE_OPERATOR(pow, luaL_error(L, "%s has no power operator", luaT_typename(L, 1))) MT_DECLARE_OPERATOR(unm, luaL_error(L, "%s has no negation operator", luaT_typename(L, 1))) MT_DECLARE_OPERATOR(concat, luaL_error(L, "%s has no concat operator", luaT_typename(L, 1))) MT_DECLARE_OPERATOR(len, luaL_error(L, "%s has no length operator", luaT_typename(L, 1))) MT_DECLARE_OPERATOR(eq, lua_settop(L, 2); lua_pushcfunction(L, luaT_lua_isequal); lua_insert(L, 1); lua_call(L, 2, 1);) MT_DECLARE_OPERATOR(lt, luaL_error(L, "%s has no lower than operator", luaT_typename(L, 1)))
/* savenpy(filename, tensor, typeId) -> true
 *
 * Saves a torch tensor to a NumPy .npy file via cnpy. typeId selects the
 * element type: 0=double, 1=float, 2=int, 3=byte, 4=long, 5=short.
 * C++ exceptions from cnpy are converted to Torch errors.
 *
 * BUG FIX: the data pointer now includes tensor->storageOffset — tensors
 * produced by narrow()/select() share a storage but start at a nonzero
 * offset, and the old code saved the wrong bytes for them.
 * NOTE(review): still assumes the tensor is contiguous (strides untouched);
 * non-contiguous tensors will serialize incorrectly — confirm callers
 * guarantee contiguity or add a clone upstream. */
static int savenpy_l(lua_State *L){
    try{
        /* luaL_checkstring instead of lua_tostring: a non-string argument
           returned NULL and constructing std::string(NULL) was UB. */
        const char *filename = luaL_checkstring(L, 1);
        std::string fpath(filename);
        const char *inType = luaT_typename(L, 2);
        int typeId = luaL_checkinteger(L, 3);
/* One case of the type dispatch: validate the userdata, derive the shape,
   and hand the (offset-corrected) storage pointer to cnpy. */
#define SAVENPY_CASE(TH_T, SCALAR_T) \
        do { \
            TH_T *tensor = (TH_T *) luaT_checkudata(L, 2, inType); \
            std::vector<size_t> shape = get_shape(tensor->nDimension, tensor->size); \
            cnpy::npy_save<SCALAR_T>(fpath, tensor->storage->data + tensor->storageOffset, shape, "w"); \
        } while (0)
        switch ( typeId ){
            case 0: SAVENPY_CASE(THDoubleTensor, double);       break;
            case 1: SAVENPY_CASE(THFloatTensor,  float);        break;
            case 2: SAVENPY_CASE(THIntTensor,    int);          break;
            case 3: SAVENPY_CASE(THByteTensor,   unsigned char); break;
            case 4: SAVENPY_CASE(THLongTensor,   long);         break;
            case 5: SAVENPY_CASE(THShortTensor,  short);        break;
            default:
                THError("unsupported tensor type :-(");
        }
#undef SAVENPY_CASE
        lua_pushboolean(L, 1); // to return true
    }catch (std::exception& e){
        THError(e.what());
    }
    return 1;
}
/* Serialize the Lua value at stack slot `index` into ringbuffer `rb`.
 *
 * Supported: nil, boolean, number (with a distinct integer tag on Lua 5.3+),
 * string, table (recursively, terminated by a nil sentinel, followed by the
 * metatable's typename), Lua function (bytecode dump plus, when `upval` is 1,
 * its upvalues minus _ENV), and — only in-process — userdata, written as
 * typename + the pointer payload, with its "retain" metamethod invoked.
 *
 * Returns 0 on success, -EPERM for unsupported values (or userdata when
 * `oop` is set), -EINVAL for userdata without a typename/retain protocol.
 * Raises a Lua error for C functions and (when `upval` is 0) closures.
 * Leaves the stack balanced on success. */
int rb_save(lua_State *L, int index, ringbuffer_t *rb, int oop, int upval) {
   char type = lua_type(L, index);
   switch (type) {
      case LUA_TNIL: {
         RB_WRITE(L, rb, &type, sizeof(char));
         return 0;
      }
      case LUA_TBOOLEAN: {
         RB_WRITE(L, rb, &type, sizeof(char));
         /* Reuse `type` as a one-byte payload for the boolean value. */
         type = lua_toboolean(L, index);
         RB_WRITE(L, rb, &type, sizeof(char));
         return 0;
      }
      case LUA_TNUMBER: {
#if LUA_VERSION_NUM >= 503
         if (lua_isinteger(L, index)) {
            /* Tag integers separately so they round-trip without a
               float conversion. */
            type = EXTRA_LUA_TINTEGER;
            RB_WRITE(L, rb, &type, sizeof(char));
            lua_Integer n = lua_tointeger(L, index);
            RB_WRITE(L, rb, &n, sizeof(n));
         } else
#endif
         {
            RB_WRITE(L, rb, &type, sizeof(char));
            lua_Number n = lua_tonumber(L, index);
            RB_WRITE(L, rb, &n, sizeof(n));
         }
         return 0;
      }
      case LUA_TSTRING: {
         RB_WRITE(L, rb, &type, sizeof(char));
         size_t str_len;
         const char *str = lua_tolstring(L, index, &str_len);
         RB_WRITE(L, rb, &str_len, sizeof(str_len));
         RB_WRITE(L, rb, str, str_len);
         return 0;
      }
      case LUA_TTABLE: {
         RB_WRITE(L, rb, &type, sizeof(char));
         int top = lua_gettop(L);
         int ret;
         lua_pushnil(L);
         while (lua_next(L, index) != 0) {
            ret = rb_save(L, top + 1, rb, oop, upval); // key
            if (ret) {
               lua_pop(L, 2);
               return ret;
            }
            ret = rb_save(L, top + 2, rb, oop, upval); // value
            if (ret) {
               lua_pop(L, 2);
               return ret;
            }
            lua_pop(L, 1); // keep the key for the next lua_next
         }
         type = LUA_TNIL;
         RB_WRITE(L, rb, &type, sizeof(char)); // breaks the read loop
         // the typename identifies the metatable
         const char *str = luaT_typename(L, index);
         if (!str) {
            if (luaL_callmeta(L, index, "metatablename")) {
               str = lua_tostring(L, lua_gettop(L));
               lua_pop(L, 1);
            } else {
               str = "";
            }
         }
         size_t str_len = strlen(str);
         RB_WRITE(L, rb, &str_len, sizeof(str_len));
         RB_WRITE(L, rb, str, str_len);
         return 0;
      }
      case LUA_TFUNCTION: {
         RB_WRITE(L, rb, &type, sizeof(char));
         /* lua_dump works on the value at the top of the stack. */
         if (index != lua_gettop(L)) {
            lua_pushvalue(L, index);
         }
         lua_Debug ar;
         lua_pushvalue(L, -1); // lua_getinfo with ">" pops this copy
         lua_getinfo(L, ">nuS", &ar);
         if (ar.what[0] != 'L') {
            luaL_error(L, "attempt to persist a C function '%s'", ar.name);
         }
         // this returns different things under LuaJIT vs Lua
#if LUA_VERSION_NUM >= 503
         lua_dump(L, rb_lua_writer, rb, 0);
#else
         lua_dump(L, rb_lua_writer, rb);
#endif
         size_t str_len = 0;
         RB_WRITE(L, rb, &str_len, sizeof(size_t)); // zero-terminated
         // does the serialization accept upvalues?
         RB_WRITE(L, rb, &upval, sizeof(int)); // upvalues
         if (upval == 1) {
            lua_newtable(L);
            int envIdx = -1;
            for (int i = 1; i <= ar.nups; i++) {
               const char *name = lua_getupvalue(L, -2, i);
               if (strcmp(name, "_ENV") != 0) {
                  lua_rawseti(L, -2, i);
               } else {
                  // ignore _ENV as we assume that this is the global _G variable
                  lua_pop(L, 1);
                  envIdx = i;
               }
            }
            // write upvalue index of _ENV
            RB_WRITE(L, rb, &envIdx, sizeof(int));
            // write upvalue table
            int ret = rb_save(L, lua_gettop(L), rb, oop, upval);
            if (ret) {
               return ret;
            }
            lua_pop(L, 1);
         } else if (ar.nups > 1) {
            /* One upvalue (_ENV) is always present on Lua 5.2+, so only
               more than one counts as a real closure. ("funciton" typo in
               the user-facing message fixed.) */
            luaL_error(L, "attempt to serialize a function with upvalues (i.e. a closure). Use ipc.workqueue.writeup().");
         }
         if (index != lua_gettop(L)) {
            lua_pop(L, 1);
         }
         return 0;
      }
      case LUA_TUSERDATA: {
         if (oop) return -EPERM; // raw pointers cannot cross process boundaries
         const char *str = luaT_typename(L, index);
         if (!str) {
            if (luaL_callmeta(L, index, "metatablename")) {
               str = lua_tostring(L, lua_gettop(L));
               lua_pop(L, 1);
               type = -type; // negative tag marks non-torch userdata
            } else {
               return -EINVAL;
            }
         }
         RB_WRITE(L, rb, &type, sizeof(char));
         size_t str_len = strlen(str);
         RB_WRITE(L, rb, &str_len, sizeof(str_len));
         RB_WRITE(L, rb, str, str_len);
         /* Writes the first pointer-sized bytes of the userdata payload —
            i.e. the embedded object pointer, not the userdata address. */
         void *ptr = lua_touserdata(L, index);
         RB_WRITE(L, rb, ptr, sizeof(void *));
         /* Bump the object's refcount so the receiver owns a reference. */
         if (luaL_callmeta(L, index, "retain")) {
            lua_pop(L, 1);
         } else {
            return -EINVAL;
         }
         return 0;
      }
      default:
         return -EPERM;
   }
}