Exemplo n.º 1
0
/* Lua metamethod __add__: Tensor + Tensor, Tensor + number, or number + Tensor.
   Pushes and returns a freshly allocated result tensor; raises a Lua error if
   neither operand is a Tensor. */
static int torch_TensorOperator_(__add__)(lua_State *L)
{
  THTensor *lhs = luaT_toudata(L, 1, torch_Tensor);
  THTensor *rhs = luaT_toudata(L, 2, torch_Tensor);

  if(!lhs && !rhs)
  {
    luaL_error(L, "expecting two Tensors or one Tensor and one number");
    return 1; /* never reached: luaL_error longjmps */
  }

  /* push the result now; it stays on the stack as the return value */
  THTensor *result = THTensor_(new)();
  luaT_pushudata(L, result, torch_Tensor);

  if(lhs && rhs)
  {
    /* tensor + tensor: element-wise sum */
    THTensor_(resizeAs)(result, lhs);
    THTensor_(copy)(result, lhs);
    THTensor_(cadd)(result, result, 1, rhs);
  }
  else
  {
    /* exactly one operand is a tensor: copy it, then add the scalar operand */
    THTensor *t = lhs ? lhs : rhs;
    int scalarIndex = lhs ? 2 : 1;
    THTensor_(resizeAs)(result, t);
    THTensor_(copy)(result, t);
    THTensor_(add)(result, result, luaL_checknumber(L, scalarIndex));
  }
  return 1;
}
Exemplo n.º 2
0
/* Lua metamethod __sub__ for torch.CudaTensor.
   Handles tensor-tensor, tensor-number and number-tensor; pushes the result. */
static int cutorch_CudaTensorOperator___sub__(lua_State *L)
{
  THCudaTensor *lhs = luaT_toudata(L, 1, "torch.CudaTensor");
  THCudaTensor *rhs = luaT_toudata(L, 2, "torch.CudaTensor");

  if(!lhs && !rhs)
  {
    luaL_error(L, "expecting two Tensors or one Tensor and one number");
    return 1; /* never reached: luaL_error longjmps */
  }

  THCudaTensor *result = THCudaTensor_new();
  luaT_pushudata(L, result, "torch.CudaTensor");

  if(!lhs)
  {
    /* number - tensor: fill with the scalar, then subtract the tensor */
    THCudaTensor_resizeAs(result, rhs);
    THCudaTensor_fill(result, luaL_checknumber(L, 1));
    THCudaTensor_cadd(result, result, -1, rhs);
  }
  else if(!rhs)
  {
    /* tensor - number: copy the tensor, then add the negated scalar */
    THCudaTensor_resizeAs(result, lhs);
    THCudaTensor_copy(result, lhs);
    THCudaTensor_add(result, result, -luaL_checknumber(L, 2));
  }
  else
  {
    /* tensor - tensor: copy lhs, then subtract rhs element-wise */
    THCudaTensor_resizeAs(result, lhs);
    THCudaTensor_copy(result, lhs);
    THCudaTensor_cadd(result, result, -1, rhs);
  }
  return 1;
}
Exemplo n.º 3
0
/* Builds a THLongStorage from the Lua arguments starting at `index`: either a
   single torch.LongStorage (copied) or a run of plain numbers. The caller owns
   (and must free) the returned storage. Raises on a non-number argument. */
THLongStorage* lab_checklongargs(lua_State *L, int index)
{
  int narg = lua_gettop(L) - index + 1;
  THLongStorage *result;
  THLongStorage *given = (narg == 1) ? luaT_toudata(L, index, torch_LongStorage_id) : NULL;

  if(given)
  {
    /* single LongStorage argument: deep-copy it */
    result = THLongStorage_newWithSize(given->size);
    THLongStorage_copy(result, given);
  }
  else
  {
    int i;
    result = THLongStorage_newWithSize(narg);
    for(i = 0; i < narg; i++)
    {
      if(!lua_isnumber(L, index + i))
      {
        THLongStorage_free(result); /* avoid leaking before the longjmp */
        luaL_argerror(L, index + i, "number expected");
      }
      result->data[i] = lua_tonumber(L, index + i);
    }
  }
  return result;
}
Exemplo n.º 4
0
/* now we overwrite some methods specific to CudaTensor */
/* tensor:copy(src) — copies any torch.*Tensor (arg 2) into the CudaTensor at
   arg 1, dispatching on the source's runtime type. Returns arg 1 for chaining;
   raises a type error when arg 2 is not a tensor.
   Fix: removed the duplicate "torch.CudaTensor" branch (which called
   THCudaTensor_copyCuda) — it was unreachable dead code because the identical
   check at the top of the chain already matches CudaTensor sources. */
static int cutorch_CudaTensor_copy(lua_State *L)
{
  THCState *state = cutorch_getstate(L);
  THCudaTensor *dst = luaT_checkudata(L, 1, "torch.CudaTensor");
  void *src;
  if( (src = luaT_toudata(L, 2, "torch.CudaTensor")) )
    THCudaTensor_copy(state, dst, src);
  else if( (src = luaT_toudata(L, 2, "torch.ByteTensor")) )
    THCudaTensor_copyByte(state, dst, src);
  else if( (src = luaT_toudata(L, 2, "torch.CharTensor")) )
    THCudaTensor_copyChar(state, dst, src);
  else if( (src = luaT_toudata(L, 2, "torch.ShortTensor")) )
    THCudaTensor_copyShort(state, dst, src);
  else if( (src = luaT_toudata(L, 2, "torch.IntTensor")) )
    THCudaTensor_copyInt(state, dst, src);
  else if( (src = luaT_toudata(L, 2, "torch.LongTensor")) )
    THCudaTensor_copyLong(state, dst, src);
  else if( (src = luaT_toudata(L, 2, "torch.FloatTensor")) )
    THCudaTensor_copyFloat(state, dst, src);
  else if( (src = luaT_toudata(L, 2, "torch.DoubleTensor")) )
    THCudaTensor_copyDouble(state, dst, src);
  else
    luaL_typerror(L, 2, "torch.*Tensor");

  /* leave only the destination on the stack and return it */
  lua_settop(L, 1);
  return 1;
}
Exemplo n.º 5
0
/* storage:copy(src) — copies any torch.*Storage (arg 2) into the ClStorage at
   arg 1, dispatching on the source's runtime type. Returns arg 1 for chaining;
   raises a type error when arg 2 is not a storage.
   Fix: removed the duplicate "torch.ClStorage" branch (which called
   THClStorage_copyCl) — it was unreachable dead code because the identical
   check at the top of the chain already matches ClStorage sources. */
static int cltorch_ClStorage_copy(lua_State *L)
{
  THClState *state = cltorch_getstate(L);
  THClStorage *dst = luaT_checkudata(L, 1, "torch.ClStorage");
  void *src;
  if( (src = luaT_toudata(L, 2, "torch.ClStorage")) )
    THClStorage_copy(state, dst, src);
  else if( (src = luaT_toudata(L, 2, "torch.ByteStorage")) )
    THClStorage_copyByte(state, dst, src);
  else if( (src = luaT_toudata(L, 2, "torch.CharStorage")) )
    THClStorage_copyChar(state, dst, src);
  else if( (src = luaT_toudata(L, 2, "torch.ShortStorage")) )
    THClStorage_copyShort(state, dst, src);
  else if( (src = luaT_toudata(L, 2, "torch.IntStorage")) )
    THClStorage_copyInt(state, dst, src);
  else if( (src = luaT_toudata(L, 2, "torch.LongStorage")) )
    THClStorage_copyLong(state, dst, src);
  else if( (src = luaT_toudata(L, 2, "torch.FloatStorage")) )
    THClStorage_copyFloat(state, dst, src);
  else if( (src = luaT_toudata(L, 2, "torch.DoubleStorage")) )
    THClStorage_copyDouble(state, dst, src);
  else
    luaL_typerror(L, 2, "torch.*Storage");

  /* leave only the destination on the stack and return it */
  lua_settop(L, 1);
  return 1;
}
Exemplo n.º 6
0
/* tensor:copy(src) — type-dispatched copy from any torch.*Tensor into the
   tensor at arg 1. Returns arg 1; raises when arg 2 is not a tensor. */
static int torch_(Tensor_copy)(lua_State *L)
{
  THTensor *dst = luaT_checkudata(L, 1, torch_(Tensor_id));
  void *source = NULL;

  if( (source = luaT_toudata(L, 2, torch_(Tensor_id))) != NULL )
    THTensor_(copy)(dst, source);
  else if( (source = luaT_toudata(L, 2, torch_ByteTensor_id)) != NULL )
    THTensor_(copyByte)(dst, source);
  else if( (source = luaT_toudata(L, 2, torch_CharTensor_id)) != NULL )
    THTensor_(copyChar)(dst, source);
  else if( (source = luaT_toudata(L, 2, torch_ShortTensor_id)) != NULL )
    THTensor_(copyShort)(dst, source);
  else if( (source = luaT_toudata(L, 2, torch_IntTensor_id)) != NULL )
    THTensor_(copyInt)(dst, source);
  else if( (source = luaT_toudata(L, 2, torch_LongTensor_id)) != NULL )
    THTensor_(copyLong)(dst, source);
  else if( (source = luaT_toudata(L, 2, torch_FloatTensor_id)) != NULL )
    THTensor_(copyFloat)(dst, source);
  else if( (source = luaT_toudata(L, 2, torch_DoubleTensor_id)) != NULL )
    THTensor_(copyDouble)(dst, source);
  else
    luaL_typerror(L, 2, "torch.*Tensor");

  /* keep only the destination on the stack and return it */
  lua_settop(L, 1);
  return 1;
}
Exemplo n.º 7
0
/* Lua metamethod __mul__ for Tensors.
   Dispatch:
     number * tensor / tensor * number -> element-wise scaling
     1-D * 1-D -> dot product (pushes a plain number)
     2-D * 1-D -> matrix-vector product (addmv)
     2-D * 2-D -> matrix-matrix product (addmm)
   Any other dimension combination raises an error. */
static int torch_TensorOperator_(__mul__)(lua_State *L)
{
  THTensor *tensor1 = luaT_toudata(L, 1, torch_Tensor);
  THTensor *tensor2 = luaT_toudata(L, 2, torch_Tensor);
  THTensor *r;

  if(!tensor1 && !tensor2)
    luaL_error(L, "expecting two Tensors or one Tensor and one number");
  else
  {
    /* the result tensor is pushed immediately; it stays on the stack as the
       return value (and is Lua-owned, so it gets collected even if unused) */
    r = THTensor_(new)();
    luaT_pushudata(L, r, torch_Tensor);

    if(!tensor1 && tensor2)
    {
      /* number * tensor */
      THTensor_(resizeAs)(r, tensor2);
      THTensor_(copy)(r, tensor2);
      THTensor_(mul)(r, r, luaL_checknumber(L, 1));
    }
    else if(tensor1 && !tensor2)
    {
      /* tensor * number */
      THTensor_(resizeAs)(r, tensor1);
      THTensor_(copy)(r, tensor1);
      THTensor_(mul)(r, r, luaL_checknumber(L, 2));
    }
    else
    {
      /* tensor * tensor: choose BLAS operation by dimensionality */
      int dimt = tensor1->nDimension;
      int dims = tensor2->nDimension;

      if(dimt == 1 && dims == 1)
        lua_pushnumber(L, THTensor_(dot)(tensor1, tensor2)); /* ok, we wasted r, but who cares */
      else if(dimt == 2 && dims == 1)
      {
        /* matrix-vector: r = 1*r + 1*(tensor1 @ tensor2), r zeroed first */
        THTensor_(resize1d)(r, tensor1->size[0]);
        THTensor_(zero)(r);
        THTensor_(addmv)(r, 1, r, 1, tensor1, tensor2);
      }
      else if(dimt == 2 && dims == 2)
      {
        /* matrix-matrix: r = 1*r + 1*(tensor1 @ tensor2), r zeroed first */
        THTensor_(resize2d)(r, tensor1->size[0], tensor2->size[1]);
        THTensor_(zero)(r);
        THTensor_(addmm)(r, 1, r, 1, tensor1, tensor2);
      }
      else
        luaL_error(L, "multiplication between %dD and %dD tensors not yet supported", tensor1->nDimension, tensor2->nDimension);
    }
  }
  return 1;
}
Exemplo n.º 8
0
/* Lua metamethod __mul__ for torch.CudaTensor.
   Dispatch:
     number * tensor / tensor * number -> element-wise scaling
     1-D * 1-D -> dot product (pushes a plain number)
     2-D * 1-D -> matrix-vector product (addmv)
     2-D * 2-D -> matrix-matrix product (addmm)
   Any other dimension combination raises an error.
   NOTE(review): this uses the old cutorch API without an explicit THCState
   argument — confirm against the cutorch version in use. */
static int cutorch_CudaTensorOperator___mul__(lua_State *L)
{
  THCudaTensor *tensor1 = luaT_toudata(L, 1, "torch.CudaTensor");
  THCudaTensor *tensor2 = luaT_toudata(L, 2, "torch.CudaTensor");
  THCudaTensor *r;

  if(!tensor1 && !tensor2)
    luaL_error(L, "expecting two Tensors or one Tensor and one number");
  else
  {
    /* the result tensor is pushed immediately; it stays on the stack as the
       return value (and is Lua-owned, so it gets collected even if unused) */
    r = THCudaTensor_new();
    luaT_pushudata(L, r, "torch.CudaTensor");

    if(!tensor1 && tensor2)
    {
      /* number * tensor */
      THCudaTensor_resizeAs(r, tensor2);
      THCudaTensor_copy(r, tensor2);
      THCudaTensor_mul(r, r, luaL_checknumber(L, 1));
    }
    else if(tensor1 && !tensor2)
    {
      /* tensor * number */
      THCudaTensor_resizeAs(r, tensor1);
      THCudaTensor_copy(r, tensor1);
      THCudaTensor_mul(r, r, luaL_checknumber(L, 2));
    }
    else
    {
      /* tensor * tensor: choose BLAS operation by dimensionality */
      int dimt = tensor1->nDimension;
      int dims = tensor2->nDimension;

      if(dimt == 1 && dims == 1)
        lua_pushnumber(L, THCudaTensor_dot(tensor1, tensor2)); /* ok, we wasted r, but who cares */
      else if(dimt == 2 && dims == 1)
      {
        /* matrix-vector: r = 1*r + 1*(tensor1 @ tensor2), r zeroed first */
        THCudaTensor_resize1d(r, tensor1->size[0]);
        THCudaTensor_zero(r);
        THCudaTensor_addmv(r, 1, r, 1, tensor1, tensor2);
      }
      else if(dimt == 2 && dims == 2)
      {
        /* matrix-matrix: r = 1*r + 1*(tensor1 @ tensor2), r zeroed first */
        THCudaTensor_resize2d(r, tensor1->size[0], tensor2->size[1]);
        THCudaTensor_zero(r);
        THCudaTensor_addmm(r, 1, r, 1, tensor1, tensor2);
      }
      else
        luaL_error(L, "multiplication between %dD and %dD tensors not yet supported", tensor1->nDimension, tensor2->nDimension);
    }
  }
  return 1;
}
Exemplo n.º 9
0
/* Parses tensor-construction arguments at `index`: nothing (if allowNone), an
   existing Tensor (if allowTensor), a Storage [+ 1-based offset [+ sizes]]
   (if allowStorage), or a plain size specification. Fills the three out
   parameters and returns; otherwise raises a Lua argument error.
   Ownership: *size_ is a new/retained THLongStorage the caller must free;
   *storage_ is borrowed (not retained).
   Fix: the error-message buffer was `static` (not reentrant / not
   thread-safe) and filled with unbounded sprintf; it is now a local filled
   with snprintf. luaL_argcheck copies the message before the longjmp, so a
   local buffer is safe. */
static void torch_(Tensor_c_readTensorStorageSize)(lua_State *L, int index, int allowNone, int allowTensor, int allowStorage,
                                                         THStorage **storage_, long *storageOffset_, THLongStorage **size_)
{
  char errMsg[64];
  THTensor *src = NULL;
  THStorage *storage = NULL;

  int arg1Type = lua_type(L, index);

  if( allowNone && (arg1Type == LUA_TNONE) )
  {
    /* no argument: empty tensor */
    *storage_ = NULL;
    *storageOffset_ = 0;
    *size_ = THLongStorage_new();
    return;
  }
  else if( allowTensor && (arg1Type == LUA_TUSERDATA) && (src = luaT_toudata(L, index, torch_(Tensor_id))) )
  {
    /* view of an existing tensor: share its storage, offset and sizes */
    *storage_ = THTensor_(storage)(src);
    *storageOffset_ = THTensor_(storageOffset)(src);
    *size_ = THTensor_(newSizeOf)(src);
    return;
  }
  else if( allowStorage && (arg1Type == LUA_TUSERDATA) && (storage = luaT_toudata(L, index, torch_(Storage_id))) )
  {
    *storage_ = storage;
    if(lua_isnone(L, index+1))
    {
      /* storage alone: 1-D tensor covering the whole storage */
      *storageOffset_ = 0;
      *size_ = THLongStorage_newWithSize(1);
      THLongStorage_set(*size_, 1, THStorage_(size)(storage));
    }
    else
    {
      /* storage + 1-based offset (converted to 0-based) + explicit sizes */
      *storageOffset_ = luaL_checklong(L, index+1)-1;
      torch_(Tensor_c_readSize)(L, index+2, size_);
    }
    return;
  }
  else if( (arg1Type == LUA_TNUMBER) || (luaT_toudata(L, index, torch_LongStorage_id)) )
  {
    /* plain size specification: numbers or a LongStorage */
    *storage_ = NULL;
    *storageOffset_ = 0;
    torch_(Tensor_c_readSize)(L, index, size_);
    return;
  }
  snprintf(errMsg, sizeof errMsg, "expecting number%s%s", (allowTensor ? " or Tensor" : ""), (allowStorage ? " or Storage" : ""));
  luaL_argcheck(L, 0, index, errMsg);
}
Exemplo n.º 10
0
/* Returns 1 when the value at stack index `ud` is a userdata of type `id`,
   0 otherwise. */
int luaT_isudata(lua_State *L, int ud, const void *id)
{
  return luaT_toudata(L, ud, id) != NULL;
}
Exemplo n.º 11
0
/* helpful functions */
/* Reads a size specification at `index`: either a torch.LongStorage (shared
   via retain) or up to 4 trailing numbers; absent trailing arguments become 0.
   The result is stored into *size_; the caller must free it. Raises on a
   non-number argument. */
static void torch_(Tensor_c_readSize)(lua_State *L, int index, THLongStorage **size_)
{
  THLongStorage *size = luaT_toudata(L, index, torch_LongStorage_id);

  if(size)
  {
    /* a LongStorage was given: share ownership with the caller */
    THLongStorage_retain(size);
  }
  else
  {
    long dim;
    size = THLongStorage_newWithSize(4);
    for(dim = 0; dim < 4; dim++)
    {
      if(lua_isnone(L, index+dim))
        THLongStorage_set(size, dim, 0);
      else if(lua_isnumber(L, index+dim))
        THLongStorage_set(size, dim, lua_tonumber(L, index+dim));
      else
      {
        THLongStorage_free(size); /* avoid leaking before the longjmp */
        luaL_error(L, "invalid argument %d: number expected", index+dim);
      }
    }
  }
  *size_ = size;
}
Exemplo n.º 12
0
/* Boolean wrapper around luaT_toudata: 1 when the value at `ud` is a userdata
   of type `tname`, else 0. */
int luaT_isudata(lua_State *L, int ud, const char *tname)
{
  return (luaT_toudata(L, ud, tname) != NULL) ? 1 : 0;
}
Exemplo n.º 13
0
/* Like luaT_toudata, but raises a Lua type error (naming the expected type)
   instead of returning NULL. Never returns NULL on success. */
void *luaT_checkudata (lua_State *L, int ud, const void *id)
{
  void *udata = luaT_toudata(L, ud, id);
  if(udata == NULL)
    luaT_typerror(L, ud, luaT_id2typename(L, id));
  return udata;
}
Exemplo n.º 14
0
/* Checked variant of luaT_toudata: raises a Lua type error on mismatch, so a
   non-NULL pointer is always returned to the caller. */
void *luaT_checkudata(lua_State *L, int ud, const char *tname)
{
  void *udata = luaT_toudata(L, ud, tname);
  if(udata == NULL)
    luaT_typerror(L, ud, tname);
  return udata;
}
Exemplo n.º 15
0
/* Sends the contents of a torch.FloatTensor (arg 1) over the wire. */
static int send_tensor_float_lua(lua_State *L) {
  THFloatTensor *src = luaT_toudata(L, 1, luaT_checktypename2id(L, "torch.FloatTensor"));
  /* NOTE(review): assumes a 2-D tensor with contiguous storage — confirm at call sites */
  float *payload = src->storage->data + src->storageOffset;
  int count = src->size[0] * src->size[1];
  send_tensor_float_C(payload, count);
  return 0;
}
Exemplo n.º 16
0
/* tensor:copyAsync(src) — copies into the GPU tensor at arg 1 from either
   another GPU tensor of the same type (plain copy) or the matching CPU-typed
   tensor (copyAsyncCPU path). Returns arg 1; raises on any other type.
   STRINGIFY_TENSOR expands to the "torch.<Type>Tensor" metatable name at
   preprocessing time (CReal = GPU real type, Real = CPU real type). */
static int cutorch_Tensor_(copyAsyncCPU)(lua_State *L)
{
#define STRINGIFY_TENSOR(x) TH_CONCAT_STRING_3(torch.,x,Tensor)
  THCState *state = cutorch_getstate(L);
  THCTensor *tensor = luaT_checkudata(L, 1, STRINGIFY_TENSOR(CReal));
  void *src;
  if( (src = luaT_toudata(L, 2, STRINGIFY_TENSOR(CReal))))
    THCTensor_(copy)(state, tensor, src);
  else if( (src = luaT_toudata(L, 2, STRINGIFY_TENSOR(Real))))
    THCTensor_(copyAsyncCPU)(state, tensor, src);
  else
    luaL_typerror(L, 2, STRINGIFY_TENSOR(Real) " or " STRINGIFY_TENSOR(CReal));

  /* leave only the destination on the stack and return it */
  lua_settop(L, 1);
  return 1;
#undef STRINGIFY_TENSOR
}
Exemplo n.º 17
0
/* Sends a whole Tensor (arg 1) over etherflow, element count included. */
static int etherflow_(Api_send_tensor_lua)(lua_State *L) {
  THTensor *src = luaT_toudata(L, 1, torch_(Tensor_id));
  real *payload = THTensor_(data)(src);
  int count = THTensor_(nElement)(src);
  etherflow_send_(Tensor_C)(payload, count);
  return 0;
}
Exemplo n.º 18
0
// Sends a torch.ByteTensor's raw bytes (arg 1) over etherflow.
static int etherflow_(Api_send_tensor_byte_lua)(lua_State *L) {
  THByteTensor *src = luaT_toudata(L, 1, luaT_checktypename2id(L, "torch.ByteTensor"));
  unsigned char *payload = THByteTensor_data(src);
  int count = THByteTensor_nElement(src);
  etherflow_send_ByteTensor_C(payload, count);
  return 0;
}
Exemplo n.º 19
0
/* Receives float data into a preallocated torch.FloatTensor (arg 1). */
static int receive_tensor_float_lua(lua_State *L){
  THFloatTensor *dst = luaT_toudata(L, 1, luaT_checktypename2id(L, "torch.FloatTensor"));
  /* NOTE(review): assumes a 2-D tensor with contiguous storage — confirm at call sites */
  float *buffer = dst->storage->data + dst->storageOffset;
  int count = dst->size[0] * dst->size[1];
  receive_tensor_float_C(buffer, count, dst->size[1]);
  return 0;
}
Exemplo n.º 20
0
/* Receives double data into a preallocated torch.Tensor (arg 1), with ack. */
static int receive_tensor_lua_ack(lua_State *L){
  THTensor *dst = luaT_toudata(L, 1, luaT_checktypename2id(L, "torch.Tensor"));
  /* NOTE(review): assumes a 2-D tensor with contiguous storage — confirm at call sites */
  double *buffer = dst->storage->data + dst->storageOffset;
  int count = dst->size[0] * dst->size[1];
  receive_tensor_double_C_ack(buffer, count, dst->size[1]);
  return 0;
}
Exemplo n.º 21
0
// Sends a torch.ByteTensor (arg 1); only the first dimension's length is used.
static int send_tensor_byte_lua(lua_State *L) {
  THByteTensor *src = luaT_toudata(L, 1, luaT_checktypename2id(L, "torch.ByteTensor"));
  /* NOTE(review): assumes a 1-D tensor with contiguous storage — confirm at call sites */
  unsigned char *payload = src->storage->data + src->storageOffset;
  int count = src->size[0];
  send_tensor_byte_C(payload, count);
  return 0;
}
Exemplo n.º 22
0
/* Sends a torch.Tensor (arg 1) of doubles and waits for an acknowledgement. */
static int send_tensor_lua_ack(lua_State *L) {
  THTensor *src = luaT_toudata(L, 1, luaT_checktypename2id(L, "torch.Tensor"));
  /* NOTE(review): assumes a 2-D tensor with contiguous storage — confirm at call sites */
  double *payload = src->storage->data + src->storageOffset;
  int count = src->size[0] * src->size[1];
  send_tensor_double_C_ack(payload, count);
  return 0;
}
Exemplo n.º 23
0
/* Pushes field `field` of the value at stack index `ud` and checks that it is
   a userdata of type `tname`; raises a descriptive error when the field is
   missing or of the wrong type. The field is left on the Lua stack. */
void *luaT_getfieldcheckudata(lua_State *L, int ud, const char *field, const char *tname)
{
  void *value;

  lua_getfield(L, ud, field);
  if(lua_isnil(L, -1))
    luaL_error(L, "bad argument #%d (field %s does not exist)", ud, field);

  value = luaT_toudata(L, -1, tname);
  if(value == NULL)
    luaL_error(L, "bad argument #%d (field %s is not a %s)", ud, field, tname);

  return value;
}
Exemplo n.º 24
0
// Here we allways use the macro version to refer to tensors
// such that the expansion of this function will give two valid
// functions, one for FloatTensor and one for DoubleTensor
// cScale(output, input, scale): output <- input * scale.
// The macro expansion produces one concrete function per real type
// (THFloatTensor / THDoubleTensor), and torch_Tensor expands to the
// corresponding "torch.FloatTensor" / "torch.DoubleTensor" metatable name.
static int torch_Tensor_(cScale)(lua_State *L) {
  THTensor* dst = luaT_toudata(L, 1, torch_Tensor);
  if(!dst) {
    luaL_error(L, "cScale takes a "torch_Tensor" as first argument.");
  }
  THTensor* src = luaT_toudata(L, 2, torch_Tensor);
  if(!src) {
    luaL_error(L, "cScale takes a "torch_Tensor" as second argument.");
  }
  double factor = lua_tonumber(L, 3);

  // Resize the destination to match, then write the scaled copy into it.
  THTensor_(resizeAs)(dst, src);
  THTensor_(zero)(dst);
  THTensor_(mul)(dst, src, factor);

  return 0;
}
Exemplo n.º 25
0
/* stream:stop() — stops an open pa.Stream; raises if the argument is not a
   Stream or the stream has already been closed. */
static int pa_stream_stop(lua_State *L)
{
  pa_Stream *stream;

  if(lua_gettop(L) != 1 || !luaT_isudata(L, 1, "pa.Stream"))
    luaL_error(L, "expected arguments: Stream");
  stream = luaT_toudata(L, 1, "pa.Stream");

  if(!stream->id)
    luaL_error(L, "attempt to operate on a closed stream");

  pa_checkerror(L, Pa_StopStream(stream->id));
  return 0;
}
Exemplo n.º 26
0
/* stream:writeShort(data) — writes a torch.ShortTensor's samples to an open
   output stream. Pushes true on success, false on output underflow; any other
   PortAudio error is raised via pa_checkerror. */
static int pa_stream_writeShort(lua_State *L)
{
  pa_Stream *stream = NULL;
  THShortTensor *data = NULL;
  long nelem = 0;
  PaError err = 0;
  int narg = lua_gettop(L);

  if(narg == 2 && luaT_isudata(L, 1, "pa.Stream") && luaT_isudata(L, 2, "torch.ShortTensor"))
  {
    stream = luaT_toudata(L, 1, "pa.Stream");
    data = luaT_toudata(L, 2, "torch.ShortTensor");
  }
  else
    luaL_error(L, "expected arguments: Stream ShortTensor");

  if(!stream->id)
    luaL_error(L, "attempt to operate on a closed stream");

  nelem = THShortTensor_nElement(data);
  /* the sample count must map to whole frames: one sample per output channel */
  luaL_argcheck(L, (nelem > 0) && (nelem % stream->noutchannel == 0), 2, "invalid data: number of elements must be > 0 and divisible by the number of channels");
  luaL_argcheck(L, stream->outsampleformat & paInt16, 1, "stream does not support short data");

  /* Pa_WriteStream needs contiguous memory; newContiguous may allocate a
     copy, so the temporary is freed right after the write */
  data = THShortTensor_newContiguous(data);
  err = Pa_WriteStream(stream->id, THShortTensor_data(data), nelem/stream->noutchannel);
  THShortTensor_free(data);

  if(err == paOutputUnderflowed)
    lua_pushboolean(L, 0);
  else if(err == paNoError)
    lua_pushboolean(L, 1);
  else
    pa_checkerror(L, err);

  return 1;
}
Exemplo n.º 27
0
/* stream:cpuload() — pushes PortAudio's CPU-load estimate for an open stream. */
static int pa_stream_cpuload(lua_State *L)
{
  pa_Stream *stream;

  if(lua_gettop(L) != 1 || !luaT_isudata(L, 1, "pa.Stream"))
    luaL_error(L, "expected arguments: Stream");
  stream = luaT_toudata(L, 1, "pa.Stream");

  if(!stream->id)
    luaL_error(L, "attempt to operate on a closed stream");

  lua_pushnumber(L, Pa_GetStreamCpuLoad(stream->id));
  return 1;
}
Exemplo n.º 28
0
/* Finalizer for pa.Stream: closes the PortAudio stream if still open, then
   releases the userdata's memory. */
static int pa_stream_free(lua_State *L)
{
  pa_Stream *stream;

  if(lua_gettop(L) != 1 || !luaT_isudata(L, 1, "pa.Stream"))
    luaL_error(L, "expected arguments: Stream");
  stream = luaT_toudata(L, 1, "pa.Stream");

  if(stream->id)
    Pa_CloseStream(stream->id);

  /* should also free input/output buffers */
  luaT_free(L, stream);

  return 0;
}
Exemplo n.º 29
0
/* torch.MemoryFile([storage,] [mode]) — creates an in-memory file, optionally
   backed by an existing torch.CharStorage. mode defaults to "rw". */
static int torch_MemoryFile_new(lua_State *L)
{
  THCharStorage *backing = luaT_toudata(L, 1, "torch.CharStorage");
  THFile *file;
  const char *mode;

  if(backing)
  {
    /* arg 1 is the storage, so the optional mode shifts to arg 2 */
    mode = luaL_optstring(L, 2, "rw");
    file = THMemoryFile_newWithStorage(backing, mode);
  }
  else
  {
    mode = luaL_optstring(L, 1, "rw");
    file = THMemoryFile_new(mode);
  }

  luaT_pushudata(L, file, "torch.MemoryFile");
  return 1;
}
Exemplo n.º 30
0
/* Returns 1 when the stack slice starting at `index` is a valid "long args"
   specification: a single torch.LongStorage, or zero-or-more numbers.
   Returns 0 as soon as a non-number argument is found.
   Fix: removed the trailing `return 0;` — it was unreachable dead code, since
   both branches above it already return. */
int torch_islongargs(lua_State *L, int index)
{
  int narg = lua_gettop(L) - index + 1;
  int i;

  if(narg == 1 && luaT_toudata(L, index, "torch.LongStorage"))
    return 1;

  for(i = index; i < index + narg; i++)
  {
    if(!lua_isnumber(L, i))
      return 0;
  }
  return 1;
}