/*
 * Based on the implementation of THTensor_(indexCopy) in torch7.
 *
 * Fills the slices of `tensor` along dimension `dim` selected by the
 * entries of `index` with the scalar `val`, in place.
 *
 * tensor: destination CUDA tensor (modified in place)
 * dim:    dimension to index along, 0-based
 * index:  1-D CPU long tensor of 1-based indices (Lua convention;
 *         each entry is shifted by -1 before use)
 * val:    fill value
 */
static void THCudaTensor_indexFill(THCudaTensor *tensor, int dim, THLongTensor *index, float val)
{
  long i, numel;
  THCudaTensor *tSlice;
  long *index_data;

  numel = THLongTensor_nElement(index);
  THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
  /* Reject negative dims too; the original `dim < nDimension` check alone
     let dim = -1 through to THCudaTensor_select. */
  THArgCheck(dim >= 0 && dim < tensor->nDimension, 4, "Indexing dim is out of bounds");

  index = THLongTensor_newContiguous(index);
  index_data = THLongTensor_data(index);

  if (tensor->nDimension > 1)
  {
    /* Allocate the view tensor once; THCudaTensor_select re-points its
       storage on every iteration, so a per-index new/free is wasted work. */
    tSlice = THCudaTensor_new();
    for (i = 0; i < numel; i++)
    {
      THCudaTensor_select(tSlice, tensor, dim, index_data[i] - 1);
      THCudaTensor_fill(tSlice, val);
    }
    THCudaTensor_free(tSlice);
  }
  else
  {
    /* 1-D tensor: no slice to take, write elements directly. */
    for (i = 0; i < numel; i++)
      THCudaTensor_set1d(tensor, index_data[i] - 1, val);
  }

  THLongTensor_free(index);
}
/*
 * Lua __sub metamethod for torch.CudaTensor.
 *
 * Handles three cases: tensor - tensor, tensor - number, and
 * number - tensor. Pushes the result tensor onto the Lua stack and
 * returns 1; raises a Lua error when neither operand is a tensor.
 */
static int cutorch_CudaTensorOperator___sub__(lua_State *L)
{
  THCudaTensor *tensor1 = luaT_toudata(L, 1, "torch.CudaTensor");
  THCudaTensor *tensor2 = luaT_toudata(L, 2, "torch.CudaTensor");
  THCudaTensor *r;

  if (!tensor1 && !tensor2)
  {
    /* luaL_error does not return. */
    luaL_error(L, "expecting two Tensors or one Tensor and one number");
  }
  else
  {
    r = THCudaTensor_new();
    luaT_pushudata(L, r, "torch.CudaTensor");

    if (tensor1 && tensor2)
    {
      /* tensor - tensor: r = tensor1 + (-1) * tensor2 */
      THCudaTensor_resizeAs(r, tensor1);
      THCudaTensor_copy(r, tensor1);
      THCudaTensor_cadd(r, r, -1, tensor2);
    }
    else if (tensor1)
    {
      /* tensor - number: r = tensor1 + (-number) */
      THCudaTensor_resizeAs(r, tensor1);
      THCudaTensor_copy(r, tensor1);
      THCudaTensor_add(r, r, -luaL_checknumber(L, 2));
    }
    else
    {
      /* number - tensor: r = number + (-1) * tensor2 */
      THCudaTensor_resizeAs(r, tensor2);
      THCudaTensor_fill(r, luaL_checknumber(L, 1));
      THCudaTensor_cadd(r, r, -1, tensor2);
    }
  }

  return 1;
}