コード例 #1
0
bool SharedMemoryManagerVoxelMaps::getDevicePointer(void*& dev_pointer, const uint32_t index)
{
  std::string index_str = boost::lexical_cast<std::string>(index);
  std::string handler_var_name = shm_variable_name_voxelmap_handler_dev_pointer + index_str;
  std::pair<cudaIpcMemHandle_t*, std::size_t> res = shmm->getMemSegment().find<cudaIpcMemHandle_t>(
      handler_var_name.c_str());
  if (res.second == 0)
  {
    return false;
  }
  cudaIpcMemHandle_t handler = *res.first;
  cudaError_t cuda_error = cudaIpcOpenMemHandle((void**) &dev_pointer, (cudaIpcMemHandle_t) handler,
                                                cudaIpcMemLazyEnablePeerAccess);
  // the handle is closed by Visualizer.cu
  return cuda_error == cudaSuccess;
}
コード例 #2
0
ファイル: StorageSharing.cpp プロジェクト: RichieMay/pytorch
// Rebuild a CUDA storage in this process from a serialized IPC description:
// (device, ipc handle bytes, storage size, offset, view size).
// Returns a new THPStorage reference, or NULL with a Python error set.
static PyObject * THPStorage_(newSharedCuda)(PyObject *_unused, PyObject *args)
{
  HANDLE_TH_ERRORS
  THPUtils_assert(PyTuple_GET_SIZE(args) == 5, "tuple of 5 items expected");
  PyObject *_device = PyTuple_GET_ITEM(args, 0);
  PyObject *_handle = PyTuple_GET_ITEM(args, 1);
  PyObject *_size = PyTuple_GET_ITEM(args, 2);
  PyObject *_offset = PyTuple_GET_ITEM(args, 3);
  PyObject *_view_size = PyTuple_GET_ITEM(args, 4);
  if (!(THPUtils_checkLong(_device) && THPUtils_checkLong(_size)
      && (_handle == Py_None || PyBytes_Check(_handle))
      && THPUtils_checkLong(_offset) && THPUtils_checkLong(_view_size))) {
    // BUGFIX: the usage string was missing its closing parenthesis.
    THPUtils_invalidArguments(args, NULL, "_new_shared in CUDA mode", 1,
        "(int device, bytes handle, int storage_size, int offset, int view_size)");
    return NULL;
  }

  size_t storage_size = (size_t)THPUtils_unpackLong(_size);
  ptrdiff_t offset = (ptrdiff_t)THPUtils_unpackLong(_offset);
  size_t view_size =  (size_t)THPUtils_unpackLong(_view_size);

  // Switch to the device the memory lives on for the IPC open below.
  int64_t device = THPUtils_unpackLong(_device);
  AutoGPU __autogpu(device);

  char *buffer;
  Py_ssize_t handle_size;
  // Fails (returns -1, TypeError set) if _handle is not a bytes object.
  if (PyBytes_AsStringAndSize(_handle, &buffer, &handle_size) == -1) {
    return NULL;
  }
  THPUtils_assert(handle_size == CUDA_IPC_HANDLE_SIZE, "incorrect handle size");
  cudaIpcMemHandle_t handle = *(cudaIpcMemHandle_t*)buffer;

  // Map the exporting process' device allocation into this process.
  void *devPtr = NULL;
  THCudaCheck(cudaIpcOpenMemHandle(&devPtr, handle, cudaIpcMemLazyEnablePeerAccess));

  // Wrap the mapped pointer in a storage; THCIpcAllocator closes the IPC
  // mapping when the storage is freed (TH_STORAGE_FREEMEM).
  THStoragePtr base(THStorage_(newWithDataAndAllocator)(
      LIBRARY_STATE (real*)devPtr, storage_size, &THCIpcAllocator, (void*)device));
  base->flag = TH_STORAGE_REFCOUNTED | TH_STORAGE_FREEMEM;

  // If the sender shared only a slice, expose a view over the base storage.
  if (offset != 0 || view_size != storage_size) {
    return THPStorage_(newTHView)(base.get(), offset, view_size);
  }

  return THPStorage_(New)(base.release());
  END_HANDLE_TH_ERRORS
}
// Fetch the visualization data of voxel list `index` from shared memory:
// maps the producer's device cube array into this process and reads the count.
// On success sets `cubes`/`size` and returns true (cubes == NULL if the list
// is empty); returns false if the shared handles are missing or mapping fails.
bool SharedMemoryManagerVoxelLists::getVisualizationData(Cube*& cubes, uint32_t& size, const uint32_t index)
{
  const std::string index_str = boost::lexical_cast<std::string>(index);
  const std::string handler_name = shm_variable_name_voxellist_handler_dev_pointer + index_str;
  const std::string number_cubes_name = shm_variable_name_voxellist_num_voxels + index_str;

  // Find shared memory handles for: Cubes device pointer, number of cubes
  std::pair<cudaIpcMemHandle_t*, std::size_t> shm_cubes_handle =
      shmm->getMemSegment().find<cudaIpcMemHandle_t>(handler_name.c_str());
  std::pair<uint32_t*, std::size_t> shm_size =
      shmm->getMemSegment().find<uint32_t>(number_cubes_name.c_str());

  if (shm_cubes_handle.second == 0 || shm_size.second == 0)
  {
    // Shared memory handles not found
    return false;
  }

  const uint32_t new_size = *shm_size.first;
  if (new_size == 0)
  {
    cubes = NULL; // No memory is allocated when voxellist is empty
    size = 0;
    return true;
  }

  Cube* new_cubes = NULL;
  cudaError_t cuda_error = cudaIpcOpenMemHandle((void**) &new_cubes, *shm_cubes_handle.first,
                                                cudaIpcMemLazyEnablePeerAccess);
  if (cuda_error != cudaSuccess)
  {
    // BUGFIX: the IPC handle was never opened here, so there is nothing to
    // close — the old code passed an unopened (garbage) pointer to
    // cudaIpcCloseMemHandle, which is invalid.
    return false;
  }

  cubes = new_cubes;
  size = new_size;
  return true;
}
コード例 #4
0
// Fetch the visualization data of octree `index` from shared memory: maps the
// producer's device cube array into this process and reads the cube count.
// On success sets `cubes`/`size` and returns true; returns false if either
// shared variable is missing or the IPC mapping cannot be opened.
bool SharedMemoryManagerOctrees::getOctreeVisualizationData(Cube*& cubes, uint32_t& size, uint32_t index)
{
  const std::string index_str = boost::lexical_cast<std::string>(index);
  const std::string handler_name = shm_variable_name_octree_handler_dev_pointer + index_str;
  const std::string number_cubes_name = shm_variable_name_number_cubes + index_str;

  // Find the handler object
  std::pair<cudaIpcMemHandle_t*, std::size_t> res_h =
      shmm->getMemSegment().find<cudaIpcMemHandle_t>(handler_name.c_str());
  if (res_h.second == 0)
  {
    // Handle not published — nothing was opened, nothing to close.
    // BUGFIX: the old code fell through to cudaIpcCloseMemHandle() on an
    // uninitialized pointer in this case.
    return false;
  }

  // Get the device data pointer from the handler.
  // On success the handle is closed by Visualizer.cu.
  Cube* dev_data_pointer = NULL;
  cudaError_t cuda_error = cudaIpcOpenMemHandle((void**) &dev_data_pointer, *res_h.first,
                                                cudaIpcMemLazyEnablePeerAccess);
  if (cuda_error != cudaSuccess)
  {
    // BUGFIX: open failed, so there is no mapping to close here either.
    return false;
  }

  // Find the number of cubes
  std::pair<uint32_t*, std::size_t> res_d = shmm->getMemSegment().find<uint32_t>(number_cubes_name.c_str());
  if (res_d.second == 0)
  {
    // Count missing: release the mapping we just opened before failing.
    cudaIpcCloseMemHandle(dev_data_pointer);
    return false;
  }

  cubes = dev_data_pointer;
  size = *res_d.first;
  return true;
}