// Prepares a CUDA storage for cross-process sharing via CUDA IPC.
// Builds and returns a 5-tuple:
//   (device index, IPC handle bytes or None, size, offset, view size)
// When the storage has no data pointer, the handle stays None and the
// sizes describe the (empty) storage itself.
// NOTE(review): offset is a byte offset into the base allocation while
// size is in elements of `real` -- confirm against the consumer side.
static PyObject * THPStorage_(shareCuda)(THPStorage *self)
{
  HANDLE_TH_ERRORS
  THStorage *storage = self->cdata;
  AutoGPU gpu_guard(storage->device);

  THPObjectPtr result(PyTuple_New(5));
  THPObjectPtr py_device(PyLong_FromLong(storage->device));
  THPObjectPtr py_handle(Py_None);
  Py_INCREF(Py_None);  // compensate: THPObjectPtr took Py_None without incref
  THPObjectPtr py_size(PyLong_FromLong(storage->size));
  THPObjectPtr py_offset(PyLong_FromLong(0));
  THPObjectPtr py_view_size(PyLong_FromLong(storage->size));

  if (storage->data) {
    // The caching allocator may have handed out a sub-range of a larger
    // allocation; an IPC handle can only be created for the base pointer,
    // so export the base plus this storage's offset within it.
    size_t base_size;
    void *base_ptr = THCCachingAllocator_getBaseAllocation(storage->data, &base_size);
    ptrdiff_t offset = (char*)storage->data - (char*)base_ptr;

    cudaIpcMemHandle_t handle;
    THCudaCheck(cudaIpcGetMemHandle(&handle, base_ptr));

    py_handle = PyBytes_FromStringAndSize((char *)&handle, CUDA_IPC_HANDLE_SIZE);
    py_offset = PyLong_FromSsize_t((Py_ssize_t)offset);
    py_size = PyLong_FromSize_t(base_size / sizeof(real));
  }

  // Any allocation above may have failed; bail out before filling the tuple.
  if (!result || !py_device || !py_handle || !py_size || !py_offset || !py_view_size) {
    return NULL;
  }
  // PyTuple_SET_ITEM steals the references, so release() each smart pointer.
  PyTuple_SET_ITEM(result.get(), 0, py_device.release());
  PyTuple_SET_ITEM(result.get(), 1, py_handle.release());
  PyTuple_SET_ITEM(result.get(), 2, py_size.release());
  PyTuple_SET_ITEM(result.get(), 3, py_offset.release());
  PyTuple_SET_ITEM(result.get(), 4, py_view_size.release());
  return result.release();
  END_HANDLE_TH_ERRORS
}
void VoxelMapProvider::visualize() { m_mutex.lock(); HANDLE_CUDA_ERROR(cudaIpcGetMemHandle(m_shm_memHandle, m_voxelMap->getVoidDeviceDataPtr())); *m_shm_mapDim = m_voxelMap->getDimensions(); *m_shm_VoxelSize = m_voxelMap->getVoxelSideLength(); m_changed = false; m_mutex.unlock(); }
bool VisVoxelMap::visualize(const bool force_repaint) { if (force_repaint) { openOrCreateSegment(); uint32_t shared_mem_id; if (m_shm_memHandle == NULL) { // there should only be one segment of number_of_voxelmaps std::pair<uint32_t*, std::size_t> r = m_segment.find<uint32_t>( shm_variable_name_number_of_voxelmaps.c_str()); if (r.second == 0) { // if it doesn't exists .. m_segment.construct<uint32_t>(shm_variable_name_number_of_voxelmaps.c_str())(1); shared_mem_id = 0; } else { // if it exists increase it by one shared_mem_id = *r.first; (*r.first)++; } // get shared memory pointer std::stringstream id; id << shared_mem_id; m_shm_memHandle = m_segment.find_or_construct<cudaIpcMemHandle_t>( std::string(shm_variable_name_voxelmap_handler_dev_pointer + id.str()).c_str())( cudaIpcMemHandle_t()); m_shm_mapDim = m_segment.find_or_construct<Vector3ui>( std::string(shm_variable_name_voxelmap_dimension + id.str()).c_str())(Vector3ui(0)); m_shm_VoxelSize = m_segment.find_or_construct<float>( std::string(shm_variable_name_voxel_side_length + id.str()).c_str())(0.0f); m_shm_mapName = m_segment.find_or_construct_it<char>( std::string(shm_variable_name_voxelmap_name + id.str()).c_str())[m_map_name.size()]( m_map_name.data()); m_shm_voxelmap_type = m_segment.find_or_construct<MapType>( std::string(shm_variable_name_voxelmap_type + id.str()).c_str())(m_voxelmap->getMapType()); m_shm_voxelmap_changed = m_segment.find_or_construct<bool>( std::string(shm_variable_name_voxelmap_data_changed + id.str()).c_str())(true); } // first open or create and the set the values HANDLE_CUDA_ERROR(cudaIpcGetMemHandle(m_shm_memHandle, m_voxelmap->getVoidDeviceDataPtr())); *m_shm_mapDim = m_voxelmap->getDimensions(); *m_shm_VoxelSize = m_voxelmap->getVoxelSideLength(); *m_shm_voxelmap_changed = true; // // wait till data was read by visualizer. Otherwise a // while(*m_shm_voxelmap_changed) // usleep(10000); // sleep 10 ms return true; } return false; }
/*!
 * Publishes the NTree's extracted cubes through shared memory for the
 * external visualizer.
 *
 * On the first call this registers the octree in the shared segment
 * (claims a unique id from the shared octree counter and creates or
 * finds the per-octree shared objects). Afterwards, whenever the
 * visualizer has consumed the previous frame and either the requested
 * super-voxel level changed or a repaint is forced, it extracts the
 * cubes into one of two alternating device buffers, exports that
 * buffer as a CUDA IPC handle and flags the swap.
 *
 * @param force_repaint extract and publish even if the level is unchanged
 * @return true if a new buffer was published, false otherwise
 */
bool VisNTree<InnerNode, LeafNode>::visualize(const bool force_repaint)
{
  openOrCreateSegment();

  if (m_shm_memHandle == NULL) // one-time shared-memory registration
  {
    // There should only be one shared counter holding the number of octrees.
    uint32_t shared_mem_id;
    std::pair<uint32_t*, std::size_t> counter = m_segment.find<uint32_t>(
        shm_variable_name_number_of_octrees.c_str());
    if (counter.second == 0)
    {
      // Counter does not exist yet: create it and take id 0.
      m_segment.construct<uint32_t>(shm_variable_name_number_of_octrees.c_str())(1);
      shared_mem_id = 0;
    }
    else
    {
      // Counter exists: take the next id and bump the counter.
      shared_mem_id = *counter.first;
      (*counter.first)++;
    }

    // All per-octree shared objects are suffixed with this octree's id.
    std::stringstream id_stream;
    id_stream << shared_mem_id;
    const std::string suffix = id_stream.str();

    m_shm_superVoxelSize = m_segment.find_or_construct<uint32_t>(
        shm_variable_name_super_voxel_size.c_str())(1);
    m_shm_memHandle = m_segment.find_or_construct<cudaIpcMemHandle_t>(
        (shm_variable_name_octree_handler_dev_pointer + suffix).c_str())(cudaIpcMemHandle_t());
    m_shm_numCubes = m_segment.find_or_construct<uint32_t>(
        (shm_variable_name_number_cubes + suffix).c_str())(0);
    m_shm_bufferSwapped = m_segment.find_or_construct<bool>(
        (shm_variable_name_buffer_swapped + suffix).c_str())(false);
    m_shm_mapName = m_segment.find_or_construct_it<char>(
        (shm_variable_name_octree_name + suffix).c_str())[m_map_name.size()](m_map_name.data());
  }

  // Level requested by the visualizer (super voxel size is level + 1).
  const uint32_t requested_min_level = *m_shm_superVoxelSize - 1;

  // m_shm_bufferSwapped tells whether the visualizer already rendered
  // the previous frame; only publish again once it has been consumed
  // and there is something new to show.
  if (*m_shm_bufferSwapped == false && (requested_min_level != m_min_level || force_repaint))
  {
    m_min_level = requested_min_level;

    uint32_t cube_buffer_size;
    Cube *d_cubes_buffer;
    // Double buffering: alternate between the two device cube buffers.
    // extractCubes() allocates the buffer if its pointer is still NULL.
    if (m_internal_buffer_1)
    {
      cube_buffer_size = m_ntree->extractCubes(m_d_cubes_1, NULL, m_min_level);
      d_cubes_buffer = thrust::raw_pointer_cast(m_d_cubes_1->data());
      m_internal_buffer_1 = false;
    }
    else
    {
      cube_buffer_size = m_ntree->extractCubes(m_d_cubes_2, NULL, m_min_level);
      d_cubes_buffer = thrust::raw_pointer_cast(m_d_cubes_2->data());
      m_internal_buffer_1 = true;
    }

    // Export the freshly filled buffer and signal the swap.
    HANDLE_CUDA_ERROR(cudaIpcGetMemHandle(m_shm_memHandle, d_cubes_buffer));
    *m_shm_numCubes = cube_buffer_size;
    *m_shm_bufferSwapped = true;
    return true;
  }
  return false;
}
/*!
 * Publishes the voxel list's extracted cubes through shared memory for
 * the external visualizer.
 *
 * On the first call this registers the list in the shared segment
 * (claims a unique id from the shared voxel-list counter and creates
 * or finds the per-list shared objects). Afterwards, whenever the
 * visualizer has consumed the previous frame and a repaint is forced,
 * it extracts the cubes into one of two alternating device buffers,
 * exports that buffer as a CUDA IPC handle and flags the swap.
 *
 * @param force_repaint extract and publish only when true
 * @return true if a non-empty buffer was published, false otherwise
 */
bool VisTemplateVoxelList<Voxel, VoxelIDType>::visualize(const bool force_repaint)
{
  openOrCreateSegment();

  if (m_shm_memHandle == NULL) // one-time shared-memory registration
  {
    // There should only be one shared counter holding the number of
    // voxel lists.
    uint32_t shared_mem_id;
    std::pair<uint32_t*, std::size_t> counter = m_segment.find<uint32_t>(
        shm_variable_name_number_of_voxellists.c_str());
    if (counter.second == 0)
    {
      // Counter does not exist yet: create it and take id 0.
      m_segment.construct<uint32_t>(shm_variable_name_number_of_voxellists.c_str())(1);
      shared_mem_id = 0;
    }
    else
    {
      // Counter exists: take the next id and bump the counter.
      shared_mem_id = *counter.first;
      (*counter.first)++;
    }

    // All per-list shared objects are suffixed with this list's id.
    std::stringstream id_stream;
    id_stream << shared_mem_id;
    const std::string suffix = id_stream.str();

    m_shm_memHandle = m_segment.find_or_construct<cudaIpcMemHandle_t>(
        (shm_variable_name_voxellist_handler_dev_pointer + suffix).c_str())(cudaIpcMemHandle_t());
    m_shm_num_cubes = m_segment.find_or_construct<uint32_t>(
        (shm_variable_name_voxellist_num_voxels + suffix).c_str())(uint32_t(0));
    m_shm_bufferSwapped = m_segment.find_or_construct<bool>(
        (shm_variable_name_voxellist_buffer_swapped + suffix).c_str())(false);
    std::cout << "Name of shared buffer swapped: "
              << std::string(shm_variable_name_voxellist_buffer_swapped + suffix).c_str() << "."
              << std::endl;
    m_shm_mapName = m_segment.find_or_construct_it<char>(
        (shm_variable_name_voxellist_name + suffix).c_str())[m_map_name.size()](m_map_name.data());
    m_shm_voxellist_type = m_segment.find_or_construct<MapType>(
        (shm_variable_name_voxellist_type + suffix).c_str())(m_voxellist->getMapType());
  }

  // Only publish once the visualizer consumed the previous frame.
  if (*m_shm_bufferSwapped == false && force_repaint)
  {
    uint32_t cube_buffer_size;
    Cube *d_cubes_buffer;
    // Double buffering: alternate between the two device cube buffers.
    // extractCubes() allocates the buffer if its pointer is still NULL.
    if (m_internal_buffer_1)
    {
      m_voxellist->extractCubes(&m_dev_buffer_1);
      cube_buffer_size = m_dev_buffer_1->size();
      d_cubes_buffer = thrust::raw_pointer_cast(m_dev_buffer_1->data());
      m_internal_buffer_1 = false;
    }
    else
    {
      m_voxellist->extractCubes(&m_dev_buffer_2);
      cube_buffer_size = m_dev_buffer_2->size();
      d_cubes_buffer = thrust::raw_pointer_cast(m_dev_buffer_2->data());
      m_internal_buffer_1 = true;
    }

    if (cube_buffer_size > 0)
    {
      // Export the freshly filled buffer and signal the swap.
      HANDLE_CUDA_ERROR(cudaIpcGetMemHandle(m_shm_memHandle, d_cubes_buffer));
      *m_shm_num_cubes = cube_buffer_size;
      *m_shm_bufferSwapped = true;
      return true;
    }
    // Nothing to show: do not raise the swapped flag.
    return false;
  }
  return false;
}