void isosurface_renderer_fraglist_raycasting::register_cuda_resources () { // map glresources to kernel resources cudaError_t cuda_err = cudaSuccess; register_buffer ( &_cuda_surface_data_buffer, _surface_data_texturebuffer, cudaGraphicsRegisterFlagsReadOnly ); register_buffer ( &_cuda_surface_points_buffer, _surface_points_texturebuffer, cudaGraphicsRegisterFlagsReadOnly ); register_buffer ( &_cuda_volume_data_buffer, _volume_data_texturebuffer, cudaGraphicsRegisterFlagsReadOnly ); register_buffer ( &_cuda_volume_points_buffer, _volume_points_texturebuffer, cudaGraphicsRegisterFlagsReadOnly ); register_buffer ( &_cuda_attribute_data_buffer, _attribute_data_texturebuffer, cudaGraphicsRegisterFlagsReadOnly ); register_buffer ( &_cuda_attribute_points_buffer, _attribute_points_texturebuffer, cudaGraphicsRegisterFlagsReadOnly ); if ( !_cuda_colorbuffer ) register_image ( &_cuda_colorbuffer, _colorattachment->id(), _colorattachment->target(), cudaGraphicsRegisterFlagsSurfaceLoadStore ); if ( !_cuda_depthbuffer ) register_image ( &_cuda_depthbuffer, _depthattachment->id(), _depthattachment->target(), cudaGraphicsRegisterFlagsSurfaceLoadStore );\ if ( !_cuda_headpointer ) register_image ( &_cuda_headpointer, _indextexture->id(), _indextexture->target(), cudaGraphicsRegisterFlagsSurfaceLoadStore ); if ( !_cuda_fragmentcount ) register_image ( &_cuda_fragmentcount, _fragmentcount->id(), _fragmentcount->target(), cudaGraphicsRegisterFlagsSurfaceLoadStore ); if ( _external_color_depth_texture ) register_image ( &_cuda_external_texture, _external_color_depth_texture->id(), _external_color_depth_texture->target(), cudaGraphicsRegisterFlagsSurfaceLoadStore ); register_buffer ( &_cuda_indexlist, *_indexlist, cudaGraphicsRegisterFlagsNone ); register_buffer ( &_cuda_matrixbuffer, *_matrixbuffer, cudaGraphicsRegisterFlagsReadOnly ); register_buffer ( &_cuda_allocation_grid, *_allocation_grid, cudaGraphicsRegisterFlagsNone ); }
// Handle the MSG_SIZE completion of an incoming parcel.
//
// Small payloads (<= message::payload_size) arrive inline with the size
// message and are copied out directly; larger payloads require us to
// register a receive buffer and send its memory region back to the peer so
// it can RDMA-write into it.
//
// Returns the result of advancing the receiver state machine, or false when
// no MSG_SIZE work completion is available yet.
bool read_size(boost::system::error_code & ec)
{
    if (!context_.check_wc<false>(MSG_SIZE, ec))
        return false;

    std::size_t const size = context_.connection().size();
    buffer_->data_.resize(size);

    if (size <= message::payload_size)
    {
        // Inline payload: already delivered with the size message.
        std::memcpy(&buffer_->data_[0],
            context_.connection().msg_payload(), size);
        return next(&receiver::write_ack);
    }

    // Large payload: pin our receive buffer for RDMA.
    mr_ = register_buffer(
        parcelport_
      , context_.pd_
      , &buffer_->data_[0]
      , buffer_->data_.size()
      , IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);

    // Advertise a view of the registered region that covers exactly our
    // buffer (the cached mr_ may describe a larger registration).
    adapted_mr_ = *mr_.mr_;
    adapted_mr_.addr = &buffer_->data_[0];
    adapted_mr_.length = buffer_->data_.size();

    // write the newly received mr ...
    context_.connection().send_mr(&adapted_mr_, ec);
    return next(&receiver::sent_mr);
}
/*
 * hwrng .read callback: request up to @size bytes of entropy from the host.
 *
 * If no request is in flight (!vi->busy), re-arm the completion and queue
 * @buf on the virtqueue via register_buffer(); the host's response is
 * signalled through vi->have_data (presumably from the virtqueue callback --
 * not visible here, confirm against the rest of the driver).
 *
 * Returns the number of bytes made available, 0 when @wait is false and the
 * data is not ready yet, -ENODEV after device removal, or a negative error
 * if the killable wait was interrupted.
 */
static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
{
	int ret;
	struct virtrng_info *vi = (struct virtrng_info *)rng->priv;

	/* Device is going away: refuse new reads. */
	if (vi->hwrng_removed)
		return -ENODEV;

	if (!vi->busy) {
		vi->busy = true;
		/* Re-arm before queuing so the completion cannot be missed. */
		reinit_completion(&vi->have_data);
		register_buffer(vi, buf, size);
	}

	/* Non-blocking caller: the buffer stays queued, data comes later. */
	if (!wait)
		return 0;

	ret = wait_for_completion_killable(&vi->have_data);
	if (ret < 0)
		return ret;

	/* NOTE(review): busy is left set on the killable-interrupt path
	 * above -- presumably intentional so the in-flight buffer is not
	 * re-registered; confirm against the removal/cleanup paths. */
	vi->busy = false;

	return vi->data_avail;
}
/*
 * hwrng .read callback: request up to @size bytes of entropy from the host.
 *
 * Returns the number of bytes made available, 0 when not ready (either
 * @wait is false or the device is not fully probed yet), or a negative
 * error if the killable wait was interrupted.
 */
static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
{
	int ret;
	struct virtrng_info *vi = (struct virtrng_info *)rng->priv;

	/*
	 * Don't ask host for data till we're setup. This call can
	 * happen during hwrng_register(), after commit d9e7972619.
	 */
	if (unlikely(!probe_done))
		return 0;

	if (!vi->busy) {
		vi->busy = true;
		/*
		 * Re-arm the already-initialised completion.  Use
		 * reinit_completion() instead of init_completion(): a full
		 * init_completion() re-initialises the embedded wait queue
		 * as well, which is wrong for an object that is merely
		 * being reused (and is what the sibling revision of this
		 * function does).  reinit_completion() only resets the
		 * done counter.
		 */
		reinit_completion(&vi->have_data);
		register_buffer(vi, buf, size);
	}

	/* Non-blocking caller: the buffer stays queued, data comes later. */
	if (!wait)
		return 0;

	ret = wait_for_completion_killable(&vi->have_data);
	if (ret < 0)
		return ret;

	vi->busy = false;

	return vi->data_avail;
}
/* virtio_data_present() must have succeeded before this is called. */
/*
 * hwrng .data_read callback: hand out one u32 from the buffered random
 * data, consuming random_data from the end towards the front (data_left
 * counts remaining bytes; data_left / 4 indexes the last unread u32).
 *
 * When fewer than 4 bytes remain, re-arm the completion and queue the
 * buffer for a refill from the host.  random_data/data_left/have_data are
 * file-scope state shared with the virtqueue callback -- the ordering here
 * (consume, then re-register) is load-bearing.
 *
 * Always returns sizeof(u32).
 */
static int virtio_data_read(struct hwrng *rng, u32 *data)
{
	/* Caller contract violated if no full word is buffered. */
	BUG_ON(data_left < sizeof(u32));

	data_left -= sizeof(u32);
	*data = random_data[data_left / 4];

	/* Exhausted (or nearly so): request a refill for the next read. */
	if (data_left < sizeof(u32)) {
		init_completion(&have_data);
		register_buffer();
	}

	return sizeof(*data);
}
/*
 * hwrng .read callback (early, file-scope-state revision): request up to
 * @size bytes of entropy from the host.
 *
 * busy/have_data/data_avail are file-scope globals shared with the
 * virtqueue callback.  The order inside the !busy branch matters: the
 * completion must be (re)armed before the buffer is queued, so a fast host
 * response cannot be missed.
 *
 * Returns the number of bytes made available, or 0 when @wait is false and
 * the data is not ready yet.  Note the wait here is uninterruptible.
 */
static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
{
	if (!busy) {
		busy = true;
		init_completion(&have_data);
		register_buffer(buf, size);
	}

	/* Non-blocking caller: the buffer stays queued, data comes later. */
	if (!wait)
		return 0;

	wait_for_completion(&have_data);

	busy = false;

	return data_avail;
}
/*
 * Probe the virtio-rng device: set up its single virtqueue, register with
 * the hwrng core, then queue the first receive buffer so entropy starts
 * flowing.
 *
 * Returns 0 on success or a negative errno; on hwrng registration failure
 * the virtqueues are torn down again.
 */
static int virtrng_probe(struct virtio_device *vdev)
{
	int err;

	/* We expect a single virtqueue. */
	vq = virtio_find_single_vq(vdev, random_recv_done, "input");
	if (IS_ERR(vq))
		return PTR_ERR(vq);

	err = hwrng_register(&virtio_hwrng);
	if (err)
		goto err_register;

	register_buffer();
	return 0;

err_register:
	/* Undo virtio_find_single_vq() on failure. */
	vdev->config->del_vqs(vdev);
	return err;
}
/* At least we don't udelay() in a loop like some other drivers. */
/*
 * hwrng .data_present callback: report whether at least one u32 of
 * buffered random data is available.  When @wait is set, block until the
 * host has delivered enough, re-registering the buffer whenever a
 * completion fires with insufficient data.
 *
 * Returns 1 when a full u32 is available, 0 when not (non-waiting case).
 */
static int virtio_data_present(struct hwrng *rng, int wait)
{
	if (data_left >= sizeof(u32))
		return 1;

	if (!wait)
		return 0;

	/* @wait never changes within a call, so the original goto-retry
	 * collapses to a plain loop. */
	for (;;) {
		wait_for_completion(&have_data);

		if (data_left >= sizeof(u32))
			return 1;

		/* Not enough? Re-register. */
		register_buffer();
	}
}