/**
 * @brief Read data from the ring buffer without advancing the read index.
 *
 * @param in_ring_info	ring buffer descriptor
 * @param buffer	destination buffer for the copied data
 * @param buffer_len	number of bytes to copy out
 *
 * @return 0 on success, or EAGAIN when fewer than buffer_len bytes
 *         are currently available to read.
 */
int
hv_ring_buffer_peek(
	hv_vmbus_ring_buffer_info*	in_ring_info,
	void*				buffer,
	uint32_t			buffer_len)
{
	uint32_t	avail_write;
	uint32_t	avail_read;
	uint32_t	read_loc;

	mtx_lock_spin(&in_ring_info->ring_lock);

	get_ring_buffer_avail_bytes(in_ring_info, &avail_read, &avail_write);

	/* Nothing to do unless a full buffer_len worth of data is present */
	if (avail_read < buffer_len) {
		mtx_unlock_spin(&in_ring_info->ring_lock);
		return (EAGAIN);
	}

	/*
	 * Copy from the current read position; the read index itself is
	 * deliberately left untouched so the data can be re-read later.
	 */
	read_loc = get_next_read_location(in_ring_info);
	read_loc = copy_from_ring_buffer(in_ring_info,
	    (char *)buffer, buffer_len, read_loc);

	mtx_unlock_spin(&in_ring_info->ring_lock);

	return (0);
}
/**
 * @brief Re-enable interrupts on the ring and report unread bytes.
 *
 * Clears the ring's interrupt mask, then (after a full memory barrier)
 * re-samples the ring occupancy so the caller can detect messages that
 * raced in while the mask was set.
 *
 * @param ring_info	ring buffer descriptor
 *
 * @return number of bytes currently available to read.
 */
uint32_t
hv_ring_buffer_read_end(hv_vmbus_ring_buffer_info* ring_info)
{
	uint32_t	avail_read, avail_write;

	ring_info->ring_buffer->interrupt_mask = 0;
	mb();

	/*
	 * Re-check whether the ring buffer is still empty.  If it is not,
	 * we raced with the writer and the caller must process the newly
	 * arrived messages.
	 */
	get_ring_buffer_avail_bytes(ring_info, &avail_read, &avail_write);

	return (avail_read);
}
/**
 * @brief Read from the ring buffer and advance the read index.
 *
 * Copies buffer_len bytes out of the ring starting at the current read
 * position plus offset, consumes the trailing 64-bit packet indices,
 * and then publishes the new read index.
 *
 * @param in_ring_info	ring buffer descriptor
 * @param buffer	destination buffer for the copied data
 * @param buffer_len	number of bytes to copy out; must be non-zero
 * @param offset	byte offset from the read index at which to start
 *
 * @return 0 on success, EINVAL for a zero-length request, or EAGAIN
 *         when fewer than buffer_len bytes are available.
 */
int
hv_ring_buffer_read(
	hv_vmbus_ring_buffer_info*	in_ring_info,
	void*				buffer,
	uint32_t			buffer_len,
	uint32_t			offset)
{
	uint32_t	bytes_avail_to_write;
	uint32_t	bytes_avail_to_read;
	uint32_t	next_read_location = 0;
	uint64_t	prev_indices = 0;

	/*
	 * buffer_len is unsigned, so only zero is invalid; the previous
	 * "<= 0" test was a misleading tautology on an unsigned type.
	 */
	if (buffer_len == 0)
		return (EINVAL);

	mtx_lock_spin(&in_ring_info->ring_lock);

	get_ring_buffer_avail_bytes(
	    in_ring_info, &bytes_avail_to_read, &bytes_avail_to_write);

	/*
	 * Make sure there is something to read
	 */
	if (bytes_avail_to_read < buffer_len) {
		mtx_unlock_spin(&in_ring_info->ring_lock);
		return (EAGAIN);
	}

	next_read_location = get_next_read_location_with_offset(
	    in_ring_info, offset);

	next_read_location = copy_from_ring_buffer(
	    in_ring_info, (char *)buffer, buffer_len, next_read_location);

	/* Consume the trailing packet indices written by the sender */
	next_read_location = copy_from_ring_buffer(
	    in_ring_info, (char *)&prev_indices, sizeof(uint64_t),
	    next_read_location);

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	wmb();

	/*
	 * Update the read index
	 */
	set_next_read_location(in_ring_info, next_read_location);

	mtx_unlock_spin(&in_ring_info->ring_lock);

	return (0);
}
/**
 * @brief Write a scatter/gather list of buffers to the ring buffer.
 *
 * Copies each segment into the ring, appends the 64-bit ring indices
 * as the packet trailer, and then publishes the new write index after
 * a write barrier.
 *
 * @param out_ring_info		ring buffer descriptor
 * @param sg_buffers		array of {data, length} segments to write
 * @param sg_buffer_count	number of entries in sg_buffers
 *
 * @return 0 on success, or EAGAIN when the ring lacks room for the
 *         whole packet plus its trailer.
 */
int
hv_ring_buffer_write(
	hv_vmbus_ring_buffer_info*	out_ring_info,
	hv_vmbus_sg_buffer_list		sg_buffers[],
	uint32_t			sg_buffer_count)
{
	/* uint32_t counter: avoids signed/unsigned comparison with count */
	uint32_t	i;
	uint32_t	byte_avail_to_write;
	uint32_t	byte_avail_to_read;
	uint32_t	total_bytes_to_write = 0;
	/* Plain local index; the old "volatile" qualifier served no purpose */
	uint32_t	next_write_location;
	uint64_t	prev_indices = 0;

	for (i = 0; i < sg_buffer_count; i++) {
		total_bytes_to_write += sg_buffers[i].length;
	}

	/* Account for the trailing 64-bit indices written with each packet */
	total_bytes_to_write += sizeof(uint64_t);

	mtx_lock_spin(&out_ring_info->ring_lock);

	get_ring_buffer_avail_bytes(out_ring_info, &byte_avail_to_read,
	    &byte_avail_to_write);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we think the ring buffer
	 * is empty since the read index == write index
	 */
	if (byte_avail_to_write <= total_bytes_to_write) {
		mtx_unlock_spin(&out_ring_info->ring_lock);
		return (EAGAIN);
	}

	/*
	 * Write to the ring buffer
	 */
	next_write_location = get_next_write_location(out_ring_info);

	for (i = 0; i < sg_buffer_count; i++) {
		next_write_location = copy_to_ring_buffer(out_ring_info,
		    next_write_location, (char *)sg_buffers[i].data,
		    sg_buffers[i].length);
	}

	/*
	 * Set previous packet start
	 */
	prev_indices = get_ring_buffer_indices(out_ring_info);

	next_write_location = copy_to_ring_buffer(out_ring_info,
	    next_write_location, (char *)&prev_indices, sizeof(uint64_t));

	/*
	 * Make sure we flush all writes before updating the writeIndex
	 */
	wmb();

	/*
	 * Now, update the write location
	 */
	set_next_write_location(out_ring_info, next_write_location);

	mtx_unlock_spin(&out_ring_info->ring_lock);

	return (0);
}