/*
 *
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer
 *
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
			struct kvec *kv_list, u32 kv_count, bool *signal)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 totalbytes_towrite = 0;
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(outring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/* If there is only room for the packet, assume it is full. */
	/* Otherwise, the next time around, we think the ring buffer */
	/* is empty since the read index == write index */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	*signal = hv_need_to_signal(old_write, outring_info);
	return 0;
}
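/*
 * For context, a minimal sketch of the free-space computation that the
 * "assume it is full" check above depends on.  hv_get_ringbuffer_availbytes()
 * is a real helper in this driver, but the body below is a reconstruction
 * for illustration (hence the _sketch suffix), assuming the data region is
 * a circular byte array indexed by a read_index and a write_index: with
 * only those two indices, a completely full ring (write == read) would be
 * indistinguishable from an empty one, so the writer must always leave at
 * least one byte free.
 */
static inline void
hv_get_ringbuffer_availbytes_sketch(const struct hv_ring_buffer_info *rbi,
				    u32 *read, u32 *write)
{
	/* Snapshot the indices so both results use one consistent view. */
	u32 read_loc = rbi->ring_buffer->read_index;
	u32 write_loc = rbi->ring_buffer->write_index;
	u32 dsize = rbi->ring_datasize;

	/* Free bytes: the gap from the write index back around to read. */
	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	/* Pending (unread) bytes are whatever is not free. */
	*read = dsize - *write;
}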
/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count)
{
	int i;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = sizeof(u64);
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices;
	unsigned long flags;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we think the ring buffer
	 * is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind)
		return -ENODEV;

	return 0;
}
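/*
 * A hedged sketch of how a caller typically feeds the kvec-based variant
 * above: header, payload, and trailing pad each get their own kvec entry,
 * so hv_ringbuffer_write() can copy them back to back without the caller
 * staging a contiguous buffer.  Apart from struct kvec, struct
 * vmbus_channel, struct vmpacket_descriptor, and hv_ringbuffer_write()
 * itself, everything here (the function name, the zeroed descriptor, the
 * alignment choice) is an illustrative assumption, not lifted from the
 * driver.
 */
static int example_send_sketch(struct vmbus_channel *channel,
			       void *payload, u32 payload_len)
{
	struct vmpacket_descriptor desc;
	u64 pad = 0;
	u32 packetlen = sizeof(desc) + payload_len;
	u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
	struct kvec bufferlist[3];

	memset(&desc, 0, sizeof(desc));	/* real callers fill type/offset/len */

	bufferlist[0].iov_base = &desc;
	bufferlist[0].iov_len  = sizeof(desc);
	bufferlist[1].iov_base = payload;
	bufferlist[1].iov_len  = payload_len;
	bufferlist[2].iov_base = &pad;		/* 8-byte alignment padding */
	bufferlist[2].iov_len  = packetlen_aligned - packetlen;

	return hv_ringbuffer_write(channel, bufferlist, 3);
}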
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	for_each_sg(sglist, sg, sgcount, i)
	{
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   sg_virt(sg),
							   sg->length);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Make sure we flush all writes before updating the writeIndex */
	smp_wmb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);
	return 0;
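/*
 * All three versions advance the write cursor through the return value of
 * hv_copyto_ringbuffer().  Below is a hedged sketch of what that helper
 * has to do, assuming the data region is a plain circular byte array:
 * split the copy in two when it would run past the end, then reduce the
 * new offset modulo the ring size.  The helper names are real in this
 * driver, but the body is a reconstruction for illustration (hence the
 * _sketch suffix).
 */
static u32 hv_copyto_ringbuffer_sketch(struct hv_ring_buffer_info *ring_info,
				       u32 start_write_offset,
				       const void *src, u32 srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	if (srclen > ring_buffer_size - start_write_offset) {
		/* Copy wraps: fill to the end, continue from the start. */
		frag_len = ring_buffer_size - start_write_offset;
		memcpy(ring_buffer + start_write_offset, src, frag_len);
		memcpy(ring_buffer, src + frag_len, srclen - frag_len);
	} else {
		memcpy(ring_buffer + start_write_offset, src, srclen);
	}

	/* Return the next free offset, wrapped into [0, ring size). */
	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}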