static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = net_device_ctx->nvdev;
	int ret;
	u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
	struct vmbus_channel *chn;

	netif_tx_disable(net);

	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
	cancel_work_sync(&net_device_ctx->work);
	ret = rndis_filter_close(device_obj);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	/* Ensure pending bytes in ring are read */
	while (true) {
		aread = 0;
		for (i = 0; i < nvdev->num_chn; i++) {
			chn = nvdev->chn_table[i];
			if (!chn)
				continue;

			hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
						     &awrite);

			if (aread)
				break;

			hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
						     &awrite);

			if (aread)
				break;
		}

		retry++;
		if (retry > retry_max || aread == 0)
			break;

		msleep(msec);

		if (msec < 1000)
			msec *= 2;
	}

	if (aread) {
		netdev_err(net, "Ring buffer not empty after closing rndis\n");
		ret = -ETIMEDOUT;
	}

	return ret;
}
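/*
 * For reference: every function in this section leans on
 * hv_get_ringbuffer_availbytes().  A minimal sketch of what it does,
 * assuming the shared ring header exposes read_index/write_index and
 * the info struct caches the data-area size in ring_datasize (names
 * taken from the callers below): free write space is the gap from
 * write_index forward to read_index (mod the data size), and the
 * readable bytes are the complement.
 */
static inline void
hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
			     u32 *read, u32 *write)
{
	u32 read_loc, write_loc, dsize;

	/* Capture both indices once before computing with them. */
	read_loc = rbi->ring_buffer->read_index;
	write_loc = rbi->ring_buffer->write_index;
	dsize = rbi->ring_datasize;

	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc)
				       : read_loc - write_loc;
	*read = dsize - *write;
}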
/*
 * hv_ringbuffer_peek()
 *
 * Read without advancing the read index
 */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
		       void *Buffer, u32 buflen)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	unsigned long flags;

	spin_lock_irqsave(&Inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(Inring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&Inring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Convert to byte offset */
	next_read_location = hv_get_next_read_location(Inring_info);

	next_read_location = hv_copyfrom_ringbuffer(Inring_info,
						    Buffer,
						    buflen,
						    next_read_location);

	spin_unlock_irqrestore(&Inring_info->ring_lock, flags);

	return 0;
}
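/*
 * Why peek exists: a channel consumer first peeks the fixed-size
 * packet descriptor, sizes its buffer from it, and only then consumes
 * the packet with hv_ringbuffer_read(), skipping the descriptor via
 * the offset argument.  A hedged sketch of that pattern; the
 * vmpacket_descriptor fields assumed here (len8/offset8 counted in
 * 8-byte units, trans_id) follow the usual VMBus packet framing.
 */
static int recv_one_packet(struct vmbus_channel *channel, void *buffer,
			   u32 bufferlen, u64 *requestid)
{
	struct vmpacket_descriptor desc;
	u32 packetlen, userlen;
	bool signal = false;
	int ret;

	/* Look at the header without consuming it. */
	ret = hv_ringbuffer_peek(&channel->inbound, &desc, sizeof(desc));
	if (ret != 0)
		return ret;

	packetlen = desc.len8 << 3;
	userlen = packetlen - (desc.offset8 << 3);
	if (userlen > bufferlen)
		return -ENOBUFS;

	*requestid = desc.trans_id;

	/* Now consume the payload, skipping past the descriptor. */
	ret = hv_ringbuffer_read(&channel->inbound, buffer, userlen,
				 desc.offset8 << 3, &signal);
	if (ret == 0 && signal)
		vmbus_setevent(channel);	/* kick the host if it asked */

	return ret;
}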
/*
 * Get the percentage of available bytes to write in the ring.
 * The return value is in range from 0 to 100.
 */
static inline u32 hv_ringbuf_avail_percent(
		struct hv_ring_buffer_info *ring_info)
{
	u32 avail_read, avail_write;

	hv_get_ringbuffer_availbytes(ring_info,
				     &avail_read, &avail_write);

	return avail_write * 100 / ring_info->ring_datasize;
}
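/*
 * Typical use of the percentage: transmit flow control in netvsc.
 * A sketch under assumed low/high watermark values and an
 * illustrative helper name; the real driver defines its own
 * thresholds.  Stop the queue when a send leaves the outbound ring
 * nearly full, and wake it once completions have drained it.
 */
#define RING_AVAIL_PERCENT_HIWATER 20	/* assumed value */
#define RING_AVAIL_PERCENT_LOWATER 10	/* assumed value */

static void netvsc_tx_flow_ctrl(struct net_device *net,
				struct vmbus_channel *channel, u16 q_idx)
{
	struct netdev_queue *txq = netdev_get_tx_queue(net, q_idx);
	u32 avail = hv_ringbuf_avail_percent(&channel->outbound);

	if (avail < RING_AVAIL_PERCENT_LOWATER)
		netif_tx_stop_queue(txq);
	else if (netif_tx_queue_stopped(txq) &&
		 avail > RING_AVAIL_PERCENT_HIWATER)
		netif_tx_wake_queue(txq);
}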
/*
 * hv_ringbuffer_read()
 *
 * Read and advance the read index
 */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
		       u32 buflen, u32 offset, bool *signal)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	unsigned long flags;
	u32 old_read;

	if (buflen <= 0)
		return -EINVAL;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	old_read = bytes_avail_toread;

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
		return -EAGAIN;
	}

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    buflen,
						    next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    &prev_indices,
						    sizeof(u64),
						    next_read_location);

	/*
	 * Make sure all reads are done before we update the read index,
	 * since the writer may start writing to the read area once the
	 * read index is updated.
	 */
	mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	*signal = hv_need_to_signal_on_read(old_read, inring_info);

	return 0;
}
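/*
 * Sketch of the read-side signaling check used above.  The writer can
 * advertise, via pending_send_sz in the shared ring header, how much
 * free space it is blocked waiting for; the reader signals only when
 * a read has freed at least that much.  pending_send_sz living in
 * ring_buffer is an assumption consistent with the other fields used
 * here, and old_read goes unused in this simplified version.
 */
static bool hv_need_to_signal_on_read(u32 old_read,
				      struct hv_ring_buffer_info *rbi)
{
	u32 write_loc = rbi->ring_buffer->write_index;
	u32 read_loc = rbi->ring_buffer->read_index;
	u32 pending_sz = rbi->ring_buffer->pending_send_sz;
	u32 cur_write_sz;

	/* If the other end is not blocked on write, don't bother. */
	if (pending_sz == 0)
		return false;

	cur_write_sz = write_loc >= read_loc ?
		rbi->ring_datasize - (write_loc - read_loc) :
		read_loc - write_loc;

	/* Enough room has now been freed for the blocked writer. */
	return cur_write_sz >= pending_sz;
}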
void hv_get_ringbuffer_available_space(struct hv_ring_buffer_info *inring_info,
				       u32 *bytes_avail_toread,
				       u32 *bytes_avail_towrite)
{
	unsigned long flags;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				     bytes_avail_toread,
				     bytes_avail_towrite);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);
}
/*
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
			struct scatterlist *sglist, u32 sgcount, bool *signal)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 totalbytes_towrite = 0;

	struct scatterlist *sg;
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags;

	for_each_sg(sglist, sg, sgcount, i)
	{
		totalbytes_towrite += sg->length;
	}

	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(outring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we think the ring buffer
	 * is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for_each_sg(sglist, sg, sgcount, i)
	{
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   sg_virt(sg),
							   sg->length);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	*signal = hv_need_to_signal(old_write, outring_info);
	return 0;
}
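/*
 * Both write variants finish by asking hv_need_to_signal() whether
 * the host needs an interrupt.  A sketch of that check: signal only
 * when this write made the ring go from empty to non-empty (the
 * write began exactly at the reader's current read index), and only
 * if the reader has not masked interrupts.
 */
static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
	mb();
	if (rbi->ring_buffer->interrupt_mask)
		return false;

	/* Ensure interrupt_mask is read before read_index. */
	rmb();

	/*
	 * The only transition that needs a signal: the ring was empty
	 * (read index == where this write began) and is no longer.
	 */
	return old_write == rbi->ring_buffer->read_index;
}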
u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	u32 read;
	u32 write;

	rbi->ring_buffer->interrupt_mask = 0;
	mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	hv_get_ringbuffer_availbytes(rbi, &read, &write);

	return read;
}
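/*
 * hv_end_read() pairs with hv_begin_read(), which masks the channel
 * interrupt before the consumer drains the ring; re-checking the
 * available bytes after unmasking closes the race with a concurrent
 * writer.  A sketch of the counterpart and of the loop a channel
 * callback typically runs (drain_inbound_ring is illustrative only):
 */
u32 hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	u32 read;
	u32 write;

	rbi->ring_buffer->interrupt_mask = 1;
	mb();

	hv_get_ringbuffer_availbytes(rbi, &read, &write);
	return read;
}

void drain_inbound_ring(struct vmbus_channel *chn);	/* illustrative */

static void channel_callback(void *context)
{
	struct vmbus_channel *chn = context;
	u32 pending = hv_begin_read(&chn->inbound);

	do {
		if (pending)
			drain_inbound_ring(chn);

		/* Unmask and re-check; nonzero means a writer raced us. */
		pending = hv_end_read(&chn->inbound);
	} while (pending);
}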
/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}
/*
 * hv_dump_ring_info()
 *
 * Dump out to console the ring buffer info
 */
void hv_dump_ring_info(struct hv_ring_buffer_info *ring_info, char *prefix)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	hv_get_ringbuffer_availbytes(ring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	DPRINT(VMBUS, DEBUG_RING_LVL,
	       "%s <<ringinfo %p buffer %p avail write %u "
	       "avail read %u read idx %u write idx %u>>",
	       prefix,
	       ring_info,
	       ring_info->ring_buffer->buffer,
	       bytes_avail_towrite,
	       bytes_avail_toread,
	       ring_info->ring_buffer->read_index,
	       ring_info->ring_buffer->write_index);
}
/*
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
			struct kvec *kv_list, u32 kv_count, bool *signal)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 totalbytes_towrite = 0;

	u32 next_write_location;
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(outring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we think the ring buffer
	 * is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	*signal = hv_need_to_signal(old_write, outring_info);
	return 0;
}
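/*
 * Usage sketch for the kvec variant: a VMBus send builds a short kvec
 * of descriptor + payload + pad-to-8-bytes and hands it to
 * hv_ringbuffer_write() in one call, so the whole packet lands in the
 * ring under a single lock hold.  The vmpacket_descriptor framing
 * (len8/offset8 in 8-byte units) matches the peek/read sketch above;
 * send_one_packet itself is an illustrative name.
 */
static int send_one_packet(struct vmbus_channel *channel, void *buffer,
			   u32 bufferlen, u64 requestid)
{
	struct vmpacket_descriptor desc;
	u32 packetlen = sizeof(desc) + bufferlen;
	u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	bool signal = false;
	int ret;

	/* Set up the descriptor that prefixes the payload. */
	desc.type = VM_PKT_DATA_INBAND;
	desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc.offset8 = sizeof(desc) >> 3;	/* payload offset, 8-byte units */
	desc.len8 = (u16)(packetlen_aligned >> 3);
	desc.trans_id = requestid;

	bufferlist[0].iov_base = &desc;
	bufferlist[0].iov_len = sizeof(desc);
	bufferlist[1].iov_base = buffer;
	bufferlist[1].iov_len = bufferlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = packetlen_aligned - packetlen;

	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
	if (ret == 0 && signal)
		vmbus_setevent(channel);	/* interrupt the host */

	return ret;
}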