/*
 * netvsc_send_pkt - build the NVSP "send RNDIS packet" message for one
 * packet and place it on the VMBus channel already selected for it.
 */
static inline int netvsc_send_pkt(
	struct hv_netvsc_packet *packet,
	struct netvsc_device *net_device)
{
	struct nvsp_message nvmsg;
	struct vmbus_channel *out_channel = packet->channel;
	u16 q_idx = packet->q_idx;
	struct net_device *ndev = net_device->ndev;
	u64 req_id;
	int ret;
	struct hv_page_buffer *pgbuf;
	u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);

	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (packet->is_data_pkt) {
		/* 0 is RMC_DATA; */
		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 0;
	} else {
		/* 1 is RMC_CONTROL; */
		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 1;
	}

	nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
		packet->send_buf_index;
	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
	else
		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size =
			packet->total_data_buflen;

	/* Use the packet pointer as the transaction id when a
	 * send-completion callback is expected.
	 */
	if (packet->send_completion)
		req_id = (ulong)packet;
	else
		req_id = 0;

	if (out_channel->rescind)
		return -ENODEV;

	/*
	 * It is possible that once we successfully place this packet
	 * on the ringbuffer, we may stop the queue. In that case, we want
	 * to notify the host independent of the xmit_more flag. We don't
	 * need to be precise here; in the worst case we may signal the host
	 * unnecessarily.
	 */
	if (ring_avail < (RING_AVAIL_PERCENT_LOWATER + 1))
		packet->xmit_more = false;

	if (packet->page_buf_cnt) {
		/* Data is described by a page-buffer list; when cp_partial
		 * is set, skip the entries already copied into the send buffer.
		 */
		pgbuf = packet->cp_partial ? packet->page_buf +
			packet->rmsg_pgcnt : packet->page_buf;
		ret = vmbus_sendpacket_pagebuffer_ctl(out_channel,
						      pgbuf,
						      packet->page_buf_cnt,
						      &nvmsg,
						      sizeof(struct nvsp_message),
						      req_id,
						      VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
						      !packet->xmit_more);
	} else {
		/* Everything was copied into the send buffer; only the NVSP
		 * message itself goes inband.
		 */
		ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
					   sizeof(struct nvsp_message),
					   req_id,
					   VM_PKT_DATA_INBAND,
					   VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
					   !packet->xmit_more);
	}

	if (ret == 0) {
		atomic_inc(&net_device->num_outstanding_sends);
		atomic_inc(&net_device->queue_sends[q_idx]);

		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
			/* Ring buffer is nearly full; stop the queue, but
			 * re-wake it if every outstanding send on this queue
			 * has already completed (no completion is left to
			 * wake it later).
			 */
			netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));

			if (atomic_read(&net_device->queue_sends[q_idx]) < 1)
				netif_tx_wake_queue(netdev_get_tx_queue(
						    ndev, q_idx));
		}
	} else if (ret == -EAGAIN) {
		/* Ring buffer is full; stop the queue. If nothing is in
		 * flight to wake it later, wake it now and report -ENOSPC.
		 */
		netif_tx_stop_queue(netdev_get_tx_queue(
				    ndev, q_idx));
		if (atomic_read(&net_device->queue_sends[q_idx]) < 1) {
			netif_tx_wake_queue(netdev_get_tx_queue(
					    ndev, q_idx));
			ret = -ENOSPC;
		}
	} else {
		netdev_err(ndev, "Unable to send packet %p ret %d\n",
			   packet, ret);
	}

	return ret;
}
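/*
 * netvsc_send - queue one outbound packet toward the Hyper-V host.
 * Small packets are copied into a pre-allocated send-buffer section;
 * larger ones are passed as a page-buffer list. If the VMBus send
 * fails, any reserved send-buffer slot is released.
 */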
int netvsc_send(struct hv_device *device,
		struct hv_netvsc_packet *packet,
		bool kick_q)
{
	struct netvsc_device *net_device;
	int ret = 0;
	struct nvsp_message sendMessage;
	struct net_device *ndev;
	struct vmbus_channel *out_channel = NULL;
	u64 req_id;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	u32 msg_size = 0;
	u16 q_idx = packet->q_idx;
	u32 vmbus_flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
	ndev = net_device->ndev;

	sendMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (packet->is_data_pkt) {
		/* 0 is RMC_DATA; */
		sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 0;
	} else {
		/* 1 is RMC_CONTROL; */
		sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
	}

	/* Attempt to send via sendbuf: packets smaller than one send-buffer
	 * section are copied there instead of being described by a
	 * page-buffer list.
	 */
	if (packet->total_data_buflen < net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (section_index != NETVSC_INVALID_INDEX) {
			msg_size = netvsc_copy_to_send_buf(net_device,
							   section_index,
							   packet);
			packet->page_buf_cnt = 0;
		}
	}
	packet->send_buf_index = section_index;

	sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
		section_index;
	sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = msg_size;

	/* Use the packet pointer as the transaction id when a
	 * send-completion callback is expected.
	 */
	if (packet->send_completion)
		req_id = (ulong)packet;
	else
		req_id = 0;

	/* Pick the channel for this transmit queue, falling back to the
	 * primary channel if no subchannel has been opened yet.
	 */
	out_channel = net_device->chn_table[packet->q_idx];
	if (out_channel == NULL)
		out_channel = device->channel;
	packet->channel = out_channel;

	if (out_channel->rescind)
		return -ENODEV;

	if (packet->page_buf_cnt) {
		ret = vmbus_sendpacket_pagebuffer_ctl(out_channel,
						      packet->page_buf,
						      packet->page_buf_cnt,
						      &sendMessage,
						      sizeof(struct nvsp_message),
						      req_id,
						      vmbus_flags,
						      kick_q);
	} else {
		ret = vmbus_sendpacket_ctl(out_channel, &sendMessage,
					   sizeof(struct nvsp_message),
					   req_id,
					   VM_PKT_DATA_INBAND,
					   vmbus_flags,
					   kick_q);
	}

	if (ret == 0) {
		atomic_inc(&net_device->num_outstanding_sends);
		atomic_inc(&net_device->queue_sends[q_idx]);

		if (hv_ringbuf_avail_percent(&out_channel->outbound) <
		    RING_AVAIL_PERCENT_LOWATER) {
			/* Ring buffer is nearly full; stop the queue, but
			 * re-wake it if every outstanding send on this queue
			 * has already completed.
			 */
			netif_tx_stop_queue(netdev_get_tx_queue(
					    ndev, q_idx));

			if (atomic_read(&net_device->queue_sends[q_idx]) < 1)
				netif_tx_wake_queue(netdev_get_tx_queue(
						    ndev, q_idx));
		}
	} else if (ret == -EAGAIN) {
		/* Ring buffer is full; stop the queue. If nothing is in
		 * flight to wake it later, wake it now and report -ENOSPC.
		 */
		netif_tx_stop_queue(netdev_get_tx_queue(
				    ndev, q_idx));
		if (atomic_read(&net_device->queue_sends[q_idx]) < 1) {
			netif_tx_wake_queue(netdev_get_tx_queue(
					    ndev, q_idx));
			ret = -ENOSPC;
		}
	} else {
		netdev_err(ndev, "Unable to send packet %p ret %d\n",
			   packet, ret);
	}

	/* The send failed; return the reserved send-buffer slot, if any. */
	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, section_index);

	return ret;
}