static void vmbus_dump_ring(FILE *f, const char *id, const struct vmbus_br *br) { const struct vmbus_bufring *vbr = br->vbr; struct vmbus_chanpkt_hdr pkt; fprintf(f, "%s windex=%u rindex=%u mask=%u pending=%u feature=%#x\n", id, vbr->windex, vbr->rindex, vbr->imask, vbr->pending_send, vbr->feature_bits.value); fprintf(f, " size=%u avail write=%u read=%u\n", br->dsize, vmbus_br_availwrite(br, vbr->windex), vmbus_br_availread(br)); if (vmbus_rxbr_peek(br, &pkt, sizeof(pkt)) == 0) fprintf(f, " pkt type %#x len %u flags %#x xactid %#"PRIx64"\n", pkt.type, pkt.tlen << VMBUS_CHANPKT_SIZE_SHIFT, pkt.flags, pkt.xactid); }
/*
 * Read one packet's data from the channel's RX bufring, then decide
 * whether the host must be signaled that ring space was freed.
 *
 * @param chan  channel whose RX ring is read
 * @param data  destination buffer for the payload
 * @param dlen  number of payload bytes to read
 * @param skip  leading bytes (packet header) to skip before the payload
 * @return 0 on success, or the error from vmbus_rxbr_read()
 *
 * The host only needs a wakeup when it advertises a nonzero
 * pending_send size and our read freed enough space to unblock it.
 */
static int vmbus_read_and_signal(struct vmbus_channel *chan,
				 void *data, size_t dlen, size_t skip)
{
	struct vmbus_br *rbr = &chan->rxbr;
	uint32_t write_sz, pending_sz, bytes_read;
	int error;

	/* Record where host was when we started read (for debug) */
	rbr->windex = rbr->vbr->windex;

	/* Read data and skip packet header */
	error = vmbus_rxbr_read(rbr, data, dlen, skip);
	if (error)
		return error;

	/* No need for signaling on older versions */
	if (!rbr->vbr->feature_bits.feat_pending_send_sz)
		return 0;

	/* Make sure reading of pending happens after new read index */
	rte_mb();

	/* Host is not blocked waiting for space, nothing to signal */
	pending_sz = rbr->vbr->pending_send;
	if (!pending_sz)
		return 0;

	rte_smp_rmb();
	write_sz = vmbus_br_availwrite(rbr, rbr->vbr->windex);
	/*
	 * Total bytes this read freed: payload + header + the 64-bit
	 * per-packet offset trailer appended by the TX write path.
	 */
	bytes_read = dlen + skip + sizeof(uint64_t);

	/* If there was space before then host was not blocked */
	if (write_sz - bytes_read > pending_sz)
		return 0;

	/* If pending write will not fit */
	if (write_sz <= pending_sz)
		return 0;

	/* Our read crossed the threshold that unblocks the host */
	vmbus_set_event(chan->device, chan);
	return 0;
}
/*
 * Write scattered channel packet to TX bufring.
 *
 * The offset of this channel packet is written as a 64bits value
 * immediately after this channel packet.
 *
 * The write goes through three stages:
 *  1. Reserve space in ring buffer for the new data.
 *     Writer atomically moves priv_write_index.
 *  2. Copy the new data into the ring.
 *  3. Update the tail of the ring (visible to host) that indicates
 *     next read location. Writer updates write_index
 *
 * @param tbr       TX bufring to write into
 * @param iov       scatter list describing the packet contents
 * @param iovlen    number of entries in @iov
 * @param need_sig  OR-ed with true if the caller must signal the host
 *                  (host had consumed everything before this write)
 * @return 0 on success, -EAGAIN if the ring lacks space for the
 *         packet plus its 64-bit offset trailer
 */
int vmbus_txbr_write(struct vmbus_br *tbr, const struct iovec iov[],
		     int iovlen, bool *need_sig)
{
	struct vmbus_bufring *vbr = tbr->vbr;
	uint32_t ring_size = tbr->dsize;
	uint32_t old_windex, next_windex, windex, total;
	uint64_t save_windex;
	int i;

	/* Bytes needed: all iov segments plus the trailing offset word */
	total = 0;
	for (i = 0; i < iovlen; i++)
		total += iov[i].iov_len;
	total += sizeof(save_windex);

	/* Reserve space in ring */
	do {
		uint32_t avail;

		/* Get current free location */
		old_windex = tbr->windex;

		/* Prevent compiler reordering this with calculation */
		rte_compiler_barrier();

		avail = vmbus_br_availwrite(tbr, old_windex);

		/* If not enough space in ring, then tell caller. */
		if (avail <= total)
			return -EAGAIN;

		next_windex = vmbus_br_idxinc(old_windex, total, ring_size);

		/* Atomic update of next write_index for other threads */
	} while (!rte_atomic32_cmpset(&tbr->windex, old_windex, next_windex));

	/* Space from old..new is now reserved */
	windex = old_windex;
	for (i = 0; i < iovlen; i++) {
		windex = vmbus_txbr_copyto(tbr, windex,
					   iov[i].iov_base, iov[i].iov_len);
	}

	/* Set the offset of the current channel packet. */
	save_windex = ((uint64_t)old_windex) << 32;
	windex = vmbus_txbr_copyto(tbr, windex, &save_windex,
				   sizeof(save_windex));

	/* The region reserved should match region used */
	RTE_ASSERT(windex == next_windex);

	/* Ensure that data is available before updating host index */
	rte_smp_wmb();

	/*
	 * Checkin for our reservation: wait for our turn to update host-
	 * visible windex.  Writers that reserved earlier ranges commit
	 * first, so we spin until vbr->windex reaches our old_windex.
	 */
	while (!rte_atomic32_cmpset(&vbr->windex, old_windex, next_windex))
		rte_pause();

	/* If host had read all data before this, then need to signal */
	*need_sig |= vmbus_txbr_need_signal(tbr, old_windex);

	return 0;
}