static uint16_t nvme_map_prp(QEMUSGList *qsg, uint64_t prp1, uint64_t prp2,
                             uint32_t len, NvmeCtrl *n)
{
    /* The first PRP may be unaligned; only the tail of its page is usable. */
    hwaddr trans_len = n->page_size - (prp1 % n->page_size);
    trans_len = MIN(len, trans_len);
    int num_prps = (len >> n->page_bits) + 1;

    if (!prp1) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    pci_dma_sglist_init(qsg, &n->parent_obj, num_prps);
    qemu_sglist_add(qsg, prp1, trans_len);
    len -= trans_len;
    if (len) {
        if (!prp2) {
            goto unmap;
        }
        if (len > n->page_size) {
            /* More than one page remains: prp2 points to a PRP list. */
            uint64_t prp_list[n->max_prp_ents];
            uint32_t nents, prp_trans;
            int i = 0;

            nents = (len + n->page_size - 1) >> n->page_bits;
            prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
            pci_dma_read(&n->parent_obj, prp2, (void *)prp_list, prp_trans);
            while (len != 0) {
                uint64_t prp_ent = le64_to_cpu(prp_list[i]);

                if (i == n->max_prp_ents - 1 && len > n->page_size) {
                    /* Last slot of a full PRP list chains to the next list page. */
                    if (!prp_ent || prp_ent & (n->page_size - 1)) {
                        goto unmap;
                    }

                    i = 0;
                    nents = (len + n->page_size - 1) >> n->page_bits;
                    prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
                    pci_dma_read(&n->parent_obj, prp_ent, (void *)prp_list,
                                 prp_trans);
                    prp_ent = le64_to_cpu(prp_list[i]);
                }

                if (!prp_ent || prp_ent & (n->page_size - 1)) {
                    goto unmap;
                }

                trans_len = MIN(len, n->page_size);
                qemu_sglist_add(qsg, prp_ent, trans_len);
                len -= trans_len;
                i++;
            }
        } else {
            /* Exactly one more page: prp2 is a direct data pointer. */
            if (prp2 & (n->page_size - 1)) {
                goto unmap;
            }
            qemu_sglist_add(qsg, prp2, len);
        }
    }
    return NVME_SUCCESS;

unmap:
    qemu_sglist_destroy(qsg);
    return NVME_INVALID_FIELD | NVME_DNR;
}
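A caller typically pulls prp1/prp2 out of the submission-queue entry, builds the scatter-gather list with nvme_map_prp(), hands it to the DMA layer, and destroys it once the transfer completes. The helper below is a hypothetical sketch of that flow; the name nvme_prp_example and the way the command fields are fetched are assumptions, not part of the snippet above.

/* Hypothetical caller sketch: map a command's PRPs into a QEMUSGList and
 * tear the list down again.  Everything except nvme_map_prp() itself is
 * illustrative. */
static uint16_t nvme_prp_example(NvmeCtrl *n, NvmeCmd *cmd, uint32_t len)
{
    QEMUSGList qsg;
    uint64_t prp1 = le64_to_cpu(cmd->prp1);
    uint64_t prp2 = le64_to_cpu(cmd->prp2);

    if (nvme_map_prp(&qsg, prp1, prp2, len, n)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    /* ... hand qsg to dma_blk_read()/dma_blk_write() or walk qsg.sg ... */

    qemu_sglist_destroy(&qsg);
    return NVME_SUCCESS;
}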
static void edu_dma_timer(void *opaque)
{
    EduState *edu = opaque;
    bool raise_irq = false;

    if (!(edu->dma.cmd & EDU_DMA_RUN)) {
        return;
    }

    if (EDU_DMA_DIR(edu->dma.cmd) == EDU_DMA_FROM_PCI) {
        uint32_t dst = edu->dma.dst;
        edu_check_range(dst, edu->dma.cnt, DMA_START, DMA_SIZE);
        dst -= DMA_START;
        pci_dma_read(&edu->pdev, edu_clamp_addr(edu, edu->dma.src),
                     edu->dma_buf + dst, edu->dma.cnt);
    } else {
        uint32_t src = edu->dma.src;
        edu_check_range(src, edu->dma.cnt, DMA_START, DMA_SIZE);
        src -= DMA_START;
        pci_dma_write(&edu->pdev, edu_clamp_addr(edu, edu->dma.dst),
                      edu->dma_buf + src, edu->dma.cnt);
    }

    edu->dma.cmd &= ~EDU_DMA_RUN;
    if (edu->dma.cmd & EDU_DMA_IRQ) {
        raise_irq = true;
    }

    if (raise_irq) {
        edu_raise_irq(edu, DMA_IRQ);
    }
}
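The timer relies on two helpers that the snippet does not show: a range check that keeps the transfer inside the device-local DMA window and a clamp that masks guest addresses to what the device can reach. The bodies below are an illustrative sketch of that behaviour, assuming an EduState with a dma_mask field; they are not presented as the exact upstream implementations.

/* Sketch: stop emulation if [addr, addr + size1) does not fit inside the
 * window [start, start + size2).  The end1 >= addr test guards against
 * addr + size1 wrapping around. */
static void edu_check_range(uint64_t addr, uint64_t size1,
                            uint64_t start, uint64_t size2)
{
    uint64_t end1 = addr + size1;
    uint64_t end2 = start + size2;

    if (end1 >= addr && end1 <= end2 && start <= addr) {
        return;
    }
    hw_error("EDU: DMA range 0x%" PRIx64 "-0x%" PRIx64 " out of bounds!",
             addr, end1 - 1);
}

/* Sketch: restrict DMA to the addresses the device can generate by masking
 * with its DMA mask. */
static dma_addr_t edu_clamp_addr(const EduState *edu, dma_addr_t addr)
{
    return addr & edu->dma_mask;
}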
static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
{
    if (n->cmbsz && addr >= n->ctrl_mem.addr &&
        addr < (n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size))) {
        memcpy(buf, (void *)&n->cmbuf[addr - n->ctrl_mem.addr], size);
    } else {
        pci_dma_read(&n->parent_obj, addr, buf, size);
    }
}
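The write direction follows the same shape: if the address falls inside the Controller Memory Buffer, the data is copied straight into the controller's backing buffer, otherwise it goes out through PCI DMA. The mirror helper below is a sketch; the name nvme_addr_write is assumed rather than taken from the snippet.

static void nvme_addr_write(NvmeCtrl *n, hwaddr addr, void *buf, int size)
{
    if (n->cmbsz && addr >= n->ctrl_mem.addr &&
        addr < (n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size))) {
        /* Target lies inside the CMB: write the backing buffer directly. */
        memcpy((void *)&n->cmbuf[addr - n->ctrl_mem.addr], buf, size);
    } else {
        /* Otherwise write guest memory through the PCI address space. */
        pci_dma_write(&n->parent_obj, addr, buf, size);
    }
}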
static DescInfo *desc_read(DescRing *ring, uint32_t index)
{
    PCIDevice *dev = PCI_DEVICE(ring->r);
    DescInfo *info = &ring->info[index];
    hwaddr addr = ring->base_addr + (sizeof(RockerDesc) * index);

    pci_dma_read(dev, addr, &info->desc, sizeof(info->desc));
    return info;
}
char *desc_get_buf(DescInfo *info, bool read_only)
{
    PCIDevice *dev = PCI_DEVICE(info->ring->r);
    size_t size = read_only ? le16_to_cpu(info->desc.tlv_size) :
                              le16_to_cpu(info->desc.buf_size);

    if (size > info->buf_size) {
        info->buf = g_realloc(info->buf, size);
        info->buf_size = size;
    }

    pci_dma_read(dev, le64_to_cpu(info->desc.buf_addr), info->buf, size);
    return info->buf;
}
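Completing a descriptor runs the same machinery in reverse: the device fills info->buf, records how many TLV bytes are valid, and pushes the buffer back to the guest with pci_dma_write. The counterpart below is a sketch modelled on the read path above; the name desc_set_buf and the silent truncation on overflow are assumptions.

static void desc_set_buf(DescInfo *info, size_t tlv_size)
{
    PCIDevice *dev = PCI_DEVICE(info->ring->r);

    /* Never write more than the guest-provided buffer can hold. */
    if (tlv_size > info->buf_size) {
        tlv_size = info->buf_size;
    }

    /* Record the valid length, then copy the buffer back to guest memory. */
    info->desc.tlv_size = cpu_to_le16(tlv_size);
    pci_dma_write(dev, le64_to_cpu(info->desc.buf_addr), info->buf, tlv_size);
}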
static int tx_consume(Rocker *r, DescInfo *info)
{
    PCIDevice *dev = PCI_DEVICE(r);
    char *buf = desc_get_buf(info, true);
    RockerTlv *tlv_frag;
    RockerTlv *tlvs[ROCKER_TLV_TX_MAX + 1];
    struct iovec iov[ROCKER_TX_FRAGS_MAX] = { { 0, }, };
    uint32_t pport;
    uint32_t port;
    uint16_t tx_offload = ROCKER_TX_OFFLOAD_NONE;
    uint16_t tx_l3_csum_off = 0;
    uint16_t tx_tso_mss = 0;
    uint16_t tx_tso_hdr_len = 0;
    int iovcnt = 0;
    int err = ROCKER_OK;
    int rem;
    int i;

    if (!buf) {
        return -ROCKER_ENXIO;
    }

    rocker_tlv_parse(tlvs, ROCKER_TLV_TX_MAX, buf, desc_tlv_size(info));

    if (!tlvs[ROCKER_TLV_TX_FRAGS]) {
        return -ROCKER_EINVAL;
    }

    pport = rocker_get_pport_by_tx_ring(r, desc_get_ring(info));
    if (!fp_port_from_pport(pport, &port)) {
        return -ROCKER_EINVAL;
    }

    if (tlvs[ROCKER_TLV_TX_OFFLOAD]) {
        tx_offload = rocker_tlv_get_u8(tlvs[ROCKER_TLV_TX_OFFLOAD]);
    }

    switch (tx_offload) {
    case ROCKER_TX_OFFLOAD_L3_CSUM:
        if (!tlvs[ROCKER_TLV_TX_L3_CSUM_OFF]) {
            return -ROCKER_EINVAL;
        }
        break;
    case ROCKER_TX_OFFLOAD_TSO:
        if (!tlvs[ROCKER_TLV_TX_TSO_MSS] ||
            !tlvs[ROCKER_TLV_TX_TSO_HDR_LEN]) {
            return -ROCKER_EINVAL;
        }
        break;
    }

    if (tlvs[ROCKER_TLV_TX_L3_CSUM_OFF]) {
        tx_l3_csum_off = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_L3_CSUM_OFF]);
    }

    if (tlvs[ROCKER_TLV_TX_TSO_MSS]) {
        tx_tso_mss = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_TSO_MSS]);
    }

    if (tlvs[ROCKER_TLV_TX_TSO_HDR_LEN]) {
        tx_tso_hdr_len = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_TSO_HDR_LEN]);
    }

    /* Each fragment is DMA'd from guest memory into a locally allocated
     * iovec entry before the frame is handed to the egress port. */
    rocker_tlv_for_each_nested(tlv_frag, tlvs[ROCKER_TLV_TX_FRAGS], rem) {
        hwaddr frag_addr;
        uint16_t frag_len;

        if (rocker_tlv_type(tlv_frag) != ROCKER_TLV_TX_FRAG) {
            err = -ROCKER_EINVAL;
            goto err_bad_attr;
        }

        rocker_tlv_parse_nested(tlvs, ROCKER_TLV_TX_FRAG_ATTR_MAX, tlv_frag);

        if (!tlvs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
            !tlvs[ROCKER_TLV_TX_FRAG_ATTR_LEN]) {
            err = -ROCKER_EINVAL;
            goto err_bad_attr;
        }

        frag_addr = rocker_tlv_get_le64(tlvs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
        frag_len = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);

        if (iovcnt >= ROCKER_TX_FRAGS_MAX) {
            goto err_too_many_frags;
        }
        iov[iovcnt].iov_len = frag_len;
        iov[iovcnt].iov_base = g_malloc(frag_len);
        if (!iov[iovcnt].iov_base) {
            err = -ROCKER_ENOMEM;
            goto err_no_mem;
        }

        if (pci_dma_read(dev, frag_addr, iov[iovcnt].iov_base,
                         iov[iovcnt].iov_len)) {
            err = -ROCKER_ENXIO;
            goto err_bad_io;
        }
        iovcnt++;
    }

    if (iovcnt) {
        /* XXX perform Tx offloads */
        /* XXX   silence compiler for now */
        tx_l3_csum_off += tx_tso_mss = tx_tso_hdr_len = 0;
    }

    err = fp_port_eg(r->fp_port[port], iov, iovcnt);

err_too_many_frags:
err_bad_io:
err_no_mem:
err_bad_attr:
    /* Fragment buffers are freed on both the success and error paths. */
    for (i = 0; i < ROCKER_TX_FRAGS_MAX; i++) {
        g_free(iov[i].iov_base);
    }

    return err;
}