/**
 * Get buffer descriptor from the receive buffer list page that has been
 * supplied by the guest with the H_REGISTER_LOGICAL_LAN call.
 *
 * Walks the BD slots in the buffer-list page as a ring, starting just after
 * the slot consumed last time (dev->use_buf_ptr), looking for a valid
 * descriptor large enough for @size plus the 8-byte handle header.  On
 * success the slot is cleared (removed from the pool) and the descriptor is
 * returned; 0 is returned when no suitable buffer exists.
 */
static vlan_bd_t spapr_vlan_get_rx_bd_from_page(VIOsPAPRVLANDevice *dev,
                                                size_t size)
{
    int ptr = dev->use_buf_ptr;
    vlan_bd_t bd;
    bool usable;

    for (;;) {
        /* Advance one 8-byte slot, wrapping back to the start of the BDs */
        ptr += 8;
        if (ptr >= VLAN_RX_BDS_LEN + VLAN_RX_BDS_OFF) {
            ptr = VLAN_RX_BDS_OFF;
        }

        bd = vio_ldq(&dev->sdev, dev->buf_list + ptr);
        trace_spapr_vlan_get_rx_bd_from_page(ptr, (uint64_t)bd);

        /* A usable BD is valid and can hold the frame plus its handle */
        usable = (bd & VLAN_BD_VALID) && VLAN_BD_LEN(bd) >= size + 8;
        if (usable || ptr == dev->use_buf_ptr) {
            /* Stop on success, or once the whole ring has been scanned */
            break;
        }
    }

    if (!usable) {
        /* Failed to find a suitable buffer */
        return 0;
    }

    /* Remove the buffer from the pool */
    dev->use_buf_ptr = ptr;
    vio_stq(&dev->sdev, dev->buf_list + dev->use_buf_ptr, 0);

    trace_spapr_vlan_get_rx_bd_from_page_found(dev->use_buf_ptr, dev->rx_bufs);

    return bd;
}
/*
 * Validate a guest-supplied buffer descriptor: both its address and its
 * length must be multiples of @alignment, and the described region must be
 * covered by the device's TCE window for read/write access.
 *
 * Returns 0 if the descriptor is acceptable, -1 otherwise.
 */
static int check_bd(VIOsPAPRVLANDevice *dev, vlan_bd_t bd,
                    target_ulong alignment)
{
    /*
     * The TCE check only runs once both alignment tests pass, matching the
     * short-circuit order of the checks.
     */
    if ((VLAN_BD_ADDR(bd) % alignment) == 0
        && (VLAN_BD_LEN(bd) % alignment) == 0
        && spapr_vio_check_tces(&dev->sdev, VLAN_BD_ADDR(bd),
                                VLAN_BD_LEN(bd), SPAPR_TCE_RW) == 0) {
        return 0;
    }

    return -1;
}
/*
 * Validate a guest-supplied buffer descriptor: address and length must be
 * multiples of @alignment, and the region must be DMA-mappable in both
 * directions (device-to-guest and guest-to-device).
 *
 * Returns 0 if the descriptor is acceptable, -1 otherwise.
 */
static int check_bd(VIOsPAPRVLANDevice *dev, vlan_bd_t bd,
                    target_ulong alignment)
{
    uint64_t addr = VLAN_BD_ADDR(bd);
    uint64_t len = VLAN_BD_LEN(bd);

    if ((addr % alignment) != 0 || (len % alignment) != 0) {
        return -1;
    }

    /* FROM_DEVICE is probed first, then TO_DEVICE, as before */
    if (!spapr_vio_dma_valid(&dev->sdev, addr, len,
                             DMA_DIRECTION_FROM_DEVICE)) {
        return -1;
    }
    if (!spapr_vio_dma_valid(&dev->sdev, addr, len,
                             DMA_DIRECTION_TO_DEVICE)) {
        return -1;
    }

    return 0;
}
/**
 * Enqueuing receive buffer by adding it to one of our receive buffer pools
 *
 * Looks up the pool matching the descriptor's buffer size; if none exists,
 * tries to repurpose an empty pool for this size.  Returns 0 on success or
 * H_RESOURCE when no pool can take the buffer.
 */
static target_long spapr_vlan_add_rxbuf_to_pool(VIOsPAPRVLANDevice *dev,
                                                target_ulong buf)
{
    int size = VLAN_BD_LEN(buf);
    int pool;

    pool = spapr_vlan_get_rx_pool_id(dev, size);
    if (pool < 0) {
        /*
         * No matching pool found? Try to use a new one. If the guest used all
         * pools before, but changed the size of one pool inbetween, we might
         * need to recycle that pool here (if it's empty already). Thus scan
         * all buffer pools now, starting with the last (likely empty) one.
         */
        for (pool = RX_MAX_POOLS - 1; pool >= 0 ; pool--) {
            if (dev->rx_pool[pool]->count == 0) {
                dev->rx_pool[pool]->bufsize = size;
                /*
                 * Sort pools by size so that spapr_vlan_receive()
                 * can later find the smallest buffer pool easily.
                 */
                qsort(dev->rx_pool, RX_MAX_POOLS, sizeof(dev->rx_pool[0]),
                      rx_pool_size_compare);
                /*
                 * qsort just moved the pool entries around, so the index of
                 * the pool we repurposed must be looked up again by size.
                 */
                pool = spapr_vlan_get_rx_pool_id(dev, size);
                trace_spapr_vlan_add_rxbuf_to_pool_create(pool,
                                                          VLAN_BD_LEN(buf));
                break;
            }
        }
    }
    /* Still no usable pool? Give up */
    if (pool < 0 || dev->rx_pool[pool]->count >= RX_POOL_MAX_BDS) {
        return H_RESOURCE;
    }

    trace_spapr_vlan_add_rxbuf_to_pool(pool, VLAN_BD_LEN(buf),
                                       dev->rx_pool[pool]->count);

    /* Append the descriptor to the chosen pool */
    dev->rx_pool[pool]->bds[dev->rx_pool[pool]->count++] = buf;

    return 0;
}
/*
 * H_ADD_LOGICAL_LAN_BUFFER hcall: the guest donates one receive buffer
 * descriptor, which is stored in the first free slot of the buffer-list
 * page registered via H_REGISTER_LOGICAL_LAN.
 *
 * Returns H_SUCCESS, H_PARAMETER on a bad device or descriptor, or
 * H_RESOURCE when the device is closed or the buffer count is at its limit.
 */
static target_ulong h_add_logical_lan_buffer(PowerPCCPU *cpu,
                                             sPAPREnvironment *spapr,
                                             target_ulong opcode,
                                             target_ulong *args)
{
    target_ulong reg = args[0];
    target_ulong buf = args[1];
    VIOsPAPRDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
    VIOsPAPRVLANDevice *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
    vlan_bd_t bd;

    DPRINTF("H_ADD_LOGICAL_LAN_BUFFER(0x" TARGET_FMT_lx ", 0x"
            TARGET_FMT_lx ")\n", reg, buf);

    if (!sdev) {
        hcall_dprintf("Bad device\n");
        return H_PARAMETER;
    }

    /* Descriptor must be 4-byte aligned and at least 16 bytes long */
    if ((check_bd(dev, buf, 4) < 0)
        || (VLAN_BD_LEN(buf) < 16)) {
        hcall_dprintf("Bad buffer enqueued\n");
        return H_PARAMETER;
    }

    if (!dev->isopen || dev->rx_bufs >= VLAN_MAX_BUFS) {
        return H_RESOURCE;
    }

    /*
     * Scan the BD area of the buffer-list page for a free (invalid) slot.
     * NOTE(review): termination presumably relies on the rx_bufs <
     * VLAN_MAX_BUFS check above guaranteeing at least one invalid slot —
     * confirm VLAN_MAX_BUFS matches the slot count.
     */
    do {
        dev->add_buf_ptr += 8;
        if (dev->add_buf_ptr >= (VLAN_RX_BDS_LEN + VLAN_RX_BDS_OFF)) {
            dev->add_buf_ptr = VLAN_RX_BDS_OFF;
        }

        bd = vio_ldq(sdev, dev->buf_list + dev->add_buf_ptr);
    } while (bd & VLAN_BD_VALID);

    /* Publish the new descriptor into the free slot */
    vio_stq(sdev, dev->buf_list + dev->add_buf_ptr, buf);

    dev->rx_bufs++;

    /* A buffer is now available, so retry any queued incoming packets */
    qemu_flush_queued_packets(qemu_get_queue(dev->nic));

    DPRINTF("h_add_logical_lan_buffer(): Added buf ptr=%d rx_bufs=%d"
            " bd=0x%016llx\n", dev->add_buf_ptr, dev->rx_bufs,
            (unsigned long long)buf);

    return H_SUCCESS;
}
/*
 * H_ADD_LOGICAL_LAN_BUFFER hcall: accept one receive buffer descriptor from
 * the guest and hand it to either the buffer-pool or the legacy
 * buffer-list-page backend, depending on the device's compat flags.
 *
 * Returns H_SUCCESS, H_PARAMETER on a bad device or descriptor, H_RESOURCE
 * when the device is not open, or the backend's error code.
 */
static target_ulong h_add_logical_lan_buffer(PowerPCCPU *cpu,
                                             sPAPRMachineState *spapr,
                                             target_ulong opcode,
                                             target_ulong *args)
{
    target_ulong reg = args[0];
    target_ulong buf = args[1];
    VIOsPAPRDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
    VIOsPAPRVLANDevice *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
    target_long rc;

    trace_spapr_vlan_h_add_logical_lan_buffer(reg, buf);

    if (!sdev) {
        hcall_dprintf("Bad device\n");
        return H_PARAMETER;
    }

    /* Descriptor must be 4-byte aligned and at least 16 bytes long */
    if (check_bd(dev, buf, 4) < 0 || VLAN_BD_LEN(buf) < 16) {
        hcall_dprintf("Bad buffer enqueued\n");
        return H_PARAMETER;
    }

    if (!dev->isopen) {
        return H_RESOURCE;
    }

    /* Dispatch to the backend selected by the compat flags */
    rc = (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS)
         ? spapr_vlan_add_rxbuf_to_pool(dev, buf)
         : spapr_vlan_add_rxbuf_to_page(dev, buf);
    if (rc) {
        return rc;
    }

    dev->rx_bufs++;

    /*
     * Give the guest some more time to add additional RX buffers before
     * flushing the receive queue, so that e.g. fragmented IP packets can
     * later be passed to the guest in one go (instead of delivering single
     * fragments when only one receive buffer is available).  The deadline is
     * the current QEMU_CLOCK_VIRTUAL reading (via qemu_clock_get_us) plus
     * 500 units.
     */
    timer_mod(dev->rxp_timer, qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) + 500);

    return H_SUCCESS;
}
/*
 * H_ADD_LOGICAL_LAN_BUFFER hcall (early variant): the guest donates one
 * receive buffer descriptor, stored into the first free slot of the
 * buffer-list page via the TCE access helpers.
 *
 * Returns H_SUCCESS, H_PARAMETER on a bad device or descriptor, or
 * H_RESOURCE when the device is closed or the buffer count is at its limit.
 */
static target_ulong h_add_logical_lan_buffer(CPUState *env,
                                             sPAPREnvironment *spapr,
                                             target_ulong opcode,
                                             target_ulong *args)
{
    target_ulong reg = args[0];
    target_ulong buf = args[1];
    VIOsPAPRDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
    VIOsPAPRVLANDevice *dev = (VIOsPAPRVLANDevice *)sdev;
    vlan_bd_t bd;

    dprintf("H_ADD_LOGICAL_LAN_BUFFER(0x" TARGET_FMT_lx ", 0x"
            TARGET_FMT_lx ")\n", reg, buf);

    if (!sdev) {
        hcall_dprintf("Wrong device in h_add_logical_lan_buffer\n");
        return H_PARAMETER;
    }

    /* Descriptor must be 4-byte aligned and at least 16 bytes long */
    if ((check_bd(dev, buf, 4) < 0)
        || (VLAN_BD_LEN(buf) < 16)) {
        hcall_dprintf("Bad buffer enqueued in h_add_logical_lan_buffer\n");
        return H_PARAMETER;
    }

    if (!dev->isopen || dev->rx_bufs >= VLAN_MAX_BUFS) {
        return H_RESOURCE;
    }

    /*
     * Scan the buffer-list page for a free (invalid) slot; here the wrap
     * bound is the whole TCE page rather than VLAN_RX_BDS_LEN as in later
     * versions of this function.
     */
    do {
        dev->add_buf_ptr += 8;
        if (dev->add_buf_ptr >= SPAPR_VIO_TCE_PAGE_SIZE) {
            dev->add_buf_ptr = VLAN_RX_BDS_OFF;
        }

        bd = ldq_tce(sdev, dev->buf_list + dev->add_buf_ptr);
    } while (bd & VLAN_BD_VALID);

    /* Publish the new descriptor into the free slot */
    stq_tce(sdev, dev->buf_list + dev->add_buf_ptr, buf);

    dev->rx_bufs++;

    dprintf("h_add_logical_lan_buffer(): Added buf ptr=%d rx_bufs=%d"
            " bd=0x%016llx\n", dev->add_buf_ptr, dev->rx_bufs,
            (unsigned long long)buf);

    return H_SUCCESS;
}
/*
 * Receive-path callback (early variant): deliver an incoming frame to the
 * guest.  Finds a suitable guest-donated buffer in the buffer-list page,
 * DMAs the frame into it, appends an entry to the guest's receive queue
 * and raises the device interrupt if enabled.
 *
 * Returns the consumed size on success, -1 when the device is closed, no
 * buffer fits, or DMA fails.
 */
static ssize_t spapr_vlan_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    VIOsPAPRDevice *sdev = DO_UPCAST(NICState, nc, nc)->opaque;
    VIOsPAPRVLANDevice *dev = (VIOsPAPRVLANDevice *)sdev;
    vlan_bd_t rxq_bd = vio_ldq(sdev, dev->buf_list + VLAN_RXQ_BD_OFF);
    vlan_bd_t bd;
    int buf_ptr = dev->use_buf_ptr;
    uint64_t handle;
    uint8_t control;

    dprintf("spapr_vlan_receive() [%s] rx_bufs=%d\n", sdev->qdev.id,
            dev->rx_bufs);

    if (!dev->isopen) {
        return -1;
    }

    if (!dev->rx_bufs) {
        return -1;
    }

    /*
     * Walk the BD slots as a ring starting after the last-used slot,
     * looking for a valid descriptor big enough for the frame plus the
     * 8-byte handle header.  The loop terminates at the latest when it
     * comes back around to use_buf_ptr.
     */
    do {
        buf_ptr += 8;
        if (buf_ptr >= SPAPR_TCE_PAGE_SIZE) {
            buf_ptr = VLAN_RX_BDS_OFF;
        }

        bd = vio_ldq(sdev, dev->buf_list + buf_ptr);
        dprintf("use_buf_ptr=%d bd=0x%016llx\n",
                buf_ptr, (unsigned long long)bd);
    } while ((!(bd & VLAN_BD_VALID) || (VLAN_BD_LEN(bd) < (size + 8)))
             && (buf_ptr != dev->use_buf_ptr));

    if (!(bd & VLAN_BD_VALID) || (VLAN_BD_LEN(bd) < (size + 8))) {
        /* Failed to find a suitable buffer */
        return -1;
    }

    /* Remove the buffer from the pool */
    dev->rx_bufs--;
    dev->use_buf_ptr = buf_ptr;
    vio_stq(sdev, dev->buf_list + dev->use_buf_ptr, 0);

    dprintf("Found buffer: ptr=%d num=%d\n", dev->use_buf_ptr, dev->rx_bufs);

    /* Transfer the packet data (skipping the 8-byte handle at the start) */
    if (spapr_vio_dma_write(sdev, VLAN_BD_ADDR(bd) + 8, buf, size) < 0) {
        return -1;
    }

    dprintf("spapr_vlan_receive: DMA write completed\n");

    /*
     * Update the receive queue.  The toggle bit of the rxq descriptor
     * flips the polarity of VLAN_RXQC_TOGGLE in the control byte so the
     * guest can detect queue wrap-around.
     */
    control = VLAN_RXQC_TOGGLE | VLAN_RXQC_VALID;
    if (rxq_bd & VLAN_BD_TOGGLE) {
        control ^= VLAN_RXQC_TOGGLE;
    }

    /*
     * Write the queue entry fields back-to-front: handle, length, offset,
     * then the control byte last so the entry only becomes valid once it
     * is complete.
     */
    handle = vio_ldq(sdev, VLAN_BD_ADDR(bd));
    vio_stq(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 8, handle);
    vio_stl(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 4, size);
    vio_sth(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 2, 8);
    vio_stb(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr, control);

    dprintf("wrote rxq entry (ptr=0x%llx): 0x%016llx 0x%016llx\n",
            (unsigned long long)dev->rxq_ptr,
            (unsigned long long)vio_ldq(sdev,
                                        VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr),
            (unsigned long long)vio_ldq(sdev,
                                        VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr
                                        + 8));

    /* Advance the queue; on wrap, flip the toggle bit in the rxq BD */
    dev->rxq_ptr += 16;
    if (dev->rxq_ptr >= VLAN_BD_LEN(rxq_bd)) {
        dev->rxq_ptr = 0;
        vio_stq(sdev, dev->buf_list + VLAN_RXQ_BD_OFF,
                rxq_bd ^ VLAN_BD_TOGGLE);
    }

    /* Interrupt the guest if it has enabled the device interrupt */
    if (sdev->signal_state & 1) {
        qemu_irq_pulse(spapr_vio_qirq(sdev));
    }

    return size;
}
/*
 * H_SEND_LOGICAL_LAN hcall: transmit a frame described by up to six buffer
 * descriptors in args[1..6].  The fragments are gathered into one linear
 * buffer via DMA reads and handed to the network backend.
 *
 * Returns H_SUCCESS, H_PARAMETER on a bad device, H_DROPPED when closed,
 * H_HARDWARE for the unimplemented continuation case, H_RESOURCE when the
 * total length exceeds MAX_PACKET_SIZE, or the (negative) DMA error code.
 */
static target_ulong h_send_logical_lan(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                       target_ulong opcode, target_ulong *args)
{
    target_ulong reg = args[0];
    target_ulong *bufs = args + 1;
    target_ulong continue_token = args[7];
    VIOsPAPRDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
    VIOsPAPRVLANDevice *dev = (VIOsPAPRVLANDevice *)sdev;
    unsigned total_len;
    uint8_t *lbuf, *p;
    int i, nbufs;
    int ret;

    dprintf("H_SEND_LOGICAL_LAN(0x" TARGET_FMT_lx ", <bufs>, 0x"
            TARGET_FMT_lx ")\n", reg, continue_token);

    if (!sdev) {
        return H_PARAMETER;
    }

    dprintf("rxbufs = %d\n", dev->rx_bufs);

    if (!dev->isopen) {
        return H_DROPPED;
    }

    if (continue_token) {
        return H_HARDWARE; /* FIXME actually handle this */
    }

    /* Sum the fragment lengths; the list ends at the first invalid BD */
    total_len = 0;
    for (i = 0; i < 6; i++) {
        dprintf(" buf desc: 0x" TARGET_FMT_lx "\n", bufs[i]);
        if (!(bufs[i] & VLAN_BD_VALID)) {
            break;
        }
        total_len += VLAN_BD_LEN(bufs[i]);
    }

    nbufs = i;
    dprintf("h_send_logical_lan() %d buffers, total length 0x%x\n",
            nbufs, total_len);

    if (total_len == 0) {
        return H_SUCCESS;
    }

    if (total_len > MAX_PACKET_SIZE) {
        /* Don't let the guest force too large an allocation */
        return H_RESOURCE;
    }

    /*
     * Stack allocation is bounded by the MAX_PACKET_SIZE check above;
     * NOTE(review): alloca is generally discouraged — a fixed-size buffer
     * would avoid it, at the cost of always reserving MAX_PACKET_SIZE.
     */
    lbuf = alloca(total_len);
    p = lbuf;
    /* Gather all fragments into the linear buffer */
    for (i = 0; i < nbufs; i++) {
        ret = spapr_vio_dma_read(sdev, VLAN_BD_ADDR(bufs[i]), p,
                                 VLAN_BD_LEN(bufs[i]));
        if (ret < 0) {
            return ret;
        }

        p += VLAN_BD_LEN(bufs[i]);
    }

    qemu_send_packet(&dev->nic->nc, lbuf, total_len);

    return H_SUCCESS;
}
/*
 * H_REGISTER_LOGICAL_LAN hcall: the guest registers its buffer-list page,
 * receive queue and filter list, opening the device.  All three descriptors
 * are validated before any guest-visible state is touched.
 *
 * Returns H_SUCCESS, H_PARAMETER on a bad device or descriptor, or
 * H_RESOURCE when the device is already open.
 */
static target_ulong h_register_logical_lan(PowerPCCPU *cpu,
                                           sPAPREnvironment *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    target_ulong reg = args[0];
    target_ulong buf_list = args[1];
    target_ulong rec_queue = args[2];
    target_ulong filter_list = args[3];
    VIOsPAPRDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
    VIOsPAPRVLANDevice *dev = (VIOsPAPRVLANDevice *)sdev;
    vlan_bd_t filter_list_bd;

    if (!dev) {
        return H_PARAMETER;
    }

    if (dev->isopen) {
        hcall_dprintf("H_REGISTER_LOGICAL_LAN called twice without "
                      "H_FREE_LOGICAL_LAN\n");
        return H_RESOURCE;
    }

    /* The buffer list must be a page-aligned, page-sized region */
    if (check_bd(dev, VLAN_VALID_BD(buf_list, SPAPR_TCE_PAGE_SIZE),
                 SPAPR_TCE_PAGE_SIZE) < 0) {
        hcall_dprintf("Bad buf_list 0x" TARGET_FMT_lx "\n", buf_list);
        return H_PARAMETER;
    }

    filter_list_bd = VLAN_VALID_BD(filter_list, SPAPR_TCE_PAGE_SIZE);
    if (check_bd(dev, filter_list_bd, SPAPR_TCE_PAGE_SIZE) < 0) {
        hcall_dprintf("Bad filter_list 0x" TARGET_FMT_lx "\n", filter_list);
        return H_PARAMETER;
    }

    if (!(rec_queue & VLAN_BD_VALID)
        || (check_bd(dev, rec_queue, VLAN_RQ_ALIGNMENT) < 0)) {
        hcall_dprintf("Bad receive queue\n");
        return H_PARAMETER;
    }

    dev->buf_list = buf_list;
    sdev->signal_state = 0;

    /* Start with the toggle bit clear; the receive path flips it on wrap */
    rec_queue &= ~VLAN_BD_TOGGLE;

    /* Initialize the buffer list */
    vio_stq(sdev, buf_list, rec_queue);
    vio_stq(sdev, buf_list + 8, filter_list_bd);
    /* Zero the BD slot area of the page */
    spapr_vio_dma_set(sdev, buf_list + VLAN_RX_BDS_OFF, 0,
                      SPAPR_TCE_PAGE_SIZE - VLAN_RX_BDS_OFF);
    /*
     * Both scan pointers start one slot before VLAN_RX_BDS_OFF so the
     * first pre-increment lands on the first BD slot.
     */
    dev->add_buf_ptr = VLAN_RX_BDS_OFF - 8;
    dev->use_buf_ptr = VLAN_RX_BDS_OFF - 8;
    dev->rx_bufs = 0;
    dev->rxq_ptr = 0;

    /* Initialize the receive queue */
    spapr_vio_dma_set(sdev, VLAN_BD_ADDR(rec_queue), 0,
                      VLAN_BD_LEN(rec_queue));

    dev->isopen = 1;

    return H_SUCCESS;
}
/*
 * H_SEND_LOGICAL_LAN hcall (trace-based variant): transmit a frame described
 * by up to six buffer descriptors in args[1..6], gathered into one linear
 * buffer via DMA reads and handed to the network backend.
 *
 * Returns H_SUCCESS, H_PARAMETER on a bad device, H_DROPPED when closed,
 * H_HARDWARE for the unimplemented continuation case, H_RESOURCE when the
 * total length exceeds MAX_PACKET_SIZE, or the (negative) DMA error code.
 */
static target_ulong h_send_logical_lan(PowerPCCPU *cpu,
                                       sPAPRMachineState *spapr,
                                       target_ulong opcode,
                                       target_ulong *args)
{
    target_ulong reg = args[0];
    target_ulong *bufs = args + 1;
    target_ulong continue_token = args[7];
    VIOsPAPRDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
    VIOsPAPRVLANDevice *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
    unsigned total_len;
    uint8_t *lbuf, *p;
    int i, nbufs;
    int ret;

    trace_spapr_vlan_h_send_logical_lan(reg, continue_token);

    if (!sdev) {
        return H_PARAMETER;
    }

    trace_spapr_vlan_h_send_logical_lan_rxbufs(dev->rx_bufs);

    if (!dev->isopen) {
        return H_DROPPED;
    }

    if (continue_token) {
        return H_HARDWARE; /* FIXME actually handle this */
    }

    /* Sum the fragment lengths; the list ends at the first invalid BD */
    total_len = 0;
    for (i = 0; i < 6; i++) {
        trace_spapr_vlan_h_send_logical_lan_buf_desc(bufs[i]);
        if (!(bufs[i] & VLAN_BD_VALID)) {
            break;
        }
        total_len += VLAN_BD_LEN(bufs[i]);
    }

    nbufs = i;
    trace_spapr_vlan_h_send_logical_lan_total(nbufs, total_len);

    if (total_len == 0) {
        return H_SUCCESS;
    }

    if (total_len > MAX_PACKET_SIZE) {
        /* Don't let the guest force too large an allocation */
        return H_RESOURCE;
    }

    /* Stack allocation is bounded by the MAX_PACKET_SIZE check above */
    lbuf = alloca(total_len);
    p = lbuf;
    /* Gather all fragments into the linear buffer */
    for (i = 0; i < nbufs; i++) {
        ret = spapr_vio_dma_read(sdev, VLAN_BD_ADDR(bufs[i]), p,
                                 VLAN_BD_LEN(bufs[i]));
        if (ret < 0) {
            return ret;
        }

        p += VLAN_BD_LEN(bufs[i]);
    }

    qemu_send_packet(qemu_get_queue(dev->nic), lbuf, total_len);

    return H_SUCCESS;
}
/*
 * Receive-path callback (pool-aware variant): deliver an incoming frame to
 * the guest.  Obtains a guest-donated buffer descriptor from either the
 * buffer-pool or the legacy buffer-list-page backend, DMAs the frame into
 * it, appends an entry to the guest's receive queue and raises the device
 * interrupt if enabled.
 *
 * Returns the consumed size on success, 0 when the frame must be dropped
 * for lack of buffers (recorded via spapr_vlan_record_dropped_rx_frame),
 * or -1 when the device is closed or DMA fails.
 */
static ssize_t spapr_vlan_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    VIOsPAPRVLANDevice *dev = qemu_get_nic_opaque(nc);
    VIOsPAPRDevice *sdev = VIO_SPAPR_DEVICE(dev);
    vlan_bd_t rxq_bd = vio_ldq(sdev, dev->buf_list + VLAN_RXQ_BD_OFF);
    vlan_bd_t bd;
    uint64_t handle;
    uint8_t control;

    trace_spapr_vlan_receive(sdev->qdev.id, dev->rx_bufs);

    if (!dev->isopen) {
        return -1;
    }

    if (!dev->rx_bufs) {
        spapr_vlan_record_dropped_rx_frame(dev);
        return 0;
    }

    /* Fetch a buffer descriptor from the backend selected by compat flags */
    if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) {
        bd = spapr_vlan_get_rx_bd_from_pool(dev, size);
    } else {
        bd = spapr_vlan_get_rx_bd_from_page(dev, size);
    }

    if (!bd) {
        spapr_vlan_record_dropped_rx_frame(dev);
        return 0;
    }

    dev->rx_bufs--;

    /* Transfer the packet data (skipping the 8-byte handle at the start) */
    if (spapr_vio_dma_write(sdev, VLAN_BD_ADDR(bd) + 8, buf, size) < 0) {
        return -1;
    }

    trace_spapr_vlan_receive_dma_completed();

    /*
     * Update the receive queue.  The toggle bit of the rxq descriptor
     * flips the polarity of VLAN_RXQC_TOGGLE in the control byte so the
     * guest can detect queue wrap-around.
     */
    control = VLAN_RXQC_TOGGLE | VLAN_RXQC_VALID;
    if (rxq_bd & VLAN_BD_TOGGLE) {
        control ^= VLAN_RXQC_TOGGLE;
    }

    /*
     * Write the queue entry fields back-to-front: handle, length, offset,
     * then the control byte last so the entry only becomes valid once it
     * is complete.
     */
    handle = vio_ldq(sdev, VLAN_BD_ADDR(bd));
    vio_stq(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 8, handle);
    vio_stl(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 4, size);
    vio_sth(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 2, 8);
    vio_stb(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr, control);

    trace_spapr_vlan_receive_wrote(dev->rxq_ptr,
                                   vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) +
                                                 dev->rxq_ptr),
                                   vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) +
                                                 dev->rxq_ptr + 8));

    /* Advance the queue; on wrap, flip the toggle bit in the rxq BD */
    dev->rxq_ptr += 16;
    if (dev->rxq_ptr >= VLAN_BD_LEN(rxq_bd)) {
        dev->rxq_ptr = 0;
        vio_stq(sdev, dev->buf_list + VLAN_RXQ_BD_OFF,
                rxq_bd ^ VLAN_BD_TOGGLE);
    }

    /* Interrupt the guest if it has enabled the device interrupt */
    if (sdev->signal_state & 1) {
        qemu_irq_pulse(spapr_vio_qirq(sdev));
    }

    return size;
}