/* Release the memory allocated to a packet.  */
static void smc91c111_release_packet(smc91c111_state *s, int packet)
{
    s->allocated &= ~(1 << packet);
    if (s->tx_alloc == 0x80)
        smc91c111_tx_alloc(s);
    qemu_flush_queued_packets(qemu_get_queue(s->nic));
}

static void dp8393x_do_receiver_enable(dp8393xState *s)
{
    s->regs[SONIC_CR] &= ~SONIC_CR_RXDIS;
    if (dp8393x_can_receive(s->nic->ncs)) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}

/*
 * The fd_write() callback, invoked if the fd is marked as
 * writable after a poll.  Unregister the handler and flush any
 * buffered packets.
 */
static void netmap_writable(void *opaque)
{
    NetmapState *s = opaque;

    netmap_write_poll(s, false);
    qemu_flush_queued_packets(&s->nc);
}

/* Same contract as netmap_writable(): the fd drained, so stop polling
   for writability and retry the queued packets.  */
static void vnic_writable(void *opaque)
{
    VNICState *vsp = opaque;

    vnic_write_poll(vsp, 0);
    qemu_flush_queued_packets(&vsp->vns_nc);
}

static void tap_writable(void *opaque)
{
    TAPState *s = opaque;

    tap_write_poll(s, 0);
    qemu_flush_queued_packets(&s->nc);
}

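/*
 * A minimal sketch (not taken from any one backend above) of the
 * write-poll pattern that netmap_writable(), vnic_writable() and
 * tap_writable() all follow: when a send would block, the backend
 * registers an fd-writable handler; once the fd drains, the handler
 * unregisters itself and flushes whatever the net layer queued in the
 * meantime.  FooState, foo_writable and foo_write_poll are
 * illustrative names, not QEMU API; qemu_set_fd_handler() and
 * qemu_flush_queued_packets() are real.
 */
typedef struct FooState {
    NetClientState nc; /* must come first, as in the backends above */
    int fd;
} FooState;

static void foo_writable(void *opaque);

static void foo_write_poll(FooState *s, bool enable)
{
    /* Watch (or stop watching) the fd for writability. */
    qemu_set_fd_handler(s->fd, NULL, enable ? foo_writable : NULL, s);
}

static void foo_writable(void *opaque)
{
    FooState *s = opaque;

    foo_write_poll(s, false);
    qemu_flush_queued_packets(&s->nc);
}
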
static void eth_write(void *opaque, hwaddr addr,
                      uint64_t val64, unsigned int size)
{
    struct xlx_ethlite *s = opaque;
    unsigned int base = 0;
    uint32_t value = val64;

    addr >>= 2;
    switch (addr) {
    case R_TX_CTRL0:
    case R_TX_CTRL1:
        if (addr == R_TX_CTRL1)
            base = 0x800 / 4;

        D(qemu_log("%s addr=" TARGET_FMT_plx " val=%x\n",
                   __func__, addr * 4, value));
        if ((value & (CTRL_P | CTRL_S)) == CTRL_S) {
            qemu_send_packet(qemu_get_queue(s->nic),
                             (void *) &s->regs[base],
                             s->regs[base + R_TX_LEN0]);
            D(qemu_log("eth_tx %d\n", s->regs[base + R_TX_LEN0]));
            if (s->regs[base + R_TX_CTRL0] & CTRL_I)
                eth_pulse_irq(s);
        } else if ((value & (CTRL_P | CTRL_S)) == (CTRL_P | CTRL_S)) {
            memcpy(&s->conf.macaddr.a[0], &s->regs[base], 6);
            if (s->regs[base + R_TX_CTRL0] & CTRL_I)
                eth_pulse_irq(s);
        }

        /* We are fast and get ready pretty much immediately so
           we actually never flip the S nor P bits to one.  */
        s->regs[addr] = value & ~(CTRL_P | CTRL_S);
        break;

    /* Keep these native.  */
    case R_RX_CTRL0:
    case R_RX_CTRL1:
        if (!(value & CTRL_S)) {
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
        /* fall through */
    case R_TX_LEN0:
    case R_TX_LEN1:
    case R_TX_GIE0:
        D(qemu_log("%s addr=" TARGET_FMT_plx " val=%x\n",
                   __func__, addr * 4, value));
        s->regs[addr] = value;
        break;

    default:
        s->regs[addr] = tswap32(value);
        break;
    }
}

static void e1000e_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    E1000EState *s = E1000E(pci_dev);

    pci_default_write_config(pci_dev, address, val, len);

    /* Once the guest enables bus mastering, DMA to the rx ring can
       make progress again, so retry any packets queued meanwhile.  */
    if (range_covers_byte(address, len, PCI_COMMAND) &&
        (pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}

static target_ulong h_add_logical_lan_buffer(PowerPCCPU *cpu,
                                             sPAPREnvironment *spapr,
                                             target_ulong opcode,
                                             target_ulong *args)
{
    target_ulong reg = args[0];
    target_ulong buf = args[1];
    VIOsPAPRDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
    VIOsPAPRVLANDevice *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
    vlan_bd_t bd;

    DPRINTF("H_ADD_LOGICAL_LAN_BUFFER(0x" TARGET_FMT_lx
            ", 0x" TARGET_FMT_lx ")\n", reg, buf);

    if (!sdev) {
        hcall_dprintf("Bad device\n");
        return H_PARAMETER;
    }

    if ((check_bd(dev, buf, 4) < 0)
        || (VLAN_BD_LEN(buf) < 16)) {
        hcall_dprintf("Bad buffer enqueued\n");
        return H_PARAMETER;
    }

    if (!dev->isopen || dev->rx_bufs >= VLAN_MAX_BUFS) {
        return H_RESOURCE;
    }

    do {
        dev->add_buf_ptr += 8;
        if (dev->add_buf_ptr >= (VLAN_RX_BDS_LEN + VLAN_RX_BDS_OFF)) {
            dev->add_buf_ptr = VLAN_RX_BDS_OFF;
        }

        bd = vio_ldq(sdev, dev->buf_list + dev->add_buf_ptr);
    } while (bd & VLAN_BD_VALID);

    vio_stq(sdev, dev->buf_list + dev->add_buf_ptr, buf);

    dev->rx_bufs++;

    qemu_flush_queued_packets(qemu_get_queue(dev->nic));

    DPRINTF("h_add_logical_lan_buffer(): Added buf ptr=%d rx_bufs=%d"
            " bd=0x%016llx\n", dev->add_buf_ptr, dev->rx_bufs,
            (unsigned long long)buf);

    return H_SUCCESS;
}

static void ftgmac100_enable_rx(Ftgmac100State *s)
{
    Ftgmac100Desc bd;
    uint32_t full;

    /* Find an empty descriptor to use */
    while (1) {
        ftgmac100_read_bd(&bd, s->rx_descriptor);
        full = (bd.des0 & FTGMAC100_RXDES0_RXPKT_RDY);
        /* Stop on a free descriptor or at the end of the rx ring
           (the rx ring's end-of-ring bit, not the tx ring's).  */
        if (!full || bd.des0 & ftgmac100_rxdes0_edotr(s)) {
            break;
        }
        s->rx_descriptor += sizeof(Ftgmac100Desc);
    }

    if (full) {
        DEBUG("RX buffer full\n");
    }

    s->rx_enabled = (full == 0);
    if (s->rx_enabled) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}

static uint64_t stellaris_enet_read(void *opaque, hwaddr offset,
                                    unsigned size)
{
    stellaris_enet_state *s = (stellaris_enet_state *)opaque;
    uint32_t val;

    switch (offset) {
    case 0x00: /* RIS */
        DPRINTF("IRQ status %02x\n", s->ris);
        return s->ris;
    case 0x04: /* IM */
        return s->im;
    case 0x08: /* RCTL */
        return s->rctl;
    case 0x0c: /* TCTL */
        return s->tctl;
    case 0x10: /* DATA */
    {
        uint8_t *rx_fifo;

        if (s->np == 0) {
            BADF("RX underflow\n");
            return 0;
        }

        rx_fifo = s->rx[s->next_packet].data + s->rx_fifo_offset;

        val = rx_fifo[0] | (rx_fifo[1] << 8) | (rx_fifo[2] << 16)
              | (rx_fifo[3] << 24);
        s->rx_fifo_offset += 4;
        if (s->rx_fifo_offset >= s->rx[s->next_packet].len) {
            s->rx_fifo_offset = 0;
            s->next_packet++;
            if (s->next_packet >= 31)
                s->next_packet = 0;
            s->np--;
            DPRINTF("RX done np=%d\n", s->np);
            if (!s->np && stellaris_enet_can_receive(s)) {
                qemu_flush_queued_packets(qemu_get_queue(s->nic));
            }
        }
        return val;
    }
    case 0x14: /* IA0 */
        return s->conf.macaddr.a[0] | (s->conf.macaddr.a[1] << 8)
            | (s->conf.macaddr.a[2] << 16)
            | ((uint32_t)s->conf.macaddr.a[3] << 24);
    case 0x18: /* IA1 */
        return s->conf.macaddr.a[4] | (s->conf.macaddr.a[5] << 8);
    case 0x1c: /* THR */
        return s->thr;
    case 0x20: /* MCTL */
        return s->mctl;
    case 0x24: /* MDV */
        return s->mdv;
    case 0x28: /* MADD */
        return 0;
    case 0x2c: /* MTXD */
        return s->mtxd;
    case 0x30: /* MRXD */
        return s->mrxd;
    case 0x34: /* NP */
        return s->np;
    case 0x38: /* TR */
        return 0;
    case 0x3c: /* Undocumented: Timestamp? */
        return 0;
    default:
        hw_error("stellaris_enet_read: Bad offset %x\n", (int)offset);
        return 0;
    }
}

static void dp8393x_write(void *opaque, hwaddr addr, uint64_t data,
                          unsigned int size)
{
    dp8393xState *s = opaque;
    int reg = addr >> s->it_shift;

    DPRINTF("write 0x%04x to reg %s\n", (uint16_t)data, reg_names[reg]);

    switch (reg) {
    /* Command register */
    case SONIC_CR:
        dp8393x_do_command(s, data);
        break;
    /* Prevent write to read-only registers */
    case SONIC_CAP2:
    case SONIC_CAP1:
    case SONIC_CAP0:
    case SONIC_SR:
    case SONIC_MDT:
        DPRINTF("writing to reg %d invalid\n", reg);
        break;
    /* Accept write to some registers only when in reset mode */
    case SONIC_DCR:
        if (s->regs[SONIC_CR] & SONIC_CR_RST) {
            s->regs[reg] = data & 0xbfff;
        } else {
            DPRINTF("writing to DCR invalid\n");
        }
        break;
    case SONIC_DCR2:
        if (s->regs[SONIC_CR] & SONIC_CR_RST) {
            s->regs[reg] = data & 0xf017;
        } else {
            DPRINTF("writing to DCR2 invalid\n");
        }
        break;
    /* 12 lower bytes are Read Only */
    case SONIC_TCR:
        s->regs[reg] = data & 0xf000;
        break;
    /* 9 lower bytes are Read Only */
    case SONIC_RCR:
        s->regs[reg] = data & 0xffe0;
        break;
    /* Ignore most significant bit */
    case SONIC_IMR:
        s->regs[reg] = data & 0x7fff;
        dp8393x_update_irq(s);
        break;
    /* Clear bits by writing 1 to them */
    case SONIC_ISR:
        data &= s->regs[reg];
        s->regs[reg] &= ~data;
        if (data & SONIC_ISR_RBE) {
            dp8393x_do_read_rra(s);
        }
        dp8393x_update_irq(s);
        if (dp8393x_can_receive(s->nic->ncs)) {
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
        break;
    /* Ignore least significant bit */
    case SONIC_RSA:
    case SONIC_REA:
    case SONIC_RRP:
    case SONIC_RWP:
        s->regs[reg] = data & 0xfffe;
        break;
    /* Invert written value for some registers */
    case SONIC_CRCT:
    case SONIC_FAET:
    case SONIC_MPT:
        s->regs[reg] = data ^ 0xffff;
        break;
    /* All other registers have no special constraint */
    default:
        s->regs[reg] = data;
    }

    if (reg == SONIC_WT0 || reg == SONIC_WT1) {
        dp8393x_set_next_tick(s);
    }
}

static void net_event(struct XenDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);

    net_tx_packets(netdev);
    qemu_flush_queued_packets(&netdev->nic->nc);
}

static void eth_write(void *opaque, hwaddr addr,
                      uint64_t val64, unsigned int size)
{
    struct xlx_ethlite *s = opaque;
    unsigned int base = 0;
    uint32_t value = val64;

    addr >>= 2;
    switch (addr) {
    case R_TX_CTRL0:
    case R_TX_CTRL1:
        if (addr == R_TX_CTRL1)
            base = 0x800 / 4;

        D(qemu_log("%s addr=" TARGET_FMT_plx " val=%x\n",
                   __func__, addr * 4, value));
        if ((value & (CTRL_P | CTRL_S)) == CTRL_S) {
            qemu_send_packet(qemu_get_queue(s->nic),
                             (void *) &s->regs[base],
                             s->regs[base + R_TX_LEN0]);
            D(qemu_log("eth_tx %d\n", s->regs[base + R_TX_LEN0]));
            if (s->regs[base + R_TX_CTRL0] & CTRL_I)
                eth_pulse_irq(s);
        } else if ((value & (CTRL_P | CTRL_S)) == (CTRL_P | CTRL_S)) {
            memcpy(&s->conf.macaddr.a[0], &s->regs[base], 6);
            if (s->regs[base + R_TX_CTRL0] & CTRL_I)
                eth_pulse_irq(s);
        }

        /* We are fast and get ready pretty much immediately so
           we actually never flip the S nor P bits to one.  */
        s->regs[addr] = value & ~(CTRL_P | CTRL_S);
        break;

    /* Keep these native.  */
    case R_RX_CTRL0:
    case R_RX_CTRL1:
        if (!(value & CTRL_S)) {
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
        /* fall through */
    case R_TX_LEN0:
    case R_TX_LEN1:
    case R_TX_GIE0:
        D(qemu_log("%s addr=" TARGET_FMT_plx " val=%x\n",
                   __func__, addr * 4, value));
        s->regs[addr] = value;
        break;

    case R_MDIOCTRL:
        if (((unsigned int)value & R_MDIOCTRL_MDIOSTS_MASK) != 0) {
            struct TEMAC *t = &s->TEMAC;
            unsigned int op = s->regs[R_MDIOADDR] & R_MDIOADDR_OP_MASK;
            unsigned int phyaddr =
                (s->regs[R_MDIOADDR] & R_MDIOADDR_PHYADR_MASK)
                >> R_MDIOADDR_PHYADR_SHIFT;
            unsigned int regaddr =
                s->regs[R_MDIOADDR] & R_MDIOADDR_REGADR_MASK;

            if (op) {
                /* read PHY registers */
                s->regs[R_MDIORD] = mdio_read_req(&t->mdio_bus,
                                                  phyaddr, regaddr);
            } else {
                /* write PHY registers */
                mdio_write_req(&t->mdio_bus, phyaddr, regaddr,
                               s->regs[R_MDIOWR]);
            }
        }
        s->regs[addr] = value;
        break;

    default:
        s->regs[addr] = tswap32(value);
        break;
    }
}

static target_ulong h_register_logical_lan(PowerPCCPU *cpu,
                                           sPAPRMachineState *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    target_ulong reg = args[0];
    target_ulong buf_list = args[1];
    target_ulong rec_queue = args[2];
    target_ulong filter_list = args[3];
    VIOsPAPRDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
    VIOsPAPRVLANDevice *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
    vlan_bd_t filter_list_bd;

    if (!dev) {
        return H_PARAMETER;
    }

    if (dev->isopen) {
        hcall_dprintf("H_REGISTER_LOGICAL_LAN called twice without "
                      "H_FREE_LOGICAL_LAN\n");
        return H_RESOURCE;
    }

    if (check_bd(dev, VLAN_VALID_BD(buf_list, SPAPR_TCE_PAGE_SIZE),
                 SPAPR_TCE_PAGE_SIZE) < 0) {
        hcall_dprintf("Bad buf_list 0x" TARGET_FMT_lx "\n", buf_list);
        return H_PARAMETER;
    }

    filter_list_bd = VLAN_VALID_BD(filter_list, SPAPR_TCE_PAGE_SIZE);
    if (check_bd(dev, filter_list_bd, SPAPR_TCE_PAGE_SIZE) < 0) {
        hcall_dprintf("Bad filter_list 0x" TARGET_FMT_lx "\n", filter_list);
        return H_PARAMETER;
    }

    if (!(rec_queue & VLAN_BD_VALID)
        || (check_bd(dev, rec_queue, VLAN_RQ_ALIGNMENT) < 0)) {
        hcall_dprintf("Bad receive queue\n");
        return H_PARAMETER;
    }

    dev->buf_list = buf_list;
    sdev->signal_state = 0;

    rec_queue &= ~VLAN_BD_TOGGLE;

    /* Initialize the buffer list */
    vio_stq(sdev, buf_list, rec_queue);
    vio_stq(sdev, buf_list + 8, filter_list_bd);
    spapr_vio_dma_set(sdev, buf_list + VLAN_RX_BDS_OFF, 0,
                      SPAPR_TCE_PAGE_SIZE - VLAN_RX_BDS_OFF);
    dev->add_buf_ptr = VLAN_RX_BDS_OFF - 8;
    dev->use_buf_ptr = VLAN_RX_BDS_OFF - 8;
    dev->rx_bufs = 0;
    dev->rxq_ptr = 0;

    /* Initialize the receive queue */
    spapr_vio_dma_set(sdev, VLAN_BD_ADDR(rec_queue), 0,
                      VLAN_BD_LEN(rec_queue));

    dev->isopen = 1;
    qemu_flush_queued_packets(qemu_get_queue(dev->nic));

    return H_SUCCESS;
}

static void spapr_vlan_flush_rx_queue(void *opaque)
{
    VIOsPAPRVLANDevice *dev = opaque;

    qemu_flush_queued_packets(qemu_get_queue(dev->nic));
}

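/*
 * A minimal sketch of the receive-side contract that every flush call
 * above relies on.  If .can_receive returns false (or .receive returns
 * 0), the net layer queues incoming packets instead of dropping them;
 * the device must then call qemu_flush_queued_packets() as soon as it
 * can accept data again, or delivery stalls.  BarState and the bar_*
 * functions are illustrative names, not from any device above;
 * qemu_get_nic_opaque(), qemu_get_queue() and
 * qemu_flush_queued_packets() are real helpers.
 */
typedef struct BarState {
    NICState *nic;
    bool rx_full;
    uint8_t rx_buf[2048];
    size_t rx_len;
} BarState;

static bool bar_can_receive(NetClientState *nc)
{
    BarState *s = qemu_get_nic_opaque(nc);

    return !s->rx_full;
}

static ssize_t bar_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    BarState *s = qemu_get_nic_opaque(nc);

    if (s->rx_full || size > sizeof(s->rx_buf)) {
        return 0; /* tell the net layer to queue the packet and retry */
    }
    memcpy(s->rx_buf, buf, size);
    s->rx_len = size;
    s->rx_full = true;
    return size;
}

/* Called once the guest has drained the buffer, e.g. from an MMIO read. */
static void bar_rx_done(BarState *s)
{
    s->rx_full = false;
    qemu_flush_queued_packets(qemu_get_queue(s->nic));
}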