/**
 * Refill RX descriptor ring
 *
 * @v rhn		Rhine device
 */
static void rhine_refill_rx ( struct rhine_nic *rhn ) {
	struct rhine_descriptor *desc;
	struct io_buffer *iobuf;
	unsigned int rx_idx;
	physaddr_t address;

	while ( ( rhn->rx.prod - rhn->rx.cons ) < RHINE_RXDESC_NUM ) {

		/* Allocate I/O buffer */
		iobuf = alloc_iob ( RHINE_RX_MAX_LEN );
		if ( ! iobuf ) {
			/* Wait for next refill */
			return;
		}

		/* Populate next receive descriptor */
		rx_idx = ( rhn->rx.prod++ % RHINE_RXDESC_NUM );
		desc = &rhn->rx.desc[rx_idx];
		address = virt_to_bus ( iobuf->data );
		desc->buffer = cpu_to_le32 ( address );
		desc->des1 =
			cpu_to_le32 ( RHINE_DES1_SIZE ( RHINE_RX_MAX_LEN - 1 ) |
				      RHINE_DES1_CHAIN | RHINE_DES1_IC );
		wmb();
		desc->des0 = cpu_to_le32 ( RHINE_DES0_OWN );

		/* Record I/O buffer */
		rhn->rx_iobuf[rx_idx] = iobuf;

		DBGC2 ( rhn, "RHINE %p RX %d is [%llx,%llx)\n", rhn, rx_idx,
			( ( unsigned long long ) address ),
			( ( unsigned long long ) address + RHINE_RX_MAX_LEN ) );
	}
}
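A refill function like the one above always has a poll-side counterpart that consumes completed descriptors. The following is only a minimal sketch of that counterpart for a hypothetical NIC, not part of the Rhine driver: every HYPOT_* name, the descriptor layout and the length-extraction macro are illustrative assumptions, while the iPXE calls (netdev_rx, netdev_rx_err, iob_put) are real.

/* Minimal sketch (hypothetical driver): the consumer index chases the
 * producer index, and only descriptors the NIC has released (OWN bit
 * clear) are handed to the network stack.
 */
static void hypot_poll_rx ( struct net_device *netdev ) {
	struct hypot_nic *hyp = netdev->priv;
	struct hypot_descriptor *desc;
	struct io_buffer *iobuf;
	unsigned int rx_idx;
	size_t len;

	while ( hyp->rx.cons != hyp->rx.prod ) {

		/* Get next descriptor in ring order */
		rx_idx = ( hyp->rx.cons % HYPOT_RXDESC_NUM );
		desc = &hyp->rx.desc[rx_idx];

		/* Stop if the NIC still owns this descriptor */
		if ( desc->status & cpu_to_le32 ( HYPOT_DESC_OWN ) )
			break;

		/* Detach the I/O buffer recorded at refill time */
		iobuf = hyp->rx_iobuf[rx_idx];
		hyp->rx_iobuf[rx_idx] = NULL;
		hyp->rx.cons++;

		/* Hand off to the network stack (or report an error) */
		if ( desc->status & cpu_to_le32 ( HYPOT_DESC_ERR ) ) {
			netdev_rx_err ( netdev, iobuf, -EIO );
		} else {
			len = HYPOT_DESC_LEN ( le32_to_cpu ( desc->status ) );
			iob_put ( iobuf, len );
			netdev_rx ( netdev, iobuf );
		}
	}

	/* Replenish the descriptors just consumed */
	hypot_refill_rx ( hyp );
}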
static int rtl818x_init_rx_ring(struct net80211_device *dev)
{
	struct rtl818x_priv *priv = dev->priv;
	struct rtl818x_rx_desc *entry;
	int i;

	/* Allocate and clear the hardware RX descriptor ring */
	priv->rx_ring = malloc_dma(sizeof(*priv->rx_ring) * RTL818X_RX_RING_SIZE,
				   RTL818X_RING_ALIGN);
	if (!priv->rx_ring) {
		DBG("rtl818x %s: cannot allocate RX ring\n", dev->netdev->name);
		return -ENOMEM;
	}
	priv->rx_ring_dma = virt_to_bus(priv->rx_ring);

	memset(priv->rx_ring, 0, sizeof(*priv->rx_ring) * RTL818X_RX_RING_SIZE);
	priv->rx_idx = 0;

	/* Attach an I/O buffer to each descriptor and give it to the NIC */
	for (i = 0; i < RTL818X_RX_RING_SIZE; i++) {
		struct io_buffer *iob = alloc_iob(MAX_RX_SIZE);
		entry = &priv->rx_ring[i];
		if (!iob)
			return -ENOMEM;

		priv->rx_buf[i] = iob;
		entry->rx_buf = cpu_to_le32(virt_to_bus(iob->data));
		entry->flags = cpu_to_le32(RTL818X_RX_DESC_FLAG_OWN |
					   MAX_RX_SIZE);
	}

	/* Mark the final descriptor as end-of-ring */
	entry->flags |= cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR);
	return 0;
}
/**
 * Allocate I/O buffer
 *
 * @v intf		Data transfer interface
 * @v len		I/O buffer payload length
 * @ret iobuf		I/O buffer
 */
struct io_buffer * xfer_alloc_iob ( struct interface *intf, size_t len ) {
	struct interface *dest;
	xfer_alloc_iob_TYPE ( void * ) *op =
		intf_get_dest_op ( intf, xfer_alloc_iob, &dest );
	void *object = intf_object ( dest );
	struct io_buffer *iobuf;

	DBGC ( INTF_COL ( intf ), "INTF " INTF_INTF_FMT " alloc_iob %zd\n",
	       INTF_INTF_DBG ( intf, dest ), len );

	if ( op ) {
		iobuf = op ( object, len );
	} else {
		/* Default is to allocate an I/O buffer with no
		 * reserved space.
		 */
		iobuf = alloc_iob ( len );
	}

	if ( ! iobuf ) {
		DBGC ( INTF_COL ( intf ), "INTF " INTF_INTF_FMT " alloc_iob "
		       "failed\n", INTF_INTF_DBG ( intf, dest ) );
	}

	intf_put ( dest );

	return iobuf;
}
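The indirection above lets the peer on the data-transfer interface decide how much headroom to reserve. The following caller-side sketch is illustrative only: the "struct example" object and its "xfer" interface field are assumptions, while xfer_alloc_iob, iob_put and xfer_deliver_iob are the real iPXE calls.

/* Minimal caller-side sketch (hypothetical object): ask the peer to
 * allocate a buffer, copy in the payload, then deliver it.
 */
static int example_deliver ( struct example *ex, const void *data,
			     size_t len ) {
	struct io_buffer *iobuf;

	/* Let the peer allocate the buffer, including any headroom it
	 * wants to reserve for its own headers.
	 */
	iobuf = xfer_alloc_iob ( &ex->xfer, len );
	if ( ! iobuf )
		return -ENOMEM;

	/* Copy in the payload and hand the buffer to the peer */
	memcpy ( iob_put ( iobuf, len ), data, len );
	return xfer_deliver_iob ( &ex->xfer, iobuf );
}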
/**
 * Encapsulate and encrypt a packet using CCMP
 *
 * @v crypto		CCMP cryptosystem
 * @v iob		I/O buffer containing cleartext packet
 * @ret eiob		I/O buffer containing encrypted packet
 */
struct io_buffer * ccmp_encrypt ( struct net80211_crypto *crypto,
				  struct io_buffer *iob ) {
	struct ccmp_ctx *ctx = crypto->priv;
	struct ieee80211_frame *hdr = iob->data;
	struct io_buffer *eiob;
	const int hdrlen = IEEE80211_TYP_FRAME_HEADER_LEN;
	int datalen = iob_len ( iob ) - hdrlen;
	struct ccmp_head head;
	struct ccmp_nonce nonce;
	struct ccmp_aad aad;
	u8 mic[8], tx_pn[6];
	void *edata, *emic;

	ctx->tx_seq++;
	u64_to_pn ( ctx->tx_seq, tx_pn, PN_LSB );

	/* Allocate memory */
	eiob = alloc_iob ( iob_len ( iob ) + CCMP_HEAD_LEN + CCMP_MIC_LEN );
	if ( ! eiob )
		return NULL;

	/* Copy frame header */
	memcpy ( iob_put ( eiob, hdrlen ), iob->data, hdrlen );
	hdr = eiob->data;
	hdr->fc |= IEEE80211_FC_PROTECTED;

	/* Fill in packet number and extended IV */
	memcpy ( head.pn_lo, tx_pn, 2 );
	memcpy ( head.pn_hi, tx_pn + 2, 4 );
	head.kid = 0x20;	/* have Extended IV, key ID 0 */
	head._rsvd = 0;
	memcpy ( iob_put ( eiob, sizeof ( head ) ), &head, sizeof ( head ) );

	/* Form nonce */
	nonce.prio = 0;
	memcpy ( nonce.a2, hdr->addr2, ETH_ALEN );
	u64_to_pn ( ctx->tx_seq, nonce.pn, PN_MSB );

	/* Form additional authentication data */
	aad.fc = hdr->fc & CCMP_AAD_FC_MASK;
	memcpy ( aad.a1, hdr->addr1, 3 * ETH_ALEN );	/* all 3 at once */
	aad.seq = hdr->seq & CCMP_AAD_SEQ_MASK;

	/* Calculate MIC over the data */
	ccmp_cbc_mac ( ctx, &nonce, iob->data + hdrlen, datalen, &aad, mic );

	/* Copy and encrypt data and MIC */
	edata = iob_put ( eiob, datalen );
	emic = iob_put ( eiob, CCMP_MIC_LEN );
	ccmp_ctr_xor ( ctx, &nonce, iob->data + hdrlen, edata, datalen,
		       mic, emic );

	/* Done! */
	DBGC2 ( ctx, "WPA-CCMP %p: encrypted packet %p -> %p\n", ctx,
		iob, eiob );

	return eiob;
}
static void rtl818x_handle_rx(struct net80211_device *dev)
{
	struct rtl818x_priv *priv = dev->priv;
	unsigned int count = RTL818X_RX_RING_SIZE;

	while (count--) {
		struct rtl818x_rx_desc *entry = &priv->rx_ring[priv->rx_idx];
		struct io_buffer *iob = priv->rx_buf[priv->rx_idx];
		u32 flags = le32_to_cpu(entry->flags);

		if (flags & RTL818X_RX_DESC_FLAG_OWN)
			return;

		if (flags & (RTL818X_RX_DESC_FLAG_DMA_FAIL |
			     RTL818X_RX_DESC_FLAG_FOF |
			     RTL818X_RX_DESC_FLAG_RX_ERR)) {
			/* This is crappy hardware. The Linux driver
			   doesn't even log these. */
			goto done;
		} else if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR) {
			/* This is actually a corrupt packet. */
			DBG2("rtl818x RX:%d CRC fail: flags %08x\n",
			     priv->rx_idx, flags);
			net80211_rx_err(dev, NULL, EIO);
		} else {
			u32 flags2 = le32_to_cpu(entry->flags2);
			struct io_buffer *new_iob = alloc_iob(MAX_RX_SIZE);
			if (!new_iob) {
				net80211_rx_err(dev, NULL, ENOMEM);
				goto done;
			}

			DBGP("rtl818x RX:%d success: flags %08x %08x\n",
			     priv->rx_idx, flags, flags2);

			iob_put(iob, flags & 0xFFF);
			net80211_rx(dev, iob, (flags2 >> 8) & 0x7f,
				    rtl818x_rates[(flags >> 20) & 0xf]);

			iob = new_iob;
			priv->rx_buf[priv->rx_idx] = iob;
		}

	done:
		entry->rx_buf = cpu_to_le32(virt_to_bus(iob->data));
		entry->flags = cpu_to_le32(RTL818X_RX_DESC_FLAG_OWN |
					   MAX_RX_SIZE);
		if (priv->rx_idx == RTL818X_RX_RING_SIZE - 1)
			entry->flags |= cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR);
		priv->rx_idx = (priv->rx_idx + 1) % RTL818X_RX_RING_SIZE;
	}
}
/** Poll for new packets */
static void af_packet_nic_poll ( struct net_device *netdev ) {
	struct af_packet_nic * nic = netdev->priv;
	struct pollfd pfd;
	struct io_buffer * iobuf;
	int r;

	pfd.fd = nic->fd;
	pfd.events = POLLIN;
	if (linux_poll(&pfd, 1, 0) == -1) {
		DBGC(nic, "af_packet %p poll failed (%s)\n",
		     nic, linux_strerror(linux_errno));
		return;
	}
	if ((pfd.revents & POLLIN) == 0)
		return;

	/* At this point we know there is at least one new packet to be read */

	iobuf = alloc_iob(RX_BUF_SIZE);
	if (! iobuf)
		goto allocfail;

	while ((r = linux_read(nic->fd, iobuf->data, RX_BUF_SIZE)) > 0) {
		DBGC2(nic, "af_packet %p read %d bytes\n", nic, r);

		iob_put(iobuf, r);
		netdev_rx(netdev, iobuf);

		iobuf = alloc_iob(RX_BUF_SIZE);
		if (! iobuf)
			goto allocfail;
	}

	free_iob(iobuf);
	return;

allocfail:
	DBGC(nic, "af_packet %p alloc_iob failed\n", nic);
}
/**
 * Look up media-specific link-layer address in the ARP cache
 *
 * @v netdev		Network device
 * @v net_protocol	Network-layer protocol
 * @v dest_net_addr	Destination network-layer address
 * @v source_net_addr	Source network-layer address
 * @ret dest_ll_addr	Destination link layer address
 * @ret rc		Return status code
 *
 * This function will use the ARP cache to look up the link-layer
 * address for the link-layer protocol associated with the network
 * device and the given network-layer protocol and addresses.  If
 * found, the destination link-layer address will be filled in
 * @c dest_ll_addr.
 *
 * If no address is found in the ARP cache, an ARP request will be
 * transmitted on the specified network device and -ENOENT will be
 * returned.
 */
int arp_resolve ( struct net_device *netdev, struct net_protocol *net_protocol,
		  const void *dest_net_addr, const void *source_net_addr,
		  void *dest_ll_addr ) {
	struct ll_protocol *ll_protocol = netdev->ll_protocol;
	const struct arp_entry *arp;
	struct io_buffer *iobuf;
	struct arphdr *arphdr;
	int rc;

	/* Look for existing entry in ARP table */
	arp = arp_find_entry ( ll_protocol, net_protocol, dest_net_addr );
	if ( arp ) {
		DBG ( "ARP cache hit: %s %s => %s %s\n",
		      net_protocol->name, net_protocol->ntoa ( arp->net_addr ),
		      ll_protocol->name, ll_protocol->ntoa ( arp->ll_addr ) );
		memcpy ( dest_ll_addr, arp->ll_addr, ll_protocol->ll_addr_len );
		return 0;
	}
	DBG ( "ARP cache miss: %s %s\n", net_protocol->name,
	      net_protocol->ntoa ( dest_net_addr ) );

	/* Allocate ARP packet */
	iobuf = alloc_iob ( MAX_LL_HEADER_LEN + sizeof ( *arphdr ) +
			    2 * ( MAX_LL_ADDR_LEN + MAX_NET_ADDR_LEN ) );
	if ( ! iobuf )
		return -ENOMEM;
	iob_reserve ( iobuf, MAX_LL_HEADER_LEN );

	/* Build up ARP request */
	arphdr = iob_put ( iobuf, sizeof ( *arphdr ) );
	arphdr->ar_hrd = ll_protocol->ll_proto;
	arphdr->ar_hln = ll_protocol->ll_addr_len;
	arphdr->ar_pro = net_protocol->net_proto;
	arphdr->ar_pln = net_protocol->net_addr_len;
	arphdr->ar_op = htons ( ARPOP_REQUEST );
	memcpy ( iob_put ( iobuf, ll_protocol->ll_addr_len ),
		 netdev->ll_addr, ll_protocol->ll_addr_len );
	memcpy ( iob_put ( iobuf, net_protocol->net_addr_len ),
		 source_net_addr, net_protocol->net_addr_len );
	memset ( iob_put ( iobuf, ll_protocol->ll_addr_len ),
		 0, ll_protocol->ll_addr_len );
	memcpy ( iob_put ( iobuf, net_protocol->net_addr_len ),
		 dest_net_addr, net_protocol->net_addr_len );

	/* Transmit ARP request */
	if ( ( rc = net_tx ( iobuf, netdev, &arp_protocol,
			     netdev->ll_broadcast, netdev->ll_addr ) ) != 0 )
		return rc;

	return -ENOENT;
}
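A caller has to treat -ENOENT as "request sent, answer not yet cached". The sketch below is a hypothetical transmit path showing that handling; it is not the real ipv4.c logic, and my_net_tx is an invented name, though arp_resolve, free_iob and net_tx are used with the signatures shown above.

/* Minimal sketch (hypothetical caller): resolve the destination first,
 * then transmit; on a cache miss the packet is simply dropped and the
 * caller relies on a later retransmission.
 */
static int my_net_tx ( struct io_buffer *iobuf, struct net_device *netdev,
		       struct net_protocol *net_protocol,
		       const void *dest_net_addr, const void *src_net_addr ) {
	uint8_t ll_dest[MAX_LL_ADDR_LEN];
	int rc;

	/* Attempt ARP resolution; this may transmit an ARP request */
	if ( ( rc = arp_resolve ( netdev, net_protocol, dest_net_addr,
				  src_net_addr, ll_dest ) ) != 0 ) {
		/* -ENOENT means an ARP request is now in flight; drop
		 * the packet rather than queueing it here.
		 */
		free_iob ( iobuf );
		return rc;
	}

	/* Address resolved; transmit via the link layer */
	return net_tx ( iobuf, netdev, net_protocol, ll_dest,
			netdev->ll_addr );
}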
/**
 * Transmit NDP neighbour solicitation/advertisement packet
 *
 * @v netdev		Network device
 * @v sin6_src		Source socket address
 * @v sin6_dest		Destination socket address
 * @v target		Neighbour target address
 * @v icmp_type		ICMPv6 type
 * @v flags		NDP flags
 * @v option_type	NDP option type
 * @ret rc		Return status code
 */
static int ndp_tx_neighbour ( struct net_device *netdev,
			      struct sockaddr_in6 *sin6_src,
			      struct sockaddr_in6 *sin6_dest,
			      const struct in6_addr *target,
			      unsigned int icmp_type,
			      unsigned int flags,
			      unsigned int option_type ) {
	struct sockaddr_tcpip *st_src =
		( ( struct sockaddr_tcpip * ) sin6_src );
	struct sockaddr_tcpip *st_dest =
		( ( struct sockaddr_tcpip * ) sin6_dest );
	struct ll_protocol *ll_protocol = netdev->ll_protocol;
	struct io_buffer *iobuf;
	struct ndp_neighbour_header *neigh;
	struct ndp_ll_addr_option *ll_addr_opt;
	size_t option_len;
	size_t len;
	int rc;

	/* Allocate and populate buffer */
	option_len = ( ( sizeof ( *ll_addr_opt ) + ll_protocol->ll_addr_len +
			 NDP_OPTION_BLKSZ - 1 ) & ~( NDP_OPTION_BLKSZ - 1 ) );
	len = ( sizeof ( *neigh ) + option_len );
	iobuf = alloc_iob ( MAX_LL_NET_HEADER_LEN + len );
	if ( ! iobuf )
		return -ENOMEM;
	iob_reserve ( iobuf, MAX_LL_NET_HEADER_LEN );
	neigh = iob_put ( iobuf, len );
	memset ( neigh, 0, len );
	neigh->icmp.type = icmp_type;
	neigh->flags = flags;
	memcpy ( &neigh->target, target, sizeof ( neigh->target ) );
	ll_addr_opt = &neigh->option[0].ll_addr;
	ll_addr_opt->header.type = option_type;
	ll_addr_opt->header.blocks = ( option_len / NDP_OPTION_BLKSZ );
	memcpy ( ll_addr_opt->ll_addr, netdev->ll_addr,
		 ll_protocol->ll_addr_len );
	neigh->icmp.chksum = tcpip_chksum ( neigh, len );

	/* Transmit packet */
	if ( ( rc = tcpip_tx ( iobuf, &icmpv6_protocol, st_src, st_dest,
			       netdev, &neigh->icmp.chksum ) ) != 0 ) {
		DBGC ( netdev, "NDP could not transmit packet: %s\n",
		       strerror ( rc ) );
		return rc;
	}

	return 0;
}
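The option-length rounding above is easiest to see with concrete numbers. The following worked example assumes an Ethernet link layer (6-byte address), a 2-byte option header, and NDP_OPTION_BLKSZ equal to 8 (matching the 8-octet option granularity of RFC 4861); the exact structure sizes are assumptions.

/* Worked example (assumed sizes: 2-byte option header, 6-byte MAC,
 * NDP_OPTION_BLKSZ == 8):
 *
 *	option_len = ( ( 2 + 6 + 8 - 1 ) & ~( 8 - 1 ) )
 *		   = ( 15 & ~7 )
 *		   = 8
 *
 * so the link-layer address option occupies exactly one 8-octet block
 * and header.blocks is set to 1.
 */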
/**
 * Poll for received packets
 *
 * @v netdev		Network device
 */
static void nii_poll_rx ( struct net_device *netdev ) {
	struct nii_nic *nii = netdev->priv;
	PXE_CPB_RECEIVE cpb;
	PXE_DB_RECEIVE db;
	unsigned int quota;
	int stat;
	int rc;

	/* Retrieve up to NII_RX_QUOTA packets */
	for ( quota = NII_RX_QUOTA ; quota ; quota-- ) {

		/* Allocate buffer, if required */
		if ( ! nii->rxbuf ) {
			nii->rxbuf = alloc_iob ( nii->mtu );
			if ( ! nii->rxbuf ) {
				/* Leave for next poll */
				break;
			}
		}

		/* Construct parameter block */
		memset ( &cpb, 0, sizeof ( cpb ) );
		cpb.BufferAddr = virt_to_bus ( nii->rxbuf->data );
		cpb.BufferLen = iob_tailroom ( nii->rxbuf );

		/* Issue command */
		if ( ( stat = nii_issue_cpb_db ( nii, PXE_OPCODE_RECEIVE, &cpb,
						 sizeof ( cpb ), &db,
						 sizeof ( db ) ) ) < 0 ) {

			/* PXE_STATCODE_NO_DATA is just the usual "no packet"
			 * status indicator; ignore it.
			 */
			if ( stat == -PXE_STATCODE_NO_DATA )
				break;

			/* Anything else is an error */
			rc = -EIO_STAT ( stat );
			DBGC ( nii, "NII %s could not receive: %s\n",
			       nii->dev.name, strerror ( rc ) );
			netdev_rx_err ( netdev, NULL, rc );
			break;
		}

		/* Hand off to network stack */
		iob_put ( nii->rxbuf, db.FrameLen );
		netdev_rx ( netdev, nii->rxbuf );
		nii->rxbuf = NULL;
	}
}
/**
 * Refill receive descriptor ring
 *
 * @v netdev		Network device
 */
static void myson_refill_rx ( struct net_device *netdev ) {
	struct myson_nic *myson = netdev->priv;
	struct myson_descriptor *rx;
	struct io_buffer *iobuf;
	unsigned int rx_idx;
	physaddr_t address;

	while ( ( myson->rx.prod - myson->rx.cons ) < MYSON_NUM_RX_DESC ) {

		/* Allocate I/O buffer */
		iobuf = alloc_iob ( MYSON_RX_MAX_LEN );
		if ( ! iobuf ) {
			/* Wait for next refill */
			return;
		}

		/* Check address is usable by card */
		address = virt_to_bus ( iobuf->data );
		if ( ! myson_address_ok ( address ) ) {
			DBGC ( myson, "MYSON %p cannot support 64-bit RX "
			       "buffer address\n", myson );
			netdev_rx_err ( netdev, iobuf, -ENOTSUP );
			return;
		}

		/* Get next receive descriptor */
		rx_idx = ( myson->rx.prod++ % MYSON_NUM_RX_DESC );
		rx = &myson->rx.desc[rx_idx];

		/* Populate receive descriptor */
		rx->address = cpu_to_le32 ( address );
		rx->control =
			cpu_to_le32 ( MYSON_RX_CTRL_RBS ( MYSON_RX_MAX_LEN ) );
		wmb();
		rx->status = cpu_to_le32 ( MYSON_RX_STAT_OWN );
		wmb();

		/* Record I/O buffer */
		assert ( myson->rx_iobuf[rx_idx] == NULL );
		myson->rx_iobuf[rx_idx] = iobuf;

		/* Notify card that there are descriptors available */
		writel ( 0, myson->regs + MYSON_RXPDR );

		DBGC2 ( myson, "MYSON %p RX %d is [%llx,%llx)\n", myson,
			rx_idx, ( ( unsigned long long ) address ),
			( ( unsigned long long ) address + MYSON_RX_MAX_LEN ) );
	}
}
static void gdbudp_send ( const char *buf, size_t len ) {
	struct io_buffer *iob;
	struct ethhdr *ethhdr;
	struct iphdr *iphdr;
	struct udp_header *udphdr;

	/* Check that we are connected */
	if ( dest_addr.sin_port == 0 ) {
		return;
	}

	gdbudp_ensure_netdev_open ( netdev );

	iob = alloc_iob ( sizeof ( *ethhdr ) + sizeof ( *iphdr ) +
			  sizeof ( *udphdr ) + len );
	if ( !iob ) {
		return;
	}

	/* Payload */
	iob_reserve ( iob, sizeof ( *ethhdr ) + sizeof ( *iphdr ) +
		      sizeof ( *udphdr ) );
	memcpy ( iob_put ( iob, len ), buf, len );

	/* UDP header */
	udphdr = iob_push ( iob, sizeof ( *udphdr ) );
	udphdr->src = source_addr.sin_port;
	udphdr->dest = dest_addr.sin_port;
	udphdr->len = htons ( iob_len ( iob ) );
	udphdr->chksum = 0;	/* optional and we are not using it */

	/* IP header */
	iphdr = iob_push ( iob, sizeof ( *iphdr ) );
	memset ( iphdr, 0, sizeof ( *iphdr ) );
	iphdr->verhdrlen = ( IP_VER | ( sizeof ( *iphdr ) / 4 ) );
	iphdr->service = IP_TOS;
	iphdr->len = htons ( iob_len ( iob ) );
	iphdr->ttl = IP_TTL;
	iphdr->protocol = IP_UDP;
	iphdr->dest.s_addr = dest_addr.sin_addr.s_addr;
	iphdr->src.s_addr = source_addr.sin_addr.s_addr;
	iphdr->chksum = tcpip_chksum ( iphdr, sizeof ( *iphdr ) );

	/* Ethernet header */
	ethhdr = iob_push ( iob, sizeof ( *ethhdr ) );
	memcpy ( ethhdr->h_dest, dest_eth, ETH_ALEN );
	memcpy ( ethhdr->h_source, netdev->ll_addr, ETH_ALEN );
	ethhdr->h_protocol = htons ( ETH_P_IP );

	netdev_tx ( netdev, iob );
}
/**
 * Poll for received packets
 *
 * @v netdev		Network device
 */
static void legacy_poll ( struct net_device *netdev ) {
	struct nic *nic = netdev->priv;
	struct io_buffer *iobuf;

	/* Allocate a buffer large enough for a full Ethernet frame */
	iobuf = alloc_iob ( ETH_FRAME_LEN );
	if ( ! iobuf )
		return;

	/* Point the legacy driver's packet buffer at the I/O buffer */
	nic->packet = iobuf->data;
	if ( nic->nic_op->poll ( nic, 1 ) ) {
		DBG ( "Received %d bytes\n", nic->packetlen );
		/* Packet received; hand it to the network stack */
		iob_put ( iobuf, nic->packetlen );
		netdev_rx ( netdev, iobuf );
	} else {
		/* Nothing received; discard the buffer */
		free_iob ( iobuf );
	}
}
/**
 * Receive control packet
 *
 * @v acm		USB RNDIS device
 * @ret rc		Return status code
 */
static int acm_control_receive ( struct acm_device *acm ) {
	struct rndis_device *rndis = acm->rndis;
	struct usb_device *usb = acm->usb;
	struct io_buffer *iobuf;
	struct rndis_header *header;
	size_t mtu = ACM_RESPONSE_MTU;
	size_t len;
	int rc;

	/* Allocate I/O buffer */
	iobuf = alloc_iob ( mtu );
	if ( ! iobuf ) {
		rc = -ENOMEM;
		goto err_alloc;
	}

	/* Get encapsulated response */
	if ( ( rc = cdc_get_encapsulated_response ( usb, acm->usbnet.comms,
						    iobuf->data, mtu ) ) != 0 ) {
		DBGC ( acm, "ACM %p could not get encapsulated response: %s\n",
		       acm, strerror ( rc ) );
		goto err_get_response;
	}

	/* Fix up buffer length */
	header = iobuf->data;
	len = le32_to_cpu ( header->len );
	if ( len > mtu ) {
		DBGC ( acm, "ACM %p overlength encapsulated response\n", acm );
		DBGC_HDA ( acm, 0, iobuf->data, mtu );
		rc = -EPROTO;
		goto err_len;
	}
	iob_put ( iobuf, len );

	/* Hand off to RNDIS */
	rndis_rx ( rndis, iob_disown ( iobuf ) );

	return 0;

 err_len:
 err_get_response:
	free_iob ( iobuf );
 err_alloc:
	return rc;
}
static int b44_init_rx_ring(struct b44_private *bp)
{
	b44_free_rx_ring(bp);

	/* Allocate the hardware RX descriptor ring */
	bp->rx = malloc_dma(B44_RX_RING_LEN_BYTES, B44_DMA_ALIGNMENT);
	if (!bp->rx)
		return -ENOMEM;

	memset(bp->rx_iobuf, 0, sizeof(bp->rx_iobuf));

	/* Populate the first descriptor, then let the refill logic
	 * fill in the rest of the ring.
	 */
	bp->rx_iobuf[0] = alloc_iob(RX_PKT_BUF_SZ);
	if (!bp->rx_iobuf[0]) {
		free_dma(bp->rx, B44_RX_RING_LEN_BYTES);
		bp->rx = NULL;
		return -ENOMEM;
	}
	b44_populate_rx_descriptor(bp, 0);
	b44_rx_refill(bp, 0);

	DBG("Init RX rings: rx=0x%08lx\n", VIRT_TO_B44(bp->rx));
	return 0;
}
/**
 * Poll for received packets
 *
 * @v netdev		Network device
 */
static void snpnet_poll_rx ( struct net_device *netdev ) {
	struct snp_nic *snp = netdev->priv;
	UINTN len;
	unsigned int quota;
	EFI_STATUS efirc;
	int rc;

	/* Retrieve up to SNP_RX_QUOTA packets */
	for ( quota = SNP_RX_QUOTA ; quota ; quota-- ) {

		/* Allocate buffer, if required */
		if ( ! snp->rxbuf ) {
			snp->rxbuf = alloc_iob ( snp->mtu );
			if ( ! snp->rxbuf ) {
				/* Leave for next poll */
				break;
			}
		}

		/* Receive packet */
		len = iob_tailroom ( snp->rxbuf );
		if ( ( efirc = snp->snp->Receive ( snp->snp, NULL, &len,
						   snp->rxbuf->data, NULL,
						   NULL, NULL ) ) != 0 ) {

			/* EFI_NOT_READY is just the usual "no packet"
			 * status indication; ignore it.
			 */
			if ( efirc == EFI_NOT_READY )
				break;

			/* Anything else is an error */
			rc = -EEFI ( efirc );
			DBGC ( snp, "SNP %s could not receive: %s\n",
			       netdev->name, strerror ( rc ) );
			netdev_rx_err ( netdev, NULL, rc );
			break;
		}

		/* Hand off to network stack */
		iob_put ( snp->rxbuf, len );
		netdev_rx ( netdev, snp->rxbuf );
		snp->rxbuf = NULL;
	}
}
/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 *
 * @v adapter	e1000 private structure
 *
 * @ret rc	Returns 0 on success, negative on failure
 **/
static int e1000_setup_rx_resources ( struct e1000_adapter *adapter ) {
	int i, j;
	struct e1000_rx_desc *rx_curr_desc;

	DBG ( "e1000_setup_rx_resources\n" );

	/* Allocate receive descriptor ring memory.
	   It must not cross a 64K boundary because of hardware errata */
	adapter->rx_base =
		malloc_dma ( adapter->rx_ring_size, adapter->rx_ring_size );

	if ( ! adapter->rx_base ) {
		return -ENOMEM;
	}
	memset ( adapter->rx_base, 0, adapter->rx_ring_size );

	for ( i = 0; i < NUM_RX_DESC; i++ ) {

		adapter->rx_iobuf[i] = alloc_iob ( MAXIMUM_ETHERNET_VLAN_SIZE );

		/* If unable to allocate all iobufs, free any that
		 * were successfully allocated, and return an error
		 */
		if ( ! adapter->rx_iobuf[i] ) {
			for ( j = 0; j < i; j++ ) {
				free_iob ( adapter->rx_iobuf[j] );
			}
			return -ENOMEM;
		}

		rx_curr_desc = ( void * ) ( adapter->rx_base ) +
			       ( i * sizeof ( *adapter->rx_base ) );

		rx_curr_desc->buffer_addr =
			virt_to_bus ( adapter->rx_iobuf[i]->data );

		DBG ( "i = %d rx_curr_desc->buffer_addr = %#16llx\n",
		      i, rx_curr_desc->buffer_addr );
	}
	return 0;
}
/**
 * Refill receive descriptor ring
 *
 * @v netdev		Network device
 */
static void netfront_refill_rx ( struct net_device *netdev ) {
	struct netfront_nic *netfront = netdev->priv;
	struct xen_device *xendev = netfront->xendev;
	struct io_buffer *iobuf;
	struct netif_rx_request *request;
	int notify;
	int rc;

	/* Do nothing if ring is already full */
	if ( netfront_ring_is_full ( &netfront->rx ) )
		return;

	/* Refill ring */
	do {

		/* Allocate I/O buffer */
		iobuf = alloc_iob ( PAGE_SIZE );
		if ( ! iobuf ) {
			/* Wait for next refill */
			break;
		}

		/* Add to descriptor ring */
		request = RING_GET_REQUEST ( &netfront->rx_fring,
					     netfront->rx_fring.req_prod_pvt );
		if ( ( rc = netfront_push ( netfront, &netfront->rx,
					    iobuf, &request->id,
					    &request->gref ) ) != 0 ) {
			netdev_rx_err ( netdev, iobuf, rc );
			break;
		}
		DBGC2 ( netfront, "NETFRONT %s RX id %d ref %d is %#08lx+%zx\n",
			xendev->key, request->id, request->gref,
			virt_to_phys ( iobuf->data ), iob_tailroom ( iobuf ) );

		/* Move to next descriptor */
		netfront->rx_fring.req_prod_pvt++;

	} while ( ! netfront_ring_is_full ( &netfront->rx ) );

	/* Push new descriptors and notify backend if applicable */
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY ( &netfront->rx_fring, notify );
	if ( notify )
		netfront_send_event ( netfront );
}
/*
 * Refill RX ring descriptors with buffers. This is needed
 * because during rx we are passing ownership of descriptor
 * buffers to the network stack.
 */
static void b44_rx_refill(struct b44_private *bp, u32 pending)
{
	u32 i;

	/* Skip the descriptor at "pending" itself; it is still in use */
	for (i = pending + 1; i != bp->rx_cur; i = ring_next(i)) {
		if (bp->rx_iobuf[i] != NULL)
			continue;

		bp->rx_iobuf[i] = alloc_iob(RX_PKT_BUF_SZ);
		if (!bp->rx_iobuf[i]) {
			DBG("Refill rx ring failed!!\n");
			break;
		}

		b44_populate_rx_descriptor(bp, i);
	}
}
/**
 * Transmit NDP packet with link-layer address option
 *
 * @v netdev		Network device
 * @v sin6_src		Source socket address
 * @v sin6_dest		Destination socket address
 * @v data		NDP header
 * @v len		Size of NDP header
 * @v option_type	NDP option type
 * @ret rc		Return status code
 */
static int ndp_tx_ll_addr ( struct net_device *netdev,
			    struct sockaddr_in6 *sin6_src,
			    struct sockaddr_in6 *sin6_dest,
			    const void *data, size_t len,
			    unsigned int option_type ) {
	struct sockaddr_tcpip *st_src =
		( ( struct sockaddr_tcpip * ) sin6_src );
	struct sockaddr_tcpip *st_dest =
		( ( struct sockaddr_tcpip * ) sin6_dest );
	struct ll_protocol *ll_protocol = netdev->ll_protocol;
	struct io_buffer *iobuf;
	struct ndp_ll_addr_option *ll_addr_opt;
	union ndp_header *ndp;
	size_t option_len;
	int rc;

	/* Allocate and populate buffer */
	option_len = ( ( sizeof ( *ll_addr_opt ) + ll_protocol->ll_addr_len +
			 NDP_OPTION_BLKSZ - 1 ) & ~( NDP_OPTION_BLKSZ - 1 ) );
	iobuf = alloc_iob ( MAX_LL_NET_HEADER_LEN + len + option_len );
	if ( ! iobuf )
		return -ENOMEM;
	iob_reserve ( iobuf, MAX_LL_NET_HEADER_LEN );
	memcpy ( iob_put ( iobuf, len ), data, len );
	ll_addr_opt = iob_put ( iobuf, option_len );
	ll_addr_opt->header.type = option_type;
	ll_addr_opt->header.blocks = ( option_len / NDP_OPTION_BLKSZ );
	memcpy ( ll_addr_opt->ll_addr, netdev->ll_addr,
		 ll_protocol->ll_addr_len );
	ndp = iobuf->data;
	ndp->icmp.chksum = tcpip_chksum ( ndp, ( len + option_len ) );

	/* Transmit packet */
	if ( ( rc = tcpip_tx ( iobuf, &icmpv6_protocol, st_src, st_dest,
			       netdev, &ndp->icmp.chksum ) ) != 0 ) {
		DBGC ( netdev, "NDP could not transmit packet: %s\n",
		       strerror ( rc ) );
		return rc;
	}

	return 0;
}
/**
 * e1000_refill_rx_ring - allocate Rx io_buffers
 *
 * @v adapter	e1000 private structure
 *
 * @ret rc	Returns 0 on success, negative on failure
 **/
int e1000_refill_rx_ring ( struct e1000_adapter *adapter ) {
	int i, rx_curr;
	int rc = 0;
	struct e1000_rx_desc *rx_curr_desc;
	struct e1000_hw *hw = &adapter->hw;
	struct io_buffer *iob;

	DBG ( "e1000_refill_rx_ring\n" );

	for ( i = 0; i < NUM_RX_DESC; i++ ) {
		rx_curr = ( ( adapter->rx_curr + i ) % NUM_RX_DESC );
		rx_curr_desc = adapter->rx_base + rx_curr;

		if ( rx_curr_desc->status & E1000_RXD_STAT_DD )
			continue;

		if ( adapter->rx_iobuf[rx_curr] != NULL )
			continue;

		DBG2 ( "Refilling rx desc %d\n", rx_curr );

		iob = alloc_iob ( MAXIMUM_ETHERNET_VLAN_SIZE );
		adapter->rx_iobuf[rx_curr] = iob;

		if ( ! iob ) {
			DBG ( "alloc_iob failed\n" );
			rc = -ENOMEM;
			break;
		} else {
			rx_curr_desc->buffer_addr = virt_to_bus ( iob->data );

			E1000_WRITE_REG ( hw, RDT, rx_curr );
		}
	}
	return rc;
}
/**
 * e1000_poll - Poll for received packets
 *
 * @v netdev	Network device
 */
static void e1000_poll ( struct net_device *netdev ) {
	struct e1000_adapter *adapter = netdev_priv( netdev );
	struct e1000_hw *hw = &adapter->hw;

	uint32_t icr;
	uint32_t tx_status;
	uint32_t rx_status;
	uint32_t rx_len;
	uint32_t rx_err;
	struct io_buffer *rx_iob;
	struct e1000_tx_desc *tx_curr_desc;
	struct e1000_rx_desc *rx_curr_desc;
	uint32_t i;
	uint64_t tmp_buffer_addr;

	DBGP ( "e1000_poll\n" );

	/* Acknowledge interrupts */
	icr = E1000_READ_REG ( hw, ICR );
	if ( ! icr )
		return;

	DBG ( "e1000_poll: intr_status = %#08x\n", icr );

	/* Check status of transmitted packets */
	while ( ( i = adapter->tx_head ) != adapter->tx_tail ) {

		tx_curr_desc = ( void * ) ( adapter->tx_base ) +
			       ( i * sizeof ( *adapter->tx_base ) );

		tx_status = tx_curr_desc->upper.data;

		/* If the packet at tx_head is not owned by hardware,
		 * it is for us
		 */
		if ( ! ( tx_status & E1000_TXD_STAT_DD ) )
			break;

		DBG ( "Sent packet. tx_head: %d tx_tail: %d tx_status: %#08x\n",
		      adapter->tx_head, adapter->tx_tail, tx_status );

		if ( tx_status & ( E1000_TXD_STAT_EC | E1000_TXD_STAT_LC |
				   E1000_TXD_STAT_TU ) ) {
			netdev_tx_complete_err ( netdev, adapter->tx_iobuf[i],
						 -EINVAL );
			DBG ( "Error transmitting packet, tx_status: %#08x\n",
			      tx_status );
		} else {
			netdev_tx_complete ( netdev, adapter->tx_iobuf[i] );
			DBG ( "Success transmitting packet, tx_status: %#08x\n",
			      tx_status );
		}

		/* Decrement count of used descriptors, clear this descriptor */
		adapter->tx_fill_ctr--;
		memset ( tx_curr_desc, 0, sizeof ( *tx_curr_desc ) );

		adapter->tx_head = ( adapter->tx_head + 1 ) % NUM_TX_DESC;
	}

	/* Process received packets */
	while ( 1 ) {

		i = adapter->rx_curr;

		rx_curr_desc = ( void * ) ( adapter->rx_base ) +
			       ( i * sizeof ( *adapter->rx_base ) );
		rx_status = rx_curr_desc->status;

		DBG2 ( "Before DD Check RX_status: %#08x\n", rx_status );

		if ( ! ( rx_status & E1000_RXD_STAT_DD ) )
			break;

		DBG ( "RCTL = %#08x\n", E1000_READ_REG ( &adapter->hw, RCTL ) );

		rx_len = rx_curr_desc->length;

		DBG ( "Received packet, rx_curr: %d rx_status: %#08x rx_len: %d\n",
		      i, rx_status, rx_len );

		rx_err = rx_curr_desc->errors;

		if ( rx_err & E1000_RXD_ERR_FRAME_ERR_MASK ) {

			netdev_rx_err ( netdev, NULL, -EINVAL );
			DBG ( "e1000_poll: Corrupted packet received!"
			      " rx_err: %#08x\n", rx_err );
		} else {

			/* If unable to allocate space for this packet,
			 * try again next poll
			 */
			rx_iob = alloc_iob ( rx_len );
			if ( ! rx_iob )
				break;

			memcpy ( iob_put ( rx_iob, rx_len ),
				 adapter->rx_iobuf[i]->data, rx_len );

			/* Add this packet to the receive queue. */
			netdev_rx ( netdev, rx_iob );
		}

		tmp_buffer_addr = rx_curr_desc->buffer_addr;
		memset ( rx_curr_desc, 0, sizeof ( *rx_curr_desc ) );
		rx_curr_desc->buffer_addr = tmp_buffer_addr;

		E1000_WRITE_REG ( hw, RDT, adapter->rx_curr );

		adapter->rx_curr = ( adapter->rx_curr + 1 ) % NUM_RX_DESC;
	}
}
/**
 * Send AoE command
 *
 * @v aoe		AoE session
 * @ret rc		Return status code
 *
 * This transmits an AoE command packet.  It does not wait for a
 * response.
 */
static int aoe_send_command ( struct aoe_session *aoe ) {
	struct ata_command *command = aoe->command;
	struct io_buffer *iobuf;
	struct aoehdr *aoehdr;
	union aoecmd *aoecmd;
	struct aoeata *aoeata;
	unsigned int count;
	unsigned int data_out_len;
	unsigned int aoecmdlen;

	/* Fail immediately if we have no netdev to send on */
	if ( ! aoe->netdev ) {
		aoe_done ( aoe, -ENETUNREACH );
		return -ENETUNREACH;
	}

	/* If we are transmitting anything that requires a response,
	 * start the retransmission timer.  Do this before attempting
	 * to allocate the I/O buffer, in case allocation itself
	 * fails.
	 */
	start_timer ( &aoe->timer );

	/* Calculate count and data_out_len for this subcommand */
	switch ( aoe->aoe_cmd_type ) {
	case AOE_CMD_ATA:
		count = command->cb.count.native;
		if ( count > AOE_MAX_COUNT )
			count = AOE_MAX_COUNT;
		data_out_len = ( command->data_out ?
				 ( count * ATA_SECTOR_SIZE ) : 0 );
		aoecmdlen = sizeof ( aoecmd->ata );
		break;
	case AOE_CMD_CONFIG:
		count = 0;
		data_out_len = 0;
		aoecmdlen = sizeof ( aoecmd->cfg );
		break;
	default:
		return -ENOTSUP;
	}

	/* Create outgoing I/O buffer */
	iobuf = alloc_iob ( ETH_HLEN + sizeof ( *aoehdr ) + aoecmdlen +
			    data_out_len );
	if ( ! iobuf )
		return -ENOMEM;
	iob_reserve ( iobuf, ETH_HLEN );
	aoehdr = iob_put ( iobuf, sizeof ( *aoehdr ) );
	aoecmd = iob_put ( iobuf, aoecmdlen );
	memset ( aoehdr, 0, ( sizeof ( *aoehdr ) + aoecmdlen ) );

	/* Fill AoE header */
	aoehdr->ver_flags = AOE_VERSION;
	aoehdr->major = htons ( aoe->major );
	aoehdr->minor = aoe->minor;
	aoehdr->command = aoe->aoe_cmd_type;
	aoehdr->tag = htonl ( ++aoe->tag );

	/* Fill AoE payload */
	switch ( aoe->aoe_cmd_type ) {
	case AOE_CMD_ATA:
		/* Fill AoE command */
		aoeata = &aoecmd->ata;
		linker_assert ( AOE_FL_DEV_HEAD == ATA_DEV_SLAVE,
				__fix_ata_h__ );
		aoeata->aflags = ( ( command->cb.lba48 ? AOE_FL_EXTENDED : 0 ) |
				   ( command->cb.device & ATA_DEV_SLAVE ) |
				   ( data_out_len ? AOE_FL_WRITE : 0 ) );
		aoeata->err_feat = command->cb.err_feat.bytes.cur;
		aoeata->count = count;
		aoeata->cmd_stat = command->cb.cmd_stat;
		aoeata->lba.u64 = cpu_to_le64 ( command->cb.lba.native );
		if ( ! command->cb.lba48 )
			aoeata->lba.bytes[3] |=
				( command->cb.device & ATA_DEV_MASK );

		/* Fill data payload */
		copy_from_user ( iob_put ( iobuf, data_out_len ),
				 command->data_out, aoe->command_offset,
				 data_out_len );
		break;
	case AOE_CMD_CONFIG:
		/* Nothing to do */
		break;
	default:
		assert ( 0 );
	}

	/* Send packet */
	return net_tx ( iobuf, aoe->netdev, &aoe_protocol, aoe->target );
}
/**
 * Decrypt a packet using CCMP
 *
 * @v crypto		CCMP cryptosystem
 * @v eiob		I/O buffer containing encrypted packet
 * @ret iob		I/O buffer containing cleartext packet
 */
static struct io_buffer * ccmp_decrypt ( struct net80211_crypto *crypto,
					 struct io_buffer *eiob ) {
	struct ccmp_ctx *ctx = crypto->priv;
	struct ieee80211_frame *hdr;
	struct io_buffer *iob;
	const int hdrlen = IEEE80211_TYP_FRAME_HEADER_LEN;
	int datalen = iob_len ( eiob ) - hdrlen - CCMP_HEAD_LEN - CCMP_MIC_LEN;
	struct ccmp_head *head;
	struct ccmp_nonce nonce;
	struct ccmp_aad aad;
	u8 rx_pn[6], their_mic[8], our_mic[8];

	iob = alloc_iob ( hdrlen + datalen );
	if ( ! iob )
		return NULL;

	/* Copy frame header */
	memcpy ( iob_put ( iob, hdrlen ), eiob->data, hdrlen );
	hdr = iob->data;
	hdr->fc &= ~IEEE80211_FC_PROTECTED;

	/* Check and update RX packet number */
	head = eiob->data + hdrlen;
	memcpy ( rx_pn, head->pn_lo, 2 );
	memcpy ( rx_pn + 2, head->pn_hi, 4 );

	if ( pn_to_u64 ( rx_pn ) <= ctx->rx_seq ) {
		DBGC ( ctx, "WPA-CCMP %p: packet received out of order "
		       "(%012llx <= %012llx)\n", ctx, pn_to_u64 ( rx_pn ),
		       ctx->rx_seq );
		free_iob ( iob );
		return NULL;
	}

	ctx->rx_seq = pn_to_u64 ( rx_pn );
	DBGC2 ( ctx, "WPA-CCMP %p: RX packet number %012llx\n", ctx,
		ctx->rx_seq );

	/* Form nonce */
	nonce.prio = 0;
	memcpy ( nonce.a2, hdr->addr2, ETH_ALEN );
	u64_to_pn ( ctx->rx_seq, nonce.pn, PN_MSB );

	/* Form additional authentication data */
	aad.fc = ( hdr->fc & CCMP_AAD_FC_MASK ) | IEEE80211_FC_PROTECTED;
	memcpy ( aad.a1, hdr->addr1, 3 * ETH_ALEN );	/* all 3 at once */
	aad.seq = hdr->seq & CCMP_AAD_SEQ_MASK;

	/* Copy-decrypt data and MIC */
	ccmp_ctr_xor ( ctx, &nonce, eiob->data + hdrlen + sizeof ( *head ),
		       iob_put ( iob, datalen ), datalen,
		       eiob->tail - CCMP_MIC_LEN, their_mic );

	/* Check MIC */
	ccmp_cbc_mac ( ctx, &nonce, iob->data + hdrlen, datalen, &aad,
		       our_mic );

	if ( memcmp ( their_mic, our_mic, CCMP_MIC_LEN ) != 0 ) {
		DBGC2 ( ctx, "WPA-CCMP %p: MIC failure\n", ctx );
		free_iob ( iob );
		return NULL;
	}

	DBGC2 ( ctx, "WPA-CCMP %p: decrypted packet %p -> %p\n", ctx,
		eiob, iob );

	return iob;
}
/* PXENV_UNDI_TRANSMIT
 *
 * Status: working
 */
static PXENV_EXIT_t
pxenv_undi_transmit ( struct s_PXENV_UNDI_TRANSMIT *undi_transmit ) {
	struct s_PXENV_UNDI_TBD tbd;
	struct DataBlk *datablk;
	struct io_buffer *iobuf;
	struct net_protocol *net_protocol;
	struct ll_protocol *ll_protocol;
	char destaddr[MAX_LL_ADDR_LEN];
	const void *ll_dest;
	size_t len;
	unsigned int i;
	int rc;

	/* Start profiling */
	profile_start ( &undi_tx_profiler );

	/* Sanity check */
	if ( ! pxe_netdev ) {
		DBGC ( &pxe_netdev, "PXENV_UNDI_TRANSMIT called with no "
		       "network device\n" );
		undi_transmit->Status = PXENV_STATUS_UNDI_INVALID_STATE;
		return PXENV_EXIT_FAILURE;
	}

	DBGC2 ( &pxe_netdev, "PXENV_UNDI_TRANSMIT" );

	/* Forcibly enable interrupts and freeze receive queue
	 * processing at this point, to work around callers that never
	 * call PXENV_UNDI_OPEN before attempting to use the UNDI API.
	 */
	if ( ! netdev_rx_frozen ( pxe_netdev ) ) {
		netdev_rx_freeze ( pxe_netdev );
		netdev_irq ( pxe_netdev, 1 );
	}

	/* Identify network-layer protocol */
	switch ( undi_transmit->Protocol ) {
	case P_IP:
		net_protocol = &ipv4_protocol;
		break;
	case P_ARP:
		net_protocol = &arp_protocol;
		break;
	case P_RARP:
		net_protocol = &rarp_protocol;
		break;
	case P_UNKNOWN:
		net_protocol = NULL;
		break;
	default:
		DBGC2 ( &pxe_netdev, " %02x invalid protocol\n",
			undi_transmit->Protocol );
		undi_transmit->Status = PXENV_STATUS_UNDI_INVALID_PARAMETER;
		return PXENV_EXIT_FAILURE;
	}
	DBGC2 ( &pxe_netdev, " %s",
		( net_protocol ? net_protocol->name : "RAW" ) );

	/* Calculate total packet length */
	copy_from_real ( &tbd, undi_transmit->TBD.segment,
			 undi_transmit->TBD.offset, sizeof ( tbd ) );
	len = tbd.ImmedLength;
	DBGC2 ( &pxe_netdev, " %04x:%04x+%x", tbd.Xmit.segment,
		tbd.Xmit.offset, tbd.ImmedLength );
	for ( i = 0 ; i < tbd.DataBlkCount ; i++ ) {
		datablk = &tbd.DataBlock[i];
		len += datablk->TDDataLen;
		DBGC2 ( &pxe_netdev, " %04x:%04x+%x",
			datablk->TDDataPtr.segment,
			datablk->TDDataPtr.offset,
			datablk->TDDataLen );
	}

	/* Allocate and fill I/O buffer */
	iobuf = alloc_iob ( MAX_LL_HEADER_LEN +
			    ( ( len > IOB_ZLEN ) ? len : IOB_ZLEN ) );
	if ( ! iobuf ) {
		DBGC2 ( &pxe_netdev, " could not allocate iobuf\n" );
		undi_transmit->Status = PXENV_STATUS_OUT_OF_RESOURCES;
		return PXENV_EXIT_FAILURE;
	}
	iob_reserve ( iobuf, MAX_LL_HEADER_LEN );
	copy_from_real ( iob_put ( iobuf, tbd.ImmedLength ),
			 tbd.Xmit.segment, tbd.Xmit.offset, tbd.ImmedLength );
	for ( i = 0 ; i < tbd.DataBlkCount ; i++ ) {
		datablk = &tbd.DataBlock[i];
		copy_from_real ( iob_put ( iobuf, datablk->TDDataLen ),
				 datablk->TDDataPtr.segment,
				 datablk->TDDataPtr.offset,
				 datablk->TDDataLen );
	}

	/* Add link-layer header, if required to do so */
	if ( net_protocol != NULL ) {

		/* Calculate destination address */
		ll_protocol = pxe_netdev->ll_protocol;
		if ( undi_transmit->XmitFlag == XMT_DESTADDR ) {
			copy_from_real ( destaddr,
					 undi_transmit->DestAddr.segment,
					 undi_transmit->DestAddr.offset,
					 ll_protocol->ll_addr_len );
			ll_dest = destaddr;
			DBGC2 ( &pxe_netdev, " DEST %s",
				ll_protocol->ntoa ( ll_dest ) );
		} else {
			ll_dest = pxe_netdev->ll_broadcast;
			DBGC2 ( &pxe_netdev, " BCAST" );
		}

		/* Add link-layer header */
		if ( ( rc = ll_protocol->push ( pxe_netdev, iobuf, ll_dest,
						pxe_netdev->ll_addr,
						net_protocol->net_proto ) ) != 0 ) {
			DBGC2 ( &pxe_netdev, " could not add link-layer "
				"header: %s\n", strerror ( rc ) );
			free_iob ( iobuf );
			undi_transmit->Status = PXENV_STATUS ( rc );
			return PXENV_EXIT_FAILURE;
		}
	}

	/* Flag transmission as in-progress.  Do this before starting
	 * to transmit the packet, because the ISR may trigger before
	 * we return from netdev_tx().
	 */
	undi_tx_count++;

	/* Transmit packet */
	DBGC2 ( &pxe_netdev, "\n" );
	if ( ( rc = netdev_tx ( pxe_netdev, iobuf ) ) != 0 ) {
		DBGC2 ( &pxe_netdev, "PXENV_UNDI_TRANSMIT could not transmit: "
			"%s\n", strerror ( rc ) );
		undi_tx_count--;
		undi_transmit->Status = PXENV_STATUS ( rc );
		return PXENV_EXIT_FAILURE;
	}

	profile_stop ( &undi_tx_profiler );

	undi_transmit->Status = PXENV_STATUS_SUCCESS;
	return PXENV_EXIT_SUCCESS;
}