static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) { struct net_device_context *net_device_ctx = netdev_priv(net); struct hv_netvsc_packet *packet = NULL; int ret; unsigned int num_data_pgs; struct rndis_message *rndis_msg; struct rndis_packet *rndis_pkt; u32 rndis_msg_size; struct rndis_per_packet_info *ppi; u32 hash; struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT]; struct hv_page_buffer *pb = page_buf; /* We will atmost need two pages to describe the rndis * header. We can only transmit MAX_PAGE_BUFFER_COUNT number * of pages in a single packet. If skb is scattered around * more pages we try linearizing it. */ num_data_pgs = netvsc_get_slots(skb) + 2; if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) { ++net_device_ctx->eth_stats.tx_scattered; if (skb_linearize(skb)) goto no_memory; num_data_pgs = netvsc_get_slots(skb) + 2; if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) { ++net_device_ctx->eth_stats.tx_too_big; goto drop; } } /* * Place the rndis header in the skb head room and * the skb->cb will be used for hv_netvsc_packet * structure. */ ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE); if (ret) goto no_memory; /* Use the skb control buffer for building up the packet */ BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) > FIELD_SIZEOF(struct sk_buff, cb)); packet = (struct hv_netvsc_packet *)skb->cb; packet->q_idx = skb_get_queue_mapping(skb); packet->total_data_buflen = skb->len; packet->total_bytes = skb->len; packet->total_packets = 1; rndis_msg = (struct rndis_message *)skb->head; memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE); /* Add the rndis header */ rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET; rndis_msg->msg_len = packet->total_data_buflen; rndis_pkt = &rndis_msg->msg.pkt; rndis_pkt->data_offset = sizeof(struct rndis_packet); rndis_pkt->data_len = packet->total_data_buflen; rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet); rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet); hash = skb_get_hash_raw(skb); if (hash != 0 && net->real_num_tx_queues > 1) { rndis_msg_size += NDIS_HASH_PPI_SIZE; ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE, NBL_HASH_VALUE); *(u32 *)((void *)ppi + ppi->ppi_offset) = hash; } if (skb_vlan_tag_present(skb)) { struct ndis_pkt_8021q_info *vlan; rndis_msg_size += NDIS_VLAN_PPI_SIZE; ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE, IEEE_8021Q_INFO); vlan = (struct ndis_pkt_8021q_info *)((void *)ppi + ppi->ppi_offset); vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK; vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; } if (skb_is_gso(skb)) { struct ndis_tcp_lso_info *lso_info; rndis_msg_size += NDIS_LSO_PPI_SIZE; ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE, TCP_LARGESEND_PKTINFO); lso_info = (struct ndis_tcp_lso_info *)((void *)ppi + ppi->ppi_offset); lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE; if (skb->protocol == htons(ETH_P_IP)) { lso_info->lso_v2_transmit.ip_version = NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4; ip_hdr(skb)->tot_len = 0; ip_hdr(skb)->check = 0; tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); } else { lso_info->lso_v2_transmit.ip_version = NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6; ipv6_hdr(skb)->payload_len = 0; tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); } lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb); lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size; } else if (skb->ip_summed == CHECKSUM_PARTIAL) { if (net_checksum_info(skb) & 
net_device_ctx->tx_checksum_mask) { struct ndis_tcp_ip_checksum_info *csum_info; rndis_msg_size += NDIS_CSUM_PPI_SIZE; ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE, TCPIP_CHKSUM_PKTINFO); csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi + ppi->ppi_offset); csum_info->transmit.tcp_header_offset = skb_transport_offset(skb); if (skb->protocol == htons(ETH_P_IP)) { csum_info->transmit.is_ipv4 = 1; if (ip_hdr(skb)->protocol == IPPROTO_TCP) csum_info->transmit.tcp_checksum = 1; else csum_info->transmit.udp_checksum = 1; } else { csum_info->transmit.is_ipv6 = 1; if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) csum_info->transmit.tcp_checksum = 1; else csum_info->transmit.udp_checksum = 1; } } else { /* Can't do offload of this type of checksum */ if (skb_checksum_help(skb)) goto drop; } } /* Start filling in the page buffers with the rndis hdr */ rndis_msg->msg_len += rndis_msg_size; packet->total_data_buflen = rndis_msg->msg_len; packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size, skb, packet, &pb); /* timestamp packet in software */ skb_tx_timestamp(skb); ret = netvsc_send(net_device_ctx->device_ctx, packet, rndis_msg, &pb, skb); if (likely(ret == 0)) return NETDEV_TX_OK; if (ret == -EAGAIN) { ++net_device_ctx->eth_stats.tx_busy; return NETDEV_TX_BUSY; } if (ret == -ENOSPC) ++net_device_ctx->eth_stats.tx_no_space; drop: dev_kfree_skb_any(skb); net->stats.tx_dropped++; return NETDEV_TX_OK; no_memory: ++net_device_ctx->eth_stats.tx_no_memory; goto drop; }
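/*
 * Illustrative sketch (not the driver's actual helper): netvsc_get_slots(),
 * used above, is assumed to count the hv_page_buffer slots an skb needs -
 * one per page spanned by the linear area plus one per paged fragment.
 * The "+ 2" at the call sites then reserves room for the RNDIS header.
 */
static unsigned int netvsc_get_slots_sketch(const struct sk_buff *skb)
{
	unsigned long data = (unsigned long)skb->data;
	unsigned int slots;

	/* pages touched by the linear part, including a partial first page */
	slots = PFN_UP(data + skb_headlen(skb)) - PFN_DOWN(data);

	/* assume each paged fragment fits in a single slot */
	return slots + skb_shinfo(skb)->nr_frags;
}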
static int start_xmit(struct sk_buff *skb, struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); int num, err; struct scatterlist sg[1+MAX_SKB_FRAGS]; struct virtio_net_hdr *hdr; const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; sg_init_table(sg, 1+MAX_SKB_FRAGS); pr_debug("%s: xmit %p " MAC_FMT "\n", dev->name, skb, dest[0], dest[1], dest[2], dest[3], dest[4], dest[5]); /* Encode metadata header at front. */ hdr = skb_vnet_hdr(skb); if (skb->ip_summed == CHECKSUM_PARTIAL) { hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; hdr->csum_start = skb->csum_start - skb_headroom(skb); hdr->csum_offset = skb->csum_offset; } else { hdr->flags = 0; hdr->csum_offset = hdr->csum_start = 0; } if (skb_is_gso(skb)) { hdr->hdr_len = skb_transport_header(skb) - skb->data; hdr->gso_size = skb_shinfo(skb)->gso_size; if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP; else BUG(); if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN; } else { hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE; hdr->gso_size = hdr->hdr_len = 0; } vnet_hdr_to_sg(sg, skb); num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; __skb_queue_head(&vi->send, skb); again: /* Free up any pending old buffers before queueing new ones. */ free_old_xmit_skbs(vi); err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb); if (err) { pr_debug("%s: virtio not prepared to send\n", dev->name); netif_stop_queue(dev); /* Activate callback for using skbs: if this returns false it * means some were used in the meantime. */ if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) { vi->svq->vq_ops->disable_cb(vi->svq); netif_start_queue(dev); goto again; } __skb_unlink(skb, &vi->send); return NETDEV_TX_BUSY; } vi->svq->vq_ops->kick(vi->svq); return 0; }
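/*
 * Sketch (assumption, not this file's actual helper): free_old_xmit_skbs(),
 * called above, is expected to reclaim skbs the host has finished with by
 * pulling completed buffers off the send virtqueue and freeing them.
 */
static void free_old_xmit_skbs_sketch(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	unsigned int len;

	while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);
		/* the skb was queued on vi->send in start_xmit() above */
		__skb_unlink(skb, &vi->send);
		kfree_skb(skb);
	}
}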
static int talitos_process(device_t dev, struct cryptop *crp, int hint) { int i, err = 0, ivsize; struct talitos_softc *sc = device_get_softc(dev); struct cryptodesc *crd1, *crd2, *maccrd, *enccrd; caddr_t iv; struct talitos_session *ses; struct talitos_desc *td; unsigned long flags; /* descriptor mappings */ int hmac_key, hmac_data, cipher_iv, cipher_key, in_fifo, out_fifo, cipher_iv_out; static int chsel = -1; DPRINTF("%s()\n", __FUNCTION__); if (crp == NULL || crp->crp_callback == NULL || sc == NULL) { return EINVAL; } crp->crp_etype = 0; if (TALITOS_SESSION(crp->crp_sid) >= sc->sc_nsessions) { return EINVAL; } ses = &sc->sc_sessions[TALITOS_SESSION(crp->crp_sid)]; /* enter the channel scheduler */ spin_lock_irqsave(&sc->sc_chnfifolock[sc->sc_num_channels], flags); /* reuse channel that already had/has requests for the required EU */ for (i = 0; i < sc->sc_num_channels; i++) { if (sc->sc_chnlastalg[i] == crp->crp_desc->crd_alg) break; } if (i == sc->sc_num_channels) { /* * haven't seen this algo the last sc_num_channels or more * use round robin in this case * nb: sc->sc_num_channels must be power of 2 */ chsel = (chsel + 1) & (sc->sc_num_channels - 1); } else { /* * matches channel with same target execution unit; * use same channel in this case */ chsel = i; } sc->sc_chnlastalg[chsel] = crp->crp_desc->crd_alg; /* release the channel scheduler lock */ spin_unlock_irqrestore(&sc->sc_chnfifolock[sc->sc_num_channels], flags); /* acquire the selected channel fifo lock */ spin_lock_irqsave(&sc->sc_chnfifolock[chsel], flags); /* find and reserve next available descriptor-cryptop pair */ for (i = 0; i < sc->sc_chfifo_len; i++) { if (sc->sc_chnfifo[chsel][i].cf_desc.hdr == 0) { /* * ensure correct descriptor formation by * avoiding inadvertently setting "optional" entries * e.g. not using "optional" dptr2 for MD/HMAC descs */ memset(&sc->sc_chnfifo[chsel][i].cf_desc, 0, sizeof(*td)); /* reserve it with done notification request bit */ sc->sc_chnfifo[chsel][i].cf_desc.hdr |= TALITOS_DONE_NOTIFY; break; } } spin_unlock_irqrestore(&sc->sc_chnfifolock[chsel], flags); if (i == sc->sc_chfifo_len) { /* fifo full */ err = ERESTART; goto errout; } td = &sc->sc_chnfifo[chsel][i].cf_desc; sc->sc_chnfifo[chsel][i].cf_crp = crp; crd1 = crp->crp_desc; if (crd1 == NULL) { err = EINVAL; goto errout; } crd2 = crd1->crd_next; /* prevent compiler warning */ hmac_key = 0; hmac_data = 0; if (crd2 == NULL) { td->hdr |= TD_TYPE_COMMON_NONSNOOP_NO_AFEU; /* assign descriptor dword ptr mappings for this desc. type */ cipher_iv = 1; cipher_key = 2; in_fifo = 3; cipher_iv_out = 5; if (crd1->crd_alg == CRYPTO_MD5_HMAC || crd1->crd_alg == CRYPTO_SHA1_HMAC || crd1->crd_alg == CRYPTO_SHA1 || crd1->crd_alg == CRYPTO_MD5) { out_fifo = 5; maccrd = crd1; enccrd = NULL; } else if (crd1->crd_alg == CRYPTO_DES_CBC || crd1->crd_alg == CRYPTO_3DES_CBC || crd1->crd_alg == CRYPTO_AES_CBC || crd1->crd_alg == CRYPTO_ARC4) { out_fifo = 4; maccrd = NULL; enccrd = crd1; } else { DPRINTF("UNKNOWN crd1->crd_alg %d\n", crd1->crd_alg); err = EINVAL; goto errout; } } else { if (sc->sc_desc_types & TALITOS_HAS_DT_IPSEC_ESP) { td->hdr |= TD_TYPE_IPSEC_ESP; } else { DPRINTF("unimplemented: multiple descriptor ipsec\n"); err = EINVAL; goto errout; } /* assign descriptor dword ptr mappings for this desc. 
type */ hmac_key = 0; hmac_data = 1; cipher_iv = 2; cipher_key = 3; in_fifo = 4; out_fifo = 5; cipher_iv_out = 6; if ((crd1->crd_alg == CRYPTO_MD5_HMAC || crd1->crd_alg == CRYPTO_SHA1_HMAC || crd1->crd_alg == CRYPTO_MD5 || crd1->crd_alg == CRYPTO_SHA1) && (crd2->crd_alg == CRYPTO_DES_CBC || crd2->crd_alg == CRYPTO_3DES_CBC || crd2->crd_alg == CRYPTO_AES_CBC || crd2->crd_alg == CRYPTO_ARC4) && ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) { maccrd = crd1; enccrd = crd2; } else if ((crd1->crd_alg == CRYPTO_DES_CBC || crd1->crd_alg == CRYPTO_ARC4 || crd1->crd_alg == CRYPTO_3DES_CBC || crd1->crd_alg == CRYPTO_AES_CBC) && (crd2->crd_alg == CRYPTO_MD5_HMAC || crd2->crd_alg == CRYPTO_SHA1_HMAC || crd2->crd_alg == CRYPTO_MD5 || crd2->crd_alg == CRYPTO_SHA1) && (crd1->crd_flags & CRD_F_ENCRYPT)) { enccrd = crd1; maccrd = crd2; } else { /* We cannot order the SEC as requested */ printk("%s: cannot do the order\n", device_get_nameunit(sc->sc_cdev)); err = EINVAL; goto errout; } } /* assign in_fifo and out_fifo based on input/output struct type */ if (crp->crp_flags & CRYPTO_F_SKBUF) { /* using SKB buffers */ struct sk_buff *skb = (struct sk_buff *)crp->crp_buf; if (skb_shinfo(skb)->nr_frags) { printk("%s: skb frags unimplemented\n", device_get_nameunit(sc->sc_cdev)); err = EINVAL; goto errout; } td->ptr[in_fifo].ptr = dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE); td->ptr[in_fifo].len = skb->len; td->ptr[out_fifo].ptr = dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE); td->ptr[out_fifo].len = skb->len; td->ptr[hmac_data].ptr = dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE); } else if (crp->crp_flags & CRYPTO_F_IOV) { /* using IOV buffers */ struct uio *uiop = (struct uio *)crp->crp_buf; if (uiop->uio_iovcnt > 1) { printk("%s: iov frags unimplemented\n", device_get_nameunit(sc->sc_cdev)); err = EINVAL; goto errout; } td->ptr[in_fifo].ptr = dma_map_single(NULL, uiop->uio_iov->iov_base, crp->crp_ilen, DMA_TO_DEVICE); td->ptr[in_fifo].len = crp->crp_ilen; /* crp_olen is never set; always use crp_ilen */ td->ptr[out_fifo].ptr = dma_map_single(NULL, uiop->uio_iov->iov_base, crp->crp_ilen, DMA_TO_DEVICE); td->ptr[out_fifo].len = crp->crp_ilen; } else { /* using contig buffers */ td->ptr[in_fifo].ptr = dma_map_single(NULL, crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE); td->ptr[in_fifo].len = crp->crp_ilen; td->ptr[out_fifo].ptr = dma_map_single(NULL, crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE); td->ptr[out_fifo].len = crp->crp_ilen; } if (enccrd) { switch (enccrd->crd_alg) { case CRYPTO_3DES_CBC: td->hdr |= TALITOS_MODE0_DEU_3DES; /* FALLTHROUGH */ case CRYPTO_DES_CBC: td->hdr |= TALITOS_SEL0_DEU | TALITOS_MODE0_DEU_CBC; if (enccrd->crd_flags & CRD_F_ENCRYPT) td->hdr |= TALITOS_MODE0_DEU_ENC; ivsize = 2*sizeof(u_int32_t); DPRINTF("%cDES ses %d ch %d len %d\n", (td->hdr & TALITOS_MODE0_DEU_3DES)?'3':'1', (u32)TALITOS_SESSION(crp->crp_sid), chsel, td->ptr[in_fifo].len); break; case CRYPTO_AES_CBC: td->hdr |= TALITOS_SEL0_AESU | TALITOS_MODE0_AESU_CBC; if (enccrd->crd_flags & CRD_F_ENCRYPT) td->hdr |= TALITOS_MODE0_AESU_ENC; ivsize = 4*sizeof(u_int32_t); DPRINTF("AES ses %d ch %d len %d\n", (u32)TALITOS_SESSION(crp->crp_sid), chsel, td->ptr[in_fifo].len); break; default: printk("%s: unimplemented enccrd->crd_alg %d\n", device_get_nameunit(sc->sc_cdev), enccrd->crd_alg); err = EINVAL; goto errout; } /* * Setup encrypt/decrypt state. 
When using basic ops * we can't use an inline IV because hash/crypt offset * must be from the end of the IV to the start of the * crypt data and this leaves out the preceding header * from the hash calculation. Instead we place the IV * in the state record and set the hash/crypt offset to * copy both the header+IV. */ if (enccrd->crd_flags & CRD_F_ENCRYPT) { td->hdr |= TALITOS_DIR_OUTBOUND; if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) iv = enccrd->crd_iv; else iv = (caddr_t) ses->ses_iv; if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) { crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_inject, ivsize, iv); } } else { td->hdr |= TALITOS_DIR_INBOUND; if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) { iv = enccrd->crd_iv; bcopy(enccrd->crd_iv, iv, ivsize); } else { iv = (caddr_t) ses->ses_iv; crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_inject, ivsize, iv); } } td->ptr[cipher_iv].ptr = dma_map_single(NULL, iv, ivsize, DMA_TO_DEVICE); td->ptr[cipher_iv].len = ivsize; /* * we don't need the cipher iv out length/pointer * field to do ESP IPsec. Therefore we set the len field as 0, * which tells the SEC not to do anything with this len/ptr * field. Previously, when length/pointer as pointing to iv, * it gave us corruption of packets. */ td->ptr[cipher_iv_out].len = 0; } if (enccrd && maccrd) { /* this is ipsec only for now */ td->hdr |= TALITOS_SEL1_MDEU | TALITOS_MODE1_MDEU_INIT | TALITOS_MODE1_MDEU_PAD; switch (maccrd->crd_alg) { case CRYPTO_MD5: td->hdr |= TALITOS_MODE1_MDEU_MD5; break; case CRYPTO_MD5_HMAC: td->hdr |= TALITOS_MODE1_MDEU_MD5_HMAC; break; case CRYPTO_SHA1: td->hdr |= TALITOS_MODE1_MDEU_SHA1; break; case CRYPTO_SHA1_HMAC: td->hdr |= TALITOS_MODE1_MDEU_SHA1_HMAC; break; default: /* We cannot order the SEC as requested */ printk("%s: cannot do the order\n", device_get_nameunit(sc->sc_cdev)); err = EINVAL; goto errout; } if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) || (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) { /* * The offset from hash data to the start of * crypt data is the difference in the skips. 
*/ /* ipsec only for now */ td->ptr[hmac_key].ptr = dma_map_single(NULL, ses->ses_hmac, ses->ses_hmac_len, DMA_TO_DEVICE); td->ptr[hmac_key].len = ses->ses_hmac_len; td->ptr[in_fifo].ptr += enccrd->crd_skip; td->ptr[in_fifo].len = enccrd->crd_len; td->ptr[out_fifo].ptr += enccrd->crd_skip; td->ptr[out_fifo].len = enccrd->crd_len; /* bytes of HMAC to postpend to ciphertext */ td->ptr[out_fifo].extent = ses->ses_mlen; td->ptr[hmac_data].ptr += maccrd->crd_skip; td->ptr[hmac_data].len = enccrd->crd_skip - maccrd->crd_skip; } if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) { printk("%s: CRD_F_KEY_EXPLICIT unimplemented\n", device_get_nameunit(sc->sc_cdev)); } } if (!enccrd && maccrd) { /* single MD5 or SHA */ td->hdr |= TALITOS_SEL0_MDEU | TALITOS_MODE0_MDEU_INIT | TALITOS_MODE0_MDEU_PAD; switch (maccrd->crd_alg) { case CRYPTO_MD5: td->hdr |= TALITOS_MODE0_MDEU_MD5; DPRINTF("MD5 ses %d ch %d len %d\n", (u32)TALITOS_SESSION(crp->crp_sid), chsel, td->ptr[in_fifo].len); break; case CRYPTO_MD5_HMAC: td->hdr |= TALITOS_MODE0_MDEU_MD5_HMAC; break; case CRYPTO_SHA1: td->hdr |= TALITOS_MODE0_MDEU_SHA1; DPRINTF("SHA1 ses %d ch %d len %d\n", (u32)TALITOS_SESSION(crp->crp_sid), chsel, td->ptr[in_fifo].len); break; case CRYPTO_SHA1_HMAC: td->hdr |= TALITOS_MODE0_MDEU_SHA1_HMAC; break; default: /* We cannot order the SEC as requested */ DPRINTF("cannot do the order\n"); err = EINVAL; goto errout; } if (crp->crp_flags & CRYPTO_F_IOV) td->ptr[out_fifo].ptr += maccrd->crd_inject; if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) || (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) { td->ptr[hmac_key].ptr = dma_map_single(NULL, ses->ses_hmac, ses->ses_hmac_len, DMA_TO_DEVICE); td->ptr[hmac_key].len = ses->ses_hmac_len; } } else { /* using process key (session data has duplicate) */ td->ptr[cipher_key].ptr = dma_map_single(NULL, enccrd->crd_key, (enccrd->crd_klen + 7) / 8, DMA_TO_DEVICE); td->ptr[cipher_key].len = (enccrd->crd_klen + 7) / 8; } /* descriptor complete - GO! */ return talitos_submit(sc, td, chsel); errout: if (err != ERESTART) { crp->crp_etype = err; crypto_done(crp); } return err; }
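/*
 * Aside: the channel scheduler above depends on sc_num_channels being a
 * power of two, so masking with (num_channels - 1) is a cheap equivalent
 * of a modulo.  A standalone sketch of the round-robin step:
 */
static int talitos_next_channel_rr(int chsel, int num_channels)
{
	/* assumes num_channels is a power of two, as the driver requires */
	return (chsel + 1) & (num_channels - 1);
}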
/* * Main event dispatcher. Called from other parts and drivers. * Send the event on the appropriate channels. * May be called from interrupt context. */ void wireless_send_event(struct net_device * dev, unsigned int cmd, union iwreq_data * wrqu, const char * extra) { const struct iw_ioctl_description * descr = NULL; int extra_len = 0; struct iw_event *event; /* Mallocated whole event */ int event_len; /* Its size */ int hdr_len; /* Size of the event header */ int wrqu_off = 0; /* Offset in wrqu */ /* Don't "optimise" the following variable, it will crash */ unsigned cmd_index; /* *MUST* be unsigned */ struct sk_buff *skb; struct nlmsghdr *nlh; struct nlattr *nla; #ifdef CONFIG_COMPAT struct __compat_iw_event *compat_event; struct compat_iw_point compat_wrqu; struct sk_buff *compskb; #endif /* * Nothing in the kernel sends scan events with data, be safe. * This is necessary because we cannot fix up scan event data * for compat, due to being contained in 'extra', but normally * applications are required to retrieve the scan data anyway * and no data is included in the event, this codifies that * practice. */ if (WARN_ON(cmd == SIOCGIWSCAN && extra)) extra = NULL; /* Get the description of the Event */ if (cmd <= SIOCIWLAST) { cmd_index = IW_IOCTL_IDX(cmd); if (cmd_index < standard_ioctl_num) descr = &(standard_ioctl[cmd_index]); } else { cmd_index = IW_EVENT_IDX(cmd); if (cmd_index < standard_event_num) descr = &(standard_event[cmd_index]); } /* Don't accept unknown events */ if (descr == NULL) { /* Note : we don't return an error to the driver, because * the driver would not know what to do about it. It can't * return an error to the user, because the event is not * initiated by a user request. * The best the driver could do is to log an error message. * We will do it ourselves instead... */ netdev_err(dev, "(WE) : Invalid/Unknown Wireless Event (0x%04X)\n", cmd); return; } /* Check extra parameters and set extra_len */ if (descr->header_type == IW_HEADER_TYPE_POINT) { /* Check if number of token fits within bounds */ if (wrqu->data.length > descr->max_tokens) { netdev_err(dev, "(WE) : Wireless Event too big (%d)\n", wrqu->data.length); return; } if (wrqu->data.length < descr->min_tokens) { netdev_err(dev, "(WE) : Wireless Event too small (%d)\n", wrqu->data.length); return; } /* Calculate extra_len - extra is NULL for restricted events */ if (extra != NULL) extra_len = wrqu->data.length * descr->token_size; /* Always at an offset in wrqu */ wrqu_off = IW_EV_POINT_OFF; } /* Total length of the event */ hdr_len = event_type_size[descr->header_type]; event_len = hdr_len + extra_len; /* * The problem for 64/32 bit. * * On 64-bit, a regular event is laid out as follows: * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | * | event.len | event.cmd | p a d d i n g | * | wrqu data ... (with the correct size) | * * This padding exists because we manipulate event->u, * and 'event' is not packed. * * An iw_point event is laid out like this instead: * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | * | event.len | event.cmd | p a d d i n g | * | iwpnt.len | iwpnt.flg | p a d d i n g | * | extra data ... * * The second padding exists because struct iw_point is extended, * but this depends on the platform... * * On 32-bit, all the padding shouldn't be there. 
*/ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb) return; /* Send via the RtNetlink event channel */ nlh = rtnetlink_ifinfo_prep(dev, skb); if (WARN_ON(!nlh)) { kfree_skb(skb); return; } /* Add the wireless events in the netlink packet */ nla = nla_reserve(skb, IFLA_WIRELESS, event_len); if (!nla) { kfree_skb(skb); return; } event = nla_data(nla); /* Fill event - first clear to avoid data leaking */ memset(event, 0, hdr_len); event->len = event_len; event->cmd = cmd; memcpy(&event->u, ((char *) wrqu) + wrqu_off, hdr_len - IW_EV_LCP_LEN); if (extra_len) memcpy(((char *) event) + hdr_len, extra, extra_len); nlmsg_end(skb, nlh); #ifdef CONFIG_COMPAT hdr_len = compat_event_type_size[descr->header_type]; event_len = hdr_len + extra_len; compskb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!compskb) { kfree_skb(skb); return; } /* Send via the RtNetlink event channel */ nlh = rtnetlink_ifinfo_prep(dev, compskb); if (WARN_ON(!nlh)) { kfree_skb(skb); kfree_skb(compskb); return; } /* Add the wireless events in the netlink packet */ nla = nla_reserve(compskb, IFLA_WIRELESS, event_len); if (!nla) { kfree_skb(skb); kfree_skb(compskb); return; } compat_event = nla_data(nla); compat_event->len = event_len; compat_event->cmd = cmd; if (descr->header_type == IW_HEADER_TYPE_POINT) { compat_wrqu.length = wrqu->data.length; compat_wrqu.flags = wrqu->data.flags; memcpy(&compat_event->pointer, ((char *) &compat_wrqu) + IW_EV_COMPAT_POINT_OFF, hdr_len - IW_EV_COMPAT_LCP_LEN); if (extra_len) memcpy(((char *) compat_event) + hdr_len, extra, extra_len); } else { /* extra_len must be zero, so no if (extra) needed */ memcpy(&compat_event->pointer, wrqu, hdr_len - IW_EV_COMPAT_LCP_LEN); } nlmsg_end(compskb, nlh); skb_shinfo(skb)->frag_list = compskb; #endif #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) skb_queue_tail(&dev_net(dev)->wext_nlevents, skb); schedule_work(&wireless_nlevent_work); #else skb_queue_tail(&wireless_nlevent_queue, skb); tasklet_schedule(&wireless_nlevent_tasklet); #endif }
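/*
 * Aside: for IW_HEADER_TYPE_POINT events the payload copied into the
 * netlink attribute starts wrqu_off bytes into wrqu, skipping the
 * user-space pointer member of struct iw_point.  A hypothetical helper
 * making that offset arithmetic explicit (IW_EV_POINT_OFF is assumed to
 * be offsetof(struct iw_point, length)):
 */
static inline const char *iw_point_payload(const union iwreq_data *wrqu)
{
	return ((const char *)wrqu) + IW_EV_POINT_OFF;
}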
struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask) { struct sk_buff *n; n = skb_head_from_pool(); if (!n) { n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); if (!n) return NULL; } #define C(x) n->x = skb->x n->next = n->prev = NULL; n->list = NULL; n->sk = NULL; C(stamp); C(dev); C(h); C(nh); C(mac); C(dst); dst_clone(n->dst); memcpy(n->cb, skb->cb, sizeof(skb->cb)); C(len); C(data_len); C(csum); n->cloned = 1; C(pkt_type); C(ip_summed); C(priority); atomic_set(&n->users, 1); C(protocol); C(security); C(truesize); C(head); C(data); C(tail); C(end); n->destructor = NULL; #ifdef CONFIG_NETFILTER C(nfmark); C(nfcache); C(nfct); #ifdef CONFIG_NETFILTER_DEBUG C(nf_debug); #endif #endif /*CONFIG_NETFILTER*/ #if defined(CONFIG_HIPPI) C(private); #endif #ifdef CONFIG_NET_SCHED C(tc_index); #endif atomic_inc(&(skb_shinfo(skb)->dataref)); skb->cloned = 1; #ifdef CONFIG_NETFILTER nf_conntrack_get(skb->nfct); #endif return n; }
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, struct net_device *dev) { struct net *net = container_of(qp->q.net, struct net, ipv4.frags); struct iphdr *iph; struct sk_buff *fp, *head = qp->q.fragments; int len; int ihlen; int err; int sum_truesize; u8 ecn; ipq_kill(qp); ecn = ip_frag_ecn_table[qp->ecn]; if (unlikely(ecn == 0xff)) { err = -EINVAL; goto out_fail; } /* Make the one we just received the head. */ if (prev) { head = prev->next; fp = skb_clone(head, GFP_ATOMIC); if (!fp) goto out_nomem; fp->next = head->next; if (!fp->next) qp->q.fragments_tail = fp; prev->next = fp; skb_morph(head, qp->q.fragments); head->next = qp->q.fragments->next; consume_skb(qp->q.fragments); qp->q.fragments = head; } WARN_ON(head == NULL); WARN_ON(FRAG_CB(head)->offset != 0); /* Allocate a new buffer for the datagram. */ ihlen = ip_hdrlen(head); len = ihlen + qp->q.len; err = -E2BIG; if (len > 65535) goto out_oversize; /* Head of list must not be cloned. */ if (skb_unclone(head, GFP_ATOMIC)) goto out_nomem; /* If the first fragment is fragmented itself, we split * it to two chunks: the first with data and paged part * and the second, holding only fragments. */ if (skb_has_frag_list(head)) { struct sk_buff *clone; int i, plen = 0; if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL) goto out_nomem; clone->next = head->next; head->next = clone; skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; skb_frag_list_init(head); for (i = 0; i < skb_shinfo(head)->nr_frags; i++) plen += skb_frag_size(&skb_shinfo(head)->frags[i]); clone->len = clone->data_len = head->data_len - plen; head->data_len -= clone->len; head->len -= clone->len; clone->csum = 0; clone->ip_summed = head->ip_summed; add_frag_mem_limit(&qp->q, clone->truesize); } skb_push(head, head->data - skb_network_header(head)); sum_truesize = head->truesize; for (fp = head->next; fp;) { bool headstolen; int delta; struct sk_buff *next = fp->next; sum_truesize += fp->truesize; if (head->ip_summed != fp->ip_summed) head->ip_summed = CHECKSUM_NONE; else if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_add(head->csum, fp->csum); if (skb_try_coalesce(head, fp, &headstolen, &delta)) { kfree_skb_partial(fp, headstolen); } else { if (!skb_shinfo(head)->frag_list) skb_shinfo(head)->frag_list = fp; head->data_len += fp->len; head->len += fp->len; head->truesize += fp->truesize; } fp = next; } sub_frag_mem_limit(&qp->q, sum_truesize); head->next = NULL; head->dev = dev; head->tstamp = qp->q.stamp; IPCB(head)->frag_max_size = qp->q.max_size; iph = ip_hdr(head); /* max_size != 0 implies at least one fragment had IP_DF set */ iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0; iph->tot_len = htons(len); iph->tos |= ecn; IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS); qp->q.fragments = NULL; qp->q.fragments_tail = NULL; return 0; out_nomem: LIMIT_NETDEBUG(KERN_ERR pr_fmt("queue_glue: no memory for gluing queue %p\n"), qp); err = -ENOMEM; goto out_fail; out_oversize: net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr); out_fail: IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); return err; }
static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, struct xen_netif_tx_request *tx) { struct netfront_info *np = netdev_priv(dev); char *data = skb->data; unsigned long mfn; RING_IDX prod = np->tx.req_prod_pvt; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); unsigned int id; grant_ref_t ref; int i; while (len > PAGE_SIZE - offset) { tx->size = PAGE_SIZE - offset; tx->flags |= XEN_NETTXF_more_data; len -= tx->size; data += tx->size; offset = 0; id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); np->tx_skbs[id].skb = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; } for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; tx->flags |= XEN_NETTXF_more_data; id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); np->tx_skbs[id].skb = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = pfn_to_mfn(page_to_pfn(skb_frag_page(frag))); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = frag->page_offset; tx->size = skb_frag_size(frag); tx->flags = 0; } np->tx.req_prod_pvt = prod; }
/**
 *	skb_copy_datagram_iovec - Copy a datagram to an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: io vector to copy to
 *	@len: amount of data to copy from buffer to iovec
 *
 *	Note: the iovec is modified during the copy.
 */
int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
			    struct iovec *to, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	trace_skb_copy_datagram_iovec(skb, len);

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_toiovec(to, skb->data + offset, copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			int err;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_toiovec(to, vaddr + frag->page_offset +
					     offset - start, copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_iovec(frag_iter,
						    offset - start,
						    to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}

	if (!len)
		return 0;

fault:
	return -EFAULT;
}
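/*
 * Usage sketch (hypothetical caller): a recvmsg()-style path that copies
 * the payload following an assumed hdr_len-byte header straight into the
 * caller's iovec.  Remember that the iovec is advanced by the copy.
 */
static int copy_payload_to_iovec(struct sk_buff *skb, struct msghdr *msg,
				 int hdr_len, int len)
{
	return skb_copy_datagram_iovec(skb, hdr_len, msg->msg_iov, len);
}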
/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@fclone: allocate from fclone cache instead of head cache
 *		and allocate a cloned (child) skb
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, int fclone)
{
	kmem_cache_t *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;

	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

	/* Get the HEAD */
	skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA);
	if (!skb)
		goto out;

	/* Get the DATA. Size must match skb_add_mtu(). */
	size = SKB_DATA_ALIGN(size);
	//data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	//data = kmalloc(size + sizeof(struct skb_shared_info)+4, gfp_mask);//incifer support vlan id
	/* 2007/10/16 pppoe acc incifer LAN2WAN */
	data = kmalloc(size + sizeof(struct skb_shared_info) + NK_EXTRA_OFFSET, gfp_mask);
	if (!data)
		goto nodata;

	memset(skb, 0, offsetof(struct sk_buff, truesize));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	//skb->data = data+4;//incifer
	/* 2007/10/16 pppoe acc incifer LAN2WAN */
	skb->data = data + NK_EXTRA_OFFSET;
	//skb->tail = data+4;//incifer
	/* 2007/10/16 pppoe acc incifer LAN2WAN */
	skb->tail = data + NK_EXTRA_OFFSET;
	//skb->end = data + size+4;//incifer
	/* 2007/10/16 pppoe acc incifer LAN2WAN */
	skb->end = data + size + NK_EXTRA_OFFSET;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	atomic_set(&shinfo->dataref, 1);
	shinfo->nr_frags = 0;
	shinfo->tso_size = 0;
	shinfo->tso_segs = 0;
	shinfo->ufo_size = 0;
	shinfo->ip6_frag_id = 0;
	shinfo->frag_list = NULL;

	if (fclone) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
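/*
 * For reference: SKB_DATA_ALIGN(), used above, is assumed to round the
 * data size up to a cache-line boundary so that skb_shared_info starts
 * aligned.  Sketch of the usual definition:
 */
#ifndef SKB_DATA_ALIGN
#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#endif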
int ip6_push_pending_frames(struct sock *sk) { struct sk_buff *skb, *tmp_skb; struct sk_buff **tail_skb; struct in6_addr final_dst_buf, *final_dst = &final_dst_buf; struct inet_opt *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6hdr *hdr; struct ipv6_txoptions *opt = np->cork.opt; struct rt6_info *rt = np->cork.rt; struct flowi *fl = np->cork.fl; unsigned char proto = fl->proto; int err = 0; if ((skb = __skb_dequeue(&sk->write_queue)) == NULL) goto out; tail_skb = &(skb_shinfo(skb)->frag_list); /* move skb->data to ip header from ext header */ if (skb->data < skb->nh.raw) __skb_pull(skb, skb->nh.raw - skb->data); while ((tmp_skb = __skb_dequeue(&sk->write_queue)) != NULL) { __skb_pull(tmp_skb, skb->h.raw - skb->nh.raw); *tail_skb = tmp_skb; tail_skb = &(tmp_skb->next); skb->len += tmp_skb->len; skb->data_len += tmp_skb->len; #if 0 /* Logically correct, but useless work, ip_fragment() will have to undo */ skb->truesize += tmp_skb->truesize; __sock_put(tmp_skb->sk); tmp_skb->destructor = NULL; tmp_skb->sk = NULL; #endif } ipv6_addr_copy(final_dst, &fl->fl6_dst); __skb_pull(skb, skb->h.raw - skb->nh.raw); if (opt && opt->opt_flen) ipv6_push_frag_opts(skb, opt, &proto); if (opt && opt->opt_nflen) ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst); skb->nh.ipv6h = hdr = (struct ipv6hdr*) skb_push(skb, sizeof(struct ipv6hdr)); *(u32*)hdr = fl->fl6_flowlabel | htonl(0x60000000); if (skb->len <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); else hdr->payload_len = 0; hdr->hop_limit = np->cork.hop_limit; hdr->nexthdr = proto; ipv6_addr_copy(&hdr->saddr, &fl->fl6_src); ipv6_addr_copy(&hdr->daddr, final_dst); skb->dst = dst_clone(&rt->u.dst); err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output); if (err) { if (err > 0) err = inet->recverr ? net_xmit_errno(err) : 0; if (err) goto error; } out: inet->cork.flags &= ~IPCORK_OPT; if (np->cork.opt) { kfree(np->cork.opt); np->cork.opt = NULL; } if (np->cork.rt) { dst_release(&np->cork.rt->u.dst); np->cork.rt = NULL; } if (np->cork.fl) { np->cork.fl = NULL; } return err; error: goto out; }
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*)) { struct net_device *dev; struct rt6_info *rt = (struct rt6_info*)skb->dst; struct sk_buff *frag; struct ipv6hdr *tmp_hdr; struct frag_hdr *fh; unsigned int mtu, hlen, left, len; u32 frag_id = 0; int ptr, offset = 0, err=0; u8 *prevhdr, nexthdr = 0; dev = rt->u.dst.dev; hlen = ip6_find_1stfragopt(skb, &prevhdr); nexthdr = *prevhdr; mtu = dst_pmtu(&rt->u.dst) - hlen - sizeof(struct frag_hdr); if (skb_shinfo(skb)->frag_list) { int first_len = skb_pagelen(skb); if (first_len - hlen > mtu || ((first_len - hlen) & 7) || skb_cloned(skb)) goto slow_path; for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) { /* Correct geometry. */ if (frag->len > mtu || ((frag->len & 7) && frag->next) || skb_headroom(frag) < hlen) goto slow_path; /* Correct socket ownership. */ if (frag->sk == NULL) goto slow_path; /* Partially cloned skb? */ if (skb_shared(frag)) goto slow_path; } err = 0; offset = 0; frag = skb_shinfo(skb)->frag_list; skb_shinfo(skb)->frag_list = 0; /* BUILD HEADER */ tmp_hdr = kmalloc(hlen, GFP_ATOMIC); if (!tmp_hdr) { IP6_INC_STATS(Ip6FragFails); return -ENOMEM; } *prevhdr = NEXTHDR_FRAGMENT; memcpy(tmp_hdr, skb->nh.raw, hlen); __skb_pull(skb, hlen); fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr)); skb->nh.raw = __skb_push(skb, hlen); memcpy(skb->nh.raw, tmp_hdr, hlen); ipv6_select_ident(skb, fh); fh->nexthdr = nexthdr; fh->reserved = 0; fh->frag_off = htons(IP6_MF); frag_id = fh->identification; first_len = skb_pagelen(skb); skb->data_len = first_len - skb_headlen(skb); skb->len = first_len; skb->nh.ipv6h->payload_len = htons(first_len - sizeof(struct ipv6hdr)); for (;;) { /* Prepare header of the next frame, * before previous one went down. */ if (frag) { frag->h.raw = frag->data; fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr)); frag->nh.raw = __skb_push(frag, hlen); memcpy(frag->nh.raw, tmp_hdr, hlen); offset += skb->len - hlen - sizeof(struct frag_hdr); fh->nexthdr = nexthdr; fh->reserved = 0; fh->frag_off = htons(offset); if (frag->next != NULL) fh->frag_off |= htons(IP6_MF); fh->identification = frag_id; frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr)); ip6_copy_metadata(frag, skb); } err = output(skb); if (err || !frag) break; skb = frag; frag = skb->next; skb->next = NULL; } if (tmp_hdr) kfree(tmp_hdr); if (err == 0) { IP6_INC_STATS(Ip6FragOKs); return 0; } while (frag) { skb = frag->next; kfree_skb(frag); frag = skb; } IP6_INC_STATS(Ip6FragFails); return err; } slow_path: left = skb->len - hlen; /* Space per frame */ ptr = hlen; /* Where to start from */ /* * Fragment the datagram. */ *prevhdr = NEXTHDR_FRAGMENT; /* * Keep copying data until we run out. */ while(left > 0) { len = left; /* IF: it doesn't fit, use 'mtu' - the data space left */ if (len > mtu) len = mtu; /* IF: we are not sending upto and including the packet end then align the next start on an eight byte boundary */ if (len < left) { len &= ~7; } /* * Allocate buffer. 
*/ if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) { NETDEBUG(printk(KERN_INFO "IPv6: frag: no memory for new fragment!\n")); err = -ENOMEM; goto fail; } /* * Set up data on packet */ ip6_copy_metadata(frag, skb); skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev)); skb_put(frag, len + hlen + sizeof(struct frag_hdr)); frag->nh.raw = frag->data; fh = (struct frag_hdr*)(frag->data + hlen); frag->h.raw = frag->data + hlen + sizeof(struct frag_hdr); /* * Charge the memory for the fragment to any owner * it might possess */ if (skb->sk) skb_set_owner_w(frag, skb->sk); /* * Copy the packet header into the new buffer. */ memcpy(frag->nh.raw, skb->data, hlen); /* * Build fragment header. */ fh->nexthdr = nexthdr; fh->reserved = 0; if (frag_id) { ipv6_select_ident(skb, fh); frag_id = fh->identification; } else fh->identification = frag_id; /* * Copy a block of the IP datagram. */ if (skb_copy_bits(skb, ptr, frag->h.raw, len)) BUG(); left -= len; fh->frag_off = htons(offset); if (left > 0) fh->frag_off |= htons(IP6_MF); frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr)); ptr += len; offset += len; /* * Put this fragment into the sending queue. */ IP6_INC_STATS(Ip6FragCreates); err = output(frag); if (err) goto fail; } kfree_skb(skb); IP6_INC_STATS(Ip6FragOKs); return err; fail: kfree_skb(skb); IP6_INC_STATS(Ip6FragFails); return err; }
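/*
 * Aside: in the slow path above every fragment except the last is trimmed
 * to a multiple of 8 bytes, because the IPv6 fragment header stores the
 * offset in 8-octet units.  A standalone sketch of that trimming:
 */
static unsigned int ip6_frag_payload_len_sketch(unsigned int left,
						unsigned int mtu)
{
	unsigned int len = left > mtu ? mtu : left;

	if (len < left)		/* not the final fragment */
		len &= ~7;	/* keep the offset representable */
	return len;
}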
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, int hlimit, struct ipv6_txoptions *opt, struct flowi *fl, struct rt6_info *rt, unsigned int flags) { struct inet_opt *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct sk_buff *skb; unsigned int maxfraglen, fragheaderlen; int exthdrlen; int hh_len; int mtu; int copy = 0; int err; int offset = 0; int csummode = CHECKSUM_NONE; if (flags&MSG_PROBE) return 0; if (skb_queue_empty(&sk->write_queue)) { /* * setup for corking */ if (opt) { if (np->cork.opt == NULL) np->cork.opt = kmalloc(opt->tot_len, sk->allocation); memcpy(np->cork.opt, opt, opt->tot_len); inet->cork.flags |= IPCORK_OPT; /* need source address above miyazawa*/ } dst_hold(&rt->u.dst); np->cork.rt = rt; np->cork.fl = fl; np->cork.hop_limit = hlimit; inet->cork.fragsize = mtu = dst_pmtu(&rt->u.dst); inet->cork.length = 0; inet->sndmsg_page = NULL; inet->sndmsg_off = 0; exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0); length += exthdrlen; transhdrlen += exthdrlen; } else { rt = np->cork.rt; if (inet->cork.flags & IPCORK_OPT) opt = np->cork.opt; transhdrlen = 0; exthdrlen = 0; mtu = inet->cork.fragsize; } hh_len = (rt->u.dst.dev->hard_header_len&~15) + 16; fragheaderlen = sizeof(struct ipv6hdr) + (opt ? opt->opt_nflen : 0); maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr); if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) { if (inet->cork.length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) { ipv6_local_error(sk, EMSGSIZE, fl, mtu-exthdrlen); return -EMSGSIZE; } } inet->cork.length += length; if ((skb = skb_peek_tail(&sk->write_queue)) == NULL) goto alloc_new_skb; while (length > 0) { if ((copy = maxfraglen - skb->len) <= 0) { char *data; unsigned int datalen; unsigned int fraglen; unsigned int alloclen; BUG_TRAP(copy == 0); alloc_new_skb: datalen = maxfraglen - fragheaderlen; if (datalen > length) datalen = length; fraglen = datalen + fragheaderlen; if ((flags & MSG_MORE) && !(rt->u.dst.dev->features&NETIF_F_SG)) alloclen = maxfraglen; else alloclen = fraglen; alloclen += sizeof(struct frag_hdr); if (transhdrlen) { skb = sock_alloc_send_skb(sk, alloclen + hh_len + 15, (flags & MSG_DONTWAIT), &err); } else { skb = NULL; if (atomic_read(&sk->wmem_alloc) <= 2*sk->sndbuf) skb = sock_wmalloc(sk, alloclen + hh_len + 15, 1, sk->allocation); if (unlikely(skb == NULL)) err = -ENOBUFS; } if (skb == NULL) goto error; /* * Fill in the control structures */ skb->ip_summed = csummode; skb->csum = 0; /* reserve 8 byte for fragmentation */ skb_reserve(skb, hh_len+sizeof(struct frag_hdr)); /* * Find where to start putting bytes */ data = skb_put(skb, fraglen); skb->nh.raw = data + exthdrlen; data += fragheaderlen; skb->h.raw = data + exthdrlen; copy = datalen - transhdrlen; if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, 0, skb) < 0) { err = -EFAULT; kfree_skb(skb); goto error; } offset += copy; length -= datalen; transhdrlen = 0; exthdrlen = 0; csummode = CHECKSUM_NONE; /* * Put the packet on the pending queue */ __skb_queue_tail(&sk->write_queue, skb); continue; } if (copy > length) copy = length; if (!(rt->u.dst.dev->features&NETIF_F_SG)) { unsigned int off; off = skb->len; if (getfrag(from, skb_put(skb, copy), offset, copy, off, skb) < 0) { __skb_trim(skb, off); err = -EFAULT; goto error; } } else { int i = skb_shinfo(skb)->nr_frags; skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1]; 
struct page *page = inet->sndmsg_page; int off = inet->sndmsg_off; unsigned int left; if (page && (left = PAGE_SIZE - off) > 0) { if (copy >= left) copy = left; if (page != frag->page) { if (i == MAX_SKB_FRAGS) { err = -EMSGSIZE; goto error; } get_page(page); skb_fill_page_desc(skb, i, page, inet->sndmsg_off, 0); frag = &skb_shinfo(skb)->frags[i]; } } else if(i < MAX_SKB_FRAGS) { if (copy > PAGE_SIZE) copy = PAGE_SIZE; page = alloc_pages(sk->allocation, 0); if (page == NULL) { err = -ENOMEM; goto error; } inet->sndmsg_page = page; inet->sndmsg_off = 0; skb_fill_page_desc(skb, i, page, 0, 0); frag = &skb_shinfo(skb)->frags[i]; skb->truesize += PAGE_SIZE; atomic_add(PAGE_SIZE, &sk->wmem_alloc); } else { err = -EMSGSIZE; goto error; } if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) { err = -EFAULT; goto error; } inet->sndmsg_off += copy; frag->size += copy; skb->len += copy; skb->data_len += copy; } offset += copy; length -= copy; } return 0; error: inet->cork.length -= length; IP6_INC_STATS(Ip6OutDiscards); return err; }
static bool is_gre_gso(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type &
		(SKB_GSO_GRE | SKB_GSO_GRE_CSUM);
}
static struct sk_buff *gre_gso_segment(struct sk_buff *skb, netdev_features_t features) { int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); struct sk_buff *segs = ERR_PTR(-EINVAL); u16 mac_offset = skb->mac_header; __be16 protocol = skb->protocol; u16 mac_len = skb->mac_len; int gre_offset, outer_hlen; bool need_csum, ufo; if (!skb->encapsulation) goto out; if (unlikely(tnl_hlen < sizeof(struct gre_base_hdr))) goto out; if (unlikely(!pskb_may_pull(skb, tnl_hlen))) goto out; /* setup inner skb. */ skb->encapsulation = 0; SKB_GSO_CB(skb)->encap_level = 0; __skb_pull(skb, tnl_hlen); skb_reset_mac_header(skb); skb_set_network_header(skb, skb_inner_network_offset(skb)); skb->mac_len = skb_inner_network_offset(skb); skb->protocol = skb->inner_protocol; need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM); skb->encap_hdr_csum = need_csum; ufo = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP); features &= skb->dev->hw_enc_features; /* The only checksum offload we care about from here on out is the * outer one so strip the existing checksum feature flags based * on the fact that we will be computing our checksum in software. */ if (ufo) { features &= ~NETIF_F_CSUM_MASK; if (!need_csum) features |= NETIF_F_HW_CSUM; } /* segment inner packet. */ segs = skb_mac_gso_segment(skb, features); if (IS_ERR_OR_NULL(segs)) { skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset, mac_len); goto out; } outer_hlen = skb_tnl_header_len(skb); gre_offset = outer_hlen - tnl_hlen; skb = segs; do { struct gre_base_hdr *greh; __sum16 *pcsum; /* Set up inner headers if we are offloading inner checksum */ if (skb->ip_summed == CHECKSUM_PARTIAL) { skb_reset_inner_headers(skb); skb->encapsulation = 1; } skb->mac_len = mac_len; skb->protocol = protocol; __skb_push(skb, outer_hlen); skb_reset_mac_header(skb); skb_set_network_header(skb, mac_len); skb_set_transport_header(skb, gre_offset); if (!need_csum) continue; greh = (struct gre_base_hdr *)skb_transport_header(skb); pcsum = (__sum16 *)(greh + 1); if (skb_is_gso(skb)) { unsigned int partial_adj; /* Adjust checksum to account for the fact that * the partial checksum is based on actual size * whereas headers should be based on MSS size. */ partial_adj = skb->len + skb_headroom(skb) - SKB_GSO_CB(skb)->data_offset - skb_shinfo(skb)->gso_size; *pcsum = ~csum_fold((__force __wsum)htonl(partial_adj)); } else { *pcsum = 0; } *(pcsum + 1) = 0; *pcsum = gso_make_checksum(skb, 0); } while ((skb = skb->next)); out: return segs; }
static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev) { struct cp_private *cp = netdev_priv(dev); unsigned entry; u32 eor; #if CP_VLAN_TAG_USED u32 vlan_tag = 0; #endif spin_lock_irq(&cp->lock); /* This is a hard error, log it. */ if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) { netif_stop_queue(dev); spin_unlock_irq(&cp->lock); printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n", dev->name); return 1; } #if CP_VLAN_TAG_USED if (cp->vlgrp && vlan_tx_tag_present(skb)) vlan_tag = TxVlanTag | cpu_to_be16(vlan_tx_tag_get(skb)); #endif entry = cp->tx_head; eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0; if (skb_shinfo(skb)->nr_frags == 0) { struct cp_desc *txd = &cp->tx_ring[entry]; u32 len; dma_addr_t mapping; len = skb->len; mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE); CP_VLAN_TX_TAG(txd, vlan_tag); txd->addr = cpu_to_le64(mapping); wmb(); if (skb->ip_summed == CHECKSUM_HW) { const struct iphdr *ip = skb->nh.iph; if (ip->protocol == IPPROTO_TCP) txd->opts1 = cpu_to_le32(eor | len | DescOwn | FirstFrag | LastFrag | IPCS | TCPCS); else if (ip->protocol == IPPROTO_UDP) txd->opts1 = cpu_to_le32(eor | len | DescOwn | FirstFrag | LastFrag | IPCS | UDPCS); else BUG(); } else txd->opts1 = cpu_to_le32(eor | len | DescOwn | FirstFrag | LastFrag); wmb(); cp->tx_skb[entry].skb = skb; cp->tx_skb[entry].mapping = mapping; cp->tx_skb[entry].frag = 0; entry = NEXT_TX(entry); } else { struct cp_desc *txd; u32 first_len, first_eor; dma_addr_t first_mapping; int frag, first_entry = entry; const struct iphdr *ip = skb->nh.iph; /* We must give this initial chunk to the device last. * Otherwise we could race with the device. */ first_eor = eor; first_len = skb_headlen(skb); first_mapping = pci_map_single(cp->pdev, skb->data, first_len, PCI_DMA_TODEVICE); cp->tx_skb[entry].skb = skb; cp->tx_skb[entry].mapping = first_mapping; cp->tx_skb[entry].frag = 1; entry = NEXT_TX(entry); for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; u32 len; u32 ctrl; dma_addr_t mapping; len = this_frag->size; mapping = pci_map_single(cp->pdev, ((void *) page_address(this_frag->page) + this_frag->page_offset), len, PCI_DMA_TODEVICE); eor = (entry == (CP_TX_RING_SIZE - 1)) ? 
RingEnd : 0; if (skb->ip_summed == CHECKSUM_HW) { ctrl = eor | len | DescOwn | IPCS; if (ip->protocol == IPPROTO_TCP) ctrl |= TCPCS; else if (ip->protocol == IPPROTO_UDP) ctrl |= UDPCS; else BUG(); } else ctrl = eor | len | DescOwn; if (frag == skb_shinfo(skb)->nr_frags - 1) ctrl |= LastFrag; txd = &cp->tx_ring[entry]; CP_VLAN_TX_TAG(txd, vlan_tag); txd->addr = cpu_to_le64(mapping); wmb(); txd->opts1 = cpu_to_le32(ctrl); wmb(); cp->tx_skb[entry].skb = skb; cp->tx_skb[entry].mapping = mapping; cp->tx_skb[entry].frag = frag + 2; entry = NEXT_TX(entry); } txd = &cp->tx_ring[first_entry]; CP_VLAN_TX_TAG(txd, vlan_tag); txd->addr = cpu_to_le64(first_mapping); wmb(); if (skb->ip_summed == CHECKSUM_HW) { if (ip->protocol == IPPROTO_TCP) txd->opts1 = cpu_to_le32(first_eor | first_len | FirstFrag | DescOwn | IPCS | TCPCS); else if (ip->protocol == IPPROTO_UDP) txd->opts1 = cpu_to_le32(first_eor | first_len | FirstFrag | DescOwn | IPCS | UDPCS); else BUG(); } else txd->opts1 = cpu_to_le32(first_eor | first_len | FirstFrag | DescOwn); wmb(); } cp->tx_head = entry; if (netif_msg_tx_queued(cp)) printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n", dev->name, entry, skb->len); if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1)) netif_stop_queue(dev); spin_unlock_irq(&cp->lock); cpw8(TxPoll, NormalTxPoll); dev->trans_start = jiffies; return 0; }
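/*
 * Sketch (assumption): the ring-index helpers used above.  NEXT_TX wraps
 * the descriptor index around a power-of-two ring, and TX_BUFFS_AVAIL
 * reports how many free slots remain between head and tail; the driver's
 * real macros may differ in detail.
 */
#define CP_TX_RING_SIZE_SKETCH	64
#define NEXT_TX_SKETCH(n)	(((n) + 1) & (CP_TX_RING_SIZE_SKETCH - 1))
#define TX_BUFFS_AVAIL_SKETCH(head, tail)			\
	(((tail) <= (head)) ?					\
	 (tail) + (CP_TX_RING_SIZE_SKETCH - 1) - (head) :	\
	 (tail) - (head) - 1)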
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) { struct sk_buff *n; n = skb + 1; if (skb->fclone == SKB_FCLONE_ORIG && n->fclone == SKB_FCLONE_UNAVAILABLE) { atomic_t *fclone_ref = (atomic_t *) (n + 1); n->fclone = SKB_FCLONE_CLONE; atomic_inc(fclone_ref); } else { n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); if (!n) return NULL; n->fclone = SKB_FCLONE_UNAVAILABLE; } #define C(x) n->x = skb->x n->next = n->prev = NULL; n->sk = NULL; C(tstamp); C(dev); C(h); C(nh); C(mac); C(dst); dst_clone(skb->dst); C(sp); #ifdef CONFIG_INET secpath_get(skb->sp); #endif memcpy(n->cb, skb->cb, sizeof(skb->cb)); C(len); C(data_len); C(csum); C(local_df); n->cloned = 1; n->nohdr = 0; C(pkt_type); C(ip_summed); C(priority); #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) C(ipvs_property); #endif C(protocol); n->destructor = NULL; #ifdef CONFIG_NETFILTER C(nfmark); C(nfct); nf_conntrack_get(skb->nfct); C(nfctinfo); #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) C(nfct_reasm); nf_conntrack_get_reasm(skb->nfct_reasm); #endif #ifdef CONFIG_BRIDGE_NETFILTER C(nf_bridge); nf_bridge_get(skb->nf_bridge); #endif #endif /*CONFIG_NETFILTER*/ #ifdef CONFIG_NET_SCHED C(tc_index); #ifdef CONFIG_NET_CLS_ACT n->tc_verd = SET_TC_VERD(skb->tc_verd,0); n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd); n->tc_verd = CLR_TC_MUNGED(n->tc_verd); C(input_dev); #endif #endif C(truesize); atomic_set(&n->users, 1); C(head); C(data); C(tail); C(end); atomic_inc(&(skb_shinfo(skb)->dataref)); skb->cloned = 1; return n; }
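/*
 * Aside: the fast-clone path above relies on the fclone cache laying each
 * object out as [parent skb][child skb][shared refcount], which is why
 * "skb + 1" reaches the preallocated clone and "(n + 1)" the reference
 * count.  Illustrative layout only, not the actual slab definition:
 */
struct sk_buff_fclones_sketch {
	struct sk_buff	skb1;		/* handed out by __alloc_skb(..., fclone) */
	struct sk_buff	skb2;		/* returned by skb_clone() on the fast path */
	atomic_t	fclone_ref;	/* shared by both heads */
};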
/* * Deliver read data back to initiator. * XXX TBD handle resource problems later. */ int ft_queue_data_in(struct se_cmd *se_cmd) { struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); struct fc_frame *fp = NULL; struct fc_exch *ep; struct fc_lport *lport; struct scatterlist *sg = NULL; size_t remaining; u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF; u32 mem_off = 0; u32 fh_off = 0; u32 frame_off = 0; size_t frame_len = 0; size_t mem_len = 0; size_t tlen; size_t off_in_page; struct page *page = NULL; int use_sg; int error; void *page_addr; void *from; void *to = NULL; ep = fc_seq_exch(cmd->seq); lport = ep->lp; cmd->seq = lport->tt.seq_start_next(cmd->seq); remaining = se_cmd->data_length; /* * Setup to use first mem list entry, unless no data. */ BUG_ON(remaining && !se_cmd->t_data_sg); if (remaining) { sg = se_cmd->t_data_sg; mem_len = sg->length; mem_off = sg->offset; page = sg_page(sg); } /* no scatter/gather in skb for odd word length due to fc_seq_send() */ use_sg = !(remaining % 4); while (remaining) { if (!mem_len) { sg = sg_next(sg); mem_len = min((size_t)sg->length, remaining); mem_off = sg->offset; page = sg_page(sg); } if (!frame_len) { /* * If lport's has capability of Large Send Offload LSO) * , then allow 'frame_len' to be as big as 'lso_max' * if indicated transfer length is >= lport->lso_max */ frame_len = (lport->seq_offload) ? lport->lso_max : cmd->sess->max_frame; frame_len = min(frame_len, remaining); fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len); if (!fp) return -ENOMEM; to = fc_frame_payload_get(fp, 0); fh_off = frame_off; frame_off += frame_len; /* * Setup the frame's max payload which is used by base * driver to indicate HW about max frame size, so that * HW can do fragmentation appropriately based on * "gso_max_size" of underline netdev. */ fr_max_payload(fp) = cmd->sess->max_frame; } tlen = min(mem_len, frame_len); if (use_sg) { off_in_page = mem_off; BUG_ON(!page); get_page(page); skb_fill_page_desc(fp_skb(fp), skb_shinfo(fp_skb(fp))->nr_frags, page, off_in_page, tlen); fr_len(fp) += tlen; fp_skb(fp)->data_len += tlen; fp_skb(fp)->truesize += PAGE_SIZE << compound_order(page); } else { BUG_ON(!page); from = kmap_atomic(page + (mem_off >> PAGE_SHIFT)); page_addr = from; from += mem_off & ~PAGE_MASK; tlen = min(tlen, (size_t)(PAGE_SIZE - (mem_off & ~PAGE_MASK))); memcpy(to, from, tlen); kunmap_atomic(page_addr); to += tlen; } mem_off += tlen; mem_len -= tlen; frame_len -= tlen; remaining -= tlen; if (frame_len && (skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN)) continue; if (!remaining) f_ctl |= FC_FC_END_SEQ; fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid, FC_TYPE_FCP, f_ctl, fh_off); error = lport->tt.seq_send(lport, cmd->seq, fp); if (error) { /* XXX For now, initiator will retry */ pr_err_ratelimited("%s: Failed to send frame %p, " "xid <0x%x>, remaining %zu, " "lso_max <0x%x>\n", __func__, fp, ep->xid, remaining, lport->lso_max); } } return ft_queue_status(se_cmd); }
/** Copy some data bits from a kernel buffer to an skb. * Derived in the obvious way from skb_copy_bits(). */ int skb_put_bits(const struct sk_buff *skb, int offset, void *src, int len) { int i, copy; int start = skb->len - skb->data_len; if (offset > (int)skb->len-len) goto fault; /* Copy header. */ if ((copy = start-offset) > 0) { if (copy > len) copy = len; memcpy(skb->data + offset, src, copy); if ((len -= copy) == 0) return 0; offset += copy; src += copy; } #ifdef __KERNEL__ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int end; BUG_TRAP(start <= offset+len); end = start + skb_shinfo(skb)->frags[i].size; if ((copy = end-offset) > 0) { u8 *vaddr; if (copy > len) copy = len; vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]); memcpy(vaddr + skb_shinfo(skb)->frags[i].page_offset + offset - start, src, copy); kunmap_skb_frag(vaddr); if ((len -= copy) == 0) return 0; offset += copy; src += copy; } start = end; } if (skb_shinfo(skb)->frag_list) { struct sk_buff *list; for (list = skb_shinfo(skb)->frag_list; list; list=list->next) { int end; BUG_TRAP(start <= offset+len); end = start + list->len; if ((copy = end-offset) > 0) { if (copy > len) copy = len; if (skb_put_bits(list, offset-start, src, copy)) goto fault; if ((len -= copy) == 0) return 0; offset += copy; src += copy; } start = end; } } #else i=0; #endif if (len == 0) return 0; fault: return -EFAULT; }
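/*
 * Usage sketch (hypothetical caller): zero a range of an skb's payload in
 * fixed-size chunks via skb_put_bits(), propagating -EFAULT when the skb
 * is shorter than offset + len.
 */
static int zero_payload_range(struct sk_buff *skb, int offset, int len)
{
	u8 zeros[64];
	int chunk;

	memset(zeros, 0, sizeof(zeros));
	while (len > 0) {
		chunk = len < (int)sizeof(zeros) ? len : (int)sizeof(zeros);
		if (skb_put_bits(skb, offset, zeros, chunk))
			return -EFAULT;
		offset += chunk;
		len -= chunk;
	}
	return 0;
}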
static void xennet_alloc_rx_buffers(struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; struct page *page; int i, batch_target, notify; RING_IDX req_prod = np->rx.req_prod_pvt; grant_ref_t ref; unsigned long pfn; void *vaddr; struct xen_netif_rx_request *req; if (unlikely(!netif_carrier_ok(dev))) return; batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) goto no_skb; skb_reserve(skb, NET_IP_ALIGN); page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!page) { kfree_skb(skb); no_skb: if (i != 0) goto refill; mod_timer(&np->rx_refill_timer, jiffies + (HZ/10)); break; } __skb_fill_page_desc(skb, 0, page, 0, 0); skb_shinfo(skb)->nr_frags = 1; __skb_queue_tail(&np->rx_batch, skb); } if (i < (np->rx_target/2)) { if (req_prod > np->rx.sring->req_prod) goto push; return; } if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) && ((np->rx_target *= 2) > np->rx_max_target)) np->rx_target = np->rx_max_target; refill: for (i = 0; ; i++) { skb = __skb_dequeue(&np->rx_batch); if (skb == NULL) break; skb->dev = dev; id = xennet_rxidx(req_prod + i); BUG_ON(np->rx_skbs[id]); np->rx_skbs[id] = skb; ref = gnttab_claim_grant_reference(&np->gref_rx_head); BUG_ON((signed short)ref < 0); np->grant_rx_ref[id] = ref; pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0])); vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0])); req = RING_GET_REQUEST(&np->rx, req_prod + i); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, pfn_to_mfn(pfn), 0); req->id = id; req->gref = ref; } wmb(); np->rx.req_prod_pvt = req_prod + i; push: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); if (notify) notify_remote_via_irq(np->netdev->irq); }
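/*
 * Sketch (assumption): xennet_rxidx(), used above to map a ring request
 * index onto the rx_skbs/grant_rx_ref bookkeeping arrays, is taken to be
 * a simple power-of-two wrap of the ring index.
 */
static int xennet_rxidx_sketch(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}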
/** Convert a (possibly fragmented) skb into a scatter list. * * @param skb skb to convert * @param sg scatterlist to set up * @param sg_n size of sg on input, number of elements set on output * @param offset offset into data to start at * @param len number of bytes * @return 0 on success, error code otherwise */ int skb_scatterlist(struct sk_buff *skb, struct scatterlist *sg, int *sg_n, int offset, int len){ int err = 0; int start; // No. of bytes copied so far (where next copy starts). int size; // Size of the next chunk. int end; // Where the next chunk ends (start + size). int copy; // Number of bytes to copy in one operation. int sg_i = 0; // Index into sg. int i; if(DEBUG_SCATTERLIST){ dprintf("> offset=%d len=%d (end=%d), skb len=%d,\n", offset, len, offset+len, skb->len); } start = 0; size = skb_headlen(skb); end = start + size; copy = end - offset; if(copy > 0){ char *p; if(copy > len) copy = len; if(sg_i >= *sg_n){ err = -EINVAL; goto exit; } p = skb->data + offset; SET_SCATTER_ADDR(sg[sg_i], NULL); sg[sg_i].page = virt_to_page(p); sg[sg_i].offset = ((unsigned long)p & ~PAGE_MASK); sg[sg_i].length = copy; if(DEBUG_SCATTERLIST){ dprintf("> sg_i=%d .page=%p .offset=%u .length=%d\n", sg_i, sg[sg_i].page, sg[sg_i].offset, sg[sg_i].length); } sg_i++; if((len -= copy) == 0) goto exit; offset += copy; } start = end; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++){ BUG_TRAP(start <= offset + len); size = skb_shinfo(skb)->frags[i].size; end = start + size; copy = end - offset; if(copy > 0){ skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; if(copy > len) copy = len; if(sg_i >= *sg_n){ err = -EINVAL; goto exit; } SET_SCATTER_ADDR(sg[sg_i], NULL); sg[sg_i].page = frag->page; sg[sg_i].offset = frag->page_offset + offset - start; sg[sg_i].length = copy; if(DEBUG_SCATTERLIST){ dprintf("> sg_i=%d .page=%p .offset=%u .length=%d\n", sg_i, sg[sg_i].page, sg[sg_i].offset, sg[sg_i].length); } sg_i++; if((len -= copy) == 0) goto exit; offset += copy; } start = end; } exit: if(!err) *sg_n = sg_i; if(len) wprintf("> len=%d\n", len); if(len) BUG(); if(err) dprintf("< err=%d sg_n=%d\n", err, *sg_n); return err; }
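/*
 * A minimal caller sketch (hypothetical helper): map an entire skb with
 * skb_scatterlist() above.  It assumes the skb carries no frag_list,
 * which this helper does not walk, so an array of MAX_SKB_FRAGS + 1
 * entries (linear head plus every page fragment) is always enough.
 */
#include <linux/skbuff.h>

static int map_whole_skb(struct sk_buff *skb, struct scatterlist *sg)
{
	int sg_n = MAX_SKB_FRAGS + 1;	/* caller provides an array this big */
	int err;

	err = skb_scatterlist(skb, sg, &sg_n, 0, skb->len);
	if (err)
		return err;

	/* On success sg_n holds the number of entries actually filled in;
	 * hand sg/sg_n to whatever consumes the list (crypto, DMA, ...).
	 */
	return sg_n;
}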
static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct netfront_stats *stats = this_cpu_ptr(np->stats); struct xen_netif_tx_request *tx; struct xen_netif_extra_info *extra; char *data = skb->data; RING_IDX i; grant_ref_t ref; unsigned long mfn; int notify; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); unsigned long flags; frags += DIV_ROUND_UP(offset + len, PAGE_SIZE); if (unlikely(frags > MAX_SKB_FRAGS + 1)) { printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n", frags); dump_stack(); goto drop; } spin_lock_irqsave(&np->tx_lock, flags); if (unlikely(!netif_carrier_ok(dev) || (frags > 1 && !xennet_can_sg(dev)) || netif_needs_gso(skb, netif_skb_features(skb)))) { spin_unlock_irqrestore(&np->tx_lock, flags); goto drop; } i = np->tx.req_prod_pvt; id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); np->tx_skbs[id].skb = skb; tx = RING_GET_REQUEST(&np->tx, i); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; extra = NULL; tx->flags = 0; if (skb->ip_summed == CHECKSUM_PARTIAL) tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated; else if (skb->ip_summed == CHECKSUM_UNNECESSARY) tx->flags |= XEN_NETTXF_data_validated; if (skb_shinfo(skb)->gso_size) { struct xen_netif_extra_info *gso; gso = (struct xen_netif_extra_info *) RING_GET_REQUEST(&np->tx, ++i); if (extra) extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; else tx->flags |= XEN_NETTXF_extra_info; gso->u.gso.size = skb_shinfo(skb)->gso_size; gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.pad = 0; gso->u.gso.features = 0; gso->type = XEN_NETIF_EXTRA_TYPE_GSO; gso->flags = 0; extra = gso; } np->tx.req_prod_pvt = i + 1; xennet_make_frags(skb, dev, tx); tx->size = skb->len; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify); if (notify) notify_remote_via_irq(np->netdev->irq); u64_stats_update_begin(&stats->syncp); stats->tx_bytes += skb->len; stats->tx_packets++; u64_stats_update_end(&stats->syncp); xennet_tx_buf_gc(dev); if (!netfront_tx_slot_available(np)) netif_stop_queue(dev); spin_unlock_irqrestore(&np->tx_lock, flags); return NETDEV_TX_OK; drop: dev->stats.tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; }
void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm, int offset, int len, icv_update_fn_t icv_update) { int start = skb_headlen(skb); int i, copy = start - offset; struct scatterlist sg; /* Checksum header. */ if (copy > 0) { if (copy > len) copy = len; sg.page = virt_to_page(skb->data + offset); sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE; sg.length = copy; icv_update(tfm, &sg, 1); if ((len -= copy) == 0) return; offset += copy; } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int end; BUG_TRAP(start <= offset + len); end = start + skb_shinfo(skb)->frags[i].size; if ((copy = end - offset) > 0) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; if (copy > len) copy = len; sg.page = frag->page; sg.offset = frag->page_offset + offset-start; sg.length = copy; icv_update(tfm, &sg, 1); if (!(len -= copy)) return; offset += copy; } start = end; } if (skb_shinfo(skb)->frag_list) { struct sk_buff *list = skb_shinfo(skb)->frag_list; for (; list; list = list->next) { int end; BUG_TRAP(start <= offset + len); end = start + list->len; if ((copy = end - offset) > 0) { if (copy > len) copy = len; skb_icv_walk(list, tfm, offset-start, copy, icv_update); if ((len -= copy) == 0) return; offset += copy; } start = end; } } if (len) BUG(); }
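/*
 * For context, a sketch of how this walker was typically driven against
 * the legacy (pre-ahash) crypto interface it was written for, loosely
 * modelled on the old AH output path rather than copied from it.  The
 * key and its length come from whatever security association the caller
 * holds; crypto_hmac_update() already has the (tfm, sg, nsg) signature
 * that icv_update_fn_t expects, so it can be passed in directly.
 */
#include <linux/crypto.h>
#include <linux/skbuff.h>

static void packet_hmac(struct sk_buff *skb, struct crypto_tfm *tfm,
			u8 *key, unsigned int key_len, u8 *icv_out)
{
	crypto_hmac_init(tfm, key, &key_len);
	skb_icv_walk(skb, tfm, 0, skb->len, crypto_hmac_update);
	crypto_hmac_final(tfm, key, &key_len, icv_out);
}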
static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev) { struct fm10k_intfc *interface = netdev_priv(dev); unsigned int r_idx = skb->queue_mapping; int err; if ((skb->protocol == htons(ETH_P_8021Q)) && !skb_vlan_tag_present(skb)) { /* FM10K only supports hardware tagging, any tags in frame * are considered 2nd level or "outer" tags */ struct vlan_hdr *vhdr; __be16 proto; /* make sure skb is not shared */ skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) return NETDEV_TX_OK; /* make sure there is enough room to move the ethernet header */ if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN))) return NETDEV_TX_OK; /* verify the skb head is not shared */ err = skb_cow_head(skb, 0); if (err) return NETDEV_TX_OK; /* locate vlan header */ vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); /* pull the 2 key pieces of data out of it */ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(vhdr->h_vlan_TCI)); proto = vhdr->h_vlan_encapsulated_proto; skb->protocol = (ntohs(proto) >= 1536) ? proto : htons(ETH_P_802_2); /* squash it by moving the ethernet addresses up 4 bytes */ memmove(skb->data + VLAN_HLEN, skb->data, 12); __skb_pull(skb, VLAN_HLEN); skb_reset_mac_header(skb); } /* The minimum packet size for a single buffer is 17B so pad the skb * in order to meet this minimum size requirement. */ if (unlikely(skb->len < 17)) { int pad_len = 17 - skb->len; if (skb_pad(skb, pad_len)) return NETDEV_TX_OK; __skb_put(skb, pad_len); } /* prepare packet for hardware time stamping */ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) fm10k_ts_tx_enqueue(interface, skb); if (r_idx >= interface->num_tx_queues) r_idx %= interface->num_tx_queues; err = fm10k_xmit_frame_ring(skb, interface->tx_ring[r_idx]); return err; }
int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) { int start = skb_headlen(skb); int i, copy = start - offset; int elt = 0; if (copy > 0) { if (copy > len) copy = len; sg[elt].page = virt_to_page(skb->data + offset); sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE; sg[elt].length = copy; elt++; if ((len -= copy) == 0) return elt; offset += copy; } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int end; BUG_TRAP(start <= offset + len); end = start + skb_shinfo(skb)->frags[i].size; if ((copy = end - offset) > 0) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; if (copy > len) copy = len; sg[elt].page = frag->page; sg[elt].offset = frag->page_offset+offset-start; sg[elt].length = copy; elt++; if (!(len -= copy)) return elt; offset += copy; } start = end; } if (skb_shinfo(skb)->frag_list) { struct sk_buff *list = skb_shinfo(skb)->frag_list; for (; list; list = list->next) { int end; BUG_TRAP(start <= offset + len); end = start + list->len; if ((copy = end - offset) > 0) { if (copy > len) copy = len; elt += skb_to_sgvec(list, sg+elt, offset - start, copy); if ((len -= copy) == 0) return elt; offset += copy; } start = end; } } if (len) BUG(); return elt; }
static int netdev_send(struct vport *vport, struct sk_buff *skb) { struct netdev_vport *netdev_vport = netdev_vport_priv(vport); int mtu = netdev_vport->dev->mtu; int len; if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) { net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n", netdev_vport->dev->name, packet_length(skb), mtu); goto error; } if (unlikely(skb_warn_if_lro(skb))) goto error; skb->dev = netdev_vport->dev; forward_ip_summed(skb, true); if (vlan_tx_tag_present(skb) && !dev_supports_vlan_tx(skb->dev)) { int features; features = netif_skb_features(skb); if (!vlan_tso) features &= ~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO | NETIF_F_FSO); if (netif_needs_gso(skb, features)) { struct sk_buff *nskb; nskb = skb_gso_segment(skb, features); if (!nskb) { if (unlikely(skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) { kfree_skb(skb); return 0; } skb_shinfo(skb)->gso_type &= ~SKB_GSO_DODGY; goto tag; } if (IS_ERR(nskb)) { kfree_skb(skb); return 0; } consume_skb(skb); skb = nskb; len = 0; do { nskb = skb->next; skb->next = NULL; skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); if (likely(skb)) { len += skb->len; vlan_set_tci(skb, 0); dev_queue_xmit(skb); } skb = nskb; } while (skb); return len; } tag: skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); if (unlikely(!skb)) return 0; vlan_set_tci(skb, 0); } len = skb->len; dev_queue_xmit(skb); return len; error: kfree_skb(skb); ovs_vport_record_error(vport, VPORT_E_TX_DROPPED); return 0; }
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) { int copyflag; int elt; struct sk_buff *skb1, **skb_p; /* If skb is cloned or its head is paged, reallocate * head pulling out all the pages (pages are considered not writable * at the moment even if they are anonymous). */ if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) return -ENOMEM; /* Easy case. Most of packets will go this way. */ if (!skb_shinfo(skb)->frag_list) { /* A little of trouble, not enough of space for trailer. * This should not happen, when stack is tuned to generate * good frames. OK, on miss we reallocate and reserve even more * space, 128 bytes is fair. */ if (skb_tailroom(skb) < tailbits && pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) return -ENOMEM; /* Voila! */ *trailer = skb; return 1; } /* Misery. We are in troubles, going to mincer fragments... */ elt = 1; skb_p = &skb_shinfo(skb)->frag_list; copyflag = 0; while ((skb1 = *skb_p) != NULL) { int ntail = 0; /* The fragment is partially pulled by someone, * this can happen on input. Copy it and everything * after it. */ if (skb_shared(skb1)) copyflag = 1; /* If the skb is the last, worry about trailer. */ if (skb1->next == NULL && tailbits) { if (skb_shinfo(skb1)->nr_frags || skb_shinfo(skb1)->frag_list || skb_tailroom(skb1) < tailbits) ntail = tailbits + 128; } if (copyflag || skb_cloned(skb1) || ntail || skb_shinfo(skb1)->nr_frags || skb_shinfo(skb1)->frag_list) { struct sk_buff *skb2; /* Fuck, we are miserable poor guys... */ if (ntail == 0) skb2 = skb_copy(skb1, GFP_ATOMIC); else skb2 = skb_copy_expand(skb1, skb_headroom(skb1), ntail, GFP_ATOMIC); if (unlikely(skb2 == NULL)) return -ENOMEM; if (skb1->sk) skb_set_owner_w(skb2, skb1->sk); /* Looking around. Are we still alive? * OK, link new skb, drop old one */ skb2->next = skb1->next; *skb_p = skb2; kfree_skb(skb1); skb1 = skb2; } elt++; *trailer = skb1; skb_p = &skb1->next; } return elt; }
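/*
 * skb_to_sgvec() and skb_cow_data() above are normally used back to
 * back, in the pattern the old IPsec transforms followed: skb_cow_data()
 * makes every byte of the payload writable and reports how many
 * scatterlist slots will be needed, then skb_to_sgvec() fills them in.
 * A loose sketch under those assumptions; the do_crypto callback is a
 * placeholder, not part of the original code.
 */
#include <linux/skbuff.h>
#include <linux/slab.h>

static int transform_payload(struct sk_buff *skb, int offset, int len,
			     int trailer_len,
			     void (*do_crypto)(struct scatterlist *sg, int nsg))
{
	struct sk_buff *trailer;
	struct scatterlist *sg;
	int nfrags;

	/* Number of buffers the (now writable) data spans, or -ENOMEM. */
	nfrags = skb_cow_data(skb, trailer_len, &trailer);
	if (nfrags < 0)
		return nfrags;

	sg = kmalloc(nfrags * sizeof(*sg), GFP_ATOMIC);
	if (!sg)
		return -ENOMEM;

	/* skb_to_sgvec() returns how many entries it actually used. */
	do_crypto(sg, skb_to_sgvec(skb, sg, offset, len));

	kfree(sg);
	return 0;
}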
static void receive_skb(struct net_device *dev, struct sk_buff *skb, unsigned len) { struct virtio_net_hdr *hdr = skb_vnet_hdr(skb); if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { pr_debug("%s: short packet %i\n", dev->name, len); dev->stats.rx_length_errors++; goto drop; } len -= sizeof(struct virtio_net_hdr); BUG_ON(len > MAX_PACKET_LEN); skb_trim(skb, len); skb->protocol = eth_type_trans(skb, dev); pr_debug("Receiving skb proto 0x%04x len %i type %i\n", ntohs(skb->protocol), skb->len, skb->pkt_type); dev->stats.rx_bytes += skb->len; dev->stats.rx_packets++; if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { pr_debug("Needs csum!\n"); if (!skb_partial_csum_set(skb,hdr->csum_start,hdr->csum_offset)) goto frame_err; } if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { pr_debug("GSO!\n"); switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { case VIRTIO_NET_HDR_GSO_TCPV4: skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; break; case VIRTIO_NET_HDR_GSO_UDP: skb_shinfo(skb)->gso_type = SKB_GSO_UDP; break; case VIRTIO_NET_HDR_GSO_TCPV6: skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; break; default: if (net_ratelimit()) printk(KERN_WARNING "%s: bad gso type %u.\n", dev->name, hdr->gso_type); goto frame_err; } if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; skb_shinfo(skb)->gso_size = hdr->gso_size; if (skb_shinfo(skb)->gso_size == 0) { if (net_ratelimit()) printk(KERN_WARNING "%s: zero gso size.\n", dev->name); goto frame_err; } /* Header must be checked, and gso_segs computed. */ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; skb_shinfo(skb)->gso_segs = 0; } netif_receive_skb(skb); return; frame_err: dev->stats.rx_frame_errors++; drop: dev_kfree_skb(skb); }
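/*
 * For symmetry, a sketch of the transmit-side mirror of the header
 * parsing above, loosely following how virtio_net of the same era
 * filled in struct virtio_net_hdr from skb_shinfo(); it assumes the
 * plain (pre-__virtio16) header layout that receive_skb() also uses.
 */
#include <linux/skbuff.h>
#include <linux/virtio_net.h>

static void fill_vnet_hdr(const struct sk_buff *skb, struct virtio_net_hdr *hdr)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = skb->csum_start - skb_headroom(skb);
		hdr->csum_offset = skb->csum_offset;
	} else {
		hdr->flags = 0;
		hdr->csum_start = hdr->csum_offset = 0;
	}

	hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
	hdr->hdr_len = 0;
	hdr->gso_size = 0;

	if (skb_is_gso(skb)) {
		hdr->hdr_len = skb_transport_header(skb) - skb->data;
		hdr->gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	}
}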
static void xennet_alloc_rx_buffers(struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; struct page *page; int i, batch_target, notify; RING_IDX req_prod = np->rx.req_prod_pvt; grant_ref_t ref; unsigned long pfn; void *vaddr; struct xen_netif_rx_request *req; if (unlikely(!netif_carrier_ok(dev))) return; /* * Allocate skbuffs greedily, even though we batch updates to the * receive ring. This creates a less bursty demand on the memory * allocator, so should reduce the chance of failed allocation requests * both for ourself and for other kernel subsystems. */ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) goto no_skb; /* Align ip header to a 16 bytes boundary */ skb_reserve(skb, NET_IP_ALIGN); page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!page) { kfree_skb(skb); no_skb: /* Any skbuffs queued for refill? Force them out. */ if (i != 0) goto refill; /* Could not allocate any skbuffs. Try again later. */ mod_timer(&np->rx_refill_timer, jiffies + (HZ/10)); break; } __skb_fill_page_desc(skb, 0, page, 0, 0); skb_shinfo(skb)->nr_frags = 1; __skb_queue_tail(&np->rx_batch, skb); } /* Is the batch large enough to be worthwhile? */ if (i < (np->rx_target/2)) { if (req_prod > np->rx.sring->req_prod) goto push; return; } /* Adjust our fill target if we risked running out of buffers. */ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) && ((np->rx_target *= 2) > np->rx_max_target)) np->rx_target = np->rx_max_target; refill: for (i = 0; ; i++) { skb = __skb_dequeue(&np->rx_batch); if (skb == NULL) break; skb->dev = dev; id = xennet_rxidx(req_prod + i); BUG_ON(np->rx_skbs[id]); np->rx_skbs[id] = skb; ref = gnttab_claim_grant_reference(&np->gref_rx_head); BUG_ON((signed short)ref < 0); np->grant_rx_ref[id] = ref; pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0])); vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0])); req = RING_GET_REQUEST(&np->rx, req_prod + i); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, pfn_to_mfn(pfn), 0); req->id = id; req->gref = ref; } wmb(); /* barrier so backend sees requests */ np->rx.req_prod_pvt = req_prod + i; push: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); if (notify) notify_remote_via_irq(np->netdev->irq); }
/** * skb_copy_datagram_iter - Copy a datagram to an iovec iterator. * @skb: buffer to copy * @offset: offset in the buffer to start copying from * @to: iovec iterator to copy to * @len: amount of data to copy from buffer to iovec */ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset, struct iov_iter *to, int len) { int start = skb_headlen(skb); int i, copy = start - offset, start_off = offset, n; struct sk_buff *frag_iter; trace_skb_copy_datagram_iovec(skb, len); /* Copy header. */ if (copy > 0) { if (copy > len) copy = len; n = copy_to_iter(skb->data + offset, copy, to); offset += n; if (n != copy) goto short_copy; if ((len -= copy) == 0) return 0; } /* Copy paged appendix. Hmm... why does this look so complicated? */ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int end; const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; WARN_ON(start > offset + len); end = start + skb_frag_size(frag); if ((copy = end - offset) > 0) { if (copy > len) copy = len; n = copy_page_to_iter(skb_frag_page(frag), frag->page_offset + offset - start, copy, to); offset += n; if (n != copy) goto short_copy; if (!(len -= copy)) return 0; } start = end; } skb_walk_frags(skb, frag_iter) { int end; WARN_ON(start > offset + len); end = start + frag_iter->len; if ((copy = end - offset) > 0) { if (copy > len) copy = len; if (skb_copy_datagram_iter(frag_iter, offset - start, to, copy)) goto fault; if ((len -= copy) == 0) return 0; offset += copy; } start = end; }
if (!len) return 0; /* This is not really a user copy fault, but rather someone * gave us a bogus length on the skb. We should probably * print a warning here as it may indicate a kernel bug. */ fault: iov_iter_revert(to, offset - start_off); return -EFAULT; short_copy: if (iov_iter_count(to)) goto fault; return 0; }
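/*
 * A small caller sketch (hypothetical names): a recvmsg-style path
 * handing the payload of a queued skb, past 'hdr_len' bytes of protocol
 * header, to the iov_iter carried in the caller's msghdr.  This is
 * essentially what the in-tree skb_copy_datagram_msg() wrapper does.
 */
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/uio.h>

static int copy_payload_to_msg(const struct sk_buff *skb, unsigned int hdr_len,
			       struct msghdr *msg, size_t len)
{
	size_t chunk = min_t(size_t, len, skb->len - hdr_len);

	return skb_copy_datagram_iter(skb, hdr_len, &msg->msg_iter, chunk);
}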
static void greth_clean_rings(struct greth_private *greth) { int i; struct greth_bd *rx_bdp = greth->rx_bd_base; struct greth_bd *tx_bdp = greth->tx_bd_base; if (greth->gbit_mac) { /* Free and unmap RX buffers */ for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) { if (greth->rx_skbuff[i] != NULL) { dev_kfree_skb(greth->rx_skbuff[i]); dma_unmap_single(greth->dev, greth_read_bd(&rx_bdp->addr), MAX_FRAME_SIZE+NET_IP_ALIGN, DMA_FROM_DEVICE); } } /* TX buffers */ while (greth->tx_free < GRETH_TXBD_NUM) { struct sk_buff *skb = greth->tx_skbuff[greth->tx_last]; int nr_frags = skb_shinfo(skb)->nr_frags; tx_bdp = greth->tx_bd_base + greth->tx_last; greth->tx_last = NEXT_TX(greth->tx_last); dma_unmap_single(greth->dev, greth_read_bd(&tx_bdp->addr), skb_headlen(skb), DMA_TO_DEVICE); for (i = 0; i < nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; tx_bdp = greth->tx_bd_base + greth->tx_last; dma_unmap_page(greth->dev, greth_read_bd(&tx_bdp->addr), frag->size, DMA_TO_DEVICE); greth->tx_last = NEXT_TX(greth->tx_last); } greth->tx_free += nr_frags+1; dev_kfree_skb(skb); } } else { /* 10/100 Mbps MAC */ for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) { kfree(greth->rx_bufs[i]); dma_unmap_single(greth->dev, greth_read_bd(&rx_bdp->addr), MAX_FRAME_SIZE, DMA_FROM_DEVICE); } for (i = 0; i < GRETH_TXBD_NUM; i++, tx_bdp++) { kfree(greth->tx_bufs[i]); dma_unmap_single(greth->dev, greth_read_bd(&tx_bdp->addr), MAX_FRAME_SIZE, DMA_TO_DEVICE); } } }